problem_id
stringlengths 18
21
| source
stringclasses 1
value | task_type
stringclasses 1
value | in_source_id
stringlengths 13
54
| prompt
stringlengths 1.28k
64.2k
| golden_diff
stringlengths 166
811
| verification_info
stringlengths 604
118k
|
---|---|---|---|---|---|---|
gh_patches_debug_1500 | rasdani/github-patches | git_diff | aws__aws-cli-4397 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
codecommit: tag-resource does not permit `:` character in tag key
The following command incorrectly returns an error even though colon characters (`:`) in tag keys are legal:
```
aws codecommit tag-resource --resource-arn <arn> --tags namespace:KeyName=value
```
Expected behavior: a new tag is applied to the resource with the name `namespace:KeyName` and value `value`.
Actual behavior:
```
Error parsing parameter '--tags': Expected: '=', received: ':' for input:
namespace:KeyName=value
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awscli/shorthand.py`
Content:
```
1 # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 """Module for parsing shorthand syntax.
14
15 This module parses any CLI options that use a "shorthand"
16 syntax::
17
18 --foo A=b,C=d
19 |------|
20 |
21 Shorthand syntax
22
23
24 This module provides two main classes to do this.
25 First, there's a ``ShorthandParser`` class. This class works
26 on a purely syntactic level. It looks only at the string value
27 provided to it in order to figure out how the string should be parsed.
28
29 However, because there was a pre-existing shorthand parser, we need
30 to remain backwards compatible with the previous parser. One of the
31 things the previous parser did was use the associated JSON model to
32 control how the expression was parsed.
33
34 In order to accommodate this a post processing class is provided that
35 takes the parsed values from the ``ShorthandParser`` as well as the
36 corresponding JSON model for the CLI argument and makes any adjustments
37 necessary to maintain backwards compatibility. This is done in the
38 ``BackCompatVisitor`` class.
39
40 """
41 import re
42 import string
43
44
45 _EOF = object()
46
47
48 class _NamedRegex(object):
49 def __init__(self, name, regex_str):
50 self.name = name
51 self.regex = re.compile(regex_str, re.UNICODE)
52
53 def match(self, value):
54 return self.regex.match(value)
55
56
57 class ShorthandParseError(Exception):
58 def __init__(self, value, expected, actual, index):
59 self.value = value
60 self.expected = expected
61 self.actual = actual
62 self.index = index
63 msg = self._construct_msg()
64 super(ShorthandParseError, self).__init__(msg)
65
66 def _construct_msg(self):
67 consumed, remaining, num_spaces = self.value, '', self.index
68 if '\n' in self.value[:self.index]:
69 # If there's newlines in the consumed expression, we want
70 # to make sure we're only counting the spaces
71 # from the last newline:
72 # foo=bar,\n
73 # bar==baz
74 # ^
75 last_newline = self.value[:self.index].rindex('\n')
76 num_spaces = self.index - last_newline - 1
77 if '\n' in self.value[self.index:]:
78 # If there's newline in the remaining, divide value
79 # into consumed and remainig
80 # foo==bar,\n
81 # ^
82 # bar=baz
83 next_newline = self.index + self.value[self.index:].index('\n')
84 consumed = self.value[:next_newline]
85 remaining = self.value[next_newline:]
86 msg = (
87 "Expected: '%s', received: '%s' for input:\n"
88 "%s\n"
89 "%s"
90 "%s"
91 ) % (self.expected, self.actual, consumed,
92 ' ' * num_spaces + '^', remaining)
93 return msg
94
95
96 class ShorthandParser(object):
97 """Parses shorthand syntax in the CLI.
98
99 Note that this parser does not rely on any JSON models to control
100 how to parse the shorthand syntax.
101
102 """
103
104 _SINGLE_QUOTED = _NamedRegex('singled quoted', r'\'(?:\\\\|\\\'|[^\'])*\'')
105 _DOUBLE_QUOTED = _NamedRegex('double quoted', r'"(?:\\\\|\\"|[^"])*"')
106 _START_WORD = u'\!\#-&\(-\+\--\<\>-Z\\\\-z\u007c-\uffff'
107 _FIRST_FOLLOW_CHARS = u'\s\!\#-&\(-\+\--\\\\\^-\|~-\uffff'
108 _SECOND_FOLLOW_CHARS = u'\s\!\#-&\(-\+\--\<\>-\uffff'
109 _ESCAPED_COMMA = '(\\\\,)'
110 _FIRST_VALUE = _NamedRegex(
111 'first',
112 u'({escaped_comma}|[{start_word}])'
113 u'({escaped_comma}|[{follow_chars}])*'.format(
114 escaped_comma=_ESCAPED_COMMA,
115 start_word=_START_WORD,
116 follow_chars=_FIRST_FOLLOW_CHARS,
117 ))
118 _SECOND_VALUE = _NamedRegex(
119 'second',
120 u'({escaped_comma}|[{start_word}])'
121 u'({escaped_comma}|[{follow_chars}])*'.format(
122 escaped_comma=_ESCAPED_COMMA,
123 start_word=_START_WORD,
124 follow_chars=_SECOND_FOLLOW_CHARS,
125 ))
126
127 def __init__(self):
128 self._tokens = []
129
130 def parse(self, value):
131 """Parse shorthand syntax.
132
133 For example::
134
135 parser = ShorthandParser()
136 parser.parse('a=b') # {'a': 'b'}
137 parser.parse('a=b,c') # {'a': ['b', 'c']}
138
139 :tpye value: str
140 :param value: Any value that needs to be parsed.
141
142 :return: Parsed value, which will be a dictionary.
143 """
144 self._input_value = value
145 self._index = 0
146 return self._parameter()
147
148 def _parameter(self):
149 # parameter = keyval *("," keyval)
150 params = {}
151 params.update(self._keyval())
152 while self._index < len(self._input_value):
153 self._expect(',', consume_whitespace=True)
154 params.update(self._keyval())
155 return params
156
157 def _keyval(self):
158 # keyval = key "=" [values]
159 key = self._key()
160 self._expect('=', consume_whitespace=True)
161 values = self._values()
162 return {key: values}
163
164 def _key(self):
165 # key = 1*(alpha / %x30-39 / %x5f / %x2e / %x23) ; [a-zA-Z0-9\-_.#/]
166 valid_chars = string.ascii_letters + string.digits + '-_.#/'
167 start = self._index
168 while not self._at_eof():
169 if self._current() not in valid_chars:
170 break
171 self._index += 1
172 return self._input_value[start:self._index]
173
174 def _values(self):
175 # values = csv-list / explicit-list / hash-literal
176 if self._at_eof():
177 return ''
178 elif self._current() == '[':
179 return self._explicit_list()
180 elif self._current() == '{':
181 return self._hash_literal()
182 else:
183 return self._csv_value()
184
185 def _csv_value(self):
186 # Supports either:
187 # foo=bar -> 'bar'
188 # ^
189 # foo=bar,baz -> ['bar', 'baz']
190 # ^
191 first_value = self._first_value()
192 self._consume_whitespace()
193 if self._at_eof() or self._input_value[self._index] != ',':
194 return first_value
195 self._expect(',', consume_whitespace=True)
196 csv_list = [first_value]
197 # Try to parse remaining list values.
198 # It's possible we don't parse anything:
199 # a=b,c=d
200 # ^-here
201 # In the case above, we'll hit the ShorthandParser,
202 # backtrack to the comma, and return a single scalar
203 # value 'b'.
204 while True:
205 try:
206 current = self._second_value()
207 self._consume_whitespace()
208 if self._at_eof():
209 csv_list.append(current)
210 break
211 self._expect(',', consume_whitespace=True)
212 csv_list.append(current)
213 except ShorthandParseError:
214 # Backtrack to the previous comma.
215 # This can happen when we reach this case:
216 # foo=a,b,c=d,e=f
217 # ^-start
218 # foo=a,b,c=d,e=f
219 # ^-error, "expected ',' received '='
220 # foo=a,b,c=d,e=f
221 # ^-backtrack to here.
222 if self._at_eof():
223 raise
224 self._backtrack_to(',')
225 break
226 if len(csv_list) == 1:
227 # Then this was a foo=bar case, so we expect
228 # this to parse to a scalar value 'bar', i.e
229 # {"foo": "bar"} instead of {"bar": ["bar"]}
230 return first_value
231 return csv_list
232
233 def _value(self):
234 result = self._FIRST_VALUE.match(self._input_value[self._index:])
235 if result is not None:
236 consumed = self._consume_matched_regex(result)
237 return consumed.replace('\\,', ',').rstrip()
238 return ''
239
240 def _explicit_list(self):
241 # explicit-list = "[" [value *(",' value)] "]"
242 self._expect('[', consume_whitespace=True)
243 values = []
244 while self._current() != ']':
245 val = self._explicit_values()
246 values.append(val)
247 self._consume_whitespace()
248 if self._current() != ']':
249 self._expect(',')
250 self._consume_whitespace()
251 self._expect(']')
252 return values
253
254 def _explicit_values(self):
255 # values = csv-list / explicit-list / hash-literal
256 if self._current() == '[':
257 return self._explicit_list()
258 elif self._current() == '{':
259 return self._hash_literal()
260 else:
261 return self._first_value()
262
263 def _hash_literal(self):
264 self._expect('{', consume_whitespace=True)
265 keyvals = {}
266 while self._current() != '}':
267 key = self._key()
268 self._expect('=', consume_whitespace=True)
269 v = self._explicit_values()
270 self._consume_whitespace()
271 if self._current() != '}':
272 self._expect(',')
273 self._consume_whitespace()
274 keyvals[key] = v
275 self._expect('}')
276 return keyvals
277
278 def _first_value(self):
279 # first-value = value / single-quoted-val / double-quoted-val
280 if self._current() == "'":
281 return self._single_quoted_value()
282 elif self._current() == '"':
283 return self._double_quoted_value()
284 return self._value()
285
286 def _single_quoted_value(self):
287 # single-quoted-value = %x27 *(val-escaped-single) %x27
288 # val-escaped-single = %x20-26 / %x28-7F / escaped-escape /
289 # (escape single-quote)
290 return self._consume_quoted(self._SINGLE_QUOTED, escaped_char="'")
291
292 def _consume_quoted(self, regex, escaped_char=None):
293 value = self._must_consume_regex(regex)[1:-1]
294 if escaped_char is not None:
295 value = value.replace("\\%s" % escaped_char, escaped_char)
296 value = value.replace("\\\\", "\\")
297 return value
298
299 def _double_quoted_value(self):
300 return self._consume_quoted(self._DOUBLE_QUOTED, escaped_char='"')
301
302 def _second_value(self):
303 if self._current() == "'":
304 return self._single_quoted_value()
305 elif self._current() == '"':
306 return self._double_quoted_value()
307 else:
308 consumed = self._must_consume_regex(self._SECOND_VALUE)
309 return consumed.replace('\\,', ',').rstrip()
310
311 def _expect(self, char, consume_whitespace=False):
312 if consume_whitespace:
313 self._consume_whitespace()
314 if self._index >= len(self._input_value):
315 raise ShorthandParseError(self._input_value, char,
316 'EOF', self._index)
317 actual = self._input_value[self._index]
318 if actual != char:
319 raise ShorthandParseError(self._input_value, char,
320 actual, self._index)
321 self._index += 1
322 if consume_whitespace:
323 self._consume_whitespace()
324
325 def _must_consume_regex(self, regex):
326 result = regex.match(self._input_value[self._index:])
327 if result is not None:
328 return self._consume_matched_regex(result)
329 raise ShorthandParseError(self._input_value, '<%s>' % regex.name,
330 '<none>', self._index)
331
332 def _consume_matched_regex(self, result):
333 start, end = result.span()
334 v = self._input_value[self._index+start:self._index+end]
335 self._index += (end - start)
336 return v
337
338 def _current(self):
339 # If the index is at the end of the input value,
340 # then _EOF will be returned.
341 if self._index < len(self._input_value):
342 return self._input_value[self._index]
343 return _EOF
344
345 def _at_eof(self):
346 return self._index >= len(self._input_value)
347
348 def _backtrack_to(self, char):
349 while self._index >= 0 and self._input_value[self._index] != char:
350 self._index -= 1
351
352 def _consume_whitespace(self):
353 while self._current() != _EOF and self._current() in string.whitespace:
354 self._index += 1
355
356
357 class ModelVisitor(object):
358 def visit(self, params, model):
359 self._visit({}, model, '', params)
360
361 def _visit(self, parent, shape, name, value):
362 method = getattr(self, '_visit_%s' % shape.type_name,
363 self._visit_scalar)
364 method(parent, shape, name, value)
365
366 def _visit_structure(self, parent, shape, name, value):
367 if not isinstance(value, dict):
368 return
369 for member_name, member_shape in shape.members.items():
370 self._visit(value, member_shape, member_name,
371 value.get(member_name))
372
373 def _visit_list(self, parent, shape, name, value):
374 if not isinstance(value, list):
375 return
376 for i, element in enumerate(value):
377 self._visit(value, shape.member, i, element)
378
379 def _visit_map(self, parent, shape, name, value):
380 if not isinstance(value, dict):
381 return
382 value_shape = shape.value
383 for k, v in value.items():
384 self._visit(value, value_shape, k, v)
385
386 def _visit_scalar(self, parent, shape, name, value):
387 pass
388
389
390 class BackCompatVisitor(ModelVisitor):
391 def _visit_list(self, parent, shape, name, value):
392 if not isinstance(value, list):
393 # Convert a -> [a] because they specified
394 # "foo=bar", but "bar" should really be ["bar"].
395 if value is not None:
396 parent[name] = [value]
397 else:
398 return super(BackCompatVisitor, self)._visit_list(
399 parent, shape, name, value)
400
401 def _visit_scalar(self, parent, shape, name, value):
402 if value is None:
403 return
404 type_name = shape.type_name
405 if type_name in ['integer', 'long']:
406 parent[name] = int(value)
407 elif type_name in ['double', 'float']:
408 parent[name] = float(value)
409 elif type_name == 'boolean':
410 # We want to make sure we only set a value
411 # only if "true"/"false" is specified.
412 if value.lower() == 'true':
413 parent[name] = True
414 elif value.lower() == 'false':
415 parent[name] = False
416
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/awscli/shorthand.py b/awscli/shorthand.py
--- a/awscli/shorthand.py
+++ b/awscli/shorthand.py
@@ -163,7 +163,7 @@
def _key(self):
# key = 1*(alpha / %x30-39 / %x5f / %x2e / %x23) ; [a-zA-Z0-9\-_.#/]
- valid_chars = string.ascii_letters + string.digits + '-_.#/'
+ valid_chars = string.ascii_letters + string.digits + '-_.#/:'
start = self._index
while not self._at_eof():
if self._current() not in valid_chars:
| {"golden_diff": "diff --git a/awscli/shorthand.py b/awscli/shorthand.py\n--- a/awscli/shorthand.py\n+++ b/awscli/shorthand.py\n@@ -163,7 +163,7 @@\n \n def _key(self):\n # key = 1*(alpha / %x30-39 / %x5f / %x2e / %x23) ; [a-zA-Z0-9\\-_.#/]\n- valid_chars = string.ascii_letters + string.digits + '-_.#/'\n+ valid_chars = string.ascii_letters + string.digits + '-_.#/:'\n start = self._index\n while not self._at_eof():\n if self._current() not in valid_chars:\n", "issue": "codecommit: tag-resource does not permit `:` character in tag key\nThe following command incorrectly returns an error even though colon characters (`:`) in tag keys are legal:\r\n\r\n```\r\naws codecommit tag-resource --resource-arn <arn> --tags namespace:KeyName=value\r\n```\r\n\r\nExpected behavior: a new tag is applied to the resource with the name `namespace:KeyName` and value `value`.\r\n\r\nActual behavior:\r\n\r\n```\r\nError parsing parameter '--tags': Expected: '=', received: ':' for input:\r\nnamespace:KeyName=value\r\n```\n", "before_files": [{"content": "# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"Module for parsing shorthand syntax.\n\nThis module parses any CLI options that use a \"shorthand\"\nsyntax::\n\n --foo A=b,C=d\n |------|\n |\n Shorthand syntax\n\n\nThis module provides two main classes to do this.\nFirst, there's a ``ShorthandParser`` class. This class works\non a purely syntactic level. It looks only at the string value\nprovided to it in order to figure out how the string should be parsed.\n\nHowever, because there was a pre-existing shorthand parser, we need\nto remain backwards compatible with the previous parser. One of the\nthings the previous parser did was use the associated JSON model to\ncontrol how the expression was parsed.\n\nIn order to accommodate this a post processing class is provided that\ntakes the parsed values from the ``ShorthandParser`` as well as the\ncorresponding JSON model for the CLI argument and makes any adjustments\nnecessary to maintain backwards compatibility. This is done in the\n``BackCompatVisitor`` class.\n\n\"\"\"\nimport re\nimport string\n\n\n_EOF = object()\n\n\nclass _NamedRegex(object):\n def __init__(self, name, regex_str):\n self.name = name\n self.regex = re.compile(regex_str, re.UNICODE)\n\n def match(self, value):\n return self.regex.match(value)\n\n\nclass ShorthandParseError(Exception):\n def __init__(self, value, expected, actual, index):\n self.value = value\n self.expected = expected\n self.actual = actual\n self.index = index\n msg = self._construct_msg()\n super(ShorthandParseError, self).__init__(msg)\n\n def _construct_msg(self):\n consumed, remaining, num_spaces = self.value, '', self.index\n if '\\n' in self.value[:self.index]:\n # If there's newlines in the consumed expression, we want\n # to make sure we're only counting the spaces\n # from the last newline:\n # foo=bar,\\n\n # bar==baz\n # ^\n last_newline = self.value[:self.index].rindex('\\n')\n num_spaces = self.index - last_newline - 1\n if '\\n' in self.value[self.index:]:\n # If there's newline in the remaining, divide value\n # into consumed and remainig\n # foo==bar,\\n\n # ^\n # bar=baz\n next_newline = self.index + self.value[self.index:].index('\\n')\n consumed = self.value[:next_newline]\n remaining = self.value[next_newline:]\n msg = (\n \"Expected: '%s', received: '%s' for input:\\n\"\n \"%s\\n\"\n \"%s\"\n \"%s\"\n ) % (self.expected, self.actual, consumed,\n ' ' * num_spaces + '^', remaining)\n return msg\n\n\nclass ShorthandParser(object):\n \"\"\"Parses shorthand syntax in the CLI.\n\n Note that this parser does not rely on any JSON models to control\n how to parse the shorthand syntax.\n\n \"\"\"\n\n _SINGLE_QUOTED = _NamedRegex('singled quoted', r'\\'(?:\\\\\\\\|\\\\\\'|[^\\'])*\\'')\n _DOUBLE_QUOTED = _NamedRegex('double quoted', r'\"(?:\\\\\\\\|\\\\\"|[^\"])*\"')\n _START_WORD = u'\\!\\#-&\\(-\\+\\--\\<\\>-Z\\\\\\\\-z\\u007c-\\uffff'\n _FIRST_FOLLOW_CHARS = u'\\s\\!\\#-&\\(-\\+\\--\\\\\\\\\\^-\\|~-\\uffff'\n _SECOND_FOLLOW_CHARS = u'\\s\\!\\#-&\\(-\\+\\--\\<\\>-\\uffff'\n _ESCAPED_COMMA = '(\\\\\\\\,)'\n _FIRST_VALUE = _NamedRegex(\n 'first',\n u'({escaped_comma}|[{start_word}])'\n u'({escaped_comma}|[{follow_chars}])*'.format(\n escaped_comma=_ESCAPED_COMMA,\n start_word=_START_WORD,\n follow_chars=_FIRST_FOLLOW_CHARS,\n ))\n _SECOND_VALUE = _NamedRegex(\n 'second',\n u'({escaped_comma}|[{start_word}])'\n u'({escaped_comma}|[{follow_chars}])*'.format(\n escaped_comma=_ESCAPED_COMMA,\n start_word=_START_WORD,\n follow_chars=_SECOND_FOLLOW_CHARS,\n ))\n\n def __init__(self):\n self._tokens = []\n\n def parse(self, value):\n \"\"\"Parse shorthand syntax.\n\n For example::\n\n parser = ShorthandParser()\n parser.parse('a=b') # {'a': 'b'}\n parser.parse('a=b,c') # {'a': ['b', 'c']}\n\n :tpye value: str\n :param value: Any value that needs to be parsed.\n\n :return: Parsed value, which will be a dictionary.\n \"\"\"\n self._input_value = value\n self._index = 0\n return self._parameter()\n\n def _parameter(self):\n # parameter = keyval *(\",\" keyval)\n params = {}\n params.update(self._keyval())\n while self._index < len(self._input_value):\n self._expect(',', consume_whitespace=True)\n params.update(self._keyval())\n return params\n\n def _keyval(self):\n # keyval = key \"=\" [values]\n key = self._key()\n self._expect('=', consume_whitespace=True)\n values = self._values()\n return {key: values}\n\n def _key(self):\n # key = 1*(alpha / %x30-39 / %x5f / %x2e / %x23) ; [a-zA-Z0-9\\-_.#/]\n valid_chars = string.ascii_letters + string.digits + '-_.#/'\n start = self._index\n while not self._at_eof():\n if self._current() not in valid_chars:\n break\n self._index += 1\n return self._input_value[start:self._index]\n\n def _values(self):\n # values = csv-list / explicit-list / hash-literal\n if self._at_eof():\n return ''\n elif self._current() == '[':\n return self._explicit_list()\n elif self._current() == '{':\n return self._hash_literal()\n else:\n return self._csv_value()\n\n def _csv_value(self):\n # Supports either:\n # foo=bar -> 'bar'\n # ^\n # foo=bar,baz -> ['bar', 'baz']\n # ^\n first_value = self._first_value()\n self._consume_whitespace()\n if self._at_eof() or self._input_value[self._index] != ',':\n return first_value\n self._expect(',', consume_whitespace=True)\n csv_list = [first_value]\n # Try to parse remaining list values.\n # It's possible we don't parse anything:\n # a=b,c=d\n # ^-here\n # In the case above, we'll hit the ShorthandParser,\n # backtrack to the comma, and return a single scalar\n # value 'b'.\n while True:\n try:\n current = self._second_value()\n self._consume_whitespace()\n if self._at_eof():\n csv_list.append(current)\n break\n self._expect(',', consume_whitespace=True)\n csv_list.append(current)\n except ShorthandParseError:\n # Backtrack to the previous comma.\n # This can happen when we reach this case:\n # foo=a,b,c=d,e=f\n # ^-start\n # foo=a,b,c=d,e=f\n # ^-error, \"expected ',' received '='\n # foo=a,b,c=d,e=f\n # ^-backtrack to here.\n if self._at_eof():\n raise\n self._backtrack_to(',')\n break\n if len(csv_list) == 1:\n # Then this was a foo=bar case, so we expect\n # this to parse to a scalar value 'bar', i.e\n # {\"foo\": \"bar\"} instead of {\"bar\": [\"bar\"]}\n return first_value\n return csv_list\n\n def _value(self):\n result = self._FIRST_VALUE.match(self._input_value[self._index:])\n if result is not None:\n consumed = self._consume_matched_regex(result)\n return consumed.replace('\\\\,', ',').rstrip()\n return ''\n\n def _explicit_list(self):\n # explicit-list = \"[\" [value *(\",' value)] \"]\"\n self._expect('[', consume_whitespace=True)\n values = []\n while self._current() != ']':\n val = self._explicit_values()\n values.append(val)\n self._consume_whitespace()\n if self._current() != ']':\n self._expect(',')\n self._consume_whitespace()\n self._expect(']')\n return values\n\n def _explicit_values(self):\n # values = csv-list / explicit-list / hash-literal\n if self._current() == '[':\n return self._explicit_list()\n elif self._current() == '{':\n return self._hash_literal()\n else:\n return self._first_value()\n\n def _hash_literal(self):\n self._expect('{', consume_whitespace=True)\n keyvals = {}\n while self._current() != '}':\n key = self._key()\n self._expect('=', consume_whitespace=True)\n v = self._explicit_values()\n self._consume_whitespace()\n if self._current() != '}':\n self._expect(',')\n self._consume_whitespace()\n keyvals[key] = v\n self._expect('}')\n return keyvals\n\n def _first_value(self):\n # first-value = value / single-quoted-val / double-quoted-val\n if self._current() == \"'\":\n return self._single_quoted_value()\n elif self._current() == '\"':\n return self._double_quoted_value()\n return self._value()\n\n def _single_quoted_value(self):\n # single-quoted-value = %x27 *(val-escaped-single) %x27\n # val-escaped-single = %x20-26 / %x28-7F / escaped-escape /\n # (escape single-quote)\n return self._consume_quoted(self._SINGLE_QUOTED, escaped_char=\"'\")\n\n def _consume_quoted(self, regex, escaped_char=None):\n value = self._must_consume_regex(regex)[1:-1]\n if escaped_char is not None:\n value = value.replace(\"\\\\%s\" % escaped_char, escaped_char)\n value = value.replace(\"\\\\\\\\\", \"\\\\\")\n return value\n\n def _double_quoted_value(self):\n return self._consume_quoted(self._DOUBLE_QUOTED, escaped_char='\"')\n\n def _second_value(self):\n if self._current() == \"'\":\n return self._single_quoted_value()\n elif self._current() == '\"':\n return self._double_quoted_value()\n else:\n consumed = self._must_consume_regex(self._SECOND_VALUE)\n return consumed.replace('\\\\,', ',').rstrip()\n\n def _expect(self, char, consume_whitespace=False):\n if consume_whitespace:\n self._consume_whitespace()\n if self._index >= len(self._input_value):\n raise ShorthandParseError(self._input_value, char,\n 'EOF', self._index)\n actual = self._input_value[self._index]\n if actual != char:\n raise ShorthandParseError(self._input_value, char,\n actual, self._index)\n self._index += 1\n if consume_whitespace:\n self._consume_whitespace()\n\n def _must_consume_regex(self, regex):\n result = regex.match(self._input_value[self._index:])\n if result is not None:\n return self._consume_matched_regex(result)\n raise ShorthandParseError(self._input_value, '<%s>' % regex.name,\n '<none>', self._index)\n\n def _consume_matched_regex(self, result):\n start, end = result.span()\n v = self._input_value[self._index+start:self._index+end]\n self._index += (end - start)\n return v\n\n def _current(self):\n # If the index is at the end of the input value,\n # then _EOF will be returned.\n if self._index < len(self._input_value):\n return self._input_value[self._index]\n return _EOF\n\n def _at_eof(self):\n return self._index >= len(self._input_value)\n\n def _backtrack_to(self, char):\n while self._index >= 0 and self._input_value[self._index] != char:\n self._index -= 1\n\n def _consume_whitespace(self):\n while self._current() != _EOF and self._current() in string.whitespace:\n self._index += 1\n\n\nclass ModelVisitor(object):\n def visit(self, params, model):\n self._visit({}, model, '', params)\n\n def _visit(self, parent, shape, name, value):\n method = getattr(self, '_visit_%s' % shape.type_name,\n self._visit_scalar)\n method(parent, shape, name, value)\n\n def _visit_structure(self, parent, shape, name, value):\n if not isinstance(value, dict):\n return\n for member_name, member_shape in shape.members.items():\n self._visit(value, member_shape, member_name,\n value.get(member_name))\n\n def _visit_list(self, parent, shape, name, value):\n if not isinstance(value, list):\n return\n for i, element in enumerate(value):\n self._visit(value, shape.member, i, element)\n\n def _visit_map(self, parent, shape, name, value):\n if not isinstance(value, dict):\n return\n value_shape = shape.value\n for k, v in value.items():\n self._visit(value, value_shape, k, v)\n\n def _visit_scalar(self, parent, shape, name, value):\n pass\n\n\nclass BackCompatVisitor(ModelVisitor):\n def _visit_list(self, parent, shape, name, value):\n if not isinstance(value, list):\n # Convert a -> [a] because they specified\n # \"foo=bar\", but \"bar\" should really be [\"bar\"].\n if value is not None:\n parent[name] = [value]\n else:\n return super(BackCompatVisitor, self)._visit_list(\n parent, shape, name, value)\n\n def _visit_scalar(self, parent, shape, name, value):\n if value is None:\n return\n type_name = shape.type_name\n if type_name in ['integer', 'long']:\n parent[name] = int(value)\n elif type_name in ['double', 'float']:\n parent[name] = float(value)\n elif type_name == 'boolean':\n # We want to make sure we only set a value\n # only if \"true\"/\"false\" is specified.\n if value.lower() == 'true':\n parent[name] = True\n elif value.lower() == 'false':\n parent[name] = False\n", "path": "awscli/shorthand.py"}], "after_files": [{"content": "# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"Module for parsing shorthand syntax.\n\nThis module parses any CLI options that use a \"shorthand\"\nsyntax::\n\n --foo A=b,C=d\n |------|\n |\n Shorthand syntax\n\n\nThis module provides two main classes to do this.\nFirst, there's a ``ShorthandParser`` class. This class works\non a purely syntactic level. It looks only at the string value\nprovided to it in order to figure out how the string should be parsed.\n\nHowever, because there was a pre-existing shorthand parser, we need\nto remain backwards compatible with the previous parser. One of the\nthings the previous parser did was use the associated JSON model to\ncontrol how the expression was parsed.\n\nIn order to accommodate this a post processing class is provided that\ntakes the parsed values from the ``ShorthandParser`` as well as the\ncorresponding JSON model for the CLI argument and makes any adjustments\nnecessary to maintain backwards compatibility. This is done in the\n``BackCompatVisitor`` class.\n\n\"\"\"\nimport re\nimport string\n\n\n_EOF = object()\n\n\nclass _NamedRegex(object):\n def __init__(self, name, regex_str):\n self.name = name\n self.regex = re.compile(regex_str, re.UNICODE)\n\n def match(self, value):\n return self.regex.match(value)\n\n\nclass ShorthandParseError(Exception):\n def __init__(self, value, expected, actual, index):\n self.value = value\n self.expected = expected\n self.actual = actual\n self.index = index\n msg = self._construct_msg()\n super(ShorthandParseError, self).__init__(msg)\n\n def _construct_msg(self):\n consumed, remaining, num_spaces = self.value, '', self.index\n if '\\n' in self.value[:self.index]:\n # If there's newlines in the consumed expression, we want\n # to make sure we're only counting the spaces\n # from the last newline:\n # foo=bar,\\n\n # bar==baz\n # ^\n last_newline = self.value[:self.index].rindex('\\n')\n num_spaces = self.index - last_newline - 1\n if '\\n' in self.value[self.index:]:\n # If there's newline in the remaining, divide value\n # into consumed and remainig\n # foo==bar,\\n\n # ^\n # bar=baz\n next_newline = self.index + self.value[self.index:].index('\\n')\n consumed = self.value[:next_newline]\n remaining = self.value[next_newline:]\n msg = (\n \"Expected: '%s', received: '%s' for input:\\n\"\n \"%s\\n\"\n \"%s\"\n \"%s\"\n ) % (self.expected, self.actual, consumed,\n ' ' * num_spaces + '^', remaining)\n return msg\n\n\nclass ShorthandParser(object):\n \"\"\"Parses shorthand syntax in the CLI.\n\n Note that this parser does not rely on any JSON models to control\n how to parse the shorthand syntax.\n\n \"\"\"\n\n _SINGLE_QUOTED = _NamedRegex('singled quoted', r'\\'(?:\\\\\\\\|\\\\\\'|[^\\'])*\\'')\n _DOUBLE_QUOTED = _NamedRegex('double quoted', r'\"(?:\\\\\\\\|\\\\\"|[^\"])*\"')\n _START_WORD = u'\\!\\#-&\\(-\\+\\--\\<\\>-Z\\\\\\\\-z\\u007c-\\uffff'\n _FIRST_FOLLOW_CHARS = u'\\s\\!\\#-&\\(-\\+\\--\\\\\\\\\\^-\\|~-\\uffff'\n _SECOND_FOLLOW_CHARS = u'\\s\\!\\#-&\\(-\\+\\--\\<\\>-\\uffff'\n _ESCAPED_COMMA = '(\\\\\\\\,)'\n _FIRST_VALUE = _NamedRegex(\n 'first',\n u'({escaped_comma}|[{start_word}])'\n u'({escaped_comma}|[{follow_chars}])*'.format(\n escaped_comma=_ESCAPED_COMMA,\n start_word=_START_WORD,\n follow_chars=_FIRST_FOLLOW_CHARS,\n ))\n _SECOND_VALUE = _NamedRegex(\n 'second',\n u'({escaped_comma}|[{start_word}])'\n u'({escaped_comma}|[{follow_chars}])*'.format(\n escaped_comma=_ESCAPED_COMMA,\n start_word=_START_WORD,\n follow_chars=_SECOND_FOLLOW_CHARS,\n ))\n\n def __init__(self):\n self._tokens = []\n\n def parse(self, value):\n \"\"\"Parse shorthand syntax.\n\n For example::\n\n parser = ShorthandParser()\n parser.parse('a=b') # {'a': 'b'}\n parser.parse('a=b,c') # {'a': ['b', 'c']}\n\n :tpye value: str\n :param value: Any value that needs to be parsed.\n\n :return: Parsed value, which will be a dictionary.\n \"\"\"\n self._input_value = value\n self._index = 0\n return self._parameter()\n\n def _parameter(self):\n # parameter = keyval *(\",\" keyval)\n params = {}\n params.update(self._keyval())\n while self._index < len(self._input_value):\n self._expect(',', consume_whitespace=True)\n params.update(self._keyval())\n return params\n\n def _keyval(self):\n # keyval = key \"=\" [values]\n key = self._key()\n self._expect('=', consume_whitespace=True)\n values = self._values()\n return {key: values}\n\n def _key(self):\n # key = 1*(alpha / %x30-39 / %x5f / %x2e / %x23) ; [a-zA-Z0-9\\-_.#/]\n valid_chars = string.ascii_letters + string.digits + '-_.#/:'\n start = self._index\n while not self._at_eof():\n if self._current() not in valid_chars:\n break\n self._index += 1\n return self._input_value[start:self._index]\n\n def _values(self):\n # values = csv-list / explicit-list / hash-literal\n if self._at_eof():\n return ''\n elif self._current() == '[':\n return self._explicit_list()\n elif self._current() == '{':\n return self._hash_literal()\n else:\n return self._csv_value()\n\n def _csv_value(self):\n # Supports either:\n # foo=bar -> 'bar'\n # ^\n # foo=bar,baz -> ['bar', 'baz']\n # ^\n first_value = self._first_value()\n self._consume_whitespace()\n if self._at_eof() or self._input_value[self._index] != ',':\n return first_value\n self._expect(',', consume_whitespace=True)\n csv_list = [first_value]\n # Try to parse remaining list values.\n # It's possible we don't parse anything:\n # a=b,c=d\n # ^-here\n # In the case above, we'll hit the ShorthandParser,\n # backtrack to the comma, and return a single scalar\n # value 'b'.\n while True:\n try:\n current = self._second_value()\n self._consume_whitespace()\n if self._at_eof():\n csv_list.append(current)\n break\n self._expect(',', consume_whitespace=True)\n csv_list.append(current)\n except ShorthandParseError:\n # Backtrack to the previous comma.\n # This can happen when we reach this case:\n # foo=a,b,c=d,e=f\n # ^-start\n # foo=a,b,c=d,e=f\n # ^-error, \"expected ',' received '='\n # foo=a,b,c=d,e=f\n # ^-backtrack to here.\n if self._at_eof():\n raise\n self._backtrack_to(',')\n break\n if len(csv_list) == 1:\n # Then this was a foo=bar case, so we expect\n # this to parse to a scalar value 'bar', i.e\n # {\"foo\": \"bar\"} instead of {\"bar\": [\"bar\"]}\n return first_value\n return csv_list\n\n def _value(self):\n result = self._FIRST_VALUE.match(self._input_value[self._index:])\n if result is not None:\n consumed = self._consume_matched_regex(result)\n return consumed.replace('\\\\,', ',').rstrip()\n return ''\n\n def _explicit_list(self):\n # explicit-list = \"[\" [value *(\",' value)] \"]\"\n self._expect('[', consume_whitespace=True)\n values = []\n while self._current() != ']':\n val = self._explicit_values()\n values.append(val)\n self._consume_whitespace()\n if self._current() != ']':\n self._expect(',')\n self._consume_whitespace()\n self._expect(']')\n return values\n\n def _explicit_values(self):\n # values = csv-list / explicit-list / hash-literal\n if self._current() == '[':\n return self._explicit_list()\n elif self._current() == '{':\n return self._hash_literal()\n else:\n return self._first_value()\n\n def _hash_literal(self):\n self._expect('{', consume_whitespace=True)\n keyvals = {}\n while self._current() != '}':\n key = self._key()\n self._expect('=', consume_whitespace=True)\n v = self._explicit_values()\n self._consume_whitespace()\n if self._current() != '}':\n self._expect(',')\n self._consume_whitespace()\n keyvals[key] = v\n self._expect('}')\n return keyvals\n\n def _first_value(self):\n # first-value = value / single-quoted-val / double-quoted-val\n if self._current() == \"'\":\n return self._single_quoted_value()\n elif self._current() == '\"':\n return self._double_quoted_value()\n return self._value()\n\n def _single_quoted_value(self):\n # single-quoted-value = %x27 *(val-escaped-single) %x27\n # val-escaped-single = %x20-26 / %x28-7F / escaped-escape /\n # (escape single-quote)\n return self._consume_quoted(self._SINGLE_QUOTED, escaped_char=\"'\")\n\n def _consume_quoted(self, regex, escaped_char=None):\n value = self._must_consume_regex(regex)[1:-1]\n if escaped_char is not None:\n value = value.replace(\"\\\\%s\" % escaped_char, escaped_char)\n value = value.replace(\"\\\\\\\\\", \"\\\\\")\n return value\n\n def _double_quoted_value(self):\n return self._consume_quoted(self._DOUBLE_QUOTED, escaped_char='\"')\n\n def _second_value(self):\n if self._current() == \"'\":\n return self._single_quoted_value()\n elif self._current() == '\"':\n return self._double_quoted_value()\n else:\n consumed = self._must_consume_regex(self._SECOND_VALUE)\n return consumed.replace('\\\\,', ',').rstrip()\n\n def _expect(self, char, consume_whitespace=False):\n if consume_whitespace:\n self._consume_whitespace()\n if self._index >= len(self._input_value):\n raise ShorthandParseError(self._input_value, char,\n 'EOF', self._index)\n actual = self._input_value[self._index]\n if actual != char:\n raise ShorthandParseError(self._input_value, char,\n actual, self._index)\n self._index += 1\n if consume_whitespace:\n self._consume_whitespace()\n\n def _must_consume_regex(self, regex):\n result = regex.match(self._input_value[self._index:])\n if result is not None:\n return self._consume_matched_regex(result)\n raise ShorthandParseError(self._input_value, '<%s>' % regex.name,\n '<none>', self._index)\n\n def _consume_matched_regex(self, result):\n start, end = result.span()\n v = self._input_value[self._index+start:self._index+end]\n self._index += (end - start)\n return v\n\n def _current(self):\n # If the index is at the end of the input value,\n # then _EOF will be returned.\n if self._index < len(self._input_value):\n return self._input_value[self._index]\n return _EOF\n\n def _at_eof(self):\n return self._index >= len(self._input_value)\n\n def _backtrack_to(self, char):\n while self._index >= 0 and self._input_value[self._index] != char:\n self._index -= 1\n\n def _consume_whitespace(self):\n while self._current() != _EOF and self._current() in string.whitespace:\n self._index += 1\n\n\nclass ModelVisitor(object):\n def visit(self, params, model):\n self._visit({}, model, '', params)\n\n def _visit(self, parent, shape, name, value):\n method = getattr(self, '_visit_%s' % shape.type_name,\n self._visit_scalar)\n method(parent, shape, name, value)\n\n def _visit_structure(self, parent, shape, name, value):\n if not isinstance(value, dict):\n return\n for member_name, member_shape in shape.members.items():\n self._visit(value, member_shape, member_name,\n value.get(member_name))\n\n def _visit_list(self, parent, shape, name, value):\n if not isinstance(value, list):\n return\n for i, element in enumerate(value):\n self._visit(value, shape.member, i, element)\n\n def _visit_map(self, parent, shape, name, value):\n if not isinstance(value, dict):\n return\n value_shape = shape.value\n for k, v in value.items():\n self._visit(value, value_shape, k, v)\n\n def _visit_scalar(self, parent, shape, name, value):\n pass\n\n\nclass BackCompatVisitor(ModelVisitor):\n def _visit_list(self, parent, shape, name, value):\n if not isinstance(value, list):\n # Convert a -> [a] because they specified\n # \"foo=bar\", but \"bar\" should really be [\"bar\"].\n if value is not None:\n parent[name] = [value]\n else:\n return super(BackCompatVisitor, self)._visit_list(\n parent, shape, name, value)\n\n def _visit_scalar(self, parent, shape, name, value):\n if value is None:\n return\n type_name = shape.type_name\n if type_name in ['integer', 'long']:\n parent[name] = int(value)\n elif type_name in ['double', 'float']:\n parent[name] = float(value)\n elif type_name == 'boolean':\n # We want to make sure we only set a value\n # only if \"true\"/\"false\" is specified.\n if value.lower() == 'true':\n parent[name] = True\n elif value.lower() == 'false':\n parent[name] = False\n", "path": "awscli/shorthand.py"}]} |
gh_patches_debug_1501 | rasdani/github-patches | git_diff | localstack__localstack-3366 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Usage of `docker inspect ..` is fragile, depends on how and what built the docker image
[x] bug report
[ ] feature request
# Detailed description
`lambda_executor.py` current retrieves the container entrypoint from the docker image via `docker inspect --format="{{ .ContainerConfig.Entrypoint }}" ..`. This is fragile and may be missing depending on how the image in question is built. There is a `config` block _and_ a `containerconfig` block that are mostly the same, but sometimes different depending what built and what version of that thing built the image, for example we are seeing the entrypoint missing on images built with Docker for Mac 2.5.0.1, but not on earlier versions, others using `podman` are noticing the fragility in other projects:
https://github.com/containers/podman/issues/2017
## Expected behavior
entrypoint value is picked up from a validly built container
## Actual behavior
entrypoint is sometimes an empty string, which then for a `provided` lambda executor ends up with a script error trying to execute the handler name.
The simple fix is to change `--format="{{ .ContainerConfig.Entrypoint }}"` to `--format="{{ .Config.Entrypoint }}"` which seems like the more canonical way of getting that value.
┆Issue is synchronized with this [Jira Task](https://localstack.atlassian.net/browse/LOC-54) by [Unito](https://www.unito.io/learn-more)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `localstack/services/awslambda/lambda_executors.py`
Content:
```
1 import os
2 import re
3 import sys
4 import glob
5 import json
6 import time
7 import logging
8 import threading
9 import subprocess
10 import six
11 import base64
12 from multiprocessing import Process, Queue
13 try:
14 from shlex import quote as cmd_quote
15 except ImportError:
16 from pipes import quote as cmd_quote # for Python 2.7
17 from localstack import config
18 from localstack.utils import bootstrap
19 from localstack.utils.aws import aws_stack
20 from localstack.utils.common import (
21 CaptureOutput, FuncThread, TMP_FILES, short_uid, save_file, rm_rf, in_docker,
22 to_str, to_bytes, run, cp_r, json_safe, get_free_tcp_port)
23 from localstack.services.install import INSTALL_PATH_LOCALSTACK_FAT_JAR
24 from localstack.utils.aws.dead_letter_queue import lambda_error_to_dead_letter_queue, sqs_error_to_dead_letter_queue
25 from localstack.utils.cloudwatch.cloudwatch_util import store_cloudwatch_logs, cloudwatched
26
27 # constants
28 LAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR
29 LAMBDA_EXECUTOR_CLASS = 'cloud.localstack.LambdaExecutor'
30 EVENT_FILE_PATTERN = '%s/lambda.event.*.json' % config.TMP_FOLDER
31
32 LAMBDA_RUNTIME_PYTHON27 = 'python2.7'
33 LAMBDA_RUNTIME_PYTHON36 = 'python3.6'
34 LAMBDA_RUNTIME_PYTHON37 = 'python3.7'
35 LAMBDA_RUNTIME_PYTHON38 = 'python3.8'
36 LAMBDA_RUNTIME_NODEJS = 'nodejs'
37 LAMBDA_RUNTIME_NODEJS43 = 'nodejs4.3'
38 LAMBDA_RUNTIME_NODEJS610 = 'nodejs6.10'
39 LAMBDA_RUNTIME_NODEJS810 = 'nodejs8.10'
40 LAMBDA_RUNTIME_NODEJS10X = 'nodejs10.x'
41 LAMBDA_RUNTIME_NODEJS12X = 'nodejs12.x'
42 LAMBDA_RUNTIME_JAVA8 = 'java8'
43 LAMBDA_RUNTIME_JAVA11 = 'java11'
44 LAMBDA_RUNTIME_DOTNETCORE2 = 'dotnetcore2.0'
45 LAMBDA_RUNTIME_DOTNETCORE21 = 'dotnetcore2.1'
46 LAMBDA_RUNTIME_DOTNETCORE31 = 'dotnetcore3.1'
47 LAMBDA_RUNTIME_GOLANG = 'go1.x'
48 LAMBDA_RUNTIME_RUBY = 'ruby'
49 LAMBDA_RUNTIME_RUBY25 = 'ruby2.5'
50 LAMBDA_RUNTIME_PROVIDED = 'provided'
51
52 LAMBDA_SERVER_UNIQUE_PORTS = 500
53 LAMBDA_SERVER_PORT_OFFSET = 5000
54
55 LAMBDA_API_UNIQUE_PORTS = 500
56 LAMBDA_API_PORT_OFFSET = 9000
57
58 # logger
59 LOG = logging.getLogger(__name__)
60
61 # maximum time a pre-allocated container can sit idle before getting killed
62 MAX_CONTAINER_IDLE_TIME_MS = 600 * 1000
63
64 # SQS event source name
65 EVENT_SOURCE_SQS = 'aws:sqs'
66
67 # IP address of main Docker container (lazily initialized)
68 DOCKER_MAIN_CONTAINER_IP = None
69
70 # whether to use our custom Java executor, or the default from lambci
71 # TODO: deprecated, should be removed in the future
72 USE_CUSTOM_JAVA_EXECUTOR = False
73
74
75 def get_from_event(event, key):
76 try:
77 return event['Records'][0][key]
78 except KeyError:
79 return None
80
81
82 def is_java_lambda(lambda_details):
83 runtime = getattr(lambda_details, 'runtime', lambda_details)
84 return runtime in [LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_JAVA11]
85
86
87 def is_nodejs_runtime(lambda_details):
88 runtime = getattr(lambda_details, 'runtime', lambda_details)
89 return runtime.startswith('nodejs')
90
91
92 def _store_logs(func_details, log_output, invocation_time=None, container_id=None):
93 log_group_name = '/aws/lambda/%s' % func_details.name()
94 container_id = container_id or short_uid()
95 invocation_time = invocation_time or int(time.time() * 1000)
96 invocation_time_secs = int(invocation_time / 1000)
97 time_str = time.strftime('%Y/%m/%d', time.gmtime(invocation_time_secs))
98 log_stream_name = '%s/[LATEST]%s' % (time_str, container_id)
99 return store_cloudwatch_logs(log_group_name, log_stream_name, log_output, invocation_time)
100
101
102 def get_main_endpoint_from_container():
103 global DOCKER_MAIN_CONTAINER_IP
104 if DOCKER_MAIN_CONTAINER_IP is None:
105 DOCKER_MAIN_CONTAINER_IP = False
106 try:
107 if in_docker():
108 DOCKER_MAIN_CONTAINER_IP = bootstrap.get_main_container_ip()
109 LOG.info('Determined main container target IP: %s' % DOCKER_MAIN_CONTAINER_IP)
110 except Exception as e:
111 container_name = bootstrap.get_main_container_name()
112 LOG.info('Unable to get IP address of main Docker container "%s": %s' %
113 (container_name, e))
114 # return main container IP, or fall back to Docker host (bridge IP, or host DNS address)
115 return DOCKER_MAIN_CONTAINER_IP or config.DOCKER_HOST_FROM_CONTAINER
116
117
118 class LambdaExecutor(object):
119 """ Base class for Lambda executors. Subclasses must overwrite the _execute method """
120 def __init__(self):
121 # keeps track of each function arn and the last time it was invoked
122 self.function_invoke_times = {}
123
124 def _prepare_environment(self, func_details):
125 # setup environment pre-defined variables for docker environment
126 result = func_details.envvars.copy()
127
128 # injecting aws credentials into docker environment if not provided
129 aws_stack.inject_test_credentials_into_env(result)
130
131 return result
132
133 def execute(self, func_arn, func_details, event, context=None, version=None,
134 asynchronous=False, callback=None):
135 def do_execute(*args):
136
137 @cloudwatched('lambda')
138 def _run(func_arn=None):
139 # set the invocation time in milliseconds
140 invocation_time = int(time.time() * 1000)
141 # start the execution
142 raised_error = None
143 result = None
144 dlq_sent = None
145 try:
146 result = self._execute(func_arn, func_details, event, context, version)
147 except Exception as e:
148 raised_error = e
149 if asynchronous:
150 if get_from_event(event, 'eventSource') == EVENT_SOURCE_SQS:
151 sqs_queue_arn = get_from_event(event, 'eventSourceARN')
152 if sqs_queue_arn:
153 # event source is SQS, send event back to dead letter queue
154 dlq_sent = sqs_error_to_dead_letter_queue(sqs_queue_arn, event, e)
155 else:
156 # event source is not SQS, send back to lambda dead letter queue
157 lambda_error_to_dead_letter_queue(func_details, event, e)
158 raise e
159 finally:
160 self.function_invoke_times[func_arn] = invocation_time
161 callback and callback(result, func_arn, event, error=raised_error, dlq_sent=dlq_sent)
162 # return final result
163 return result
164
165 return _run(func_arn=func_arn)
166
167 # Inform users about asynchronous mode of the lambda execution.
168 if asynchronous:
169 LOG.debug('Lambda executed in Event (asynchronous) mode, no response will be returned to caller')
170 FuncThread(do_execute).start()
171 return None, 'Lambda executed asynchronously.'
172
173 return do_execute()
174
175 def _execute(self, func_arn, func_details, event, context=None, version=None):
176 """ This method must be overwritten by subclasses. """
177 raise Exception('Not implemented.')
178
179 def startup(self):
180 pass
181
182 def cleanup(self, arn=None):
183 pass
184
185 def run_lambda_executor(self, cmd, event=None, func_details=None, env_vars={}):
186 kwargs = {'stdin': True, 'inherit_env': True, 'asynchronous': True}
187
188 is_provided = func_details.runtime.startswith(LAMBDA_RUNTIME_PROVIDED)
189 if func_details and is_provided and env_vars.get('DOCKER_LAMBDA_USE_STDIN') == '1':
190 # Note: certain "provided" runtimes (e.g., Rust programs) can block when we pass in
191 # the event payload via stdin, hence we rewrite the command to "echo ... | ..." below
192 env_vars = {
193 'PATH': env_vars.get('PATH') or os.environ.get('PATH', ''),
194 'AWS_LAMBDA_EVENT_BODY': to_str(event),
195 'DOCKER_LAMBDA_USE_STDIN': '1'
196 }
197 event = None
198 cmd = re.sub(r'(.*)(%s\s+(run|start))' % self._docker_cmd(), r'\1echo $AWS_LAMBDA_EVENT_BODY | \2', cmd)
199
200 process = run(cmd, env_vars=env_vars, stderr=subprocess.PIPE, outfile=subprocess.PIPE, **kwargs)
201 result, log_output = process.communicate(input=event)
202 try:
203 result = to_str(result).strip()
204 except Exception:
205 pass
206 log_output = to_str(log_output).strip()
207 return_code = process.returncode
208 # Note: The user's code may have been logging to stderr, in which case the logs
209 # will be part of the "result" variable here. Hence, make sure that we extract
210 # only the *last* line of "result" and consider anything above that as log output.
211 if isinstance(result, six.string_types) and '\n' in result:
212 additional_logs, _, result = result.rpartition('\n')
213 log_output += '\n%s' % additional_logs
214
215 log_formatted = log_output.strip().replace('\n', '\n> ')
216 func_arn = func_details and func_details.arn()
217 LOG.debug('Lambda %s result / log output:\n%s\n> %s' % (func_arn, result.strip(), log_formatted))
218
219 # store log output - TODO get live logs from `process` above?
220 _store_logs(func_details, log_output)
221
222 if return_code != 0:
223 raise Exception('Lambda process returned error status code: %s. Result: %s. Output:\n%s' %
224 (return_code, result, log_output))
225
226 return result
227
228
229 class ContainerInfo:
230 """ Contains basic information about a docker container. """
231 def __init__(self, name, entry_point):
232 self.name = name
233 self.entry_point = entry_point
234
235
236 class LambdaExecutorContainers(LambdaExecutor):
237 """ Abstract executor class for executing Lambda functions in Docker containers """
238
239 def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
240 raise Exception('Not implemented')
241
242 def _docker_cmd(self):
243 """ Return the string to be used for running Docker commands. """
244 return config.DOCKER_CMD
245
246 def prepare_event(self, environment, event_body):
247 """ Return the event as a stdin string. """
248 # amend the environment variables for execution
249 environment['AWS_LAMBDA_EVENT_BODY'] = event_body
250 return None
251
252 def _execute(self, func_arn, func_details, event, context=None, version=None):
253 lambda_cwd = func_details.cwd
254 runtime = func_details.runtime
255 handler = func_details.handler
256 environment = self._prepare_environment(func_details)
257
258 # configure USE_SSL in environment
259 if config.USE_SSL:
260 environment['USE_SSL'] = '1'
261
262 # prepare event body
263 if not event:
264 LOG.warning('Empty event body specified for invocation of Lambda "%s"' % func_arn)
265 event = {}
266 event_body = json.dumps(json_safe(event))
267 stdin = self.prepare_event(environment, event_body)
268
269 main_endpoint = get_main_endpoint_from_container()
270
271 environment['LOCALSTACK_HOSTNAME'] = main_endpoint
272 environment['EDGE_PORT'] = str(config.EDGE_PORT)
273 environment['_HANDLER'] = handler
274 if os.environ.get('HTTP_PROXY'):
275 environment['HTTP_PROXY'] = os.environ['HTTP_PROXY']
276 if func_details.timeout:
277 environment['AWS_LAMBDA_FUNCTION_TIMEOUT'] = str(func_details.timeout)
278 if context:
279 environment['AWS_LAMBDA_FUNCTION_NAME'] = context.function_name
280 environment['AWS_LAMBDA_FUNCTION_VERSION'] = context.function_version
281 environment['AWS_LAMBDA_FUNCTION_INVOKED_ARN'] = context.invoked_function_arn
282 environment['AWS_LAMBDA_COGNITO_IDENTITY'] = json.dumps(context.cognito_identity or {})
283 if context.client_context is not None:
284 environment['AWS_LAMBDA_CLIENT_CONTEXT'] = json.dumps(to_str(
285 base64.b64decode(to_bytes(context.client_context))))
286
287 # custom command to execute in the container
288 command = ''
289 events_file = ''
290
291 if USE_CUSTOM_JAVA_EXECUTOR and is_java_lambda(runtime):
292 # if running a Java Lambda with our custom executor, set up classpath arguments
293 java_opts = Util.get_java_opts()
294 stdin = None
295 # copy executor jar into temp directory
296 target_file = os.path.join(lambda_cwd, os.path.basename(LAMBDA_EXECUTOR_JAR))
297 if not os.path.exists(target_file):
298 cp_r(LAMBDA_EXECUTOR_JAR, target_file)
299 # TODO cleanup once we have custom Java Docker image
300 taskdir = '/var/task'
301 events_file = '_lambda.events.%s.json' % short_uid()
302 save_file(os.path.join(lambda_cwd, events_file), event_body)
303 classpath = Util.get_java_classpath(target_file)
304 command = ("bash -c 'cd %s; java %s -cp \"%s\" \"%s\" \"%s\" \"%s\"'" %
305 (taskdir, java_opts, classpath, LAMBDA_EXECUTOR_CLASS, handler, events_file))
306
307 # accept any self-signed certificates for outgoing calls from the Lambda
308 if is_nodejs_runtime(runtime):
309 environment['NODE_TLS_REJECT_UNAUTHORIZED'] = '0'
310
311 # determine the command to be executed (implemented by subclasses)
312 cmd = self.prepare_execution(func_arn, environment, runtime, command, handler, lambda_cwd)
313
314 # lambci writes the Lambda result to stdout and logs to stderr, fetch it from there!
315 LOG.info('Running lambda cmd: %s' % cmd)
316 result = self.run_lambda_executor(cmd, stdin, env_vars=environment, func_details=func_details)
317
318 # clean up events file
319 events_file and os.path.exists(events_file) and rm_rf(events_file)
320
321 return result
322
323
324 class LambdaExecutorReuseContainers(LambdaExecutorContainers):
325 """ Executor class for executing Lambda functions in re-usable Docker containers """
326 def __init__(self):
327 super(LambdaExecutorReuseContainers, self).__init__()
328 # locking thread for creation/destruction of docker containers.
329 self.docker_container_lock = threading.RLock()
330
331 # On each invocation we try to construct a port unlikely to conflict
332 # with a previously invoked lambda function. This is a problem with at
333 # least the lambci/lambda:go1.x container, which execs a go program that
334 # attempts to bind to the same default port.
335 self.next_port = 0
336 self.max_port = LAMBDA_SERVER_UNIQUE_PORTS
337 self.port_offset = LAMBDA_SERVER_PORT_OFFSET
338
339 def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
340 # check whether the Lambda has been invoked before
341 has_been_invoked_before = func_arn in self.function_invoke_times
342
343 # Choose a port for this invocation
344 with self.docker_container_lock:
345 env_vars['_LAMBDA_SERVER_PORT'] = str(self.next_port + self.port_offset)
346 self.next_port = (self.next_port + 1) % self.max_port
347
348 # create/verify the docker container is running.
349 LOG.debug('Priming docker container with runtime "%s" and arn "%s".', runtime, func_arn)
350 container_info = self.prime_docker_container(runtime, func_arn, env_vars.items(), lambda_cwd)
351
352 # Note: currently "docker exec" does not support --env-file, i.e., environment variables can only be
353 # passed directly on the command line, using "-e" below. TODO: Update this code once --env-file is
354 # available for docker exec, to better support very large Lambda events (very long environment values)
355 exec_env_vars = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
356
357 if not command:
358 command = '%s %s' % (container_info.entry_point, handler)
359
360 # determine files to be copied into the container
361 copy_command = ''
362 docker_cmd = self._docker_cmd()
363 if not has_been_invoked_before and config.LAMBDA_REMOTE_DOCKER:
364 # if this is the first invocation: copy the entire folder into the container
365 copy_command = '%s cp "%s/." "%s:/var/task";' % (docker_cmd, lambda_cwd, container_info.name)
366
367 cmd = (
368 '%s'
369 ' %s exec'
370 ' %s' # env variables
371 ' %s' # container name
372 ' %s' # run cmd
373 ) % (copy_command, docker_cmd, exec_env_vars, container_info.name, command)
374 LOG.debug('Command for docker-reuse Lambda executor: %s' % cmd)
375
376 return cmd
377
378 def startup(self):
379 self.cleanup()
380 # start a process to remove idle containers
381 if config.LAMBDA_REMOVE_CONTAINERS:
382 self.start_idle_container_destroyer_interval()
383
384 def cleanup(self, arn=None):
385 if arn:
386 self.function_invoke_times.pop(arn, None)
387 return self.destroy_docker_container(arn)
388 self.function_invoke_times = {}
389 return self.destroy_existing_docker_containers()
390
391 def prime_docker_container(self, runtime, func_arn, env_vars, lambda_cwd):
392 """
393 Prepares a persistent docker container for a specific function.
394 :param runtime: Lamda runtime environment. python2.7, nodejs6.10, etc.
395 :param func_arn: The ARN of the lambda function.
396 :param env_vars: The environment variables for the lambda.
397 :param lambda_cwd: The local directory containing the code for the lambda function.
398 :return: ContainerInfo class containing the container name and default entry point.
399 """
400 with self.docker_container_lock:
401 # Get the container name and id.
402 container_name = self.get_container_name(func_arn)
403 docker_cmd = self._docker_cmd()
404
405 status = self.get_docker_container_status(func_arn)
406 LOG.debug('Priming docker container (status "%s"): %s' % (status, container_name))
407
408 docker_image = Util.docker_image_for_runtime(runtime)
409 rm_flag = Util.get_docker_remove_flag()
410
411 # Container is not running or doesn't exist.
412 if status < 1:
413 # Make sure the container does not exist in any form/state.
414 self.destroy_docker_container(func_arn)
415
416 env_vars_str = ' '.join(['-e {}={}'.format(k, cmd_quote(v)) for (k, v) in env_vars])
417
418 network = config.LAMBDA_DOCKER_NETWORK
419 network_str = '--network="%s"' % network if network else ''
420
421 dns = config.LAMBDA_DOCKER_DNS
422 dns_str = '--dns="%s"' % dns if dns else ''
423
424 mount_volume = not config.LAMBDA_REMOTE_DOCKER
425 lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
426 if (':' in lambda_cwd and '\\' in lambda_cwd):
427 lambda_cwd_on_host = Util.format_windows_path(lambda_cwd_on_host)
428 mount_volume_str = '-v "%s":/var/task' % lambda_cwd_on_host if mount_volume else ''
429
430 # Create and start the container
431 LOG.debug('Creating container: %s' % container_name)
432 cmd = (
433 '%s create'
434 ' %s' # --rm flag
435 ' --name "%s"'
436 ' --entrypoint /bin/bash' # Load bash when it starts.
437 ' %s'
438 ' --interactive' # Keeps the container running bash.
439 ' -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY"'
440 ' -e HOSTNAME="$HOSTNAME"'
441 ' -e LOCALSTACK_HOSTNAME="$LOCALSTACK_HOSTNAME"'
442 ' -e EDGE_PORT="$EDGE_PORT"'
443 ' %s' # env_vars
444 ' %s' # network
445 ' %s' # dns
446 ' %s'
447 ) % (docker_cmd, rm_flag, container_name, mount_volume_str,
448 env_vars_str, network_str, dns_str, docker_image)
449 LOG.debug(cmd)
450 run(cmd)
451
452 if not mount_volume:
453 LOG.debug('Copying files to container "%s" from "%s".' % (container_name, lambda_cwd))
454 cmd = (
455 '%s cp'
456 ' "%s/." "%s:/var/task"'
457 ) % (docker_cmd, lambda_cwd, container_name)
458 LOG.debug(cmd)
459 run(cmd)
460
461 LOG.debug('Starting container: %s' % container_name)
462 cmd = '%s start %s' % (docker_cmd, container_name)
463 LOG.debug(cmd)
464 run(cmd)
465 # give the container some time to start up
466 time.sleep(1)
467
468 # Get the entry point for the image.
469 LOG.debug('Getting the entrypoint for image: %s' % (docker_image))
470 cmd = (
471 '%s image inspect'
472 ' --format="{{ .ContainerConfig.Entrypoint }}"'
473 ' %s'
474 ) % (docker_cmd, docker_image)
475
476 LOG.debug(cmd)
477 run_result = run(cmd)
478
479 entry_point = run_result.strip('[]\n\r ')
480
481 container_network = self.get_docker_container_network(func_arn)
482
483 LOG.debug('Using entrypoint "%s" for container "%s" on network "%s".'
484 % (entry_point, container_name, container_network))
485
486 return ContainerInfo(container_name, entry_point)
487
488 def destroy_docker_container(self, func_arn):
489 """
490 Stops and/or removes a docker container for a specific lambda function ARN.
491 :param func_arn: The ARN of the lambda function.
492 :return: None
493 """
494 with self.docker_container_lock:
495 status = self.get_docker_container_status(func_arn)
496 docker_cmd = self._docker_cmd()
497
498 # Get the container name and id.
499 container_name = self.get_container_name(func_arn)
500
501 if status == 1:
502 LOG.debug('Stopping container: %s' % container_name)
503 cmd = '%s stop -t0 %s' % (docker_cmd, container_name)
504
505 LOG.debug(cmd)
506 run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
507
508 status = self.get_docker_container_status(func_arn)
509
510 if status == -1:
511 LOG.debug('Removing container: %s' % container_name)
512 cmd = '%s rm %s' % (docker_cmd, container_name)
513
514 LOG.debug(cmd)
515 run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
516
517 def get_all_container_names(self):
518 """
519 Returns a list of container names for lambda containers.
520 :return: A String[] localstack docker container names for each function.
521 """
522 with self.docker_container_lock:
523 LOG.debug('Getting all lambda containers names.')
524 cmd = '%s ps -a --filter="name=localstack_lambda_*" --format "{{.Names}}"' % self._docker_cmd()
525 LOG.debug(cmd)
526 cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE).strip()
527
528 if len(cmd_result) > 0:
529 container_names = cmd_result.split('\n')
530 else:
531 container_names = []
532
533 return container_names
534
535 def destroy_existing_docker_containers(self):
536 """
537 Stops and/or removes all lambda docker containers for localstack.
538 :return: None
539 """
540 with self.docker_container_lock:
541 container_names = self.get_all_container_names()
542
543 LOG.debug('Removing %d containers.' % len(container_names))
544 for container_name in container_names:
545 cmd = '%s rm -f %s' % (self._docker_cmd(), container_name)
546 LOG.debug(cmd)
547 run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
548
549 def get_docker_container_status(self, func_arn):
550 """
551 Determine the status of a docker container.
552 :param func_arn: The ARN of the lambda function.
553 :return: 1 If the container is running,
554 -1 if the container exists but is not running
555 0 if the container does not exist.
556 """
557 with self.docker_container_lock:
558 # Get the container name and id.
559 container_name = self.get_container_name(func_arn)
560
561 # Check if the container is already running
562 # Note: filtering by *exact* name using regex filter '^...$' seems unstable on some
563 # systems. Therefore, we use a combination of filter and grep to get the results.
564 cmd = ("docker ps -a --filter name='%s' "
565 '--format "{{ .Status }} - {{ .Names }}" '
566 '| grep -w "%s" | cat') % (container_name, container_name)
567 LOG.debug('Getting status for container "%s": %s' % (container_name, cmd))
568 cmd_result = run(cmd)
569
570 # If the container doesn't exist. Create and start it.
571 container_status = cmd_result.strip()
572
573 if len(container_status) == 0:
574 return 0
575
576 if container_status.lower().startswith('up '):
577 return 1
578
579 return -1
580
581 def get_docker_container_network(self, func_arn):
582 """
583 Determine the network of a docker container.
584 :param func_arn: The ARN of the lambda function.
585 :return: name of the container network
586 """
587 with self.docker_container_lock:
588 status = self.get_docker_container_status(func_arn)
589 # container does not exist
590 if status == 0:
591 return ''
592
593 # Get the container name.
594 container_name = self.get_container_name(func_arn)
595 docker_cmd = self._docker_cmd()
596
597 # Get the container network
598 LOG.debug('Getting container network: %s' % container_name)
599 cmd = (
600 '%s inspect %s'
601 ' --format "{{ .HostConfig.NetworkMode }}"'
602 ) % (docker_cmd, container_name)
603
604 LOG.debug(cmd)
605 cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
606
607 container_network = cmd_result.strip()
608
609 return container_network
610
611 def idle_container_destroyer(self):
612 """
613 Iterates though all the lambda containers and destroys any container that has
614 been inactive for longer than MAX_CONTAINER_IDLE_TIME_MS.
615 :return: None
616 """
617 LOG.info('Checking if there are idle containers.')
618 current_time = int(time.time() * 1000)
619 for func_arn, last_run_time in dict(self.function_invoke_times).items():
620 duration = current_time - last_run_time
621
622 # not enough idle time has passed
623 if duration < MAX_CONTAINER_IDLE_TIME_MS:
624 continue
625
626 # container has been idle, destroy it.
627 self.destroy_docker_container(func_arn)
628
629 def start_idle_container_destroyer_interval(self):
630 """
631 Starts a repeating timer that triggers start_idle_container_destroyer_interval every 60 seconds.
632 Thus checking for idle containers and destroying them.
633 :return: None
634 """
635 self.idle_container_destroyer()
636 threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()
637
638 def get_container_name(self, func_arn):
639 """
640 Given a function ARN, returns a valid docker container name.
641 :param func_arn: The ARN of the lambda function.
642 :return: A docker compatible name for the arn.
643 """
644 return 'localstack_lambda_' + re.sub(r'[^a-zA-Z0-9_.-]', '_', func_arn)
645
646
647 class LambdaExecutorSeparateContainers(LambdaExecutorContainers):
648 def __init__(self):
649 super(LambdaExecutorSeparateContainers, self).__init__()
650 self.max_port = LAMBDA_API_UNIQUE_PORTS
651 self.port_offset = LAMBDA_API_PORT_OFFSET
652
653 def prepare_event(self, environment, event_body):
654 # Tell Lambci to use STDIN for the event
655 environment['DOCKER_LAMBDA_USE_STDIN'] = '1'
656 return event_body.encode()
657
658 def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
659 entrypoint = ''
660 if command:
661 entrypoint = ' --entrypoint ""'
662 else:
663 command = '"%s"' % handler
664
665 # add Docker Lambda env vars
666 network = config.LAMBDA_DOCKER_NETWORK
667 network_str = '--network="%s"' % network if network else ''
668 if network == 'host':
669 port = get_free_tcp_port()
670 env_vars['DOCKER_LAMBDA_API_PORT'] = port
671 env_vars['DOCKER_LAMBDA_RUNTIME_PORT'] = port
672
673 dns = config.LAMBDA_DOCKER_DNS
674 dns_str = '--dns="%s"' % dns if dns else ''
675
676 env_vars_string = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
677 debug_docker_java_port = '-p {p}:{p}'.format(p=Util.debug_java_port) if Util.debug_java_port else ''
678 docker_cmd = self._docker_cmd()
679 docker_image = Util.docker_image_for_runtime(runtime)
680 rm_flag = Util.get_docker_remove_flag()
681
682 if config.LAMBDA_REMOTE_DOCKER:
683 cmd = (
684 'CONTAINER_ID="$(%s create -i'
685 ' %s' # entrypoint
686 ' %s' # debug_docker_java_port
687 ' %s' # env
688 ' %s' # network
689 ' %s' # dns
690 ' %s' # --rm flag
691 ' %s %s' # image and command
692 ')";'
693 '%s cp "%s/." "$CONTAINER_ID:/var/task"; '
694 '%s start -ai "$CONTAINER_ID";'
695 ) % (docker_cmd, entrypoint, debug_docker_java_port,
696 env_vars_string, network_str, dns_str, rm_flag,
697 docker_image, command,
698 docker_cmd, lambda_cwd,
699 docker_cmd)
700 else:
701 lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
702 cmd = (
703 '%s run -i'
704 ' %s -v "%s":/var/task'
705 ' %s'
706 ' %s' # network
707 ' %s' # dns
708 ' %s' # --rm flag
709 ' %s %s'
710 ) % (docker_cmd, entrypoint, lambda_cwd_on_host, env_vars_string,
711 network_str, dns_str, rm_flag, docker_image, command)
712 return cmd
713
714
715 class LambdaExecutorLocal(LambdaExecutor):
716 def _execute(self, func_arn, func_details, event, context=None, version=None):
717 lambda_cwd = func_details.cwd
718 environment = self._prepare_environment(func_details)
719
720 # execute the Lambda function in a forked sub-process, sync result via queue
721 queue = Queue()
722
723 lambda_function = func_details.function(version)
724
725 def do_execute():
726 # now we're executing in the child process, safe to change CWD and ENV
727 path_before = sys.path
728 try:
729 if lambda_cwd:
730 os.chdir(lambda_cwd)
731 sys.path = [lambda_cwd] + sys.path
732 if environment:
733 os.environ.update(environment)
734 result = lambda_function(event, context)
735 queue.put(result)
736 finally:
737 sys.path = path_before
738
739 process = Process(target=do_execute)
740 with CaptureOutput() as c:
741 process.run()
742 result = queue.get()
743
744 # Make sure to keep the log line below, to ensure the log stream gets created
745 log_output = 'START: Lambda %s started via "local" executor ...' % func_arn
746 # TODO: Interweaving stdout/stderr currently not supported
747 for stream in (c.stdout(), c.stderr()):
748 if stream:
749 log_output += ('\n' if log_output else '') + stream
750
751 # store logs to CloudWatch
752 _store_logs(func_details, log_output)
753
754 return result
755
756 def execute_java_lambda(self, event, context, main_file, func_details=None):
757 handler = func_details.handler
758 opts = config.LAMBDA_JAVA_OPTS if config.LAMBDA_JAVA_OPTS else ''
759 event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
760 save_file(event_file, json.dumps(json_safe(event)))
761 TMP_FILES.append(event_file)
762 class_name = handler.split('::')[0]
763 classpath = '%s:%s:%s' % (main_file, Util.get_java_classpath(main_file), LAMBDA_EXECUTOR_JAR)
764 cmd = 'java %s -cp %s %s %s %s' % (opts, classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
765 LOG.warning(cmd)
766 result = self.run_lambda_executor(cmd, func_details=func_details)
767 return result
768
769
770 class Util:
771 debug_java_port = False
772
773 @classmethod
774 def get_java_opts(cls):
775 opts = config.LAMBDA_JAVA_OPTS or ''
776 # Replace _debug_port_ with a random free port
777 if '_debug_port_' in opts:
778 if not cls.debug_java_port:
779 cls.debug_java_port = get_free_tcp_port()
780 opts = opts.replace('_debug_port_', ('%s' % cls.debug_java_port))
781 else:
782 # Parse the debug port from opts
783 m = re.match('.*address=(.+:)?(\\d+).*', opts)
784 if m is not None:
785 cls.debug_java_port = m.groups()[1]
786
787 return opts
788
789 @classmethod
790 def get_host_path_for_path_in_docker(cls, path):
791 return re.sub(r'^%s/(.*)$' % config.TMP_FOLDER,
792 r'%s/\1' % config.HOST_TMP_FOLDER, path)
793
794 @classmethod
795 def format_windows_path(cls, path):
796 temp = path.replace(':', '').replace('\\', '/')
797 if len(temp) >= 1 and temp[:1] != '/':
798 temp = '/' + temp
799 temp = '%s%s' % (config.WINDOWS_DOCKER_MOUNT_PREFIX, temp)
800 return temp
801
802 @classmethod
803 def docker_image_for_runtime(cls, runtime):
804 docker_tag = runtime
805 docker_image = config.LAMBDA_CONTAINER_REGISTRY
806 # TODO: remove prefix once execution issues are fixed with dotnetcore/python lambdas
807 # See https://github.com/lambci/docker-lambda/pull/218
808 lambdas_to_add_prefix = ['dotnetcore2.0', 'dotnetcore2.1', 'python2.7', 'python3.6', 'python3.7']
809 if docker_image == 'lambci/lambda' and any(img in docker_tag for img in lambdas_to_add_prefix):
810 docker_tag = '20191117-%s' % docker_tag
811 return '"%s:%s"' % (docker_image, docker_tag)
812
813 @classmethod
814 def get_docker_remove_flag(cls):
815 return '--rm' if config.LAMBDA_REMOVE_CONTAINERS else ''
816
817 @classmethod
818 def get_java_classpath(cls, archive):
819 """
820 Return the Java classpath, using the parent folder of the
821 given archive as the base folder.
822
823 The result contains any *.jar files in the base folder, as
824 well as any JAR files in the "lib/*" subfolder living
825 alongside the supplied java archive (.jar or .zip).
826
827 :param archive: an absolute path to a .jar or .zip Java archive
828 :return: the Java classpath, relative to the base dir of "archive"
829 """
830 entries = ['.']
831 base_dir = os.path.dirname(archive)
832 for pattern in ['%s/*.jar', '%s/lib/*.jar', '%s/java/lib/*.jar', '%s/*.zip']:
833 for entry in glob.glob(pattern % base_dir):
834 if os.path.realpath(archive) != os.path.realpath(entry):
835 entries.append(os.path.relpath(entry, base_dir))
836 # make sure to append the localstack-utils.jar at the end of the classpath
837 # https://github.com/localstack/localstack/issues/1160
838 entries.append(os.path.relpath(archive, base_dir))
839 entries.append('*.jar')
840 entries.append('java/lib/*.jar')
841 result = ':'.join(entries)
842 return result
843
844
845 # --------------
846 # GLOBAL STATE
847 # --------------
848
849 EXECUTOR_LOCAL = LambdaExecutorLocal()
850 EXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()
851 EXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()
852 DEFAULT_EXECUTOR = EXECUTOR_CONTAINERS_SEPARATE
853 # the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable
854 AVAILABLE_EXECUTORS = {
855 'local': EXECUTOR_LOCAL,
856 'docker': EXECUTOR_CONTAINERS_SEPARATE,
857 'docker-reuse': EXECUTOR_CONTAINERS_REUSE
858 }
859
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/localstack/services/awslambda/lambda_executors.py b/localstack/services/awslambda/lambda_executors.py
--- a/localstack/services/awslambda/lambda_executors.py
+++ b/localstack/services/awslambda/lambda_executors.py
@@ -469,7 +469,7 @@
LOG.debug('Getting the entrypoint for image: %s' % (docker_image))
cmd = (
'%s image inspect'
- ' --format="{{ .ContainerConfig.Entrypoint }}"'
+ ' --format="{{ .Config.Entrypoint }}"'
' %s'
) % (docker_cmd, docker_image)
| {"golden_diff": "diff --git a/localstack/services/awslambda/lambda_executors.py b/localstack/services/awslambda/lambda_executors.py\n--- a/localstack/services/awslambda/lambda_executors.py\n+++ b/localstack/services/awslambda/lambda_executors.py\n@@ -469,7 +469,7 @@\n LOG.debug('Getting the entrypoint for image: %s' % (docker_image))\n cmd = (\n '%s image inspect'\n- ' --format=\"{{ .ContainerConfig.Entrypoint }}\"'\n+ ' --format=\"{{ .Config.Entrypoint }}\"'\n ' %s'\n ) % (docker_cmd, docker_image)\n", "issue": "Usage of `docker inspect ..` is fragile, depends on how and what built the docker image\n[x] bug report\n[ ] feature request\n\n# Detailed description\n`lambda_executor.py` current retrieves the container entrypoint from the docker image via `docker inspect --format=\"{{ .ContainerConfig.Entrypoint }}\" ..`. This is fragile and may be missing depending on how the image in question is built. There is a `config` block _and_ a `containerconfig` block that are mostly the same, but sometimes different depending what built and what version of that thing built the image, for example we are seeing the entrypoint missing on images built with Docker for Mac 2.5.0.1, but not on earlier versions, others using `podman` are noticing the fragility in other projects:\n\nhttps://github.com/containers/podman/issues/2017\n\n## Expected behavior\nentrypoint value is picked up from a validly built container\n\n## Actual behavior\nentrypoint is sometimes an empty string, which then for a `provided` lambda executor ends up with a script error trying to execute the handler name.\n\nThe simple fix is to change `--format=\"{{ .ContainerConfig.Entrypoint }}\"` to `--format=\"{{ .Config.Entrypoint }}\"` which seems like the more canonical way of getting that value.\n\n\n\n\u2506Issue is synchronized with this [Jira Task](https://localstack.atlassian.net/browse/LOC-54) by [Unito](https://www.unito.io/learn-more)\n\n", "before_files": [{"content": "import os\nimport re\nimport sys\nimport glob\nimport json\nimport time\nimport logging\nimport threading\nimport subprocess\nimport six\nimport base64\nfrom multiprocessing import Process, Queue\ntry:\n from shlex import quote as cmd_quote\nexcept ImportError:\n from pipes import quote as cmd_quote # for Python 2.7\nfrom localstack import config\nfrom localstack.utils import bootstrap\nfrom localstack.utils.aws import aws_stack\nfrom localstack.utils.common import (\n CaptureOutput, FuncThread, TMP_FILES, short_uid, save_file, rm_rf, in_docker,\n to_str, to_bytes, run, cp_r, json_safe, get_free_tcp_port)\nfrom localstack.services.install import INSTALL_PATH_LOCALSTACK_FAT_JAR\nfrom localstack.utils.aws.dead_letter_queue import lambda_error_to_dead_letter_queue, sqs_error_to_dead_letter_queue\nfrom localstack.utils.cloudwatch.cloudwatch_util import store_cloudwatch_logs, cloudwatched\n\n# constants\nLAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR\nLAMBDA_EXECUTOR_CLASS = 'cloud.localstack.LambdaExecutor'\nEVENT_FILE_PATTERN = '%s/lambda.event.*.json' % config.TMP_FOLDER\n\nLAMBDA_RUNTIME_PYTHON27 = 'python2.7'\nLAMBDA_RUNTIME_PYTHON36 = 'python3.6'\nLAMBDA_RUNTIME_PYTHON37 = 'python3.7'\nLAMBDA_RUNTIME_PYTHON38 = 'python3.8'\nLAMBDA_RUNTIME_NODEJS = 'nodejs'\nLAMBDA_RUNTIME_NODEJS43 = 'nodejs4.3'\nLAMBDA_RUNTIME_NODEJS610 = 'nodejs6.10'\nLAMBDA_RUNTIME_NODEJS810 = 'nodejs8.10'\nLAMBDA_RUNTIME_NODEJS10X = 'nodejs10.x'\nLAMBDA_RUNTIME_NODEJS12X = 'nodejs12.x'\nLAMBDA_RUNTIME_JAVA8 = 'java8'\nLAMBDA_RUNTIME_JAVA11 = 'java11'\nLAMBDA_RUNTIME_DOTNETCORE2 = 'dotnetcore2.0'\nLAMBDA_RUNTIME_DOTNETCORE21 = 'dotnetcore2.1'\nLAMBDA_RUNTIME_DOTNETCORE31 = 'dotnetcore3.1'\nLAMBDA_RUNTIME_GOLANG = 'go1.x'\nLAMBDA_RUNTIME_RUBY = 'ruby'\nLAMBDA_RUNTIME_RUBY25 = 'ruby2.5'\nLAMBDA_RUNTIME_PROVIDED = 'provided'\n\nLAMBDA_SERVER_UNIQUE_PORTS = 500\nLAMBDA_SERVER_PORT_OFFSET = 5000\n\nLAMBDA_API_UNIQUE_PORTS = 500\nLAMBDA_API_PORT_OFFSET = 9000\n\n# logger\nLOG = logging.getLogger(__name__)\n\n# maximum time a pre-allocated container can sit idle before getting killed\nMAX_CONTAINER_IDLE_TIME_MS = 600 * 1000\n\n# SQS event source name\nEVENT_SOURCE_SQS = 'aws:sqs'\n\n# IP address of main Docker container (lazily initialized)\nDOCKER_MAIN_CONTAINER_IP = None\n\n# whether to use our custom Java executor, or the default from lambci\n# TODO: deprecated, should be removed in the future\nUSE_CUSTOM_JAVA_EXECUTOR = False\n\n\ndef get_from_event(event, key):\n try:\n return event['Records'][0][key]\n except KeyError:\n return None\n\n\ndef is_java_lambda(lambda_details):\n runtime = getattr(lambda_details, 'runtime', lambda_details)\n return runtime in [LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_JAVA11]\n\n\ndef is_nodejs_runtime(lambda_details):\n runtime = getattr(lambda_details, 'runtime', lambda_details)\n return runtime.startswith('nodejs')\n\n\ndef _store_logs(func_details, log_output, invocation_time=None, container_id=None):\n log_group_name = '/aws/lambda/%s' % func_details.name()\n container_id = container_id or short_uid()\n invocation_time = invocation_time or int(time.time() * 1000)\n invocation_time_secs = int(invocation_time / 1000)\n time_str = time.strftime('%Y/%m/%d', time.gmtime(invocation_time_secs))\n log_stream_name = '%s/[LATEST]%s' % (time_str, container_id)\n return store_cloudwatch_logs(log_group_name, log_stream_name, log_output, invocation_time)\n\n\ndef get_main_endpoint_from_container():\n global DOCKER_MAIN_CONTAINER_IP\n if DOCKER_MAIN_CONTAINER_IP is None:\n DOCKER_MAIN_CONTAINER_IP = False\n try:\n if in_docker():\n DOCKER_MAIN_CONTAINER_IP = bootstrap.get_main_container_ip()\n LOG.info('Determined main container target IP: %s' % DOCKER_MAIN_CONTAINER_IP)\n except Exception as e:\n container_name = bootstrap.get_main_container_name()\n LOG.info('Unable to get IP address of main Docker container \"%s\": %s' %\n (container_name, e))\n # return main container IP, or fall back to Docker host (bridge IP, or host DNS address)\n return DOCKER_MAIN_CONTAINER_IP or config.DOCKER_HOST_FROM_CONTAINER\n\n\nclass LambdaExecutor(object):\n \"\"\" Base class for Lambda executors. Subclasses must overwrite the _execute method \"\"\"\n def __init__(self):\n # keeps track of each function arn and the last time it was invoked\n self.function_invoke_times = {}\n\n def _prepare_environment(self, func_details):\n # setup environment pre-defined variables for docker environment\n result = func_details.envvars.copy()\n\n # injecting aws credentials into docker environment if not provided\n aws_stack.inject_test_credentials_into_env(result)\n\n return result\n\n def execute(self, func_arn, func_details, event, context=None, version=None,\n asynchronous=False, callback=None):\n def do_execute(*args):\n\n @cloudwatched('lambda')\n def _run(func_arn=None):\n # set the invocation time in milliseconds\n invocation_time = int(time.time() * 1000)\n # start the execution\n raised_error = None\n result = None\n dlq_sent = None\n try:\n result = self._execute(func_arn, func_details, event, context, version)\n except Exception as e:\n raised_error = e\n if asynchronous:\n if get_from_event(event, 'eventSource') == EVENT_SOURCE_SQS:\n sqs_queue_arn = get_from_event(event, 'eventSourceARN')\n if sqs_queue_arn:\n # event source is SQS, send event back to dead letter queue\n dlq_sent = sqs_error_to_dead_letter_queue(sqs_queue_arn, event, e)\n else:\n # event source is not SQS, send back to lambda dead letter queue\n lambda_error_to_dead_letter_queue(func_details, event, e)\n raise e\n finally:\n self.function_invoke_times[func_arn] = invocation_time\n callback and callback(result, func_arn, event, error=raised_error, dlq_sent=dlq_sent)\n # return final result\n return result\n\n return _run(func_arn=func_arn)\n\n # Inform users about asynchronous mode of the lambda execution.\n if asynchronous:\n LOG.debug('Lambda executed in Event (asynchronous) mode, no response will be returned to caller')\n FuncThread(do_execute).start()\n return None, 'Lambda executed asynchronously.'\n\n return do_execute()\n\n def _execute(self, func_arn, func_details, event, context=None, version=None):\n \"\"\" This method must be overwritten by subclasses. \"\"\"\n raise Exception('Not implemented.')\n\n def startup(self):\n pass\n\n def cleanup(self, arn=None):\n pass\n\n def run_lambda_executor(self, cmd, event=None, func_details=None, env_vars={}):\n kwargs = {'stdin': True, 'inherit_env': True, 'asynchronous': True}\n\n is_provided = func_details.runtime.startswith(LAMBDA_RUNTIME_PROVIDED)\n if func_details and is_provided and env_vars.get('DOCKER_LAMBDA_USE_STDIN') == '1':\n # Note: certain \"provided\" runtimes (e.g., Rust programs) can block when we pass in\n # the event payload via stdin, hence we rewrite the command to \"echo ... | ...\" below\n env_vars = {\n 'PATH': env_vars.get('PATH') or os.environ.get('PATH', ''),\n 'AWS_LAMBDA_EVENT_BODY': to_str(event),\n 'DOCKER_LAMBDA_USE_STDIN': '1'\n }\n event = None\n cmd = re.sub(r'(.*)(%s\\s+(run|start))' % self._docker_cmd(), r'\\1echo $AWS_LAMBDA_EVENT_BODY | \\2', cmd)\n\n process = run(cmd, env_vars=env_vars, stderr=subprocess.PIPE, outfile=subprocess.PIPE, **kwargs)\n result, log_output = process.communicate(input=event)\n try:\n result = to_str(result).strip()\n except Exception:\n pass\n log_output = to_str(log_output).strip()\n return_code = process.returncode\n # Note: The user's code may have been logging to stderr, in which case the logs\n # will be part of the \"result\" variable here. Hence, make sure that we extract\n # only the *last* line of \"result\" and consider anything above that as log output.\n if isinstance(result, six.string_types) and '\\n' in result:\n additional_logs, _, result = result.rpartition('\\n')\n log_output += '\\n%s' % additional_logs\n\n log_formatted = log_output.strip().replace('\\n', '\\n> ')\n func_arn = func_details and func_details.arn()\n LOG.debug('Lambda %s result / log output:\\n%s\\n> %s' % (func_arn, result.strip(), log_formatted))\n\n # store log output - TODO get live logs from `process` above?\n _store_logs(func_details, log_output)\n\n if return_code != 0:\n raise Exception('Lambda process returned error status code: %s. Result: %s. Output:\\n%s' %\n (return_code, result, log_output))\n\n return result\n\n\nclass ContainerInfo:\n \"\"\" Contains basic information about a docker container. \"\"\"\n def __init__(self, name, entry_point):\n self.name = name\n self.entry_point = entry_point\n\n\nclass LambdaExecutorContainers(LambdaExecutor):\n \"\"\" Abstract executor class for executing Lambda functions in Docker containers \"\"\"\n\n def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):\n raise Exception('Not implemented')\n\n def _docker_cmd(self):\n \"\"\" Return the string to be used for running Docker commands. \"\"\"\n return config.DOCKER_CMD\n\n def prepare_event(self, environment, event_body):\n \"\"\" Return the event as a stdin string. \"\"\"\n # amend the environment variables for execution\n environment['AWS_LAMBDA_EVENT_BODY'] = event_body\n return None\n\n def _execute(self, func_arn, func_details, event, context=None, version=None):\n lambda_cwd = func_details.cwd\n runtime = func_details.runtime\n handler = func_details.handler\n environment = self._prepare_environment(func_details)\n\n # configure USE_SSL in environment\n if config.USE_SSL:\n environment['USE_SSL'] = '1'\n\n # prepare event body\n if not event:\n LOG.warning('Empty event body specified for invocation of Lambda \"%s\"' % func_arn)\n event = {}\n event_body = json.dumps(json_safe(event))\n stdin = self.prepare_event(environment, event_body)\n\n main_endpoint = get_main_endpoint_from_container()\n\n environment['LOCALSTACK_HOSTNAME'] = main_endpoint\n environment['EDGE_PORT'] = str(config.EDGE_PORT)\n environment['_HANDLER'] = handler\n if os.environ.get('HTTP_PROXY'):\n environment['HTTP_PROXY'] = os.environ['HTTP_PROXY']\n if func_details.timeout:\n environment['AWS_LAMBDA_FUNCTION_TIMEOUT'] = str(func_details.timeout)\n if context:\n environment['AWS_LAMBDA_FUNCTION_NAME'] = context.function_name\n environment['AWS_LAMBDA_FUNCTION_VERSION'] = context.function_version\n environment['AWS_LAMBDA_FUNCTION_INVOKED_ARN'] = context.invoked_function_arn\n environment['AWS_LAMBDA_COGNITO_IDENTITY'] = json.dumps(context.cognito_identity or {})\n if context.client_context is not None:\n environment['AWS_LAMBDA_CLIENT_CONTEXT'] = json.dumps(to_str(\n base64.b64decode(to_bytes(context.client_context))))\n\n # custom command to execute in the container\n command = ''\n events_file = ''\n\n if USE_CUSTOM_JAVA_EXECUTOR and is_java_lambda(runtime):\n # if running a Java Lambda with our custom executor, set up classpath arguments\n java_opts = Util.get_java_opts()\n stdin = None\n # copy executor jar into temp directory\n target_file = os.path.join(lambda_cwd, os.path.basename(LAMBDA_EXECUTOR_JAR))\n if not os.path.exists(target_file):\n cp_r(LAMBDA_EXECUTOR_JAR, target_file)\n # TODO cleanup once we have custom Java Docker image\n taskdir = '/var/task'\n events_file = '_lambda.events.%s.json' % short_uid()\n save_file(os.path.join(lambda_cwd, events_file), event_body)\n classpath = Util.get_java_classpath(target_file)\n command = (\"bash -c 'cd %s; java %s -cp \\\"%s\\\" \\\"%s\\\" \\\"%s\\\" \\\"%s\\\"'\" %\n (taskdir, java_opts, classpath, LAMBDA_EXECUTOR_CLASS, handler, events_file))\n\n # accept any self-signed certificates for outgoing calls from the Lambda\n if is_nodejs_runtime(runtime):\n environment['NODE_TLS_REJECT_UNAUTHORIZED'] = '0'\n\n # determine the command to be executed (implemented by subclasses)\n cmd = self.prepare_execution(func_arn, environment, runtime, command, handler, lambda_cwd)\n\n # lambci writes the Lambda result to stdout and logs to stderr, fetch it from there!\n LOG.info('Running lambda cmd: %s' % cmd)\n result = self.run_lambda_executor(cmd, stdin, env_vars=environment, func_details=func_details)\n\n # clean up events file\n events_file and os.path.exists(events_file) and rm_rf(events_file)\n\n return result\n\n\nclass LambdaExecutorReuseContainers(LambdaExecutorContainers):\n \"\"\" Executor class for executing Lambda functions in re-usable Docker containers \"\"\"\n def __init__(self):\n super(LambdaExecutorReuseContainers, self).__init__()\n # locking thread for creation/destruction of docker containers.\n self.docker_container_lock = threading.RLock()\n\n # On each invocation we try to construct a port unlikely to conflict\n # with a previously invoked lambda function. This is a problem with at\n # least the lambci/lambda:go1.x container, which execs a go program that\n # attempts to bind to the same default port.\n self.next_port = 0\n self.max_port = LAMBDA_SERVER_UNIQUE_PORTS\n self.port_offset = LAMBDA_SERVER_PORT_OFFSET\n\n def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):\n # check whether the Lambda has been invoked before\n has_been_invoked_before = func_arn in self.function_invoke_times\n\n # Choose a port for this invocation\n with self.docker_container_lock:\n env_vars['_LAMBDA_SERVER_PORT'] = str(self.next_port + self.port_offset)\n self.next_port = (self.next_port + 1) % self.max_port\n\n # create/verify the docker container is running.\n LOG.debug('Priming docker container with runtime \"%s\" and arn \"%s\".', runtime, func_arn)\n container_info = self.prime_docker_container(runtime, func_arn, env_vars.items(), lambda_cwd)\n\n # Note: currently \"docker exec\" does not support --env-file, i.e., environment variables can only be\n # passed directly on the command line, using \"-e\" below. TODO: Update this code once --env-file is\n # available for docker exec, to better support very large Lambda events (very long environment values)\n exec_env_vars = ' '.join(['-e {}=\"${}\"'.format(k, k) for (k, v) in env_vars.items()])\n\n if not command:\n command = '%s %s' % (container_info.entry_point, handler)\n\n # determine files to be copied into the container\n copy_command = ''\n docker_cmd = self._docker_cmd()\n if not has_been_invoked_before and config.LAMBDA_REMOTE_DOCKER:\n # if this is the first invocation: copy the entire folder into the container\n copy_command = '%s cp \"%s/.\" \"%s:/var/task\";' % (docker_cmd, lambda_cwd, container_info.name)\n\n cmd = (\n '%s'\n ' %s exec'\n ' %s' # env variables\n ' %s' # container name\n ' %s' # run cmd\n ) % (copy_command, docker_cmd, exec_env_vars, container_info.name, command)\n LOG.debug('Command for docker-reuse Lambda executor: %s' % cmd)\n\n return cmd\n\n def startup(self):\n self.cleanup()\n # start a process to remove idle containers\n if config.LAMBDA_REMOVE_CONTAINERS:\n self.start_idle_container_destroyer_interval()\n\n def cleanup(self, arn=None):\n if arn:\n self.function_invoke_times.pop(arn, None)\n return self.destroy_docker_container(arn)\n self.function_invoke_times = {}\n return self.destroy_existing_docker_containers()\n\n def prime_docker_container(self, runtime, func_arn, env_vars, lambda_cwd):\n \"\"\"\n Prepares a persistent docker container for a specific function.\n :param runtime: Lamda runtime environment. python2.7, nodejs6.10, etc.\n :param func_arn: The ARN of the lambda function.\n :param env_vars: The environment variables for the lambda.\n :param lambda_cwd: The local directory containing the code for the lambda function.\n :return: ContainerInfo class containing the container name and default entry point.\n \"\"\"\n with self.docker_container_lock:\n # Get the container name and id.\n container_name = self.get_container_name(func_arn)\n docker_cmd = self._docker_cmd()\n\n status = self.get_docker_container_status(func_arn)\n LOG.debug('Priming docker container (status \"%s\"): %s' % (status, container_name))\n\n docker_image = Util.docker_image_for_runtime(runtime)\n rm_flag = Util.get_docker_remove_flag()\n\n # Container is not running or doesn't exist.\n if status < 1:\n # Make sure the container does not exist in any form/state.\n self.destroy_docker_container(func_arn)\n\n env_vars_str = ' '.join(['-e {}={}'.format(k, cmd_quote(v)) for (k, v) in env_vars])\n\n network = config.LAMBDA_DOCKER_NETWORK\n network_str = '--network=\"%s\"' % network if network else ''\n\n dns = config.LAMBDA_DOCKER_DNS\n dns_str = '--dns=\"%s\"' % dns if dns else ''\n\n mount_volume = not config.LAMBDA_REMOTE_DOCKER\n lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)\n if (':' in lambda_cwd and '\\\\' in lambda_cwd):\n lambda_cwd_on_host = Util.format_windows_path(lambda_cwd_on_host)\n mount_volume_str = '-v \"%s\":/var/task' % lambda_cwd_on_host if mount_volume else ''\n\n # Create and start the container\n LOG.debug('Creating container: %s' % container_name)\n cmd = (\n '%s create'\n ' %s' # --rm flag\n ' --name \"%s\"'\n ' --entrypoint /bin/bash' # Load bash when it starts.\n ' %s'\n ' --interactive' # Keeps the container running bash.\n ' -e AWS_LAMBDA_EVENT_BODY=\"$AWS_LAMBDA_EVENT_BODY\"'\n ' -e HOSTNAME=\"$HOSTNAME\"'\n ' -e LOCALSTACK_HOSTNAME=\"$LOCALSTACK_HOSTNAME\"'\n ' -e EDGE_PORT=\"$EDGE_PORT\"'\n ' %s' # env_vars\n ' %s' # network\n ' %s' # dns\n ' %s'\n ) % (docker_cmd, rm_flag, container_name, mount_volume_str,\n env_vars_str, network_str, dns_str, docker_image)\n LOG.debug(cmd)\n run(cmd)\n\n if not mount_volume:\n LOG.debug('Copying files to container \"%s\" from \"%s\".' % (container_name, lambda_cwd))\n cmd = (\n '%s cp'\n ' \"%s/.\" \"%s:/var/task\"'\n ) % (docker_cmd, lambda_cwd, container_name)\n LOG.debug(cmd)\n run(cmd)\n\n LOG.debug('Starting container: %s' % container_name)\n cmd = '%s start %s' % (docker_cmd, container_name)\n LOG.debug(cmd)\n run(cmd)\n # give the container some time to start up\n time.sleep(1)\n\n # Get the entry point for the image.\n LOG.debug('Getting the entrypoint for image: %s' % (docker_image))\n cmd = (\n '%s image inspect'\n ' --format=\"{{ .ContainerConfig.Entrypoint }}\"'\n ' %s'\n ) % (docker_cmd, docker_image)\n\n LOG.debug(cmd)\n run_result = run(cmd)\n\n entry_point = run_result.strip('[]\\n\\r ')\n\n container_network = self.get_docker_container_network(func_arn)\n\n LOG.debug('Using entrypoint \"%s\" for container \"%s\" on network \"%s\".'\n % (entry_point, container_name, container_network))\n\n return ContainerInfo(container_name, entry_point)\n\n def destroy_docker_container(self, func_arn):\n \"\"\"\n Stops and/or removes a docker container for a specific lambda function ARN.\n :param func_arn: The ARN of the lambda function.\n :return: None\n \"\"\"\n with self.docker_container_lock:\n status = self.get_docker_container_status(func_arn)\n docker_cmd = self._docker_cmd()\n\n # Get the container name and id.\n container_name = self.get_container_name(func_arn)\n\n if status == 1:\n LOG.debug('Stopping container: %s' % container_name)\n cmd = '%s stop -t0 %s' % (docker_cmd, container_name)\n\n LOG.debug(cmd)\n run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)\n\n status = self.get_docker_container_status(func_arn)\n\n if status == -1:\n LOG.debug('Removing container: %s' % container_name)\n cmd = '%s rm %s' % (docker_cmd, container_name)\n\n LOG.debug(cmd)\n run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)\n\n def get_all_container_names(self):\n \"\"\"\n Returns a list of container names for lambda containers.\n :return: A String[] localstack docker container names for each function.\n \"\"\"\n with self.docker_container_lock:\n LOG.debug('Getting all lambda containers names.')\n cmd = '%s ps -a --filter=\"name=localstack_lambda_*\" --format \"{{.Names}}\"' % self._docker_cmd()\n LOG.debug(cmd)\n cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE).strip()\n\n if len(cmd_result) > 0:\n container_names = cmd_result.split('\\n')\n else:\n container_names = []\n\n return container_names\n\n def destroy_existing_docker_containers(self):\n \"\"\"\n Stops and/or removes all lambda docker containers for localstack.\n :return: None\n \"\"\"\n with self.docker_container_lock:\n container_names = self.get_all_container_names()\n\n LOG.debug('Removing %d containers.' % len(container_names))\n for container_name in container_names:\n cmd = '%s rm -f %s' % (self._docker_cmd(), container_name)\n LOG.debug(cmd)\n run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)\n\n def get_docker_container_status(self, func_arn):\n \"\"\"\n Determine the status of a docker container.\n :param func_arn: The ARN of the lambda function.\n :return: 1 If the container is running,\n -1 if the container exists but is not running\n 0 if the container does not exist.\n \"\"\"\n with self.docker_container_lock:\n # Get the container name and id.\n container_name = self.get_container_name(func_arn)\n\n # Check if the container is already running\n # Note: filtering by *exact* name using regex filter '^...$' seems unstable on some\n # systems. Therefore, we use a combination of filter and grep to get the results.\n cmd = (\"docker ps -a --filter name='%s' \"\n '--format \"{{ .Status }} - {{ .Names }}\" '\n '| grep -w \"%s\" | cat') % (container_name, container_name)\n LOG.debug('Getting status for container \"%s\": %s' % (container_name, cmd))\n cmd_result = run(cmd)\n\n # If the container doesn't exist. Create and start it.\n container_status = cmd_result.strip()\n\n if len(container_status) == 0:\n return 0\n\n if container_status.lower().startswith('up '):\n return 1\n\n return -1\n\n def get_docker_container_network(self, func_arn):\n \"\"\"\n Determine the network of a docker container.\n :param func_arn: The ARN of the lambda function.\n :return: name of the container network\n \"\"\"\n with self.docker_container_lock:\n status = self.get_docker_container_status(func_arn)\n # container does not exist\n if status == 0:\n return ''\n\n # Get the container name.\n container_name = self.get_container_name(func_arn)\n docker_cmd = self._docker_cmd()\n\n # Get the container network\n LOG.debug('Getting container network: %s' % container_name)\n cmd = (\n '%s inspect %s'\n ' --format \"{{ .HostConfig.NetworkMode }}\"'\n ) % (docker_cmd, container_name)\n\n LOG.debug(cmd)\n cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)\n\n container_network = cmd_result.strip()\n\n return container_network\n\n def idle_container_destroyer(self):\n \"\"\"\n Iterates though all the lambda containers and destroys any container that has\n been inactive for longer than MAX_CONTAINER_IDLE_TIME_MS.\n :return: None\n \"\"\"\n LOG.info('Checking if there are idle containers.')\n current_time = int(time.time() * 1000)\n for func_arn, last_run_time in dict(self.function_invoke_times).items():\n duration = current_time - last_run_time\n\n # not enough idle time has passed\n if duration < MAX_CONTAINER_IDLE_TIME_MS:\n continue\n\n # container has been idle, destroy it.\n self.destroy_docker_container(func_arn)\n\n def start_idle_container_destroyer_interval(self):\n \"\"\"\n Starts a repeating timer that triggers start_idle_container_destroyer_interval every 60 seconds.\n Thus checking for idle containers and destroying them.\n :return: None\n \"\"\"\n self.idle_container_destroyer()\n threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()\n\n def get_container_name(self, func_arn):\n \"\"\"\n Given a function ARN, returns a valid docker container name.\n :param func_arn: The ARN of the lambda function.\n :return: A docker compatible name for the arn.\n \"\"\"\n return 'localstack_lambda_' + re.sub(r'[^a-zA-Z0-9_.-]', '_', func_arn)\n\n\nclass LambdaExecutorSeparateContainers(LambdaExecutorContainers):\n def __init__(self):\n super(LambdaExecutorSeparateContainers, self).__init__()\n self.max_port = LAMBDA_API_UNIQUE_PORTS\n self.port_offset = LAMBDA_API_PORT_OFFSET\n\n def prepare_event(self, environment, event_body):\n # Tell Lambci to use STDIN for the event\n environment['DOCKER_LAMBDA_USE_STDIN'] = '1'\n return event_body.encode()\n\n def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):\n entrypoint = ''\n if command:\n entrypoint = ' --entrypoint \"\"'\n else:\n command = '\"%s\"' % handler\n\n # add Docker Lambda env vars\n network = config.LAMBDA_DOCKER_NETWORK\n network_str = '--network=\"%s\"' % network if network else ''\n if network == 'host':\n port = get_free_tcp_port()\n env_vars['DOCKER_LAMBDA_API_PORT'] = port\n env_vars['DOCKER_LAMBDA_RUNTIME_PORT'] = port\n\n dns = config.LAMBDA_DOCKER_DNS\n dns_str = '--dns=\"%s\"' % dns if dns else ''\n\n env_vars_string = ' '.join(['-e {}=\"${}\"'.format(k, k) for (k, v) in env_vars.items()])\n debug_docker_java_port = '-p {p}:{p}'.format(p=Util.debug_java_port) if Util.debug_java_port else ''\n docker_cmd = self._docker_cmd()\n docker_image = Util.docker_image_for_runtime(runtime)\n rm_flag = Util.get_docker_remove_flag()\n\n if config.LAMBDA_REMOTE_DOCKER:\n cmd = (\n 'CONTAINER_ID=\"$(%s create -i'\n ' %s' # entrypoint\n ' %s' # debug_docker_java_port\n ' %s' # env\n ' %s' # network\n ' %s' # dns\n ' %s' # --rm flag\n ' %s %s' # image and command\n ')\";'\n '%s cp \"%s/.\" \"$CONTAINER_ID:/var/task\"; '\n '%s start -ai \"$CONTAINER_ID\";'\n ) % (docker_cmd, entrypoint, debug_docker_java_port,\n env_vars_string, network_str, dns_str, rm_flag,\n docker_image, command,\n docker_cmd, lambda_cwd,\n docker_cmd)\n else:\n lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)\n cmd = (\n '%s run -i'\n ' %s -v \"%s\":/var/task'\n ' %s'\n ' %s' # network\n ' %s' # dns\n ' %s' # --rm flag\n ' %s %s'\n ) % (docker_cmd, entrypoint, lambda_cwd_on_host, env_vars_string,\n network_str, dns_str, rm_flag, docker_image, command)\n return cmd\n\n\nclass LambdaExecutorLocal(LambdaExecutor):\n def _execute(self, func_arn, func_details, event, context=None, version=None):\n lambda_cwd = func_details.cwd\n environment = self._prepare_environment(func_details)\n\n # execute the Lambda function in a forked sub-process, sync result via queue\n queue = Queue()\n\n lambda_function = func_details.function(version)\n\n def do_execute():\n # now we're executing in the child process, safe to change CWD and ENV\n path_before = sys.path\n try:\n if lambda_cwd:\n os.chdir(lambda_cwd)\n sys.path = [lambda_cwd] + sys.path\n if environment:\n os.environ.update(environment)\n result = lambda_function(event, context)\n queue.put(result)\n finally:\n sys.path = path_before\n\n process = Process(target=do_execute)\n with CaptureOutput() as c:\n process.run()\n result = queue.get()\n\n # Make sure to keep the log line below, to ensure the log stream gets created\n log_output = 'START: Lambda %s started via \"local\" executor ...' % func_arn\n # TODO: Interweaving stdout/stderr currently not supported\n for stream in (c.stdout(), c.stderr()):\n if stream:\n log_output += ('\\n' if log_output else '') + stream\n\n # store logs to CloudWatch\n _store_logs(func_details, log_output)\n\n return result\n\n def execute_java_lambda(self, event, context, main_file, func_details=None):\n handler = func_details.handler\n opts = config.LAMBDA_JAVA_OPTS if config.LAMBDA_JAVA_OPTS else ''\n event_file = EVENT_FILE_PATTERN.replace('*', short_uid())\n save_file(event_file, json.dumps(json_safe(event)))\n TMP_FILES.append(event_file)\n class_name = handler.split('::')[0]\n classpath = '%s:%s:%s' % (main_file, Util.get_java_classpath(main_file), LAMBDA_EXECUTOR_JAR)\n cmd = 'java %s -cp %s %s %s %s' % (opts, classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)\n LOG.warning(cmd)\n result = self.run_lambda_executor(cmd, func_details=func_details)\n return result\n\n\nclass Util:\n debug_java_port = False\n\n @classmethod\n def get_java_opts(cls):\n opts = config.LAMBDA_JAVA_OPTS or ''\n # Replace _debug_port_ with a random free port\n if '_debug_port_' in opts:\n if not cls.debug_java_port:\n cls.debug_java_port = get_free_tcp_port()\n opts = opts.replace('_debug_port_', ('%s' % cls.debug_java_port))\n else:\n # Parse the debug port from opts\n m = re.match('.*address=(.+:)?(\\\\d+).*', opts)\n if m is not None:\n cls.debug_java_port = m.groups()[1]\n\n return opts\n\n @classmethod\n def get_host_path_for_path_in_docker(cls, path):\n return re.sub(r'^%s/(.*)$' % config.TMP_FOLDER,\n r'%s/\\1' % config.HOST_TMP_FOLDER, path)\n\n @classmethod\n def format_windows_path(cls, path):\n temp = path.replace(':', '').replace('\\\\', '/')\n if len(temp) >= 1 and temp[:1] != '/':\n temp = '/' + temp\n temp = '%s%s' % (config.WINDOWS_DOCKER_MOUNT_PREFIX, temp)\n return temp\n\n @classmethod\n def docker_image_for_runtime(cls, runtime):\n docker_tag = runtime\n docker_image = config.LAMBDA_CONTAINER_REGISTRY\n # TODO: remove prefix once execution issues are fixed with dotnetcore/python lambdas\n # See https://github.com/lambci/docker-lambda/pull/218\n lambdas_to_add_prefix = ['dotnetcore2.0', 'dotnetcore2.1', 'python2.7', 'python3.6', 'python3.7']\n if docker_image == 'lambci/lambda' and any(img in docker_tag for img in lambdas_to_add_prefix):\n docker_tag = '20191117-%s' % docker_tag\n return '\"%s:%s\"' % (docker_image, docker_tag)\n\n @classmethod\n def get_docker_remove_flag(cls):\n return '--rm' if config.LAMBDA_REMOVE_CONTAINERS else ''\n\n @classmethod\n def get_java_classpath(cls, archive):\n \"\"\"\n Return the Java classpath, using the parent folder of the\n given archive as the base folder.\n\n The result contains any *.jar files in the base folder, as\n well as any JAR files in the \"lib/*\" subfolder living\n alongside the supplied java archive (.jar or .zip).\n\n :param archive: an absolute path to a .jar or .zip Java archive\n :return: the Java classpath, relative to the base dir of \"archive\"\n \"\"\"\n entries = ['.']\n base_dir = os.path.dirname(archive)\n for pattern in ['%s/*.jar', '%s/lib/*.jar', '%s/java/lib/*.jar', '%s/*.zip']:\n for entry in glob.glob(pattern % base_dir):\n if os.path.realpath(archive) != os.path.realpath(entry):\n entries.append(os.path.relpath(entry, base_dir))\n # make sure to append the localstack-utils.jar at the end of the classpath\n # https://github.com/localstack/localstack/issues/1160\n entries.append(os.path.relpath(archive, base_dir))\n entries.append('*.jar')\n entries.append('java/lib/*.jar')\n result = ':'.join(entries)\n return result\n\n\n# --------------\n# GLOBAL STATE\n# --------------\n\nEXECUTOR_LOCAL = LambdaExecutorLocal()\nEXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()\nEXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()\nDEFAULT_EXECUTOR = EXECUTOR_CONTAINERS_SEPARATE\n# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable\nAVAILABLE_EXECUTORS = {\n 'local': EXECUTOR_LOCAL,\n 'docker': EXECUTOR_CONTAINERS_SEPARATE,\n 'docker-reuse': EXECUTOR_CONTAINERS_REUSE\n}\n", "path": "localstack/services/awslambda/lambda_executors.py"}], "after_files": [{"content": "import os\nimport re\nimport sys\nimport glob\nimport json\nimport time\nimport logging\nimport threading\nimport subprocess\nimport six\nimport base64\nfrom multiprocessing import Process, Queue\ntry:\n from shlex import quote as cmd_quote\nexcept ImportError:\n from pipes import quote as cmd_quote # for Python 2.7\nfrom localstack import config\nfrom localstack.utils import bootstrap\nfrom localstack.utils.aws import aws_stack\nfrom localstack.utils.common import (\n CaptureOutput, FuncThread, TMP_FILES, short_uid, save_file, rm_rf, in_docker,\n to_str, to_bytes, run, cp_r, json_safe, get_free_tcp_port)\nfrom localstack.services.install import INSTALL_PATH_LOCALSTACK_FAT_JAR\nfrom localstack.utils.aws.dead_letter_queue import lambda_error_to_dead_letter_queue, sqs_error_to_dead_letter_queue\nfrom localstack.utils.cloudwatch.cloudwatch_util import store_cloudwatch_logs, cloudwatched\n\n# constants\nLAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR\nLAMBDA_EXECUTOR_CLASS = 'cloud.localstack.LambdaExecutor'\nEVENT_FILE_PATTERN = '%s/lambda.event.*.json' % config.TMP_FOLDER\n\nLAMBDA_RUNTIME_PYTHON27 = 'python2.7'\nLAMBDA_RUNTIME_PYTHON36 = 'python3.6'\nLAMBDA_RUNTIME_PYTHON37 = 'python3.7'\nLAMBDA_RUNTIME_PYTHON38 = 'python3.8'\nLAMBDA_RUNTIME_NODEJS = 'nodejs'\nLAMBDA_RUNTIME_NODEJS43 = 'nodejs4.3'\nLAMBDA_RUNTIME_NODEJS610 = 'nodejs6.10'\nLAMBDA_RUNTIME_NODEJS810 = 'nodejs8.10'\nLAMBDA_RUNTIME_NODEJS10X = 'nodejs10.x'\nLAMBDA_RUNTIME_NODEJS12X = 'nodejs12.x'\nLAMBDA_RUNTIME_JAVA8 = 'java8'\nLAMBDA_RUNTIME_JAVA11 = 'java11'\nLAMBDA_RUNTIME_DOTNETCORE2 = 'dotnetcore2.0'\nLAMBDA_RUNTIME_DOTNETCORE21 = 'dotnetcore2.1'\nLAMBDA_RUNTIME_DOTNETCORE31 = 'dotnetcore3.1'\nLAMBDA_RUNTIME_GOLANG = 'go1.x'\nLAMBDA_RUNTIME_RUBY = 'ruby'\nLAMBDA_RUNTIME_RUBY25 = 'ruby2.5'\nLAMBDA_RUNTIME_PROVIDED = 'provided'\n\nLAMBDA_SERVER_UNIQUE_PORTS = 500\nLAMBDA_SERVER_PORT_OFFSET = 5000\n\nLAMBDA_API_UNIQUE_PORTS = 500\nLAMBDA_API_PORT_OFFSET = 9000\n\n# logger\nLOG = logging.getLogger(__name__)\n\n# maximum time a pre-allocated container can sit idle before getting killed\nMAX_CONTAINER_IDLE_TIME_MS = 600 * 1000\n\n# SQS event source name\nEVENT_SOURCE_SQS = 'aws:sqs'\n\n# IP address of main Docker container (lazily initialized)\nDOCKER_MAIN_CONTAINER_IP = None\n\n# whether to use our custom Java executor, or the default from lambci\n# TODO: deprecated, should be removed in the future\nUSE_CUSTOM_JAVA_EXECUTOR = False\n\n\ndef get_from_event(event, key):\n try:\n return event['Records'][0][key]\n except KeyError:\n return None\n\n\ndef is_java_lambda(lambda_details):\n runtime = getattr(lambda_details, 'runtime', lambda_details)\n return runtime in [LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_JAVA11]\n\n\ndef is_nodejs_runtime(lambda_details):\n runtime = getattr(lambda_details, 'runtime', lambda_details)\n return runtime.startswith('nodejs')\n\n\ndef _store_logs(func_details, log_output, invocation_time=None, container_id=None):\n log_group_name = '/aws/lambda/%s' % func_details.name()\n container_id = container_id or short_uid()\n invocation_time = invocation_time or int(time.time() * 1000)\n invocation_time_secs = int(invocation_time / 1000)\n time_str = time.strftime('%Y/%m/%d', time.gmtime(invocation_time_secs))\n log_stream_name = '%s/[LATEST]%s' % (time_str, container_id)\n return store_cloudwatch_logs(log_group_name, log_stream_name, log_output, invocation_time)\n\n\ndef get_main_endpoint_from_container():\n global DOCKER_MAIN_CONTAINER_IP\n if DOCKER_MAIN_CONTAINER_IP is None:\n DOCKER_MAIN_CONTAINER_IP = False\n try:\n if in_docker():\n DOCKER_MAIN_CONTAINER_IP = bootstrap.get_main_container_ip()\n LOG.info('Determined main container target IP: %s' % DOCKER_MAIN_CONTAINER_IP)\n except Exception as e:\n container_name = bootstrap.get_main_container_name()\n LOG.info('Unable to get IP address of main Docker container \"%s\": %s' %\n (container_name, e))\n # return main container IP, or fall back to Docker host (bridge IP, or host DNS address)\n return DOCKER_MAIN_CONTAINER_IP or config.DOCKER_HOST_FROM_CONTAINER\n\n\nclass LambdaExecutor(object):\n \"\"\" Base class for Lambda executors. Subclasses must overwrite the _execute method \"\"\"\n def __init__(self):\n # keeps track of each function arn and the last time it was invoked\n self.function_invoke_times = {}\n\n def _prepare_environment(self, func_details):\n # setup environment pre-defined variables for docker environment\n result = func_details.envvars.copy()\n\n # injecting aws credentials into docker environment if not provided\n aws_stack.inject_test_credentials_into_env(result)\n\n return result\n\n def execute(self, func_arn, func_details, event, context=None, version=None,\n asynchronous=False, callback=None):\n def do_execute(*args):\n\n @cloudwatched('lambda')\n def _run(func_arn=None):\n # set the invocation time in milliseconds\n invocation_time = int(time.time() * 1000)\n # start the execution\n raised_error = None\n result = None\n dlq_sent = None\n try:\n result = self._execute(func_arn, func_details, event, context, version)\n except Exception as e:\n raised_error = e\n if asynchronous:\n if get_from_event(event, 'eventSource') == EVENT_SOURCE_SQS:\n sqs_queue_arn = get_from_event(event, 'eventSourceARN')\n if sqs_queue_arn:\n # event source is SQS, send event back to dead letter queue\n dlq_sent = sqs_error_to_dead_letter_queue(sqs_queue_arn, event, e)\n else:\n # event source is not SQS, send back to lambda dead letter queue\n lambda_error_to_dead_letter_queue(func_details, event, e)\n raise e\n finally:\n self.function_invoke_times[func_arn] = invocation_time\n callback and callback(result, func_arn, event, error=raised_error, dlq_sent=dlq_sent)\n # return final result\n return result\n\n return _run(func_arn=func_arn)\n\n # Inform users about asynchronous mode of the lambda execution.\n if asynchronous:\n LOG.debug('Lambda executed in Event (asynchronous) mode, no response will be returned to caller')\n FuncThread(do_execute).start()\n return None, 'Lambda executed asynchronously.'\n\n return do_execute()\n\n def _execute(self, func_arn, func_details, event, context=None, version=None):\n \"\"\" This method must be overwritten by subclasses. \"\"\"\n raise Exception('Not implemented.')\n\n def startup(self):\n pass\n\n def cleanup(self, arn=None):\n pass\n\n def run_lambda_executor(self, cmd, event=None, func_details=None, env_vars={}):\n kwargs = {'stdin': True, 'inherit_env': True, 'asynchronous': True}\n\n is_provided = func_details.runtime.startswith(LAMBDA_RUNTIME_PROVIDED)\n if func_details and is_provided and env_vars.get('DOCKER_LAMBDA_USE_STDIN') == '1':\n # Note: certain \"provided\" runtimes (e.g., Rust programs) can block when we pass in\n # the event payload via stdin, hence we rewrite the command to \"echo ... | ...\" below\n env_vars = {\n 'PATH': env_vars.get('PATH') or os.environ.get('PATH', ''),\n 'AWS_LAMBDA_EVENT_BODY': to_str(event),\n 'DOCKER_LAMBDA_USE_STDIN': '1'\n }\n event = None\n cmd = re.sub(r'(.*)(%s\\s+(run|start))' % self._docker_cmd(), r'\\1echo $AWS_LAMBDA_EVENT_BODY | \\2', cmd)\n\n process = run(cmd, env_vars=env_vars, stderr=subprocess.PIPE, outfile=subprocess.PIPE, **kwargs)\n result, log_output = process.communicate(input=event)\n try:\n result = to_str(result).strip()\n except Exception:\n pass\n log_output = to_str(log_output).strip()\n return_code = process.returncode\n # Note: The user's code may have been logging to stderr, in which case the logs\n # will be part of the \"result\" variable here. Hence, make sure that we extract\n # only the *last* line of \"result\" and consider anything above that as log output.\n if isinstance(result, six.string_types) and '\\n' in result:\n additional_logs, _, result = result.rpartition('\\n')\n log_output += '\\n%s' % additional_logs\n\n log_formatted = log_output.strip().replace('\\n', '\\n> ')\n func_arn = func_details and func_details.arn()\n LOG.debug('Lambda %s result / log output:\\n%s\\n> %s' % (func_arn, result.strip(), log_formatted))\n\n # store log output - TODO get live logs from `process` above?\n _store_logs(func_details, log_output)\n\n if return_code != 0:\n raise Exception('Lambda process returned error status code: %s. Result: %s. Output:\\n%s' %\n (return_code, result, log_output))\n\n return result\n\n\nclass ContainerInfo:\n \"\"\" Contains basic information about a docker container. \"\"\"\n def __init__(self, name, entry_point):\n self.name = name\n self.entry_point = entry_point\n\n\nclass LambdaExecutorContainers(LambdaExecutor):\n \"\"\" Abstract executor class for executing Lambda functions in Docker containers \"\"\"\n\n def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):\n raise Exception('Not implemented')\n\n def _docker_cmd(self):\n \"\"\" Return the string to be used for running Docker commands. \"\"\"\n return config.DOCKER_CMD\n\n def prepare_event(self, environment, event_body):\n \"\"\" Return the event as a stdin string. \"\"\"\n # amend the environment variables for execution\n environment['AWS_LAMBDA_EVENT_BODY'] = event_body\n return None\n\n def _execute(self, func_arn, func_details, event, context=None, version=None):\n lambda_cwd = func_details.cwd\n runtime = func_details.runtime\n handler = func_details.handler\n environment = self._prepare_environment(func_details)\n\n # configure USE_SSL in environment\n if config.USE_SSL:\n environment['USE_SSL'] = '1'\n\n # prepare event body\n if not event:\n LOG.warning('Empty event body specified for invocation of Lambda \"%s\"' % func_arn)\n event = {}\n event_body = json.dumps(json_safe(event))\n stdin = self.prepare_event(environment, event_body)\n\n main_endpoint = get_main_endpoint_from_container()\n\n environment['LOCALSTACK_HOSTNAME'] = main_endpoint\n environment['EDGE_PORT'] = str(config.EDGE_PORT)\n environment['_HANDLER'] = handler\n if os.environ.get('HTTP_PROXY'):\n environment['HTTP_PROXY'] = os.environ['HTTP_PROXY']\n if func_details.timeout:\n environment['AWS_LAMBDA_FUNCTION_TIMEOUT'] = str(func_details.timeout)\n if context:\n environment['AWS_LAMBDA_FUNCTION_NAME'] = context.function_name\n environment['AWS_LAMBDA_FUNCTION_VERSION'] = context.function_version\n environment['AWS_LAMBDA_FUNCTION_INVOKED_ARN'] = context.invoked_function_arn\n environment['AWS_LAMBDA_COGNITO_IDENTITY'] = json.dumps(context.cognito_identity or {})\n if context.client_context is not None:\n environment['AWS_LAMBDA_CLIENT_CONTEXT'] = json.dumps(to_str(\n base64.b64decode(to_bytes(context.client_context))))\n\n # custom command to execute in the container\n command = ''\n events_file = ''\n\n if USE_CUSTOM_JAVA_EXECUTOR and is_java_lambda(runtime):\n # if running a Java Lambda with our custom executor, set up classpath arguments\n java_opts = Util.get_java_opts()\n stdin = None\n # copy executor jar into temp directory\n target_file = os.path.join(lambda_cwd, os.path.basename(LAMBDA_EXECUTOR_JAR))\n if not os.path.exists(target_file):\n cp_r(LAMBDA_EXECUTOR_JAR, target_file)\n # TODO cleanup once we have custom Java Docker image\n taskdir = '/var/task'\n events_file = '_lambda.events.%s.json' % short_uid()\n save_file(os.path.join(lambda_cwd, events_file), event_body)\n classpath = Util.get_java_classpath(target_file)\n command = (\"bash -c 'cd %s; java %s -cp \\\"%s\\\" \\\"%s\\\" \\\"%s\\\" \\\"%s\\\"'\" %\n (taskdir, java_opts, classpath, LAMBDA_EXECUTOR_CLASS, handler, events_file))\n\n # accept any self-signed certificates for outgoing calls from the Lambda\n if is_nodejs_runtime(runtime):\n environment['NODE_TLS_REJECT_UNAUTHORIZED'] = '0'\n\n # determine the command to be executed (implemented by subclasses)\n cmd = self.prepare_execution(func_arn, environment, runtime, command, handler, lambda_cwd)\n\n # lambci writes the Lambda result to stdout and logs to stderr, fetch it from there!\n LOG.info('Running lambda cmd: %s' % cmd)\n result = self.run_lambda_executor(cmd, stdin, env_vars=environment, func_details=func_details)\n\n # clean up events file\n events_file and os.path.exists(events_file) and rm_rf(events_file)\n\n return result\n\n\nclass LambdaExecutorReuseContainers(LambdaExecutorContainers):\n \"\"\" Executor class for executing Lambda functions in re-usable Docker containers \"\"\"\n def __init__(self):\n super(LambdaExecutorReuseContainers, self).__init__()\n # locking thread for creation/destruction of docker containers.\n self.docker_container_lock = threading.RLock()\n\n # On each invocation we try to construct a port unlikely to conflict\n # with a previously invoked lambda function. This is a problem with at\n # least the lambci/lambda:go1.x container, which execs a go program that\n # attempts to bind to the same default port.\n self.next_port = 0\n self.max_port = LAMBDA_SERVER_UNIQUE_PORTS\n self.port_offset = LAMBDA_SERVER_PORT_OFFSET\n\n def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):\n # check whether the Lambda has been invoked before\n has_been_invoked_before = func_arn in self.function_invoke_times\n\n # Choose a port for this invocation\n with self.docker_container_lock:\n env_vars['_LAMBDA_SERVER_PORT'] = str(self.next_port + self.port_offset)\n self.next_port = (self.next_port + 1) % self.max_port\n\n # create/verify the docker container is running.\n LOG.debug('Priming docker container with runtime \"%s\" and arn \"%s\".', runtime, func_arn)\n container_info = self.prime_docker_container(runtime, func_arn, env_vars.items(), lambda_cwd)\n\n # Note: currently \"docker exec\" does not support --env-file, i.e., environment variables can only be\n # passed directly on the command line, using \"-e\" below. TODO: Update this code once --env-file is\n # available for docker exec, to better support very large Lambda events (very long environment values)\n exec_env_vars = ' '.join(['-e {}=\"${}\"'.format(k, k) for (k, v) in env_vars.items()])\n\n if not command:\n command = '%s %s' % (container_info.entry_point, handler)\n\n # determine files to be copied into the container\n copy_command = ''\n docker_cmd = self._docker_cmd()\n if not has_been_invoked_before and config.LAMBDA_REMOTE_DOCKER:\n # if this is the first invocation: copy the entire folder into the container\n copy_command = '%s cp \"%s/.\" \"%s:/var/task\";' % (docker_cmd, lambda_cwd, container_info.name)\n\n cmd = (\n '%s'\n ' %s exec'\n ' %s' # env variables\n ' %s' # container name\n ' %s' # run cmd\n ) % (copy_command, docker_cmd, exec_env_vars, container_info.name, command)\n LOG.debug('Command for docker-reuse Lambda executor: %s' % cmd)\n\n return cmd\n\n def startup(self):\n self.cleanup()\n # start a process to remove idle containers\n if config.LAMBDA_REMOVE_CONTAINERS:\n self.start_idle_container_destroyer_interval()\n\n def cleanup(self, arn=None):\n if arn:\n self.function_invoke_times.pop(arn, None)\n return self.destroy_docker_container(arn)\n self.function_invoke_times = {}\n return self.destroy_existing_docker_containers()\n\n def prime_docker_container(self, runtime, func_arn, env_vars, lambda_cwd):\n \"\"\"\n Prepares a persistent docker container for a specific function.\n :param runtime: Lamda runtime environment. python2.7, nodejs6.10, etc.\n :param func_arn: The ARN of the lambda function.\n :param env_vars: The environment variables for the lambda.\n :param lambda_cwd: The local directory containing the code for the lambda function.\n :return: ContainerInfo class containing the container name and default entry point.\n \"\"\"\n with self.docker_container_lock:\n # Get the container name and id.\n container_name = self.get_container_name(func_arn)\n docker_cmd = self._docker_cmd()\n\n status = self.get_docker_container_status(func_arn)\n LOG.debug('Priming docker container (status \"%s\"): %s' % (status, container_name))\n\n docker_image = Util.docker_image_for_runtime(runtime)\n rm_flag = Util.get_docker_remove_flag()\n\n # Container is not running or doesn't exist.\n if status < 1:\n # Make sure the container does not exist in any form/state.\n self.destroy_docker_container(func_arn)\n\n env_vars_str = ' '.join(['-e {}={}'.format(k, cmd_quote(v)) for (k, v) in env_vars])\n\n network = config.LAMBDA_DOCKER_NETWORK\n network_str = '--network=\"%s\"' % network if network else ''\n\n dns = config.LAMBDA_DOCKER_DNS\n dns_str = '--dns=\"%s\"' % dns if dns else ''\n\n mount_volume = not config.LAMBDA_REMOTE_DOCKER\n lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)\n if (':' in lambda_cwd and '\\\\' in lambda_cwd):\n lambda_cwd_on_host = Util.format_windows_path(lambda_cwd_on_host)\n mount_volume_str = '-v \"%s\":/var/task' % lambda_cwd_on_host if mount_volume else ''\n\n # Create and start the container\n LOG.debug('Creating container: %s' % container_name)\n cmd = (\n '%s create'\n ' %s' # --rm flag\n ' --name \"%s\"'\n ' --entrypoint /bin/bash' # Load bash when it starts.\n ' %s'\n ' --interactive' # Keeps the container running bash.\n ' -e AWS_LAMBDA_EVENT_BODY=\"$AWS_LAMBDA_EVENT_BODY\"'\n ' -e HOSTNAME=\"$HOSTNAME\"'\n ' -e LOCALSTACK_HOSTNAME=\"$LOCALSTACK_HOSTNAME\"'\n ' -e EDGE_PORT=\"$EDGE_PORT\"'\n ' %s' # env_vars\n ' %s' # network\n ' %s' # dns\n ' %s'\n ) % (docker_cmd, rm_flag, container_name, mount_volume_str,\n env_vars_str, network_str, dns_str, docker_image)\n LOG.debug(cmd)\n run(cmd)\n\n if not mount_volume:\n LOG.debug('Copying files to container \"%s\" from \"%s\".' % (container_name, lambda_cwd))\n cmd = (\n '%s cp'\n ' \"%s/.\" \"%s:/var/task\"'\n ) % (docker_cmd, lambda_cwd, container_name)\n LOG.debug(cmd)\n run(cmd)\n\n LOG.debug('Starting container: %s' % container_name)\n cmd = '%s start %s' % (docker_cmd, container_name)\n LOG.debug(cmd)\n run(cmd)\n # give the container some time to start up\n time.sleep(1)\n\n # Get the entry point for the image.\n LOG.debug('Getting the entrypoint for image: %s' % (docker_image))\n cmd = (\n '%s image inspect'\n ' --format=\"{{ .Config.Entrypoint }}\"'\n ' %s'\n ) % (docker_cmd, docker_image)\n\n LOG.debug(cmd)\n run_result = run(cmd)\n\n entry_point = run_result.strip('[]\\n\\r ')\n\n container_network = self.get_docker_container_network(func_arn)\n\n LOG.debug('Using entrypoint \"%s\" for container \"%s\" on network \"%s\".'\n % (entry_point, container_name, container_network))\n\n return ContainerInfo(container_name, entry_point)\n\n def destroy_docker_container(self, func_arn):\n \"\"\"\n Stops and/or removes a docker container for a specific lambda function ARN.\n :param func_arn: The ARN of the lambda function.\n :return: None\n \"\"\"\n with self.docker_container_lock:\n status = self.get_docker_container_status(func_arn)\n docker_cmd = self._docker_cmd()\n\n # Get the container name and id.\n container_name = self.get_container_name(func_arn)\n\n if status == 1:\n LOG.debug('Stopping container: %s' % container_name)\n cmd = '%s stop -t0 %s' % (docker_cmd, container_name)\n\n LOG.debug(cmd)\n run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)\n\n status = self.get_docker_container_status(func_arn)\n\n if status == -1:\n LOG.debug('Removing container: %s' % container_name)\n cmd = '%s rm %s' % (docker_cmd, container_name)\n\n LOG.debug(cmd)\n run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)\n\n def get_all_container_names(self):\n \"\"\"\n Returns a list of container names for lambda containers.\n :return: A String[] localstack docker container names for each function.\n \"\"\"\n with self.docker_container_lock:\n LOG.debug('Getting all lambda containers names.')\n cmd = '%s ps -a --filter=\"name=localstack_lambda_*\" --format \"{{.Names}}\"' % self._docker_cmd()\n LOG.debug(cmd)\n cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE).strip()\n\n if len(cmd_result) > 0:\n container_names = cmd_result.split('\\n')\n else:\n container_names = []\n\n return container_names\n\n def destroy_existing_docker_containers(self):\n \"\"\"\n Stops and/or removes all lambda docker containers for localstack.\n :return: None\n \"\"\"\n with self.docker_container_lock:\n container_names = self.get_all_container_names()\n\n LOG.debug('Removing %d containers.' % len(container_names))\n for container_name in container_names:\n cmd = '%s rm -f %s' % (self._docker_cmd(), container_name)\n LOG.debug(cmd)\n run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)\n\n def get_docker_container_status(self, func_arn):\n \"\"\"\n Determine the status of a docker container.\n :param func_arn: The ARN of the lambda function.\n :return: 1 If the container is running,\n -1 if the container exists but is not running\n 0 if the container does not exist.\n \"\"\"\n with self.docker_container_lock:\n # Get the container name and id.\n container_name = self.get_container_name(func_arn)\n\n # Check if the container is already running\n # Note: filtering by *exact* name using regex filter '^...$' seems unstable on some\n # systems. Therefore, we use a combination of filter and grep to get the results.\n cmd = (\"docker ps -a --filter name='%s' \"\n '--format \"{{ .Status }} - {{ .Names }}\" '\n '| grep -w \"%s\" | cat') % (container_name, container_name)\n LOG.debug('Getting status for container \"%s\": %s' % (container_name, cmd))\n cmd_result = run(cmd)\n\n # If the container doesn't exist. Create and start it.\n container_status = cmd_result.strip()\n\n if len(container_status) == 0:\n return 0\n\n if container_status.lower().startswith('up '):\n return 1\n\n return -1\n\n def get_docker_container_network(self, func_arn):\n \"\"\"\n Determine the network of a docker container.\n :param func_arn: The ARN of the lambda function.\n :return: name of the container network\n \"\"\"\n with self.docker_container_lock:\n status = self.get_docker_container_status(func_arn)\n # container does not exist\n if status == 0:\n return ''\n\n # Get the container name.\n container_name = self.get_container_name(func_arn)\n docker_cmd = self._docker_cmd()\n\n # Get the container network\n LOG.debug('Getting container network: %s' % container_name)\n cmd = (\n '%s inspect %s'\n ' --format \"{{ .HostConfig.NetworkMode }}\"'\n ) % (docker_cmd, container_name)\n\n LOG.debug(cmd)\n cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)\n\n container_network = cmd_result.strip()\n\n return container_network\n\n def idle_container_destroyer(self):\n \"\"\"\n Iterates though all the lambda containers and destroys any container that has\n been inactive for longer than MAX_CONTAINER_IDLE_TIME_MS.\n :return: None\n \"\"\"\n LOG.info('Checking if there are idle containers.')\n current_time = int(time.time() * 1000)\n for func_arn, last_run_time in dict(self.function_invoke_times).items():\n duration = current_time - last_run_time\n\n # not enough idle time has passed\n if duration < MAX_CONTAINER_IDLE_TIME_MS:\n continue\n\n # container has been idle, destroy it.\n self.destroy_docker_container(func_arn)\n\n def start_idle_container_destroyer_interval(self):\n \"\"\"\n Starts a repeating timer that triggers start_idle_container_destroyer_interval every 60 seconds.\n Thus checking for idle containers and destroying them.\n :return: None\n \"\"\"\n self.idle_container_destroyer()\n threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()\n\n def get_container_name(self, func_arn):\n \"\"\"\n Given a function ARN, returns a valid docker container name.\n :param func_arn: The ARN of the lambda function.\n :return: A docker compatible name for the arn.\n \"\"\"\n return 'localstack_lambda_' + re.sub(r'[^a-zA-Z0-9_.-]', '_', func_arn)\n\n\nclass LambdaExecutorSeparateContainers(LambdaExecutorContainers):\n def __init__(self):\n super(LambdaExecutorSeparateContainers, self).__init__()\n self.max_port = LAMBDA_API_UNIQUE_PORTS\n self.port_offset = LAMBDA_API_PORT_OFFSET\n\n def prepare_event(self, environment, event_body):\n # Tell Lambci to use STDIN for the event\n environment['DOCKER_LAMBDA_USE_STDIN'] = '1'\n return event_body.encode()\n\n def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):\n entrypoint = ''\n if command:\n entrypoint = ' --entrypoint \"\"'\n else:\n command = '\"%s\"' % handler\n\n # add Docker Lambda env vars\n network = config.LAMBDA_DOCKER_NETWORK\n network_str = '--network=\"%s\"' % network if network else ''\n if network == 'host':\n port = get_free_tcp_port()\n env_vars['DOCKER_LAMBDA_API_PORT'] = port\n env_vars['DOCKER_LAMBDA_RUNTIME_PORT'] = port\n\n dns = config.LAMBDA_DOCKER_DNS\n dns_str = '--dns=\"%s\"' % dns if dns else ''\n\n env_vars_string = ' '.join(['-e {}=\"${}\"'.format(k, k) for (k, v) in env_vars.items()])\n debug_docker_java_port = '-p {p}:{p}'.format(p=Util.debug_java_port) if Util.debug_java_port else ''\n docker_cmd = self._docker_cmd()\n docker_image = Util.docker_image_for_runtime(runtime)\n rm_flag = Util.get_docker_remove_flag()\n\n if config.LAMBDA_REMOTE_DOCKER:\n cmd = (\n 'CONTAINER_ID=\"$(%s create -i'\n ' %s' # entrypoint\n ' %s' # debug_docker_java_port\n ' %s' # env\n ' %s' # network\n ' %s' # dns\n ' %s' # --rm flag\n ' %s %s' # image and command\n ')\";'\n '%s cp \"%s/.\" \"$CONTAINER_ID:/var/task\"; '\n '%s start -ai \"$CONTAINER_ID\";'\n ) % (docker_cmd, entrypoint, debug_docker_java_port,\n env_vars_string, network_str, dns_str, rm_flag,\n docker_image, command,\n docker_cmd, lambda_cwd,\n docker_cmd)\n else:\n lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)\n cmd = (\n '%s run -i'\n ' %s -v \"%s\":/var/task'\n ' %s'\n ' %s' # network\n ' %s' # dns\n ' %s' # --rm flag\n ' %s %s'\n ) % (docker_cmd, entrypoint, lambda_cwd_on_host, env_vars_string,\n network_str, dns_str, rm_flag, docker_image, command)\n return cmd\n\n\nclass LambdaExecutorLocal(LambdaExecutor):\n def _execute(self, func_arn, func_details, event, context=None, version=None):\n lambda_cwd = func_details.cwd\n environment = self._prepare_environment(func_details)\n\n # execute the Lambda function in a forked sub-process, sync result via queue\n queue = Queue()\n\n lambda_function = func_details.function(version)\n\n def do_execute():\n # now we're executing in the child process, safe to change CWD and ENV\n path_before = sys.path\n try:\n if lambda_cwd:\n os.chdir(lambda_cwd)\n sys.path = [lambda_cwd] + sys.path\n if environment:\n os.environ.update(environment)\n result = lambda_function(event, context)\n queue.put(result)\n finally:\n sys.path = path_before\n\n process = Process(target=do_execute)\n with CaptureOutput() as c:\n process.run()\n result = queue.get()\n\n # Make sure to keep the log line below, to ensure the log stream gets created\n log_output = 'START: Lambda %s started via \"local\" executor ...' % func_arn\n # TODO: Interweaving stdout/stderr currently not supported\n for stream in (c.stdout(), c.stderr()):\n if stream:\n log_output += ('\\n' if log_output else '') + stream\n\n # store logs to CloudWatch\n _store_logs(func_details, log_output)\n\n return result\n\n def execute_java_lambda(self, event, context, main_file, func_details=None):\n handler = func_details.handler\n opts = config.LAMBDA_JAVA_OPTS if config.LAMBDA_JAVA_OPTS else ''\n event_file = EVENT_FILE_PATTERN.replace('*', short_uid())\n save_file(event_file, json.dumps(json_safe(event)))\n TMP_FILES.append(event_file)\n class_name = handler.split('::')[0]\n classpath = '%s:%s:%s' % (main_file, Util.get_java_classpath(main_file), LAMBDA_EXECUTOR_JAR)\n cmd = 'java %s -cp %s %s %s %s' % (opts, classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)\n LOG.warning(cmd)\n result = self.run_lambda_executor(cmd, func_details=func_details)\n return result\n\n\nclass Util:\n debug_java_port = False\n\n @classmethod\n def get_java_opts(cls):\n opts = config.LAMBDA_JAVA_OPTS or ''\n # Replace _debug_port_ with a random free port\n if '_debug_port_' in opts:\n if not cls.debug_java_port:\n cls.debug_java_port = get_free_tcp_port()\n opts = opts.replace('_debug_port_', ('%s' % cls.debug_java_port))\n else:\n # Parse the debug port from opts\n m = re.match('.*address=(.+:)?(\\\\d+).*', opts)\n if m is not None:\n cls.debug_java_port = m.groups()[1]\n\n return opts\n\n @classmethod\n def get_host_path_for_path_in_docker(cls, path):\n return re.sub(r'^%s/(.*)$' % config.TMP_FOLDER,\n r'%s/\\1' % config.HOST_TMP_FOLDER, path)\n\n @classmethod\n def format_windows_path(cls, path):\n temp = path.replace(':', '').replace('\\\\', '/')\n if len(temp) >= 1 and temp[:1] != '/':\n temp = '/' + temp\n temp = '%s%s' % (config.WINDOWS_DOCKER_MOUNT_PREFIX, temp)\n return temp\n\n @classmethod\n def docker_image_for_runtime(cls, runtime):\n docker_tag = runtime\n docker_image = config.LAMBDA_CONTAINER_REGISTRY\n # TODO: remove prefix once execution issues are fixed with dotnetcore/python lambdas\n # See https://github.com/lambci/docker-lambda/pull/218\n lambdas_to_add_prefix = ['dotnetcore2.0', 'dotnetcore2.1', 'python2.7', 'python3.6', 'python3.7']\n if docker_image == 'lambci/lambda' and any(img in docker_tag for img in lambdas_to_add_prefix):\n docker_tag = '20191117-%s' % docker_tag\n return '\"%s:%s\"' % (docker_image, docker_tag)\n\n @classmethod\n def get_docker_remove_flag(cls):\n return '--rm' if config.LAMBDA_REMOVE_CONTAINERS else ''\n\n @classmethod\n def get_java_classpath(cls, archive):\n \"\"\"\n Return the Java classpath, using the parent folder of the\n given archive as the base folder.\n\n The result contains any *.jar files in the base folder, as\n well as any JAR files in the \"lib/*\" subfolder living\n alongside the supplied java archive (.jar or .zip).\n\n :param archive: an absolute path to a .jar or .zip Java archive\n :return: the Java classpath, relative to the base dir of \"archive\"\n \"\"\"\n entries = ['.']\n base_dir = os.path.dirname(archive)\n for pattern in ['%s/*.jar', '%s/lib/*.jar', '%s/java/lib/*.jar', '%s/*.zip']:\n for entry in glob.glob(pattern % base_dir):\n if os.path.realpath(archive) != os.path.realpath(entry):\n entries.append(os.path.relpath(entry, base_dir))\n # make sure to append the localstack-utils.jar at the end of the classpath\n # https://github.com/localstack/localstack/issues/1160\n entries.append(os.path.relpath(archive, base_dir))\n entries.append('*.jar')\n entries.append('java/lib/*.jar')\n result = ':'.join(entries)\n return result\n\n\n# --------------\n# GLOBAL STATE\n# --------------\n\nEXECUTOR_LOCAL = LambdaExecutorLocal()\nEXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()\nEXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()\nDEFAULT_EXECUTOR = EXECUTOR_CONTAINERS_SEPARATE\n# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable\nAVAILABLE_EXECUTORS = {\n 'local': EXECUTOR_LOCAL,\n 'docker': EXECUTOR_CONTAINERS_SEPARATE,\n 'docker-reuse': EXECUTOR_CONTAINERS_REUSE\n}\n", "path": "localstack/services/awslambda/lambda_executors.py"}]} |
gh_patches_debug_1502 | rasdani/github-patches | git_diff | ckan__ckan-4249 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'_Globals' has no attribute 'user' : exception when using an IAuthenticator on CKAN 2.8.0
I'm putting together a new deployment based on the new CKAN v2.8.0 release. I'm using ckanext-ldap as an authenticator, though it looks like this bug would apply to any authenticator plugin.
This exact setup worked fine on CKAN v2.7.3.
### CKAN Version if known (or site URL)
CKAN v 2.8.0
ckanext-ldap @ `ckan-upgrade-2.8.0a`
### Please describe the expected behaviour
If the IAuthenticator plugin cannot authenticate the user, it does not set `g.user`, and CKAN should run the default authenticator.
### Please describe the actual behaviour
If the IAuthenticator plugin cannot authenticate the user, it does not set `g.user`, and CKAN tries to lookup `g.user` and crashes with traceback:
```
Traceback (most recent call last):
File "/usr/lib/ckan/venv/local/lib/python2.7/site-packages/flask/app.py", line 1982, in wsgi_app
response = self.full_dispatch_request()
File "/usr/lib/ckan/venv/local/lib/python2.7/site-packages/flask/app.py", line 1614, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/lib/ckan/venv/local/lib/python2.7/site-packages/flask/app.py", line 1517, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/lib/ckan/venv/local/lib/python2.7/site-packages/flask/app.py", line 1610, in full_dispatch_request
rv = self.preprocess_request()
File "/usr/lib/ckan/venv/local/lib/python2.7/site-packages/flask/app.py", line 1831, in preprocess_request
rv = func()
File "/usr/lib/ckan/venv/src/ckan/ckan/config/middleware/flask_app.py", line 281, in ckan_before_request
identify_user()
File "/usr/lib/ckan/venv/src/ckan/ckan/views/__init__.py", line 101, in identify_user
if g.user:
File "/usr/lib/ckan/venv/local/lib/python2.7/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
File "/usr/lib/ckan/venv/local/lib/python2.7/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
File "/usr/lib/ckan/venv/src/ckan/ckan/config/middleware/flask_app.py", line 334, in __getattr__
return getattr(app_globals.app_globals, name)
AttributeError: '_Globals' object has no attribute 'user'
```
### What steps can be taken to reproduce the issue?
* Install CKAN v2.8.0 as per documented instructions
* Install a plugin that implements IAuthenticator (In this case I am using the ckanext-ldap plugin in the 2.8.0 branch), that may not be able to authenticate the user, so may not set `g.user`.
* Run CKAN normally
* Attempt to load any page.
What is odd is that this section of code at `identify_user` in `ckan/views/__init__.py` has not changed between v2.7.3 and v2.8.0. And the way the authenticator plugin handles/sets `g.user` has not changed either. I'm guessing this is caused by a change in the way the _Globals object behaves when it cannot find an attribute.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckan/views/__init__.py`
Content:
```
1 # encoding: utf-8
2
3 from paste.deploy.converters import asbool
4 from six import text_type
5
6 import ckan.model as model
7 from ckan.common import g, request, config, session
8 from ckan.lib.helpers import redirect_to as redirect
9 import ckan.plugins as p
10
11 import logging
12 log = logging.getLogger(__name__)
13
14 APIKEY_HEADER_NAME_KEY = u'apikey_header_name'
15 APIKEY_HEADER_NAME_DEFAULT = u'X-CKAN-API-Key'
16
17
18 def check_session_cookie(response):
19 u'''
20 The cookies for auth (auth_tkt) and session (ckan) are separate. This
21 checks whether a user is logged in, and determines the validity of the
22 session cookie, removing it if necessary.
23 '''
24 for cookie in request.cookies:
25 # Remove the ckan session cookie if logged out.
26 if cookie == u'ckan' and not getattr(g, u'user', None):
27 # Check session for valid data (including flash messages)
28 is_valid_cookie_data = False
29 for key, value in session.items():
30 if not key.startswith(u'_') and value:
31 is_valid_cookie_data = True
32 break
33 if not is_valid_cookie_data:
34 if session.id:
35 log.debug(u'No valid session data - deleting session')
36 log.debug(u'Session: %r', session.items())
37 session.delete()
38 else:
39 log.debug(u'No session id - deleting session cookie')
40 response.delete_cookie(cookie)
41 # Remove auth_tkt repoze.who cookie if user not logged in.
42 elif cookie == u'auth_tkt' and not session.id:
43 response.delete_cookie(cookie)
44
45 return response
46
47
48 def set_cors_headers_for_response(response):
49 u'''
50 Set up Access Control Allow headers if either origin_allow_all is True, or
51 the request Origin is in the origin_whitelist.
52 '''
53 if config.get(u'ckan.cors.origin_allow_all') \
54 and request.headers.get(u'Origin'):
55
56 cors_origin_allowed = None
57 if asbool(config.get(u'ckan.cors.origin_allow_all')):
58 cors_origin_allowed = b'*'
59 elif config.get(u'ckan.cors.origin_whitelist') and \
60 request.headers.get(u'Origin') \
61 in config[u'ckan.cors.origin_whitelist'].split(u' '):
62 # set var to the origin to allow it.
63 cors_origin_allowed = request.headers.get(u'Origin')
64
65 if cors_origin_allowed is not None:
66 response.headers[b'Access-Control-Allow-Origin'] = \
67 cors_origin_allowed
68 response.headers[b'Access-Control-Allow-Methods'] = \
69 b'POST, PUT, GET, DELETE, OPTIONS'
70 response.headers[b'Access-Control-Allow-Headers'] = \
71 b'X-CKAN-API-KEY, Authorization, Content-Type'
72
73 return response
74
75
76 def identify_user():
77 u'''Try to identify the user
78 If the user is identified then:
79 g.user = user name (unicode)
80 g.userobj = user object
81 g.author = user name
82 otherwise:
83 g.user = None
84 g.userobj = None
85 g.author = user's IP address (unicode)
86
87 Note: Remember, when running under Pylons, `g` is the Pylons `c` object
88 '''
89 # see if it was proxied first
90 g.remote_addr = request.environ.get(u'HTTP_X_FORWARDED_FOR', u'')
91 if not g.remote_addr:
92 g.remote_addr = request.environ.get(u'REMOTE_ADDR',
93 u'Unknown IP Address')
94
95 # Authentication plugins get a chance to run here break as soon as a user
96 # is identified.
97 authenticators = p.PluginImplementations(p.IAuthenticator)
98 if authenticators:
99 for item in authenticators:
100 item.identify()
101 if g.user:
102 break
103
104 # We haven't identified the user so try the default methods
105 if not getattr(g, u'user', None):
106 _identify_user_default()
107
108 # If we have a user but not the userobj let's get the userobj. This means
109 # that IAuthenticator extensions do not need to access the user model
110 # directly.
111 if g.user and not getattr(g, u'userobj', None):
112 g.userobj = model.User.by_name(g.user)
113
114 # general settings
115 if g.user:
116 g.author = g.user
117 else:
118 g.author = g.remote_addr
119 g.author = text_type(g.author)
120
121
122 def _identify_user_default():
123 u'''
124 Identifies the user using two methods:
125 a) If they logged into the web interface then repoze.who will
126 set REMOTE_USER.
127 b) For API calls they may set a header with an API key.
128 '''
129
130 # environ['REMOTE_USER'] is set by repoze.who if it authenticates a
131 # user's cookie. But repoze.who doesn't check the user (still) exists
132 # in our database - we need to do that here. (Another way would be
133 # with an userid_checker, but that would mean another db access.
134 # See: http://docs.repoze.org/who/1.0/narr.html#module-repoze.who\
135 # .plugins.sql )
136 g.user = request.environ.get(u'REMOTE_USER', u'')
137 if g.user:
138 g.user = g.user.decode(u'utf8')
139 g.userobj = model.User.by_name(g.user)
140
141 if g.userobj is None or not g.userobj.is_active():
142
143 # This occurs when a user that was still logged in is deleted, or
144 # when you are logged in, clean db and then restart (or when you
145 # change your username). There is no user object, so even though
146 # repoze thinks you are logged in and your cookie has
147 # ckan_display_name, we need to force user to logout and login
148 # again to get the User object.
149
150 ev = request.environ
151 if u'repoze.who.plugins' in ev:
152 pth = getattr(ev[u'repoze.who.plugins'][u'friendlyform'],
153 u'logout_handler_path')
154 redirect(pth)
155 else:
156 g.userobj = _get_user_for_apikey()
157 if g.userobj is not None:
158 g.user = g.userobj.name
159
160
161 def _get_user_for_apikey():
162 apikey_header_name = config.get(APIKEY_HEADER_NAME_KEY,
163 APIKEY_HEADER_NAME_DEFAULT)
164 apikey = request.headers.get(apikey_header_name, u'')
165 if not apikey:
166 apikey = request.environ.get(apikey_header_name, u'')
167 if not apikey:
168 # For misunderstanding old documentation (now fixed).
169 apikey = request.environ.get(u'HTTP_AUTHORIZATION', u'')
170 if not apikey:
171 apikey = request.environ.get(u'Authorization', u'')
172 # Forget HTTP Auth credentials (they have spaces).
173 if u' ' in apikey:
174 apikey = u''
175 if not apikey:
176 return None
177 apikey = apikey.decode(u'utf8', u'ignore')
178 log.debug(u'Received API Key: %s' % apikey)
179 query = model.Session.query(model.User)
180 user = query.filter_by(apikey=apikey).first()
181 return user
182
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ckan/views/__init__.py b/ckan/views/__init__.py
--- a/ckan/views/__init__.py
+++ b/ckan/views/__init__.py
@@ -98,8 +98,11 @@
if authenticators:
for item in authenticators:
item.identify()
- if g.user:
- break
+ try:
+ if g.user:
+ break
+ except AttributeError:
+ continue
# We haven't identified the user so try the default methods
if not getattr(g, u'user', None):
| {"golden_diff": "diff --git a/ckan/views/__init__.py b/ckan/views/__init__.py\n--- a/ckan/views/__init__.py\n+++ b/ckan/views/__init__.py\n@@ -98,8 +98,11 @@\n if authenticators:\n for item in authenticators:\n item.identify()\n- if g.user:\n- break\n+ try:\n+ if g.user:\n+ break\n+ except AttributeError:\n+ continue\n \n # We haven't identified the user so try the default methods\n if not getattr(g, u'user', None):\n", "issue": "'_Globals' has no attribute 'user' : exception when using an IAuthenticator on CKAN 2.8.0\nI'm putting together a new deployment based on the new CKAN v2.8.0 release. I'm using ckanext-ldap as an authenticator, though it looks like this bug would apply to any authenticator plugin.\r\nThis exact setup worked fine on CKAN v2.7.3.\r\n\r\n### CKAN Version if known (or site URL)\r\nCKAN v 2.8.0\r\nckanext-ldap @ `ckan-upgrade-2.8.0a`\r\n\r\n### Please describe the expected behaviour\r\nIf the IAuthenticator plugin cannot authenticate the user, it does not set `g.user`, and CKAN should run the default authenticator.\r\n\r\n### Please describe the actual behaviour\r\nIf the IAuthenticator plugin cannot authenticate the user, it does not set `g.user`, and CKAN tries to lookup `g.user` and crashes with traceback:\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/lib/ckan/venv/local/lib/python2.7/site-packages/flask/app.py\", line 1982, in wsgi_app\r\n response = self.full_dispatch_request()\r\n File \"/usr/lib/ckan/venv/local/lib/python2.7/site-packages/flask/app.py\", line 1614, in full_dispatch_request\r\n rv = self.handle_user_exception(e)\r\n File \"/usr/lib/ckan/venv/local/lib/python2.7/site-packages/flask/app.py\", line 1517, in handle_user_exception\r\n reraise(exc_type, exc_value, tb)\r\n File \"/usr/lib/ckan/venv/local/lib/python2.7/site-packages/flask/app.py\", line 1610, in full_dispatch_request\r\n rv = self.preprocess_request()\r\n File \"/usr/lib/ckan/venv/local/lib/python2.7/site-packages/flask/app.py\", line 1831, in preprocess_request\r\n rv = func()\r\n File \"/usr/lib/ckan/venv/src/ckan/ckan/config/middleware/flask_app.py\", line 281, in ckan_before_request\r\n identify_user()\r\n File \"/usr/lib/ckan/venv/src/ckan/ckan/views/__init__.py\", line 101, in identify_user\r\n if g.user:\r\n File \"/usr/lib/ckan/venv/local/lib/python2.7/site-packages/werkzeug/local.py\", line 347, in __getattr__\r\n return getattr(self._get_current_object(), name)\r\n File \"/usr/lib/ckan/venv/local/lib/python2.7/site-packages/werkzeug/local.py\", line 347, in __getattr__\r\n return getattr(self._get_current_object(), name)\r\n File \"/usr/lib/ckan/venv/src/ckan/ckan/config/middleware/flask_app.py\", line 334, in __getattr__\r\n return getattr(app_globals.app_globals, name)\r\nAttributeError: '_Globals' object has no attribute 'user'\r\n```\r\n### What steps can be taken to reproduce the issue? \r\n* Install CKAN v2.8.0 as per documented instructions\r\n* Install a plugin that implements IAuthenticator (In this case I am using the ckanext-ldap plugin in the 2.8.0 branch), that may not be able to authenticate the user, so may not set `g.user`.\r\n* Run CKAN normally\r\n* Attempt to load any page. \r\n\r\nWhat is odd is that this section of code at `identify_user` in `ckan/views/__init__.py` has not changed between v2.7.3 and v2.8.0. And the way the authenticator plugin handles/sets `g.user` has not changed either. I'm guessing this is caused by a change in the way the _Globals object behaves when it cannot find an attribute.\n", "before_files": [{"content": "# encoding: utf-8\n\nfrom paste.deploy.converters import asbool\nfrom six import text_type\n\nimport ckan.model as model\nfrom ckan.common import g, request, config, session\nfrom ckan.lib.helpers import redirect_to as redirect\nimport ckan.plugins as p\n\nimport logging\nlog = logging.getLogger(__name__)\n\nAPIKEY_HEADER_NAME_KEY = u'apikey_header_name'\nAPIKEY_HEADER_NAME_DEFAULT = u'X-CKAN-API-Key'\n\n\ndef check_session_cookie(response):\n u'''\n The cookies for auth (auth_tkt) and session (ckan) are separate. This\n checks whether a user is logged in, and determines the validity of the\n session cookie, removing it if necessary.\n '''\n for cookie in request.cookies:\n # Remove the ckan session cookie if logged out.\n if cookie == u'ckan' and not getattr(g, u'user', None):\n # Check session for valid data (including flash messages)\n is_valid_cookie_data = False\n for key, value in session.items():\n if not key.startswith(u'_') and value:\n is_valid_cookie_data = True\n break\n if not is_valid_cookie_data:\n if session.id:\n log.debug(u'No valid session data - deleting session')\n log.debug(u'Session: %r', session.items())\n session.delete()\n else:\n log.debug(u'No session id - deleting session cookie')\n response.delete_cookie(cookie)\n # Remove auth_tkt repoze.who cookie if user not logged in.\n elif cookie == u'auth_tkt' and not session.id:\n response.delete_cookie(cookie)\n\n return response\n\n\ndef set_cors_headers_for_response(response):\n u'''\n Set up Access Control Allow headers if either origin_allow_all is True, or\n the request Origin is in the origin_whitelist.\n '''\n if config.get(u'ckan.cors.origin_allow_all') \\\n and request.headers.get(u'Origin'):\n\n cors_origin_allowed = None\n if asbool(config.get(u'ckan.cors.origin_allow_all')):\n cors_origin_allowed = b'*'\n elif config.get(u'ckan.cors.origin_whitelist') and \\\n request.headers.get(u'Origin') \\\n in config[u'ckan.cors.origin_whitelist'].split(u' '):\n # set var to the origin to allow it.\n cors_origin_allowed = request.headers.get(u'Origin')\n\n if cors_origin_allowed is not None:\n response.headers[b'Access-Control-Allow-Origin'] = \\\n cors_origin_allowed\n response.headers[b'Access-Control-Allow-Methods'] = \\\n b'POST, PUT, GET, DELETE, OPTIONS'\n response.headers[b'Access-Control-Allow-Headers'] = \\\n b'X-CKAN-API-KEY, Authorization, Content-Type'\n\n return response\n\n\ndef identify_user():\n u'''Try to identify the user\n If the user is identified then:\n g.user = user name (unicode)\n g.userobj = user object\n g.author = user name\n otherwise:\n g.user = None\n g.userobj = None\n g.author = user's IP address (unicode)\n\n Note: Remember, when running under Pylons, `g` is the Pylons `c` object\n '''\n # see if it was proxied first\n g.remote_addr = request.environ.get(u'HTTP_X_FORWARDED_FOR', u'')\n if not g.remote_addr:\n g.remote_addr = request.environ.get(u'REMOTE_ADDR',\n u'Unknown IP Address')\n\n # Authentication plugins get a chance to run here break as soon as a user\n # is identified.\n authenticators = p.PluginImplementations(p.IAuthenticator)\n if authenticators:\n for item in authenticators:\n item.identify()\n if g.user:\n break\n\n # We haven't identified the user so try the default methods\n if not getattr(g, u'user', None):\n _identify_user_default()\n\n # If we have a user but not the userobj let's get the userobj. This means\n # that IAuthenticator extensions do not need to access the user model\n # directly.\n if g.user and not getattr(g, u'userobj', None):\n g.userobj = model.User.by_name(g.user)\n\n # general settings\n if g.user:\n g.author = g.user\n else:\n g.author = g.remote_addr\n g.author = text_type(g.author)\n\n\ndef _identify_user_default():\n u'''\n Identifies the user using two methods:\n a) If they logged into the web interface then repoze.who will\n set REMOTE_USER.\n b) For API calls they may set a header with an API key.\n '''\n\n # environ['REMOTE_USER'] is set by repoze.who if it authenticates a\n # user's cookie. But repoze.who doesn't check the user (still) exists\n # in our database - we need to do that here. (Another way would be\n # with an userid_checker, but that would mean another db access.\n # See: http://docs.repoze.org/who/1.0/narr.html#module-repoze.who\\\n # .plugins.sql )\n g.user = request.environ.get(u'REMOTE_USER', u'')\n if g.user:\n g.user = g.user.decode(u'utf8')\n g.userobj = model.User.by_name(g.user)\n\n if g.userobj is None or not g.userobj.is_active():\n\n # This occurs when a user that was still logged in is deleted, or\n # when you are logged in, clean db and then restart (or when you\n # change your username). There is no user object, so even though\n # repoze thinks you are logged in and your cookie has\n # ckan_display_name, we need to force user to logout and login\n # again to get the User object.\n\n ev = request.environ\n if u'repoze.who.plugins' in ev:\n pth = getattr(ev[u'repoze.who.plugins'][u'friendlyform'],\n u'logout_handler_path')\n redirect(pth)\n else:\n g.userobj = _get_user_for_apikey()\n if g.userobj is not None:\n g.user = g.userobj.name\n\n\ndef _get_user_for_apikey():\n apikey_header_name = config.get(APIKEY_HEADER_NAME_KEY,\n APIKEY_HEADER_NAME_DEFAULT)\n apikey = request.headers.get(apikey_header_name, u'')\n if not apikey:\n apikey = request.environ.get(apikey_header_name, u'')\n if not apikey:\n # For misunderstanding old documentation (now fixed).\n apikey = request.environ.get(u'HTTP_AUTHORIZATION', u'')\n if not apikey:\n apikey = request.environ.get(u'Authorization', u'')\n # Forget HTTP Auth credentials (they have spaces).\n if u' ' in apikey:\n apikey = u''\n if not apikey:\n return None\n apikey = apikey.decode(u'utf8', u'ignore')\n log.debug(u'Received API Key: %s' % apikey)\n query = model.Session.query(model.User)\n user = query.filter_by(apikey=apikey).first()\n return user\n", "path": "ckan/views/__init__.py"}], "after_files": [{"content": "# encoding: utf-8\n\nfrom paste.deploy.converters import asbool\nfrom six import text_type\n\nimport ckan.model as model\nfrom ckan.common import g, request, config, session\nfrom ckan.lib.helpers import redirect_to as redirect\nimport ckan.plugins as p\n\nimport logging\nlog = logging.getLogger(__name__)\n\nAPIKEY_HEADER_NAME_KEY = u'apikey_header_name'\nAPIKEY_HEADER_NAME_DEFAULT = u'X-CKAN-API-Key'\n\n\ndef check_session_cookie(response):\n u'''\n The cookies for auth (auth_tkt) and session (ckan) are separate. This\n checks whether a user is logged in, and determines the validity of the\n session cookie, removing it if necessary.\n '''\n for cookie in request.cookies:\n # Remove the ckan session cookie if logged out.\n if cookie == u'ckan' and not getattr(g, u'user', None):\n # Check session for valid data (including flash messages)\n is_valid_cookie_data = False\n for key, value in session.items():\n if not key.startswith(u'_') and value:\n is_valid_cookie_data = True\n break\n if not is_valid_cookie_data:\n if session.id:\n log.debug(u'No valid session data - deleting session')\n log.debug(u'Session: %r', session.items())\n session.delete()\n else:\n log.debug(u'No session id - deleting session cookie')\n response.delete_cookie(cookie)\n # Remove auth_tkt repoze.who cookie if user not logged in.\n elif cookie == u'auth_tkt' and not session.id:\n response.delete_cookie(cookie)\n\n return response\n\n\ndef set_cors_headers_for_response(response):\n u'''\n Set up Access Control Allow headers if either origin_allow_all is True, or\n the request Origin is in the origin_whitelist.\n '''\n if config.get(u'ckan.cors.origin_allow_all') \\\n and request.headers.get(u'Origin'):\n\n cors_origin_allowed = None\n if asbool(config.get(u'ckan.cors.origin_allow_all')):\n cors_origin_allowed = b'*'\n elif config.get(u'ckan.cors.origin_whitelist') and \\\n request.headers.get(u'Origin') \\\n in config[u'ckan.cors.origin_whitelist'].split(u' '):\n # set var to the origin to allow it.\n cors_origin_allowed = request.headers.get(u'Origin')\n\n if cors_origin_allowed is not None:\n response.headers[b'Access-Control-Allow-Origin'] = \\\n cors_origin_allowed\n response.headers[b'Access-Control-Allow-Methods'] = \\\n b'POST, PUT, GET, DELETE, OPTIONS'\n response.headers[b'Access-Control-Allow-Headers'] = \\\n b'X-CKAN-API-KEY, Authorization, Content-Type'\n\n return response\n\n\ndef identify_user():\n u'''Try to identify the user\n If the user is identified then:\n g.user = user name (unicode)\n g.userobj = user object\n g.author = user name\n otherwise:\n g.user = None\n g.userobj = None\n g.author = user's IP address (unicode)\n\n Note: Remember, when running under Pylons, `g` is the Pylons `c` object\n '''\n # see if it was proxied first\n g.remote_addr = request.environ.get(u'HTTP_X_FORWARDED_FOR', u'')\n if not g.remote_addr:\n g.remote_addr = request.environ.get(u'REMOTE_ADDR',\n u'Unknown IP Address')\n\n # Authentication plugins get a chance to run here break as soon as a user\n # is identified.\n authenticators = p.PluginImplementations(p.IAuthenticator)\n if authenticators:\n for item in authenticators:\n item.identify()\n try:\n if g.user:\n break\n except AttributeError:\n continue\n\n # We haven't identified the user so try the default methods\n if not getattr(g, u'user', None):\n _identify_user_default()\n\n # If we have a user but not the userobj let's get the userobj. This means\n # that IAuthenticator extensions do not need to access the user model\n # directly.\n if g.user and not getattr(g, u'userobj', None):\n g.userobj = model.User.by_name(g.user)\n\n # general settings\n if g.user:\n g.author = g.user\n else:\n g.author = g.remote_addr\n g.author = text_type(g.author)\n\n\ndef _identify_user_default():\n u'''\n Identifies the user using two methods:\n a) If they logged into the web interface then repoze.who will\n set REMOTE_USER.\n b) For API calls they may set a header with an API key.\n '''\n\n # environ['REMOTE_USER'] is set by repoze.who if it authenticates a\n # user's cookie. But repoze.who doesn't check the user (still) exists\n # in our database - we need to do that here. (Another way would be\n # with an userid_checker, but that would mean another db access.\n # See: http://docs.repoze.org/who/1.0/narr.html#module-repoze.who\\\n # .plugins.sql )\n g.user = request.environ.get(u'REMOTE_USER', u'')\n if g.user:\n g.user = g.user.decode(u'utf8')\n g.userobj = model.User.by_name(g.user)\n\n if g.userobj is None or not g.userobj.is_active():\n\n # This occurs when a user that was still logged in is deleted, or\n # when you are logged in, clean db and then restart (or when you\n # change your username). There is no user object, so even though\n # repoze thinks you are logged in and your cookie has\n # ckan_display_name, we need to force user to logout and login\n # again to get the User object.\n\n ev = request.environ\n if u'repoze.who.plugins' in ev:\n pth = getattr(ev[u'repoze.who.plugins'][u'friendlyform'],\n u'logout_handler_path')\n redirect(pth)\n else:\n g.userobj = _get_user_for_apikey()\n if g.userobj is not None:\n g.user = g.userobj.name\n\n\ndef _get_user_for_apikey():\n apikey_header_name = config.get(APIKEY_HEADER_NAME_KEY,\n APIKEY_HEADER_NAME_DEFAULT)\n apikey = request.headers.get(apikey_header_name, u'')\n if not apikey:\n apikey = request.environ.get(apikey_header_name, u'')\n if not apikey:\n # For misunderstanding old documentation (now fixed).\n apikey = request.environ.get(u'HTTP_AUTHORIZATION', u'')\n if not apikey:\n apikey = request.environ.get(u'Authorization', u'')\n # Forget HTTP Auth credentials (they have spaces).\n if u' ' in apikey:\n apikey = u''\n if not apikey:\n return None\n apikey = apikey.decode(u'utf8', u'ignore')\n log.debug(u'Received API Key: %s' % apikey)\n query = model.Session.query(model.User)\n user = query.filter_by(apikey=apikey).first()\n return user\n", "path": "ckan/views/__init__.py"}]} |
gh_patches_debug_1503 | rasdani/github-patches | git_diff | aio-libs__aiohttp-4040 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Client-sent Host header can include port as "None"
## Long story short
<!-- Please describe your problem and why the fix is important. -->
When the client forms the Host header, it is possible for it to include the port as "None".
This came up for me when using `aiodocker` to try to connect to the Docker API container attach websocket endpoint, which used a URL of the form "unix://localhost/..." and let to a "Host" header of "localhost:None", triggering a 400 error from docker with a message like:
parse ws://localhost:None/v1.35/containers/CONTAINER_ID/attach/ws?stdin=1&stdout=0&stderr=0&stream=1: invalid port ":None" after host
## Expected behaviour
<!-- What is the behaviour you expect? -->
At least, not to send "None" as a port number for the Host header.
According to [RFC 7230 Section 5.4](https://tools.ietf.org/html/rfc7230#section-5.4):
> If the authority component is missing or undefined for the target URI, then a client MUST send a Host header field with an empty field-value.
So perhaps it should be possible for the `aiohttp` client to get and recognize such a URI and send a blank Host header field.
At the moment though, I think, it doesn't seem possible to send such an "authority"-less URL to `ws_connect` nor does there currently exist a conditional path for the Host header construction to make a blank Host header field: [client_reqrep.py lines 314-320](https://github.com/aio-libs/aiohttp/blob/21b062199ff8da1a8d48b262f3d75fb616cc275f/aiohttp/client_reqrep.py#L314-L320)
## Actual behaviour
<!-- What's actually happening? -->
The Host header includes the string "None" as the port when making requests whose URL registers as not `is_default_port()` but has no port defined, e.g. `unix://localhost/path/to/endpoint`.
## Steps to reproduce
<!-- Please describe steps to reproduce the issue.
If you have a script that does that please include it here within
markdown code markup -->
This occurred for me while using the `aiodocker` package to attach to `stdin` of a running container.
A sort of silly example server/client that displays the behavior is as follows:
```python
from aiohttp import web
from asyncio import sleep, create_task
import aiohttp
SOCK_PATH = '/tmp/example.sock'
async def hello(request):
print('Host: '+request.headers['Host'])
return web.Response()
async def make_request():
await sleep(1) # Let the server become available.
conn = aiohttp.UnixConnector(path=SOCK_PATH)
async with aiohttp.ClientSession(connector=conn) as session:
async with session.get('unix://localhost/'):
pass # Produces a Host of "localhost:None"
async with session.get('http://localhost/'):
pass # Produces a Host of "localhost"
async def schedule_request(_):
create_task(make_request())
app = web.Application()
app.add_routes([web.get('/', hello)])
app.on_startup.append(schedule_request)
web.run_app(app, path=SOCK_PATH)
```
Output:
```
======== Running on http://unix:/tmp/example.sock: ========
(Press CTRL+C to quit)
Host: localhost:None
Host: localhost
```
## Your environment
<!-- Describe the environment you have that lead to your issue.
This includes aiohttp version, OS, proxy server and other bits that
are related to your case.
IMPORTANT: aiohttp is both server framework and client library.
For getting rid of confusing please put 'server', 'client' or 'both'
word here.
-->
* Debian 9
* Python 3.7.4
* aiohttp 3.5.4
* aiodocker 0.14.0
* Docker 19.03.2-ce
BTW the specific thing that I think make this appear where it didn't before was a security update to Go that make URL parsing more strict: https://github.com/golang/go/issues?q=milestone%3AGo1.12.8
Client-sent Host header can include port as "None"
## Long story short
<!-- Please describe your problem and why the fix is important. -->
When the client forms the Host header, it is possible for it to include the port as "None".
This came up for me when using `aiodocker` to try to connect to the Docker API container attach websocket endpoint, which used a URL of the form "unix://localhost/..." and let to a "Host" header of "localhost:None", triggering a 400 error from docker with a message like:
parse ws://localhost:None/v1.35/containers/CONTAINER_ID/attach/ws?stdin=1&stdout=0&stderr=0&stream=1: invalid port ":None" after host
## Expected behaviour
<!-- What is the behaviour you expect? -->
At least, not to send "None" as a port number for the Host header.
According to [RFC 7230 Section 5.4](https://tools.ietf.org/html/rfc7230#section-5.4):
> If the authority component is missing or undefined for the target URI, then a client MUST send a Host header field with an empty field-value.
So perhaps it should be possible for the `aiohttp` client to get and recognize such a URI and send a blank Host header field.
At the moment though, I think, it doesn't seem possible to send such an "authority"-less URL to `ws_connect` nor does there currently exist a conditional path for the Host header construction to make a blank Host header field: [client_reqrep.py lines 314-320](https://github.com/aio-libs/aiohttp/blob/21b062199ff8da1a8d48b262f3d75fb616cc275f/aiohttp/client_reqrep.py#L314-L320)
## Actual behaviour
<!-- What's actually happening? -->
The Host header includes the string "None" as the port when making requests whose URL registers as not `is_default_port()` but has no port defined, e.g. `unix://localhost/path/to/endpoint`.
## Steps to reproduce
<!-- Please describe steps to reproduce the issue.
If you have a script that does that please include it here within
markdown code markup -->
This occurred for me while using the `aiodocker` package to attach to `stdin` of a running container.
A sort of silly example server/client that displays the behavior is as follows:
```python
from aiohttp import web
from asyncio import sleep, create_task
import aiohttp
SOCK_PATH = '/tmp/example.sock'
async def hello(request):
print('Host: '+request.headers['Host'])
return web.Response()
async def make_request():
await sleep(1) # Let the server become available.
conn = aiohttp.UnixConnector(path=SOCK_PATH)
async with aiohttp.ClientSession(connector=conn) as session:
async with session.get('unix://localhost/'):
pass # Produces a Host of "localhost:None"
async with session.get('http://localhost/'):
pass # Produces a Host of "localhost"
async def schedule_request(_):
create_task(make_request())
app = web.Application()
app.add_routes([web.get('/', hello)])
app.on_startup.append(schedule_request)
web.run_app(app, path=SOCK_PATH)
```
Output:
```
======== Running on http://unix:/tmp/example.sock: ========
(Press CTRL+C to quit)
Host: localhost:None
Host: localhost
```
## Your environment
<!-- Describe the environment you have that lead to your issue.
This includes aiohttp version, OS, proxy server and other bits that
are related to your case.
IMPORTANT: aiohttp is both server framework and client library.
For getting rid of confusing please put 'server', 'client' or 'both'
word here.
-->
* Debian 9
* Python 3.7.4
* aiohttp 3.5.4
* aiodocker 0.14.0
* Docker 19.03.2-ce
BTW the specific thing that I think make this appear where it didn't before was a security update to Go that make URL parsing more strict: https://github.com/golang/go/issues?q=milestone%3AGo1.12.8
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `aiohttp/client_reqrep.py`
Content:
```
1 import asyncio
2 import codecs
3 import io
4 import re
5 import sys
6 import traceback
7 import warnings
8 from hashlib import md5, sha1, sha256
9 from http.cookies import CookieError, Morsel, SimpleCookie
10 from types import MappingProxyType, TracebackType
11 from typing import ( # noqa
12 TYPE_CHECKING,
13 Any,
14 Dict,
15 Iterable,
16 List,
17 Mapping,
18 Optional,
19 Tuple,
20 Type,
21 Union,
22 cast,
23 )
24
25 import attr
26 from multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy
27 from yarl import URL
28
29 from . import hdrs, helpers, http, multipart, payload
30 from .abc import AbstractStreamWriter
31 from .client_exceptions import (
32 ClientConnectionError,
33 ClientOSError,
34 ClientResponseError,
35 ContentTypeError,
36 InvalidURL,
37 ServerFingerprintMismatch,
38 )
39 from .formdata import FormData
40 from .helpers import ( # noqa
41 PY_36,
42 BaseTimerContext,
43 BasicAuth,
44 HeadersMixin,
45 TimerNoop,
46 is_expected_content_type,
47 noop,
48 reify,
49 set_result,
50 )
51 from .http import SERVER_SOFTWARE, HttpVersion10, HttpVersion11, StreamWriter
52 from .log import client_logger
53 from .streams import StreamReader # noqa
54 from .typedefs import (
55 DEFAULT_JSON_DECODER,
56 JSONDecoder,
57 LooseCookies,
58 LooseHeaders,
59 RawHeaders,
60 )
61
62 try:
63 import ssl
64 from ssl import SSLContext
65 except ImportError: # pragma: no cover
66 ssl = None # type: ignore
67 SSLContext = object # type: ignore
68
69 try:
70 import cchardet as chardet
71 except ImportError: # pragma: no cover
72 import chardet
73
74
75 __all__ = ('ClientRequest', 'ClientResponse', 'RequestInfo', 'Fingerprint')
76
77
78 if TYPE_CHECKING: # pragma: no cover
79 from .client import ClientSession # noqa
80 from .connector import Connection # noqa
81 from .tracing import Trace # noqa
82
83
84 @attr.s(frozen=True, slots=True)
85 class ContentDisposition:
86 type = attr.ib(type=str) # type: Optional[str]
87 parameters = attr.ib(type=MappingProxyType) # type: MappingProxyType[str, str] # noqa
88 filename = attr.ib(type=str) # type: Optional[str]
89
90
91 @attr.s(frozen=True, slots=True)
92 class RequestInfo:
93 url = attr.ib(type=URL)
94 method = attr.ib(type=str)
95 headers = attr.ib(type=CIMultiDictProxy) # type: CIMultiDictProxy[str]
96 real_url = attr.ib(type=URL)
97
98 @real_url.default
99 def real_url_default(self) -> URL:
100 return self.url
101
102
103 class Fingerprint:
104 HASHFUNC_BY_DIGESTLEN = {
105 16: md5,
106 20: sha1,
107 32: sha256,
108 }
109
110 def __init__(self, fingerprint: bytes) -> None:
111 digestlen = len(fingerprint)
112 hashfunc = self.HASHFUNC_BY_DIGESTLEN.get(digestlen)
113 if not hashfunc:
114 raise ValueError('fingerprint has invalid length')
115 elif hashfunc is md5 or hashfunc is sha1:
116 raise ValueError('md5 and sha1 are insecure and '
117 'not supported. Use sha256.')
118 self._hashfunc = hashfunc
119 self._fingerprint = fingerprint
120
121 @property
122 def fingerprint(self) -> bytes:
123 return self._fingerprint
124
125 def check(self, transport: asyncio.Transport) -> None:
126 if not transport.get_extra_info('sslcontext'):
127 return
128 sslobj = transport.get_extra_info('ssl_object')
129 cert = sslobj.getpeercert(binary_form=True)
130 got = self._hashfunc(cert).digest()
131 if got != self._fingerprint:
132 host, port, *_ = transport.get_extra_info('peername')
133 raise ServerFingerprintMismatch(self._fingerprint,
134 got, host, port)
135
136
137 if ssl is not None:
138 SSL_ALLOWED_TYPES = (ssl.SSLContext, bool, Fingerprint, type(None))
139 else: # pragma: no cover
140 SSL_ALLOWED_TYPES = type(None)
141
142
143 @attr.s(slots=True, frozen=True)
144 class ConnectionKey:
145 # the key should contain an information about used proxy / TLS
146 # to prevent reusing wrong connections from a pool
147 host = attr.ib(type=str)
148 port = attr.ib(type=int) # type: Optional[int]
149 is_ssl = attr.ib(type=bool)
150 ssl = attr.ib() # type: Union[SSLContext, None, bool, Fingerprint]
151 proxy = attr.ib() # type: Optional[URL]
152 proxy_auth = attr.ib() # type: Optional[BasicAuth]
153 proxy_headers_hash = attr.ib(type=int) # type: Optional[int] # noqa # hash(CIMultiDict)
154
155
156 class ClientRequest:
157 GET_METHODS = {
158 hdrs.METH_GET,
159 hdrs.METH_HEAD,
160 hdrs.METH_OPTIONS,
161 hdrs.METH_TRACE,
162 }
163 POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT}
164 ALL_METHODS = GET_METHODS.union(POST_METHODS).union({hdrs.METH_DELETE})
165
166 DEFAULT_HEADERS = {
167 hdrs.ACCEPT: '*/*',
168 hdrs.ACCEPT_ENCODING: 'gzip, deflate',
169 }
170
171 body = b''
172 auth = None
173 response = None
174 response_class = None
175
176 _writer = None # async task for streaming data
177 _continue = None # waiter future for '100 Continue' response
178
179 # N.B.
180 # Adding __del__ method with self._writer closing doesn't make sense
181 # because _writer is instance method, thus it keeps a reference to self.
182 # Until writer has finished finalizer will not be called.
183
184 def __init__(self, method: str, url: URL, *,
185 params: Optional[Mapping[str, str]]=None,
186 headers: Optional[LooseHeaders]=None,
187 skip_auto_headers: Iterable[str]=frozenset(),
188 data: Any=None,
189 cookies: Optional[LooseCookies]=None,
190 auth: Optional[BasicAuth]=None,
191 version: http.HttpVersion=http.HttpVersion11,
192 compress: Optional[str]=None,
193 chunked: Optional[bool]=None,
194 expect100: bool=False,
195 loop: asyncio.AbstractEventLoop,
196 response_class: Optional[Type['ClientResponse']]=None,
197 proxy: Optional[URL]=None,
198 proxy_auth: Optional[BasicAuth]=None,
199 timer: Optional[BaseTimerContext]=None,
200 session: Optional['ClientSession']=None,
201 ssl: Union[SSLContext, bool, Fingerprint, None]=None,
202 proxy_headers: Optional[LooseHeaders]=None,
203 traces: Optional[List['Trace']]=None):
204
205 assert isinstance(url, URL), url
206 assert isinstance(proxy, (URL, type(None))), proxy
207 # FIXME: session is None in tests only, need to fix tests
208 # assert session is not None
209 self._session = cast('ClientSession', session)
210 if params:
211 q = MultiDict(url.query)
212 url2 = url.with_query(params)
213 q.extend(url2.query)
214 url = url.with_query(q)
215 self.original_url = url
216 self.url = url.with_fragment(None)
217 self.method = method.upper()
218 self.chunked = chunked
219 self.compress = compress
220 self.loop = loop
221 self.length = None
222 if response_class is None:
223 real_response_class = ClientResponse
224 else:
225 real_response_class = response_class
226 self.response_class = real_response_class # type: Type[ClientResponse]
227 self._timer = timer if timer is not None else TimerNoop()
228 self._ssl = ssl
229
230 if loop.get_debug():
231 self._source_traceback = traceback.extract_stack(sys._getframe(1))
232
233 self.update_version(version)
234 self.update_host(url)
235 self.update_headers(headers)
236 self.update_auto_headers(skip_auto_headers)
237 self.update_cookies(cookies)
238 self.update_content_encoding(data)
239 self.update_auth(auth)
240 self.update_proxy(proxy, proxy_auth, proxy_headers)
241
242 self.update_body_from_data(data)
243 if data or self.method not in self.GET_METHODS:
244 self.update_transfer_encoding()
245 self.update_expect_continue(expect100)
246 if traces is None:
247 traces = []
248 self._traces = traces
249
250 def is_ssl(self) -> bool:
251 return self.url.scheme in ('https', 'wss')
252
253 @property
254 def ssl(self) -> Union['SSLContext', None, bool, Fingerprint]:
255 return self._ssl
256
257 @property
258 def connection_key(self) -> ConnectionKey:
259 proxy_headers = self.proxy_headers
260 if proxy_headers:
261 h = hash(tuple((k, v) for k, v in proxy_headers.items())) # type: Optional[int] # noqa
262 else:
263 h = None
264 return ConnectionKey(self.host, self.port, self.is_ssl(),
265 self.ssl,
266 self.proxy, self.proxy_auth, h)
267
268 @property
269 def host(self) -> str:
270 ret = self.url.host
271 assert ret is not None
272 return ret
273
274 @property
275 def port(self) -> Optional[int]:
276 return self.url.port
277
278 @property
279 def request_info(self) -> RequestInfo:
280 headers = CIMultiDictProxy(self.headers) # type: CIMultiDictProxy[str]
281 return RequestInfo(self.url, self.method,
282 headers, self.original_url)
283
284 def update_host(self, url: URL) -> None:
285 """Update destination host, port and connection type (ssl)."""
286 # get host/port
287 if not url.host:
288 raise InvalidURL(url)
289
290 # basic auth info
291 username, password = url.user, url.password
292 if username:
293 self.auth = helpers.BasicAuth(username, password or '')
294
295 def update_version(self, version: Union[http.HttpVersion, str]) -> None:
296 """Convert request version to two elements tuple.
297
298 parser HTTP version '1.1' => (1, 1)
299 """
300 if isinstance(version, str):
301 v = [l.strip() for l in version.split('.', 1)]
302 try:
303 version = http.HttpVersion(int(v[0]), int(v[1]))
304 except ValueError:
305 raise ValueError(
306 'Can not parse http version number: {}'
307 .format(version)) from None
308 self.version = version
309
310 def update_headers(self, headers: Optional[LooseHeaders]) -> None:
311 """Update request headers."""
312 self.headers = CIMultiDict() # type: CIMultiDict[str]
313
314 # add host
315 netloc = cast(str, self.url.raw_host)
316 if helpers.is_ipv6_address(netloc):
317 netloc = '[{}]'.format(netloc)
318 if not self.url.is_default_port():
319 netloc += ':' + str(self.url.port)
320 self.headers[hdrs.HOST] = netloc
321
322 if headers:
323 if isinstance(headers, (dict, MultiDictProxy, MultiDict)):
324 headers = headers.items() # type: ignore
325
326 for key, value in headers:
327 # A special case for Host header
328 if key.lower() == 'host':
329 self.headers[key] = value
330 else:
331 self.headers.add(key, value)
332
333 def update_auto_headers(self, skip_auto_headers: Iterable[str]) -> None:
334 self.skip_auto_headers = CIMultiDict(
335 (hdr, None) for hdr in sorted(skip_auto_headers))
336 used_headers = self.headers.copy()
337 used_headers.extend(self.skip_auto_headers) # type: ignore
338
339 for hdr, val in self.DEFAULT_HEADERS.items():
340 if hdr not in used_headers:
341 self.headers.add(hdr, val)
342
343 if hdrs.USER_AGENT not in used_headers:
344 self.headers[hdrs.USER_AGENT] = SERVER_SOFTWARE
345
346 def update_cookies(self, cookies: Optional[LooseCookies]) -> None:
347 """Update request cookies header."""
348 if not cookies:
349 return
350
351 c = SimpleCookie()
352 if hdrs.COOKIE in self.headers:
353 c.load(self.headers.get(hdrs.COOKIE, ''))
354 del self.headers[hdrs.COOKIE]
355
356 if isinstance(cookies, Mapping):
357 iter_cookies = cookies.items()
358 else:
359 iter_cookies = cookies # type: ignore
360 for name, value in iter_cookies:
361 if isinstance(value, Morsel):
362 # Preserve coded_value
363 mrsl_val = value.get(value.key, Morsel())
364 mrsl_val.set(value.key, value.value, value.coded_value) # type: ignore # noqa
365 c[name] = mrsl_val
366 else:
367 c[name] = value # type: ignore
368
369 self.headers[hdrs.COOKIE] = c.output(header='', sep=';').strip()
370
371 def update_content_encoding(self, data: Any) -> None:
372 """Set request content encoding."""
373 if not data:
374 return
375
376 enc = self.headers.get(hdrs.CONTENT_ENCODING, '').lower()
377 if enc:
378 if self.compress:
379 raise ValueError(
380 'compress can not be set '
381 'if Content-Encoding header is set')
382 elif self.compress:
383 if not isinstance(self.compress, str):
384 self.compress = 'deflate'
385 self.headers[hdrs.CONTENT_ENCODING] = self.compress
386 self.chunked = True # enable chunked, no need to deal with length
387
388 def update_transfer_encoding(self) -> None:
389 """Analyze transfer-encoding header."""
390 te = self.headers.get(hdrs.TRANSFER_ENCODING, '').lower()
391
392 if 'chunked' in te:
393 if self.chunked:
394 raise ValueError(
395 'chunked can not be set '
396 'if "Transfer-Encoding: chunked" header is set')
397
398 elif self.chunked:
399 if hdrs.CONTENT_LENGTH in self.headers:
400 raise ValueError(
401 'chunked can not be set '
402 'if Content-Length header is set')
403
404 self.headers[hdrs.TRANSFER_ENCODING] = 'chunked'
405 else:
406 if hdrs.CONTENT_LENGTH not in self.headers:
407 self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))
408
409 def update_auth(self, auth: Optional[BasicAuth]) -> None:
410 """Set basic auth."""
411 if auth is None:
412 auth = self.auth
413 if auth is None:
414 return
415
416 if not isinstance(auth, helpers.BasicAuth):
417 raise TypeError('BasicAuth() tuple is required instead')
418
419 self.headers[hdrs.AUTHORIZATION] = auth.encode()
420
421 def update_body_from_data(self, body: Any) -> None:
422 if not body:
423 return
424
425 # FormData
426 if isinstance(body, FormData):
427 body = body()
428
429 try:
430 body = payload.PAYLOAD_REGISTRY.get(body, disposition=None)
431 except payload.LookupError:
432 body = FormData(body)()
433
434 self.body = body
435
436 # enable chunked encoding if needed
437 if not self.chunked:
438 if hdrs.CONTENT_LENGTH not in self.headers:
439 size = body.size
440 if size is None:
441 self.chunked = True
442 else:
443 if hdrs.CONTENT_LENGTH not in self.headers:
444 self.headers[hdrs.CONTENT_LENGTH] = str(size)
445
446 # copy payload headers
447 assert body.headers
448 for (key, value) in body.headers.items():
449 if key in self.headers:
450 continue
451 if key in self.skip_auto_headers:
452 continue
453 self.headers[key] = value
454
455 def update_expect_continue(self, expect: bool=False) -> None:
456 if expect:
457 self.headers[hdrs.EXPECT] = '100-continue'
458 elif self.headers.get(hdrs.EXPECT, '').lower() == '100-continue':
459 expect = True
460
461 if expect:
462 self._continue = self.loop.create_future()
463
464 def update_proxy(self, proxy: Optional[URL],
465 proxy_auth: Optional[BasicAuth],
466 proxy_headers: Optional[LooseHeaders]) -> None:
467 if proxy and not proxy.scheme == 'http':
468 raise ValueError("Only http proxies are supported")
469 if proxy_auth and not isinstance(proxy_auth, helpers.BasicAuth):
470 raise ValueError("proxy_auth must be None or BasicAuth() tuple")
471 self.proxy = proxy
472 self.proxy_auth = proxy_auth
473 self.proxy_headers = proxy_headers
474
475 def keep_alive(self) -> bool:
476 if self.version < HttpVersion10:
477 # keep alive not supported at all
478 return False
479 if self.version == HttpVersion10:
480 if self.headers.get(hdrs.CONNECTION) == 'keep-alive':
481 return True
482 else: # no headers means we close for Http 1.0
483 return False
484 elif self.headers.get(hdrs.CONNECTION) == 'close':
485 return False
486
487 return True
488
489 async def write_bytes(self, writer: AbstractStreamWriter,
490 conn: 'Connection') -> None:
491 """Support coroutines that yields bytes objects."""
492 # 100 response
493 if self._continue is not None:
494 await writer.drain()
495 await self._continue
496
497 protocol = conn.protocol
498 assert protocol is not None
499 try:
500 if isinstance(self.body, payload.Payload):
501 await self.body.write(writer)
502 else:
503 if isinstance(self.body, (bytes, bytearray)):
504 self.body = (self.body,) # type: ignore
505
506 for chunk in self.body:
507 await writer.write(chunk) # type: ignore
508
509 await writer.write_eof()
510 except OSError as exc:
511 new_exc = ClientOSError(
512 exc.errno,
513 'Can not write request body for %s' % self.url)
514 new_exc.__context__ = exc
515 new_exc.__cause__ = exc
516 protocol.set_exception(new_exc)
517 except asyncio.CancelledError as exc:
518 if not conn.closed:
519 protocol.set_exception(exc)
520 except Exception as exc:
521 protocol.set_exception(exc)
522 finally:
523 self._writer = None
524
525 async def send(self, conn: 'Connection') -> 'ClientResponse':
526 # Specify request target:
527 # - CONNECT request must send authority form URI
528 # - not CONNECT proxy must send absolute form URI
529 # - most common is origin form URI
530 if self.method == hdrs.METH_CONNECT:
531 connect_host = self.url.raw_host
532 assert connect_host is not None
533 if helpers.is_ipv6_address(connect_host):
534 connect_host = '[{}]'.format(connect_host)
535 path = '{}:{}'.format(connect_host, self.url.port)
536 elif self.proxy and not self.is_ssl():
537 path = str(self.url)
538 else:
539 path = self.url.raw_path
540 if self.url.raw_query_string:
541 path += '?' + self.url.raw_query_string
542
543 protocol = conn.protocol
544 assert protocol is not None
545 writer = StreamWriter(
546 protocol, self.loop,
547 on_chunk_sent=self._on_chunk_request_sent
548 )
549
550 if self.compress:
551 writer.enable_compression(self.compress)
552
553 if self.chunked is not None:
554 writer.enable_chunking()
555
556 # set default content-type
557 if (self.method in self.POST_METHODS and
558 hdrs.CONTENT_TYPE not in self.skip_auto_headers and
559 hdrs.CONTENT_TYPE not in self.headers):
560 self.headers[hdrs.CONTENT_TYPE] = 'application/octet-stream'
561
562 # set the connection header
563 connection = self.headers.get(hdrs.CONNECTION)
564 if not connection:
565 if self.keep_alive():
566 if self.version == HttpVersion10:
567 connection = 'keep-alive'
568 else:
569 if self.version == HttpVersion11:
570 connection = 'close'
571
572 if connection is not None:
573 self.headers[hdrs.CONNECTION] = connection
574
575 # status + headers
576 status_line = '{0} {1} HTTP/{2[0]}.{2[1]}'.format(
577 self.method, path, self.version)
578 await writer.write_headers(status_line, self.headers)
579
580 self._writer = self.loop.create_task(self.write_bytes(writer, conn))
581
582 response_class = self.response_class
583 assert response_class is not None
584 self.response = response_class(
585 self.method, self.original_url,
586 writer=self._writer, continue100=self._continue, timer=self._timer,
587 request_info=self.request_info,
588 traces=self._traces,
589 loop=self.loop,
590 session=self._session
591 )
592 return self.response
593
594 async def close(self) -> None:
595 if self._writer is not None:
596 try:
597 await self._writer
598 finally:
599 self._writer = None
600
601 def terminate(self) -> None:
602 if self._writer is not None:
603 if not self.loop.is_closed():
604 self._writer.cancel()
605 self._writer = None
606
607 async def _on_chunk_request_sent(self, chunk: bytes) -> None:
608 for trace in self._traces:
609 await trace.send_request_chunk_sent(chunk)
610
611
612 class ClientResponse(HeadersMixin):
613
614 # from the Status-Line of the response
615 version = None # HTTP-Version
616 status = None # type: int # Status-Code
617 reason = None # Reason-Phrase
618
619 content = None # type: StreamReader # Payload stream
620 _headers = None # type: CIMultiDictProxy[str] # Response headers
621 _raw_headers = None # type: RawHeaders # Response raw headers
622
623 _connection = None # current connection
624 _source_traceback = None
625 # setted up by ClientRequest after ClientResponse object creation
626 # post-init stage allows to not change ctor signature
627 _closed = True # to allow __del__ for non-initialized properly response
628 _released = False
629
630 def __init__(self, method: str, url: URL, *,
631 writer: 'asyncio.Task[None]',
632 continue100: Optional['asyncio.Future[bool]'],
633 timer: BaseTimerContext,
634 request_info: RequestInfo,
635 traces: List['Trace'],
636 loop: asyncio.AbstractEventLoop,
637 session: 'ClientSession') -> None:
638 assert isinstance(url, URL)
639 super().__init__()
640
641 self.method = method
642 self.cookies = SimpleCookie()
643
644 self._real_url = url
645 self._url = url.with_fragment(None)
646 self._body = None # type: Optional[bytes]
647 self._writer = writer # type: Optional[asyncio.Task[None]]
648 self._continue = continue100 # None by default
649 self._closed = True
650 self._history = () # type: Tuple[ClientResponse, ...]
651 self._request_info = request_info
652 self._timer = timer if timer is not None else TimerNoop()
653 self._cache = {} # type: Dict[str, Any]
654 self._traces = traces
655 self._loop = loop
656 # store a reference to session #1985
657 self._session = session # type: Optional[ClientSession]
658 if loop.get_debug():
659 self._source_traceback = traceback.extract_stack(sys._getframe(1))
660
661 @reify
662 def url(self) -> URL:
663 return self._url
664
665 @reify
666 def real_url(self) -> URL:
667 return self._real_url
668
669 @reify
670 def host(self) -> str:
671 assert self._url.host is not None
672 return self._url.host
673
674 @reify
675 def headers(self) -> 'CIMultiDictProxy[str]':
676 return self._headers
677
678 @reify
679 def raw_headers(self) -> RawHeaders:
680 return self._raw_headers
681
682 @reify
683 def request_info(self) -> RequestInfo:
684 return self._request_info
685
686 @reify
687 def content_disposition(self) -> Optional[ContentDisposition]:
688 raw = self._headers.get(hdrs.CONTENT_DISPOSITION)
689 if raw is None:
690 return None
691 disposition_type, params_dct = multipart.parse_content_disposition(raw)
692 params = MappingProxyType(params_dct)
693 filename = multipart.content_disposition_filename(params)
694 return ContentDisposition(disposition_type, params, filename)
695
696 def __del__(self, _warnings: Any=warnings) -> None:
697 if self._closed:
698 return
699
700 if self._connection is not None:
701 self._connection.release()
702 self._cleanup_writer()
703
704 if self._loop.get_debug():
705 if PY_36:
706 kwargs = {'source': self}
707 else:
708 kwargs = {}
709 _warnings.warn("Unclosed response {!r}".format(self),
710 ResourceWarning,
711 **kwargs)
712 context = {'client_response': self,
713 'message': 'Unclosed response'}
714 if self._source_traceback:
715 context['source_traceback'] = self._source_traceback
716 self._loop.call_exception_handler(context)
717
718 def __repr__(self) -> str:
719 out = io.StringIO()
720 ascii_encodable_url = str(self.url)
721 if self.reason:
722 ascii_encodable_reason = self.reason.encode('ascii',
723 'backslashreplace') \
724 .decode('ascii')
725 else:
726 ascii_encodable_reason = self.reason
727 print('<ClientResponse({}) [{} {}]>'.format(
728 ascii_encodable_url, self.status, ascii_encodable_reason),
729 file=out)
730 print(self.headers, file=out)
731 return out.getvalue()
732
733 @property
734 def connection(self) -> Optional['Connection']:
735 return self._connection
736
737 @reify
738 def history(self) -> Tuple['ClientResponse', ...]:
739 """A sequence of responses, if redirects occurred."""
740 return self._history
741
742 @reify
743 def links(self) -> 'MultiDictProxy[MultiDictProxy[Union[str, URL]]]':
744 links_str = ", ".join(self.headers.getall("link", []))
745
746 if not links_str:
747 return MultiDictProxy(MultiDict())
748
749 links = MultiDict() # type: MultiDict[MultiDictProxy[Union[str, URL]]]
750
751 for val in re.split(r",(?=\s*<)", links_str):
752 match = re.match(r"\s*<(.*)>(.*)", val)
753 if match is None: # pragma: no cover
754 # the check exists to suppress mypy error
755 continue
756 url, params_str = match.groups()
757 params = params_str.split(";")[1:]
758
759 link = MultiDict() # type: MultiDict[Union[str, URL]]
760
761 for param in params:
762 match = re.match(
763 r"^\s*(\S*)\s*=\s*(['\"]?)(.*?)(\2)\s*$",
764 param, re.M
765 )
766 if match is None: # pragma: no cover
767 # the check exists to suppress mypy error
768 continue
769 key, _, value, _ = match.groups()
770
771 link.add(key, value)
772
773 key = link.get("rel", url) # type: ignore
774
775 link.add("url", self.url.join(URL(url)))
776
777 links.add(key, MultiDictProxy(link))
778
779 return MultiDictProxy(links)
780
781 async def start(self, connection: 'Connection') -> 'ClientResponse':
782 """Start response processing."""
783 self._closed = False
784 self._protocol = connection.protocol
785 self._connection = connection
786
787 with self._timer:
788 while True:
789 # read response
790 try:
791 message, payload = await self._protocol.read() # type: ignore # noqa
792 except http.HttpProcessingError as exc:
793 raise ClientResponseError(
794 self.request_info, self.history,
795 status=exc.code,
796 message=exc.message, headers=exc.headers) from exc
797
798 if (message.code < 100 or
799 message.code > 199 or message.code == 101):
800 break
801
802 if self._continue is not None:
803 set_result(self._continue, True)
804 self._continue = None
805
806 # payload eof handler
807 payload.on_eof(self._response_eof)
808
809 # response status
810 self.version = message.version
811 self.status = message.code
812 self.reason = message.reason
813
814 # headers
815 self._headers = message.headers # type is CIMultiDictProxy
816 self._raw_headers = message.raw_headers # type is Tuple[bytes, bytes]
817
818 # payload
819 self.content = payload
820
821 # cookies
822 for hdr in self.headers.getall(hdrs.SET_COOKIE, ()):
823 try:
824 self.cookies.load(hdr)
825 except CookieError as exc:
826 client_logger.warning(
827 'Can not load response cookies: %s', exc)
828 return self
829
830 def _response_eof(self) -> None:
831 if self._closed:
832 return
833
834 if self._connection is not None:
835 # websocket, protocol could be None because
836 # connection could be detached
837 if (self._connection.protocol is not None and
838 self._connection.protocol.upgraded):
839 return
840
841 self._connection.release()
842 self._connection = None
843
844 self._closed = True
845 self._cleanup_writer()
846
847 @property
848 def closed(self) -> bool:
849 return self._closed
850
851 def close(self) -> None:
852 if not self._released:
853 self._notify_content()
854 if self._closed:
855 return
856
857 self._closed = True
858 if self._loop is None or self._loop.is_closed():
859 return
860
861 if self._connection is not None:
862 self._connection.close()
863 self._connection = None
864 self._cleanup_writer()
865
866 def release(self) -> Any:
867 if not self._released:
868 self._notify_content()
869 if self._closed:
870 return noop()
871
872 self._closed = True
873 if self._connection is not None:
874 self._connection.release()
875 self._connection = None
876
877 self._cleanup_writer()
878 return noop()
879
880 def raise_for_status(self) -> None:
881 if 400 <= self.status:
882 # reason should always be not None for a started response
883 assert self.reason is not None
884 self.release()
885 raise ClientResponseError(
886 self.request_info,
887 self.history,
888 status=self.status,
889 message=self.reason,
890 headers=self.headers)
891
892 def _cleanup_writer(self) -> None:
893 if self._writer is not None:
894 self._writer.cancel()
895 self._writer = None
896 self._session = None
897
898 def _notify_content(self) -> None:
899 content = self.content
900 if content and content.exception() is None:
901 content.set_exception(
902 ClientConnectionError('Connection closed'))
903 self._released = True
904
905 async def wait_for_close(self) -> None:
906 if self._writer is not None:
907 try:
908 await self._writer
909 finally:
910 self._writer = None
911 self.release()
912
913 async def read(self) -> bytes:
914 """Read response payload."""
915 if self._body is None:
916 try:
917 self._body = await self.content.read()
918 for trace in self._traces:
919 await trace.send_response_chunk_received(self._body)
920 except BaseException:
921 self.close()
922 raise
923 elif self._released:
924 raise ClientConnectionError('Connection closed')
925
926 return self._body
927
928 def get_encoding(self) -> str:
929 ctype = self.headers.get(hdrs.CONTENT_TYPE, '').lower()
930 mimetype = helpers.parse_mimetype(ctype)
931
932 encoding = mimetype.parameters.get('charset')
933 if encoding:
934 try:
935 codecs.lookup(encoding)
936 except LookupError:
937 encoding = None
938 if not encoding:
939 if mimetype.type == 'application' and mimetype.subtype == 'json':
940 # RFC 7159 states that the default encoding is UTF-8.
941 encoding = 'utf-8'
942 else:
943 encoding = chardet.detect(self._body)['encoding']
944 if not encoding:
945 encoding = 'utf-8'
946
947 return encoding
948
949 async def text(self,
950 encoding: Optional[str]=None, errors: str='strict') -> str:
951 """Read response payload and decode."""
952 if self._body is None:
953 await self.read()
954
955 if encoding is None:
956 encoding = self.get_encoding()
957
958 return self._body.decode(encoding, errors=errors) # type: ignore
959
960 async def json(self, *, encoding: str=None,
961 loads: JSONDecoder=DEFAULT_JSON_DECODER,
962 content_type: Optional[str]='application/json') -> Any:
963 """Read and decodes JSON response."""
964 if self._body is None:
965 await self.read()
966
967 if content_type:
968 ctype = self.headers.get(hdrs.CONTENT_TYPE, '').lower()
969 if not is_expected_content_type(ctype, content_type):
970 raise ContentTypeError(
971 self.request_info,
972 self.history,
973 message=('Attempt to decode JSON with '
974 'unexpected mimetype: %s' % ctype),
975 headers=self.headers)
976
977 if encoding is None:
978 encoding = self.get_encoding()
979
980 return loads(self._body.decode(encoding)) # type: ignore
981
982 async def __aenter__(self) -> 'ClientResponse':
983 return self
984
985 async def __aexit__(self,
986 exc_type: Optional[Type[BaseException]],
987 exc_val: Optional[BaseException],
988 exc_tb: Optional[TracebackType]) -> None:
989 # similar to _RequestContextManager, we do not need to check
990 # for exceptions, response object can closes connection
991 # is state is broken
992 self.release()
993
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/aiohttp/client_reqrep.py b/aiohttp/client_reqrep.py
--- a/aiohttp/client_reqrep.py
+++ b/aiohttp/client_reqrep.py
@@ -315,7 +315,7 @@
netloc = cast(str, self.url.raw_host)
if helpers.is_ipv6_address(netloc):
netloc = '[{}]'.format(netloc)
- if not self.url.is_default_port():
+ if self.url.port is not None and not self.url.is_default_port():
netloc += ':' + str(self.url.port)
self.headers[hdrs.HOST] = netloc
| {"golden_diff": "diff --git a/aiohttp/client_reqrep.py b/aiohttp/client_reqrep.py\n--- a/aiohttp/client_reqrep.py\n+++ b/aiohttp/client_reqrep.py\n@@ -315,7 +315,7 @@\n netloc = cast(str, self.url.raw_host)\n if helpers.is_ipv6_address(netloc):\n netloc = '[{}]'.format(netloc)\n- if not self.url.is_default_port():\n+ if self.url.port is not None and not self.url.is_default_port():\n netloc += ':' + str(self.url.port)\n self.headers[hdrs.HOST] = netloc\n", "issue": "Client-sent Host header can include port as \"None\"\n## Long story short\r\n\r\n<!-- Please describe your problem and why the fix is important. -->\r\nWhen the client forms the Host header, it is possible for it to include the port as \"None\".\r\n\r\nThis came up for me when using `aiodocker` to try to connect to the Docker API container attach websocket endpoint, which used a URL of the form \"unix://localhost/...\" and let to a \"Host\" header of \"localhost:None\", triggering a 400 error from docker with a message like:\r\n\r\n parse ws://localhost:None/v1.35/containers/CONTAINER_ID/attach/ws?stdin=1&stdout=0&stderr=0&stream=1: invalid port \":None\" after host\r\n\r\n## Expected behaviour\r\n\r\n<!-- What is the behaviour you expect? -->\r\nAt least, not to send \"None\" as a port number for the Host header.\r\n\r\nAccording to [RFC 7230 Section 5.4](https://tools.ietf.org/html/rfc7230#section-5.4):\r\n> If the authority component is missing or undefined for the target URI, then a client MUST send a Host header field with an empty field-value.\r\n\r\nSo perhaps it should be possible for the `aiohttp` client to get and recognize such a URI and send a blank Host header field.\r\n\r\nAt the moment though, I think, it doesn't seem possible to send such an \"authority\"-less URL to `ws_connect` nor does there currently exist a conditional path for the Host header construction to make a blank Host header field: [client_reqrep.py lines 314-320](https://github.com/aio-libs/aiohttp/blob/21b062199ff8da1a8d48b262f3d75fb616cc275f/aiohttp/client_reqrep.py#L314-L320)\r\n\r\n## Actual behaviour\r\n\r\n<!-- What's actually happening? -->\r\nThe Host header includes the string \"None\" as the port when making requests whose URL registers as not `is_default_port()` but has no port defined, e.g. `unix://localhost/path/to/endpoint`.\r\n\r\n## Steps to reproduce\r\n\r\n<!-- Please describe steps to reproduce the issue.\r\n If you have a script that does that please include it here within\r\n markdown code markup -->\r\nThis occurred for me while using the `aiodocker` package to attach to `stdin` of a running container.\r\n\r\nA sort of silly example server/client that displays the behavior is as follows:\r\n```python\r\nfrom aiohttp import web\r\nfrom asyncio import sleep, create_task\r\nimport aiohttp\r\n\r\nSOCK_PATH = '/tmp/example.sock'\r\n\r\nasync def hello(request):\r\n print('Host: '+request.headers['Host'])\r\n return web.Response()\r\n\r\nasync def make_request():\r\n await sleep(1) # Let the server become available.\r\n conn = aiohttp.UnixConnector(path=SOCK_PATH)\r\n async with aiohttp.ClientSession(connector=conn) as session:\r\n async with session.get('unix://localhost/'):\r\n pass # Produces a Host of \"localhost:None\"\r\n async with session.get('http://localhost/'):\r\n pass # Produces a Host of \"localhost\"\r\n\r\nasync def schedule_request(_):\r\n create_task(make_request())\r\n\r\napp = web.Application()\r\napp.add_routes([web.get('/', hello)])\r\napp.on_startup.append(schedule_request)\r\n\r\nweb.run_app(app, path=SOCK_PATH)\r\n```\r\nOutput:\r\n```\r\n======== Running on http://unix:/tmp/example.sock: ========\r\n(Press CTRL+C to quit)\r\nHost: localhost:None\r\nHost: localhost\r\n```\r\n\r\n## Your environment\r\n\r\n<!-- Describe the environment you have that lead to your issue.\r\n This includes aiohttp version, OS, proxy server and other bits that\r\n are related to your case. \r\n \r\n IMPORTANT: aiohttp is both server framework and client library.\r\n For getting rid of confusing please put 'server', 'client' or 'both'\r\n word here.\r\n -->\r\n* Debian 9\r\n* Python 3.7.4\r\n* aiohttp 3.5.4\r\n* aiodocker 0.14.0\r\n* Docker 19.03.2-ce\r\n\r\nBTW the specific thing that I think make this appear where it didn't before was a security update to Go that make URL parsing more strict: https://github.com/golang/go/issues?q=milestone%3AGo1.12.8\nClient-sent Host header can include port as \"None\"\n## Long story short\r\n\r\n<!-- Please describe your problem and why the fix is important. -->\r\nWhen the client forms the Host header, it is possible for it to include the port as \"None\".\r\n\r\nThis came up for me when using `aiodocker` to try to connect to the Docker API container attach websocket endpoint, which used a URL of the form \"unix://localhost/...\" and let to a \"Host\" header of \"localhost:None\", triggering a 400 error from docker with a message like:\r\n\r\n parse ws://localhost:None/v1.35/containers/CONTAINER_ID/attach/ws?stdin=1&stdout=0&stderr=0&stream=1: invalid port \":None\" after host\r\n\r\n## Expected behaviour\r\n\r\n<!-- What is the behaviour you expect? -->\r\nAt least, not to send \"None\" as a port number for the Host header.\r\n\r\nAccording to [RFC 7230 Section 5.4](https://tools.ietf.org/html/rfc7230#section-5.4):\r\n> If the authority component is missing or undefined for the target URI, then a client MUST send a Host header field with an empty field-value.\r\n\r\nSo perhaps it should be possible for the `aiohttp` client to get and recognize such a URI and send a blank Host header field.\r\n\r\nAt the moment though, I think, it doesn't seem possible to send such an \"authority\"-less URL to `ws_connect` nor does there currently exist a conditional path for the Host header construction to make a blank Host header field: [client_reqrep.py lines 314-320](https://github.com/aio-libs/aiohttp/blob/21b062199ff8da1a8d48b262f3d75fb616cc275f/aiohttp/client_reqrep.py#L314-L320)\r\n\r\n## Actual behaviour\r\n\r\n<!-- What's actually happening? -->\r\nThe Host header includes the string \"None\" as the port when making requests whose URL registers as not `is_default_port()` but has no port defined, e.g. `unix://localhost/path/to/endpoint`.\r\n\r\n## Steps to reproduce\r\n\r\n<!-- Please describe steps to reproduce the issue.\r\n If you have a script that does that please include it here within\r\n markdown code markup -->\r\nThis occurred for me while using the `aiodocker` package to attach to `stdin` of a running container.\r\n\r\nA sort of silly example server/client that displays the behavior is as follows:\r\n```python\r\nfrom aiohttp import web\r\nfrom asyncio import sleep, create_task\r\nimport aiohttp\r\n\r\nSOCK_PATH = '/tmp/example.sock'\r\n\r\nasync def hello(request):\r\n print('Host: '+request.headers['Host'])\r\n return web.Response()\r\n\r\nasync def make_request():\r\n await sleep(1) # Let the server become available.\r\n conn = aiohttp.UnixConnector(path=SOCK_PATH)\r\n async with aiohttp.ClientSession(connector=conn) as session:\r\n async with session.get('unix://localhost/'):\r\n pass # Produces a Host of \"localhost:None\"\r\n async with session.get('http://localhost/'):\r\n pass # Produces a Host of \"localhost\"\r\n\r\nasync def schedule_request(_):\r\n create_task(make_request())\r\n\r\napp = web.Application()\r\napp.add_routes([web.get('/', hello)])\r\napp.on_startup.append(schedule_request)\r\n\r\nweb.run_app(app, path=SOCK_PATH)\r\n```\r\nOutput:\r\n```\r\n======== Running on http://unix:/tmp/example.sock: ========\r\n(Press CTRL+C to quit)\r\nHost: localhost:None\r\nHost: localhost\r\n```\r\n\r\n## Your environment\r\n\r\n<!-- Describe the environment you have that lead to your issue.\r\n This includes aiohttp version, OS, proxy server and other bits that\r\n are related to your case. \r\n \r\n IMPORTANT: aiohttp is both server framework and client library.\r\n For getting rid of confusing please put 'server', 'client' or 'both'\r\n word here.\r\n -->\r\n* Debian 9\r\n* Python 3.7.4\r\n* aiohttp 3.5.4\r\n* aiodocker 0.14.0\r\n* Docker 19.03.2-ce\r\n\r\nBTW the specific thing that I think make this appear where it didn't before was a security update to Go that make URL parsing more strict: https://github.com/golang/go/issues?q=milestone%3AGo1.12.8\n", "before_files": [{"content": "import asyncio\nimport codecs\nimport io\nimport re\nimport sys\nimport traceback\nimport warnings\nfrom hashlib import md5, sha1, sha256\nfrom http.cookies import CookieError, Morsel, SimpleCookie\nfrom types import MappingProxyType, TracebackType\nfrom typing import ( # noqa\n TYPE_CHECKING,\n Any,\n Dict,\n Iterable,\n List,\n Mapping,\n Optional,\n Tuple,\n Type,\n Union,\n cast,\n)\n\nimport attr\nfrom multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy\nfrom yarl import URL\n\nfrom . import hdrs, helpers, http, multipart, payload\nfrom .abc import AbstractStreamWriter\nfrom .client_exceptions import (\n ClientConnectionError,\n ClientOSError,\n ClientResponseError,\n ContentTypeError,\n InvalidURL,\n ServerFingerprintMismatch,\n)\nfrom .formdata import FormData\nfrom .helpers import ( # noqa\n PY_36,\n BaseTimerContext,\n BasicAuth,\n HeadersMixin,\n TimerNoop,\n is_expected_content_type,\n noop,\n reify,\n set_result,\n)\nfrom .http import SERVER_SOFTWARE, HttpVersion10, HttpVersion11, StreamWriter\nfrom .log import client_logger\nfrom .streams import StreamReader # noqa\nfrom .typedefs import (\n DEFAULT_JSON_DECODER,\n JSONDecoder,\n LooseCookies,\n LooseHeaders,\n RawHeaders,\n)\n\ntry:\n import ssl\n from ssl import SSLContext\nexcept ImportError: # pragma: no cover\n ssl = None # type: ignore\n SSLContext = object # type: ignore\n\ntry:\n import cchardet as chardet\nexcept ImportError: # pragma: no cover\n import chardet\n\n\n__all__ = ('ClientRequest', 'ClientResponse', 'RequestInfo', 'Fingerprint')\n\n\nif TYPE_CHECKING: # pragma: no cover\n from .client import ClientSession # noqa\n from .connector import Connection # noqa\n from .tracing import Trace # noqa\n\n\[email protected](frozen=True, slots=True)\nclass ContentDisposition:\n type = attr.ib(type=str) # type: Optional[str]\n parameters = attr.ib(type=MappingProxyType) # type: MappingProxyType[str, str] # noqa\n filename = attr.ib(type=str) # type: Optional[str]\n\n\[email protected](frozen=True, slots=True)\nclass RequestInfo:\n url = attr.ib(type=URL)\n method = attr.ib(type=str)\n headers = attr.ib(type=CIMultiDictProxy) # type: CIMultiDictProxy[str]\n real_url = attr.ib(type=URL)\n\n @real_url.default\n def real_url_default(self) -> URL:\n return self.url\n\n\nclass Fingerprint:\n HASHFUNC_BY_DIGESTLEN = {\n 16: md5,\n 20: sha1,\n 32: sha256,\n }\n\n def __init__(self, fingerprint: bytes) -> None:\n digestlen = len(fingerprint)\n hashfunc = self.HASHFUNC_BY_DIGESTLEN.get(digestlen)\n if not hashfunc:\n raise ValueError('fingerprint has invalid length')\n elif hashfunc is md5 or hashfunc is sha1:\n raise ValueError('md5 and sha1 are insecure and '\n 'not supported. Use sha256.')\n self._hashfunc = hashfunc\n self._fingerprint = fingerprint\n\n @property\n def fingerprint(self) -> bytes:\n return self._fingerprint\n\n def check(self, transport: asyncio.Transport) -> None:\n if not transport.get_extra_info('sslcontext'):\n return\n sslobj = transport.get_extra_info('ssl_object')\n cert = sslobj.getpeercert(binary_form=True)\n got = self._hashfunc(cert).digest()\n if got != self._fingerprint:\n host, port, *_ = transport.get_extra_info('peername')\n raise ServerFingerprintMismatch(self._fingerprint,\n got, host, port)\n\n\nif ssl is not None:\n SSL_ALLOWED_TYPES = (ssl.SSLContext, bool, Fingerprint, type(None))\nelse: # pragma: no cover\n SSL_ALLOWED_TYPES = type(None)\n\n\[email protected](slots=True, frozen=True)\nclass ConnectionKey:\n # the key should contain an information about used proxy / TLS\n # to prevent reusing wrong connections from a pool\n host = attr.ib(type=str)\n port = attr.ib(type=int) # type: Optional[int]\n is_ssl = attr.ib(type=bool)\n ssl = attr.ib() # type: Union[SSLContext, None, bool, Fingerprint]\n proxy = attr.ib() # type: Optional[URL]\n proxy_auth = attr.ib() # type: Optional[BasicAuth]\n proxy_headers_hash = attr.ib(type=int) # type: Optional[int] # noqa # hash(CIMultiDict)\n\n\nclass ClientRequest:\n GET_METHODS = {\n hdrs.METH_GET,\n hdrs.METH_HEAD,\n hdrs.METH_OPTIONS,\n hdrs.METH_TRACE,\n }\n POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT}\n ALL_METHODS = GET_METHODS.union(POST_METHODS).union({hdrs.METH_DELETE})\n\n DEFAULT_HEADERS = {\n hdrs.ACCEPT: '*/*',\n hdrs.ACCEPT_ENCODING: 'gzip, deflate',\n }\n\n body = b''\n auth = None\n response = None\n response_class = None\n\n _writer = None # async task for streaming data\n _continue = None # waiter future for '100 Continue' response\n\n # N.B.\n # Adding __del__ method with self._writer closing doesn't make sense\n # because _writer is instance method, thus it keeps a reference to self.\n # Until writer has finished finalizer will not be called.\n\n def __init__(self, method: str, url: URL, *,\n params: Optional[Mapping[str, str]]=None,\n headers: Optional[LooseHeaders]=None,\n skip_auto_headers: Iterable[str]=frozenset(),\n data: Any=None,\n cookies: Optional[LooseCookies]=None,\n auth: Optional[BasicAuth]=None,\n version: http.HttpVersion=http.HttpVersion11,\n compress: Optional[str]=None,\n chunked: Optional[bool]=None,\n expect100: bool=False,\n loop: asyncio.AbstractEventLoop,\n response_class: Optional[Type['ClientResponse']]=None,\n proxy: Optional[URL]=None,\n proxy_auth: Optional[BasicAuth]=None,\n timer: Optional[BaseTimerContext]=None,\n session: Optional['ClientSession']=None,\n ssl: Union[SSLContext, bool, Fingerprint, None]=None,\n proxy_headers: Optional[LooseHeaders]=None,\n traces: Optional[List['Trace']]=None):\n\n assert isinstance(url, URL), url\n assert isinstance(proxy, (URL, type(None))), proxy\n # FIXME: session is None in tests only, need to fix tests\n # assert session is not None\n self._session = cast('ClientSession', session)\n if params:\n q = MultiDict(url.query)\n url2 = url.with_query(params)\n q.extend(url2.query)\n url = url.with_query(q)\n self.original_url = url\n self.url = url.with_fragment(None)\n self.method = method.upper()\n self.chunked = chunked\n self.compress = compress\n self.loop = loop\n self.length = None\n if response_class is None:\n real_response_class = ClientResponse\n else:\n real_response_class = response_class\n self.response_class = real_response_class # type: Type[ClientResponse]\n self._timer = timer if timer is not None else TimerNoop()\n self._ssl = ssl\n\n if loop.get_debug():\n self._source_traceback = traceback.extract_stack(sys._getframe(1))\n\n self.update_version(version)\n self.update_host(url)\n self.update_headers(headers)\n self.update_auto_headers(skip_auto_headers)\n self.update_cookies(cookies)\n self.update_content_encoding(data)\n self.update_auth(auth)\n self.update_proxy(proxy, proxy_auth, proxy_headers)\n\n self.update_body_from_data(data)\n if data or self.method not in self.GET_METHODS:\n self.update_transfer_encoding()\n self.update_expect_continue(expect100)\n if traces is None:\n traces = []\n self._traces = traces\n\n def is_ssl(self) -> bool:\n return self.url.scheme in ('https', 'wss')\n\n @property\n def ssl(self) -> Union['SSLContext', None, bool, Fingerprint]:\n return self._ssl\n\n @property\n def connection_key(self) -> ConnectionKey:\n proxy_headers = self.proxy_headers\n if proxy_headers:\n h = hash(tuple((k, v) for k, v in proxy_headers.items())) # type: Optional[int] # noqa\n else:\n h = None\n return ConnectionKey(self.host, self.port, self.is_ssl(),\n self.ssl,\n self.proxy, self.proxy_auth, h)\n\n @property\n def host(self) -> str:\n ret = self.url.host\n assert ret is not None\n return ret\n\n @property\n def port(self) -> Optional[int]:\n return self.url.port\n\n @property\n def request_info(self) -> RequestInfo:\n headers = CIMultiDictProxy(self.headers) # type: CIMultiDictProxy[str]\n return RequestInfo(self.url, self.method,\n headers, self.original_url)\n\n def update_host(self, url: URL) -> None:\n \"\"\"Update destination host, port and connection type (ssl).\"\"\"\n # get host/port\n if not url.host:\n raise InvalidURL(url)\n\n # basic auth info\n username, password = url.user, url.password\n if username:\n self.auth = helpers.BasicAuth(username, password or '')\n\n def update_version(self, version: Union[http.HttpVersion, str]) -> None:\n \"\"\"Convert request version to two elements tuple.\n\n parser HTTP version '1.1' => (1, 1)\n \"\"\"\n if isinstance(version, str):\n v = [l.strip() for l in version.split('.', 1)]\n try:\n version = http.HttpVersion(int(v[0]), int(v[1]))\n except ValueError:\n raise ValueError(\n 'Can not parse http version number: {}'\n .format(version)) from None\n self.version = version\n\n def update_headers(self, headers: Optional[LooseHeaders]) -> None:\n \"\"\"Update request headers.\"\"\"\n self.headers = CIMultiDict() # type: CIMultiDict[str]\n\n # add host\n netloc = cast(str, self.url.raw_host)\n if helpers.is_ipv6_address(netloc):\n netloc = '[{}]'.format(netloc)\n if not self.url.is_default_port():\n netloc += ':' + str(self.url.port)\n self.headers[hdrs.HOST] = netloc\n\n if headers:\n if isinstance(headers, (dict, MultiDictProxy, MultiDict)):\n headers = headers.items() # type: ignore\n\n for key, value in headers:\n # A special case for Host header\n if key.lower() == 'host':\n self.headers[key] = value\n else:\n self.headers.add(key, value)\n\n def update_auto_headers(self, skip_auto_headers: Iterable[str]) -> None:\n self.skip_auto_headers = CIMultiDict(\n (hdr, None) for hdr in sorted(skip_auto_headers))\n used_headers = self.headers.copy()\n used_headers.extend(self.skip_auto_headers) # type: ignore\n\n for hdr, val in self.DEFAULT_HEADERS.items():\n if hdr not in used_headers:\n self.headers.add(hdr, val)\n\n if hdrs.USER_AGENT not in used_headers:\n self.headers[hdrs.USER_AGENT] = SERVER_SOFTWARE\n\n def update_cookies(self, cookies: Optional[LooseCookies]) -> None:\n \"\"\"Update request cookies header.\"\"\"\n if not cookies:\n return\n\n c = SimpleCookie()\n if hdrs.COOKIE in self.headers:\n c.load(self.headers.get(hdrs.COOKIE, ''))\n del self.headers[hdrs.COOKIE]\n\n if isinstance(cookies, Mapping):\n iter_cookies = cookies.items()\n else:\n iter_cookies = cookies # type: ignore\n for name, value in iter_cookies:\n if isinstance(value, Morsel):\n # Preserve coded_value\n mrsl_val = value.get(value.key, Morsel())\n mrsl_val.set(value.key, value.value, value.coded_value) # type: ignore # noqa\n c[name] = mrsl_val\n else:\n c[name] = value # type: ignore\n\n self.headers[hdrs.COOKIE] = c.output(header='', sep=';').strip()\n\n def update_content_encoding(self, data: Any) -> None:\n \"\"\"Set request content encoding.\"\"\"\n if not data:\n return\n\n enc = self.headers.get(hdrs.CONTENT_ENCODING, '').lower()\n if enc:\n if self.compress:\n raise ValueError(\n 'compress can not be set '\n 'if Content-Encoding header is set')\n elif self.compress:\n if not isinstance(self.compress, str):\n self.compress = 'deflate'\n self.headers[hdrs.CONTENT_ENCODING] = self.compress\n self.chunked = True # enable chunked, no need to deal with length\n\n def update_transfer_encoding(self) -> None:\n \"\"\"Analyze transfer-encoding header.\"\"\"\n te = self.headers.get(hdrs.TRANSFER_ENCODING, '').lower()\n\n if 'chunked' in te:\n if self.chunked:\n raise ValueError(\n 'chunked can not be set '\n 'if \"Transfer-Encoding: chunked\" header is set')\n\n elif self.chunked:\n if hdrs.CONTENT_LENGTH in self.headers:\n raise ValueError(\n 'chunked can not be set '\n 'if Content-Length header is set')\n\n self.headers[hdrs.TRANSFER_ENCODING] = 'chunked'\n else:\n if hdrs.CONTENT_LENGTH not in self.headers:\n self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))\n\n def update_auth(self, auth: Optional[BasicAuth]) -> None:\n \"\"\"Set basic auth.\"\"\"\n if auth is None:\n auth = self.auth\n if auth is None:\n return\n\n if not isinstance(auth, helpers.BasicAuth):\n raise TypeError('BasicAuth() tuple is required instead')\n\n self.headers[hdrs.AUTHORIZATION] = auth.encode()\n\n def update_body_from_data(self, body: Any) -> None:\n if not body:\n return\n\n # FormData\n if isinstance(body, FormData):\n body = body()\n\n try:\n body = payload.PAYLOAD_REGISTRY.get(body, disposition=None)\n except payload.LookupError:\n body = FormData(body)()\n\n self.body = body\n\n # enable chunked encoding if needed\n if not self.chunked:\n if hdrs.CONTENT_LENGTH not in self.headers:\n size = body.size\n if size is None:\n self.chunked = True\n else:\n if hdrs.CONTENT_LENGTH not in self.headers:\n self.headers[hdrs.CONTENT_LENGTH] = str(size)\n\n # copy payload headers\n assert body.headers\n for (key, value) in body.headers.items():\n if key in self.headers:\n continue\n if key in self.skip_auto_headers:\n continue\n self.headers[key] = value\n\n def update_expect_continue(self, expect: bool=False) -> None:\n if expect:\n self.headers[hdrs.EXPECT] = '100-continue'\n elif self.headers.get(hdrs.EXPECT, '').lower() == '100-continue':\n expect = True\n\n if expect:\n self._continue = self.loop.create_future()\n\n def update_proxy(self, proxy: Optional[URL],\n proxy_auth: Optional[BasicAuth],\n proxy_headers: Optional[LooseHeaders]) -> None:\n if proxy and not proxy.scheme == 'http':\n raise ValueError(\"Only http proxies are supported\")\n if proxy_auth and not isinstance(proxy_auth, helpers.BasicAuth):\n raise ValueError(\"proxy_auth must be None or BasicAuth() tuple\")\n self.proxy = proxy\n self.proxy_auth = proxy_auth\n self.proxy_headers = proxy_headers\n\n def keep_alive(self) -> bool:\n if self.version < HttpVersion10:\n # keep alive not supported at all\n return False\n if self.version == HttpVersion10:\n if self.headers.get(hdrs.CONNECTION) == 'keep-alive':\n return True\n else: # no headers means we close for Http 1.0\n return False\n elif self.headers.get(hdrs.CONNECTION) == 'close':\n return False\n\n return True\n\n async def write_bytes(self, writer: AbstractStreamWriter,\n conn: 'Connection') -> None:\n \"\"\"Support coroutines that yields bytes objects.\"\"\"\n # 100 response\n if self._continue is not None:\n await writer.drain()\n await self._continue\n\n protocol = conn.protocol\n assert protocol is not None\n try:\n if isinstance(self.body, payload.Payload):\n await self.body.write(writer)\n else:\n if isinstance(self.body, (bytes, bytearray)):\n self.body = (self.body,) # type: ignore\n\n for chunk in self.body:\n await writer.write(chunk) # type: ignore\n\n await writer.write_eof()\n except OSError as exc:\n new_exc = ClientOSError(\n exc.errno,\n 'Can not write request body for %s' % self.url)\n new_exc.__context__ = exc\n new_exc.__cause__ = exc\n protocol.set_exception(new_exc)\n except asyncio.CancelledError as exc:\n if not conn.closed:\n protocol.set_exception(exc)\n except Exception as exc:\n protocol.set_exception(exc)\n finally:\n self._writer = None\n\n async def send(self, conn: 'Connection') -> 'ClientResponse':\n # Specify request target:\n # - CONNECT request must send authority form URI\n # - not CONNECT proxy must send absolute form URI\n # - most common is origin form URI\n if self.method == hdrs.METH_CONNECT:\n connect_host = self.url.raw_host\n assert connect_host is not None\n if helpers.is_ipv6_address(connect_host):\n connect_host = '[{}]'.format(connect_host)\n path = '{}:{}'.format(connect_host, self.url.port)\n elif self.proxy and not self.is_ssl():\n path = str(self.url)\n else:\n path = self.url.raw_path\n if self.url.raw_query_string:\n path += '?' + self.url.raw_query_string\n\n protocol = conn.protocol\n assert protocol is not None\n writer = StreamWriter(\n protocol, self.loop,\n on_chunk_sent=self._on_chunk_request_sent\n )\n\n if self.compress:\n writer.enable_compression(self.compress)\n\n if self.chunked is not None:\n writer.enable_chunking()\n\n # set default content-type\n if (self.method in self.POST_METHODS and\n hdrs.CONTENT_TYPE not in self.skip_auto_headers and\n hdrs.CONTENT_TYPE not in self.headers):\n self.headers[hdrs.CONTENT_TYPE] = 'application/octet-stream'\n\n # set the connection header\n connection = self.headers.get(hdrs.CONNECTION)\n if not connection:\n if self.keep_alive():\n if self.version == HttpVersion10:\n connection = 'keep-alive'\n else:\n if self.version == HttpVersion11:\n connection = 'close'\n\n if connection is not None:\n self.headers[hdrs.CONNECTION] = connection\n\n # status + headers\n status_line = '{0} {1} HTTP/{2[0]}.{2[1]}'.format(\n self.method, path, self.version)\n await writer.write_headers(status_line, self.headers)\n\n self._writer = self.loop.create_task(self.write_bytes(writer, conn))\n\n response_class = self.response_class\n assert response_class is not None\n self.response = response_class(\n self.method, self.original_url,\n writer=self._writer, continue100=self._continue, timer=self._timer,\n request_info=self.request_info,\n traces=self._traces,\n loop=self.loop,\n session=self._session\n )\n return self.response\n\n async def close(self) -> None:\n if self._writer is not None:\n try:\n await self._writer\n finally:\n self._writer = None\n\n def terminate(self) -> None:\n if self._writer is not None:\n if not self.loop.is_closed():\n self._writer.cancel()\n self._writer = None\n\n async def _on_chunk_request_sent(self, chunk: bytes) -> None:\n for trace in self._traces:\n await trace.send_request_chunk_sent(chunk)\n\n\nclass ClientResponse(HeadersMixin):\n\n # from the Status-Line of the response\n version = None # HTTP-Version\n status = None # type: int # Status-Code\n reason = None # Reason-Phrase\n\n content = None # type: StreamReader # Payload stream\n _headers = None # type: CIMultiDictProxy[str] # Response headers\n _raw_headers = None # type: RawHeaders # Response raw headers\n\n _connection = None # current connection\n _source_traceback = None\n # setted up by ClientRequest after ClientResponse object creation\n # post-init stage allows to not change ctor signature\n _closed = True # to allow __del__ for non-initialized properly response\n _released = False\n\n def __init__(self, method: str, url: URL, *,\n writer: 'asyncio.Task[None]',\n continue100: Optional['asyncio.Future[bool]'],\n timer: BaseTimerContext,\n request_info: RequestInfo,\n traces: List['Trace'],\n loop: asyncio.AbstractEventLoop,\n session: 'ClientSession') -> None:\n assert isinstance(url, URL)\n super().__init__()\n\n self.method = method\n self.cookies = SimpleCookie()\n\n self._real_url = url\n self._url = url.with_fragment(None)\n self._body = None # type: Optional[bytes]\n self._writer = writer # type: Optional[asyncio.Task[None]]\n self._continue = continue100 # None by default\n self._closed = True\n self._history = () # type: Tuple[ClientResponse, ...]\n self._request_info = request_info\n self._timer = timer if timer is not None else TimerNoop()\n self._cache = {} # type: Dict[str, Any]\n self._traces = traces\n self._loop = loop\n # store a reference to session #1985\n self._session = session # type: Optional[ClientSession]\n if loop.get_debug():\n self._source_traceback = traceback.extract_stack(sys._getframe(1))\n\n @reify\n def url(self) -> URL:\n return self._url\n\n @reify\n def real_url(self) -> URL:\n return self._real_url\n\n @reify\n def host(self) -> str:\n assert self._url.host is not None\n return self._url.host\n\n @reify\n def headers(self) -> 'CIMultiDictProxy[str]':\n return self._headers\n\n @reify\n def raw_headers(self) -> RawHeaders:\n return self._raw_headers\n\n @reify\n def request_info(self) -> RequestInfo:\n return self._request_info\n\n @reify\n def content_disposition(self) -> Optional[ContentDisposition]:\n raw = self._headers.get(hdrs.CONTENT_DISPOSITION)\n if raw is None:\n return None\n disposition_type, params_dct = multipart.parse_content_disposition(raw)\n params = MappingProxyType(params_dct)\n filename = multipart.content_disposition_filename(params)\n return ContentDisposition(disposition_type, params, filename)\n\n def __del__(self, _warnings: Any=warnings) -> None:\n if self._closed:\n return\n\n if self._connection is not None:\n self._connection.release()\n self._cleanup_writer()\n\n if self._loop.get_debug():\n if PY_36:\n kwargs = {'source': self}\n else:\n kwargs = {}\n _warnings.warn(\"Unclosed response {!r}\".format(self),\n ResourceWarning,\n **kwargs)\n context = {'client_response': self,\n 'message': 'Unclosed response'}\n if self._source_traceback:\n context['source_traceback'] = self._source_traceback\n self._loop.call_exception_handler(context)\n\n def __repr__(self) -> str:\n out = io.StringIO()\n ascii_encodable_url = str(self.url)\n if self.reason:\n ascii_encodable_reason = self.reason.encode('ascii',\n 'backslashreplace') \\\n .decode('ascii')\n else:\n ascii_encodable_reason = self.reason\n print('<ClientResponse({}) [{} {}]>'.format(\n ascii_encodable_url, self.status, ascii_encodable_reason),\n file=out)\n print(self.headers, file=out)\n return out.getvalue()\n\n @property\n def connection(self) -> Optional['Connection']:\n return self._connection\n\n @reify\n def history(self) -> Tuple['ClientResponse', ...]:\n \"\"\"A sequence of responses, if redirects occurred.\"\"\"\n return self._history\n\n @reify\n def links(self) -> 'MultiDictProxy[MultiDictProxy[Union[str, URL]]]':\n links_str = \", \".join(self.headers.getall(\"link\", []))\n\n if not links_str:\n return MultiDictProxy(MultiDict())\n\n links = MultiDict() # type: MultiDict[MultiDictProxy[Union[str, URL]]]\n\n for val in re.split(r\",(?=\\s*<)\", links_str):\n match = re.match(r\"\\s*<(.*)>(.*)\", val)\n if match is None: # pragma: no cover\n # the check exists to suppress mypy error\n continue\n url, params_str = match.groups()\n params = params_str.split(\";\")[1:]\n\n link = MultiDict() # type: MultiDict[Union[str, URL]]\n\n for param in params:\n match = re.match(\n r\"^\\s*(\\S*)\\s*=\\s*(['\\\"]?)(.*?)(\\2)\\s*$\",\n param, re.M\n )\n if match is None: # pragma: no cover\n # the check exists to suppress mypy error\n continue\n key, _, value, _ = match.groups()\n\n link.add(key, value)\n\n key = link.get(\"rel\", url) # type: ignore\n\n link.add(\"url\", self.url.join(URL(url)))\n\n links.add(key, MultiDictProxy(link))\n\n return MultiDictProxy(links)\n\n async def start(self, connection: 'Connection') -> 'ClientResponse':\n \"\"\"Start response processing.\"\"\"\n self._closed = False\n self._protocol = connection.protocol\n self._connection = connection\n\n with self._timer:\n while True:\n # read response\n try:\n message, payload = await self._protocol.read() # type: ignore # noqa\n except http.HttpProcessingError as exc:\n raise ClientResponseError(\n self.request_info, self.history,\n status=exc.code,\n message=exc.message, headers=exc.headers) from exc\n\n if (message.code < 100 or\n message.code > 199 or message.code == 101):\n break\n\n if self._continue is not None:\n set_result(self._continue, True)\n self._continue = None\n\n # payload eof handler\n payload.on_eof(self._response_eof)\n\n # response status\n self.version = message.version\n self.status = message.code\n self.reason = message.reason\n\n # headers\n self._headers = message.headers # type is CIMultiDictProxy\n self._raw_headers = message.raw_headers # type is Tuple[bytes, bytes]\n\n # payload\n self.content = payload\n\n # cookies\n for hdr in self.headers.getall(hdrs.SET_COOKIE, ()):\n try:\n self.cookies.load(hdr)\n except CookieError as exc:\n client_logger.warning(\n 'Can not load response cookies: %s', exc)\n return self\n\n def _response_eof(self) -> None:\n if self._closed:\n return\n\n if self._connection is not None:\n # websocket, protocol could be None because\n # connection could be detached\n if (self._connection.protocol is not None and\n self._connection.protocol.upgraded):\n return\n\n self._connection.release()\n self._connection = None\n\n self._closed = True\n self._cleanup_writer()\n\n @property\n def closed(self) -> bool:\n return self._closed\n\n def close(self) -> None:\n if not self._released:\n self._notify_content()\n if self._closed:\n return\n\n self._closed = True\n if self._loop is None or self._loop.is_closed():\n return\n\n if self._connection is not None:\n self._connection.close()\n self._connection = None\n self._cleanup_writer()\n\n def release(self) -> Any:\n if not self._released:\n self._notify_content()\n if self._closed:\n return noop()\n\n self._closed = True\n if self._connection is not None:\n self._connection.release()\n self._connection = None\n\n self._cleanup_writer()\n return noop()\n\n def raise_for_status(self) -> None:\n if 400 <= self.status:\n # reason should always be not None for a started response\n assert self.reason is not None\n self.release()\n raise ClientResponseError(\n self.request_info,\n self.history,\n status=self.status,\n message=self.reason,\n headers=self.headers)\n\n def _cleanup_writer(self) -> None:\n if self._writer is not None:\n self._writer.cancel()\n self._writer = None\n self._session = None\n\n def _notify_content(self) -> None:\n content = self.content\n if content and content.exception() is None:\n content.set_exception(\n ClientConnectionError('Connection closed'))\n self._released = True\n\n async def wait_for_close(self) -> None:\n if self._writer is not None:\n try:\n await self._writer\n finally:\n self._writer = None\n self.release()\n\n async def read(self) -> bytes:\n \"\"\"Read response payload.\"\"\"\n if self._body is None:\n try:\n self._body = await self.content.read()\n for trace in self._traces:\n await trace.send_response_chunk_received(self._body)\n except BaseException:\n self.close()\n raise\n elif self._released:\n raise ClientConnectionError('Connection closed')\n\n return self._body\n\n def get_encoding(self) -> str:\n ctype = self.headers.get(hdrs.CONTENT_TYPE, '').lower()\n mimetype = helpers.parse_mimetype(ctype)\n\n encoding = mimetype.parameters.get('charset')\n if encoding:\n try:\n codecs.lookup(encoding)\n except LookupError:\n encoding = None\n if not encoding:\n if mimetype.type == 'application' and mimetype.subtype == 'json':\n # RFC 7159 states that the default encoding is UTF-8.\n encoding = 'utf-8'\n else:\n encoding = chardet.detect(self._body)['encoding']\n if not encoding:\n encoding = 'utf-8'\n\n return encoding\n\n async def text(self,\n encoding: Optional[str]=None, errors: str='strict') -> str:\n \"\"\"Read response payload and decode.\"\"\"\n if self._body is None:\n await self.read()\n\n if encoding is None:\n encoding = self.get_encoding()\n\n return self._body.decode(encoding, errors=errors) # type: ignore\n\n async def json(self, *, encoding: str=None,\n loads: JSONDecoder=DEFAULT_JSON_DECODER,\n content_type: Optional[str]='application/json') -> Any:\n \"\"\"Read and decodes JSON response.\"\"\"\n if self._body is None:\n await self.read()\n\n if content_type:\n ctype = self.headers.get(hdrs.CONTENT_TYPE, '').lower()\n if not is_expected_content_type(ctype, content_type):\n raise ContentTypeError(\n self.request_info,\n self.history,\n message=('Attempt to decode JSON with '\n 'unexpected mimetype: %s' % ctype),\n headers=self.headers)\n\n if encoding is None:\n encoding = self.get_encoding()\n\n return loads(self._body.decode(encoding)) # type: ignore\n\n async def __aenter__(self) -> 'ClientResponse':\n return self\n\n async def __aexit__(self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType]) -> None:\n # similar to _RequestContextManager, we do not need to check\n # for exceptions, response object can closes connection\n # is state is broken\n self.release()\n", "path": "aiohttp/client_reqrep.py"}], "after_files": [{"content": "import asyncio\nimport codecs\nimport io\nimport re\nimport sys\nimport traceback\nimport warnings\nfrom hashlib import md5, sha1, sha256\nfrom http.cookies import CookieError, Morsel, SimpleCookie\nfrom types import MappingProxyType, TracebackType\nfrom typing import ( # noqa\n TYPE_CHECKING,\n Any,\n Dict,\n Iterable,\n List,\n Mapping,\n Optional,\n Tuple,\n Type,\n Union,\n cast,\n)\n\nimport attr\nfrom multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy\nfrom yarl import URL\n\nfrom . import hdrs, helpers, http, multipart, payload\nfrom .abc import AbstractStreamWriter\nfrom .client_exceptions import (\n ClientConnectionError,\n ClientOSError,\n ClientResponseError,\n ContentTypeError,\n InvalidURL,\n ServerFingerprintMismatch,\n)\nfrom .formdata import FormData\nfrom .helpers import ( # noqa\n PY_36,\n BaseTimerContext,\n BasicAuth,\n HeadersMixin,\n TimerNoop,\n is_expected_content_type,\n noop,\n reify,\n set_result,\n)\nfrom .http import SERVER_SOFTWARE, HttpVersion10, HttpVersion11, StreamWriter\nfrom .log import client_logger\nfrom .streams import StreamReader # noqa\nfrom .typedefs import (\n DEFAULT_JSON_DECODER,\n JSONDecoder,\n LooseCookies,\n LooseHeaders,\n RawHeaders,\n)\n\ntry:\n import ssl\n from ssl import SSLContext\nexcept ImportError: # pragma: no cover\n ssl = None # type: ignore\n SSLContext = object # type: ignore\n\ntry:\n import cchardet as chardet\nexcept ImportError: # pragma: no cover\n import chardet\n\n\n__all__ = ('ClientRequest', 'ClientResponse', 'RequestInfo', 'Fingerprint')\n\n\nif TYPE_CHECKING: # pragma: no cover\n from .client import ClientSession # noqa\n from .connector import Connection # noqa\n from .tracing import Trace # noqa\n\n\[email protected](frozen=True, slots=True)\nclass ContentDisposition:\n type = attr.ib(type=str) # type: Optional[str]\n parameters = attr.ib(type=MappingProxyType) # type: MappingProxyType[str, str] # noqa\n filename = attr.ib(type=str) # type: Optional[str]\n\n\[email protected](frozen=True, slots=True)\nclass RequestInfo:\n url = attr.ib(type=URL)\n method = attr.ib(type=str)\n headers = attr.ib(type=CIMultiDictProxy) # type: CIMultiDictProxy[str]\n real_url = attr.ib(type=URL)\n\n @real_url.default\n def real_url_default(self) -> URL:\n return self.url\n\n\nclass Fingerprint:\n HASHFUNC_BY_DIGESTLEN = {\n 16: md5,\n 20: sha1,\n 32: sha256,\n }\n\n def __init__(self, fingerprint: bytes) -> None:\n digestlen = len(fingerprint)\n hashfunc = self.HASHFUNC_BY_DIGESTLEN.get(digestlen)\n if not hashfunc:\n raise ValueError('fingerprint has invalid length')\n elif hashfunc is md5 or hashfunc is sha1:\n raise ValueError('md5 and sha1 are insecure and '\n 'not supported. Use sha256.')\n self._hashfunc = hashfunc\n self._fingerprint = fingerprint\n\n @property\n def fingerprint(self) -> bytes:\n return self._fingerprint\n\n def check(self, transport: asyncio.Transport) -> None:\n if not transport.get_extra_info('sslcontext'):\n return\n sslobj = transport.get_extra_info('ssl_object')\n cert = sslobj.getpeercert(binary_form=True)\n got = self._hashfunc(cert).digest()\n if got != self._fingerprint:\n host, port, *_ = transport.get_extra_info('peername')\n raise ServerFingerprintMismatch(self._fingerprint,\n got, host, port)\n\n\nif ssl is not None:\n SSL_ALLOWED_TYPES = (ssl.SSLContext, bool, Fingerprint, type(None))\nelse: # pragma: no cover\n SSL_ALLOWED_TYPES = type(None)\n\n\[email protected](slots=True, frozen=True)\nclass ConnectionKey:\n # the key should contain an information about used proxy / TLS\n # to prevent reusing wrong connections from a pool\n host = attr.ib(type=str)\n port = attr.ib(type=int) # type: Optional[int]\n is_ssl = attr.ib(type=bool)\n ssl = attr.ib() # type: Union[SSLContext, None, bool, Fingerprint]\n proxy = attr.ib() # type: Optional[URL]\n proxy_auth = attr.ib() # type: Optional[BasicAuth]\n proxy_headers_hash = attr.ib(type=int) # type: Optional[int] # noqa # hash(CIMultiDict)\n\n\nclass ClientRequest:\n GET_METHODS = {\n hdrs.METH_GET,\n hdrs.METH_HEAD,\n hdrs.METH_OPTIONS,\n hdrs.METH_TRACE,\n }\n POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT}\n ALL_METHODS = GET_METHODS.union(POST_METHODS).union({hdrs.METH_DELETE})\n\n DEFAULT_HEADERS = {\n hdrs.ACCEPT: '*/*',\n hdrs.ACCEPT_ENCODING: 'gzip, deflate',\n }\n\n body = b''\n auth = None\n response = None\n response_class = None\n\n _writer = None # async task for streaming data\n _continue = None # waiter future for '100 Continue' response\n\n # N.B.\n # Adding __del__ method with self._writer closing doesn't make sense\n # because _writer is instance method, thus it keeps a reference to self.\n # Until writer has finished finalizer will not be called.\n\n def __init__(self, method: str, url: URL, *,\n params: Optional[Mapping[str, str]]=None,\n headers: Optional[LooseHeaders]=None,\n skip_auto_headers: Iterable[str]=frozenset(),\n data: Any=None,\n cookies: Optional[LooseCookies]=None,\n auth: Optional[BasicAuth]=None,\n version: http.HttpVersion=http.HttpVersion11,\n compress: Optional[str]=None,\n chunked: Optional[bool]=None,\n expect100: bool=False,\n loop: asyncio.AbstractEventLoop,\n response_class: Optional[Type['ClientResponse']]=None,\n proxy: Optional[URL]=None,\n proxy_auth: Optional[BasicAuth]=None,\n timer: Optional[BaseTimerContext]=None,\n session: Optional['ClientSession']=None,\n ssl: Union[SSLContext, bool, Fingerprint, None]=None,\n proxy_headers: Optional[LooseHeaders]=None,\n traces: Optional[List['Trace']]=None):\n\n assert isinstance(url, URL), url\n assert isinstance(proxy, (URL, type(None))), proxy\n # FIXME: session is None in tests only, need to fix tests\n # assert session is not None\n self._session = cast('ClientSession', session)\n if params:\n q = MultiDict(url.query)\n url2 = url.with_query(params)\n q.extend(url2.query)\n url = url.with_query(q)\n self.original_url = url\n self.url = url.with_fragment(None)\n self.method = method.upper()\n self.chunked = chunked\n self.compress = compress\n self.loop = loop\n self.length = None\n if response_class is None:\n real_response_class = ClientResponse\n else:\n real_response_class = response_class\n self.response_class = real_response_class # type: Type[ClientResponse]\n self._timer = timer if timer is not None else TimerNoop()\n self._ssl = ssl\n\n if loop.get_debug():\n self._source_traceback = traceback.extract_stack(sys._getframe(1))\n\n self.update_version(version)\n self.update_host(url)\n self.update_headers(headers)\n self.update_auto_headers(skip_auto_headers)\n self.update_cookies(cookies)\n self.update_content_encoding(data)\n self.update_auth(auth)\n self.update_proxy(proxy, proxy_auth, proxy_headers)\n\n self.update_body_from_data(data)\n if data or self.method not in self.GET_METHODS:\n self.update_transfer_encoding()\n self.update_expect_continue(expect100)\n if traces is None:\n traces = []\n self._traces = traces\n\n def is_ssl(self) -> bool:\n return self.url.scheme in ('https', 'wss')\n\n @property\n def ssl(self) -> Union['SSLContext', None, bool, Fingerprint]:\n return self._ssl\n\n @property\n def connection_key(self) -> ConnectionKey:\n proxy_headers = self.proxy_headers\n if proxy_headers:\n h = hash(tuple((k, v) for k, v in proxy_headers.items())) # type: Optional[int] # noqa\n else:\n h = None\n return ConnectionKey(self.host, self.port, self.is_ssl(),\n self.ssl,\n self.proxy, self.proxy_auth, h)\n\n @property\n def host(self) -> str:\n ret = self.url.host\n assert ret is not None\n return ret\n\n @property\n def port(self) -> Optional[int]:\n return self.url.port\n\n @property\n def request_info(self) -> RequestInfo:\n headers = CIMultiDictProxy(self.headers) # type: CIMultiDictProxy[str]\n return RequestInfo(self.url, self.method,\n headers, self.original_url)\n\n def update_host(self, url: URL) -> None:\n \"\"\"Update destination host, port and connection type (ssl).\"\"\"\n # get host/port\n if not url.host:\n raise InvalidURL(url)\n\n # basic auth info\n username, password = url.user, url.password\n if username:\n self.auth = helpers.BasicAuth(username, password or '')\n\n def update_version(self, version: Union[http.HttpVersion, str]) -> None:\n \"\"\"Convert request version to two elements tuple.\n\n parser HTTP version '1.1' => (1, 1)\n \"\"\"\n if isinstance(version, str):\n v = [l.strip() for l in version.split('.', 1)]\n try:\n version = http.HttpVersion(int(v[0]), int(v[1]))\n except ValueError:\n raise ValueError(\n 'Can not parse http version number: {}'\n .format(version)) from None\n self.version = version\n\n def update_headers(self, headers: Optional[LooseHeaders]) -> None:\n \"\"\"Update request headers.\"\"\"\n self.headers = CIMultiDict() # type: CIMultiDict[str]\n\n # add host\n netloc = cast(str, self.url.raw_host)\n if helpers.is_ipv6_address(netloc):\n netloc = '[{}]'.format(netloc)\n if self.url.port is not None and not self.url.is_default_port():\n netloc += ':' + str(self.url.port)\n self.headers[hdrs.HOST] = netloc\n\n if headers:\n if isinstance(headers, (dict, MultiDictProxy, MultiDict)):\n headers = headers.items() # type: ignore\n\n for key, value in headers:\n # A special case for Host header\n if key.lower() == 'host':\n self.headers[key] = value\n else:\n self.headers.add(key, value)\n\n def update_auto_headers(self, skip_auto_headers: Iterable[str]) -> None:\n self.skip_auto_headers = CIMultiDict(\n (hdr, None) for hdr in sorted(skip_auto_headers))\n used_headers = self.headers.copy()\n used_headers.extend(self.skip_auto_headers) # type: ignore\n\n for hdr, val in self.DEFAULT_HEADERS.items():\n if hdr not in used_headers:\n self.headers.add(hdr, val)\n\n if hdrs.USER_AGENT not in used_headers:\n self.headers[hdrs.USER_AGENT] = SERVER_SOFTWARE\n\n def update_cookies(self, cookies: Optional[LooseCookies]) -> None:\n \"\"\"Update request cookies header.\"\"\"\n if not cookies:\n return\n\n c = SimpleCookie()\n if hdrs.COOKIE in self.headers:\n c.load(self.headers.get(hdrs.COOKIE, ''))\n del self.headers[hdrs.COOKIE]\n\n if isinstance(cookies, Mapping):\n iter_cookies = cookies.items()\n else:\n iter_cookies = cookies # type: ignore\n for name, value in iter_cookies:\n if isinstance(value, Morsel):\n # Preserve coded_value\n mrsl_val = value.get(value.key, Morsel())\n mrsl_val.set(value.key, value.value, value.coded_value) # type: ignore # noqa\n c[name] = mrsl_val\n else:\n c[name] = value # type: ignore\n\n self.headers[hdrs.COOKIE] = c.output(header='', sep=';').strip()\n\n def update_content_encoding(self, data: Any) -> None:\n \"\"\"Set request content encoding.\"\"\"\n if not data:\n return\n\n enc = self.headers.get(hdrs.CONTENT_ENCODING, '').lower()\n if enc:\n if self.compress:\n raise ValueError(\n 'compress can not be set '\n 'if Content-Encoding header is set')\n elif self.compress:\n if not isinstance(self.compress, str):\n self.compress = 'deflate'\n self.headers[hdrs.CONTENT_ENCODING] = self.compress\n self.chunked = True # enable chunked, no need to deal with length\n\n def update_transfer_encoding(self) -> None:\n \"\"\"Analyze transfer-encoding header.\"\"\"\n te = self.headers.get(hdrs.TRANSFER_ENCODING, '').lower()\n\n if 'chunked' in te:\n if self.chunked:\n raise ValueError(\n 'chunked can not be set '\n 'if \"Transfer-Encoding: chunked\" header is set')\n\n elif self.chunked:\n if hdrs.CONTENT_LENGTH in self.headers:\n raise ValueError(\n 'chunked can not be set '\n 'if Content-Length header is set')\n\n self.headers[hdrs.TRANSFER_ENCODING] = 'chunked'\n else:\n if hdrs.CONTENT_LENGTH not in self.headers:\n self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))\n\n def update_auth(self, auth: Optional[BasicAuth]) -> None:\n \"\"\"Set basic auth.\"\"\"\n if auth is None:\n auth = self.auth\n if auth is None:\n return\n\n if not isinstance(auth, helpers.BasicAuth):\n raise TypeError('BasicAuth() tuple is required instead')\n\n self.headers[hdrs.AUTHORIZATION] = auth.encode()\n\n def update_body_from_data(self, body: Any) -> None:\n if not body:\n return\n\n # FormData\n if isinstance(body, FormData):\n body = body()\n\n try:\n body = payload.PAYLOAD_REGISTRY.get(body, disposition=None)\n except payload.LookupError:\n body = FormData(body)()\n\n self.body = body\n\n # enable chunked encoding if needed\n if not self.chunked:\n if hdrs.CONTENT_LENGTH not in self.headers:\n size = body.size\n if size is None:\n self.chunked = True\n else:\n if hdrs.CONTENT_LENGTH not in self.headers:\n self.headers[hdrs.CONTENT_LENGTH] = str(size)\n\n # copy payload headers\n assert body.headers\n for (key, value) in body.headers.items():\n if key in self.headers:\n continue\n if key in self.skip_auto_headers:\n continue\n self.headers[key] = value\n\n def update_expect_continue(self, expect: bool=False) -> None:\n if expect:\n self.headers[hdrs.EXPECT] = '100-continue'\n elif self.headers.get(hdrs.EXPECT, '').lower() == '100-continue':\n expect = True\n\n if expect:\n self._continue = self.loop.create_future()\n\n def update_proxy(self, proxy: Optional[URL],\n proxy_auth: Optional[BasicAuth],\n proxy_headers: Optional[LooseHeaders]) -> None:\n if proxy and not proxy.scheme == 'http':\n raise ValueError(\"Only http proxies are supported\")\n if proxy_auth and not isinstance(proxy_auth, helpers.BasicAuth):\n raise ValueError(\"proxy_auth must be None or BasicAuth() tuple\")\n self.proxy = proxy\n self.proxy_auth = proxy_auth\n self.proxy_headers = proxy_headers\n\n def keep_alive(self) -> bool:\n if self.version < HttpVersion10:\n # keep alive not supported at all\n return False\n if self.version == HttpVersion10:\n if self.headers.get(hdrs.CONNECTION) == 'keep-alive':\n return True\n else: # no headers means we close for Http 1.0\n return False\n elif self.headers.get(hdrs.CONNECTION) == 'close':\n return False\n\n return True\n\n async def write_bytes(self, writer: AbstractStreamWriter,\n conn: 'Connection') -> None:\n \"\"\"Support coroutines that yields bytes objects.\"\"\"\n # 100 response\n if self._continue is not None:\n await writer.drain()\n await self._continue\n\n protocol = conn.protocol\n assert protocol is not None\n try:\n if isinstance(self.body, payload.Payload):\n await self.body.write(writer)\n else:\n if isinstance(self.body, (bytes, bytearray)):\n self.body = (self.body,) # type: ignore\n\n for chunk in self.body:\n await writer.write(chunk) # type: ignore\n\n await writer.write_eof()\n except OSError as exc:\n new_exc = ClientOSError(\n exc.errno,\n 'Can not write request body for %s' % self.url)\n new_exc.__context__ = exc\n new_exc.__cause__ = exc\n protocol.set_exception(new_exc)\n except asyncio.CancelledError as exc:\n if not conn.closed:\n protocol.set_exception(exc)\n except Exception as exc:\n protocol.set_exception(exc)\n finally:\n self._writer = None\n\n async def send(self, conn: 'Connection') -> 'ClientResponse':\n # Specify request target:\n # - CONNECT request must send authority form URI\n # - not CONNECT proxy must send absolute form URI\n # - most common is origin form URI\n if self.method == hdrs.METH_CONNECT:\n connect_host = self.url.raw_host\n assert connect_host is not None\n if helpers.is_ipv6_address(connect_host):\n connect_host = '[{}]'.format(connect_host)\n path = '{}:{}'.format(connect_host, self.url.port)\n elif self.proxy and not self.is_ssl():\n path = str(self.url)\n else:\n path = self.url.raw_path\n if self.url.raw_query_string:\n path += '?' + self.url.raw_query_string\n\n protocol = conn.protocol\n assert protocol is not None\n writer = StreamWriter(\n protocol, self.loop,\n on_chunk_sent=self._on_chunk_request_sent\n )\n\n if self.compress:\n writer.enable_compression(self.compress)\n\n if self.chunked is not None:\n writer.enable_chunking()\n\n # set default content-type\n if (self.method in self.POST_METHODS and\n hdrs.CONTENT_TYPE not in self.skip_auto_headers and\n hdrs.CONTENT_TYPE not in self.headers):\n self.headers[hdrs.CONTENT_TYPE] = 'application/octet-stream'\n\n # set the connection header\n connection = self.headers.get(hdrs.CONNECTION)\n if not connection:\n if self.keep_alive():\n if self.version == HttpVersion10:\n connection = 'keep-alive'\n else:\n if self.version == HttpVersion11:\n connection = 'close'\n\n if connection is not None:\n self.headers[hdrs.CONNECTION] = connection\n\n # status + headers\n status_line = '{0} {1} HTTP/{2[0]}.{2[1]}'.format(\n self.method, path, self.version)\n await writer.write_headers(status_line, self.headers)\n\n self._writer = self.loop.create_task(self.write_bytes(writer, conn))\n\n response_class = self.response_class\n assert response_class is not None\n self.response = response_class(\n self.method, self.original_url,\n writer=self._writer, continue100=self._continue, timer=self._timer,\n request_info=self.request_info,\n traces=self._traces,\n loop=self.loop,\n session=self._session\n )\n return self.response\n\n async def close(self) -> None:\n if self._writer is not None:\n try:\n await self._writer\n finally:\n self._writer = None\n\n def terminate(self) -> None:\n if self._writer is not None:\n if not self.loop.is_closed():\n self._writer.cancel()\n self._writer = None\n\n async def _on_chunk_request_sent(self, chunk: bytes) -> None:\n for trace in self._traces:\n await trace.send_request_chunk_sent(chunk)\n\n\nclass ClientResponse(HeadersMixin):\n\n # from the Status-Line of the response\n version = None # HTTP-Version\n status = None # type: int # Status-Code\n reason = None # Reason-Phrase\n\n content = None # type: StreamReader # Payload stream\n _headers = None # type: CIMultiDictProxy[str] # Response headers\n _raw_headers = None # type: RawHeaders # Response raw headers\n\n _connection = None # current connection\n _source_traceback = None\n # setted up by ClientRequest after ClientResponse object creation\n # post-init stage allows to not change ctor signature\n _closed = True # to allow __del__ for non-initialized properly response\n _released = False\n\n def __init__(self, method: str, url: URL, *,\n writer: 'asyncio.Task[None]',\n continue100: Optional['asyncio.Future[bool]'],\n timer: BaseTimerContext,\n request_info: RequestInfo,\n traces: List['Trace'],\n loop: asyncio.AbstractEventLoop,\n session: 'ClientSession') -> None:\n assert isinstance(url, URL)\n super().__init__()\n\n self.method = method\n self.cookies = SimpleCookie()\n\n self._real_url = url\n self._url = url.with_fragment(None)\n self._body = None # type: Optional[bytes]\n self._writer = writer # type: Optional[asyncio.Task[None]]\n self._continue = continue100 # None by default\n self._closed = True\n self._history = () # type: Tuple[ClientResponse, ...]\n self._request_info = request_info\n self._timer = timer if timer is not None else TimerNoop()\n self._cache = {} # type: Dict[str, Any]\n self._traces = traces\n self._loop = loop\n # store a reference to session #1985\n self._session = session # type: Optional[ClientSession]\n if loop.get_debug():\n self._source_traceback = traceback.extract_stack(sys._getframe(1))\n\n @reify\n def url(self) -> URL:\n return self._url\n\n @reify\n def real_url(self) -> URL:\n return self._real_url\n\n @reify\n def host(self) -> str:\n assert self._url.host is not None\n return self._url.host\n\n @reify\n def headers(self) -> 'CIMultiDictProxy[str]':\n return self._headers\n\n @reify\n def raw_headers(self) -> RawHeaders:\n return self._raw_headers\n\n @reify\n def request_info(self) -> RequestInfo:\n return self._request_info\n\n @reify\n def content_disposition(self) -> Optional[ContentDisposition]:\n raw = self._headers.get(hdrs.CONTENT_DISPOSITION)\n if raw is None:\n return None\n disposition_type, params_dct = multipart.parse_content_disposition(raw)\n params = MappingProxyType(params_dct)\n filename = multipart.content_disposition_filename(params)\n return ContentDisposition(disposition_type, params, filename)\n\n def __del__(self, _warnings: Any=warnings) -> None:\n if self._closed:\n return\n\n if self._connection is not None:\n self._connection.release()\n self._cleanup_writer()\n\n if self._loop.get_debug():\n if PY_36:\n kwargs = {'source': self}\n else:\n kwargs = {}\n _warnings.warn(\"Unclosed response {!r}\".format(self),\n ResourceWarning,\n **kwargs)\n context = {'client_response': self,\n 'message': 'Unclosed response'}\n if self._source_traceback:\n context['source_traceback'] = self._source_traceback\n self._loop.call_exception_handler(context)\n\n def __repr__(self) -> str:\n out = io.StringIO()\n ascii_encodable_url = str(self.url)\n if self.reason:\n ascii_encodable_reason = self.reason.encode('ascii',\n 'backslashreplace') \\\n .decode('ascii')\n else:\n ascii_encodable_reason = self.reason\n print('<ClientResponse({}) [{} {}]>'.format(\n ascii_encodable_url, self.status, ascii_encodable_reason),\n file=out)\n print(self.headers, file=out)\n return out.getvalue()\n\n @property\n def connection(self) -> Optional['Connection']:\n return self._connection\n\n @reify\n def history(self) -> Tuple['ClientResponse', ...]:\n \"\"\"A sequence of responses, if redirects occurred.\"\"\"\n return self._history\n\n @reify\n def links(self) -> 'MultiDictProxy[MultiDictProxy[Union[str, URL]]]':\n links_str = \", \".join(self.headers.getall(\"link\", []))\n\n if not links_str:\n return MultiDictProxy(MultiDict())\n\n links = MultiDict() # type: MultiDict[MultiDictProxy[Union[str, URL]]]\n\n for val in re.split(r\",(?=\\s*<)\", links_str):\n match = re.match(r\"\\s*<(.*)>(.*)\", val)\n if match is None: # pragma: no cover\n # the check exists to suppress mypy error\n continue\n url, params_str = match.groups()\n params = params_str.split(\";\")[1:]\n\n link = MultiDict() # type: MultiDict[Union[str, URL]]\n\n for param in params:\n match = re.match(\n r\"^\\s*(\\S*)\\s*=\\s*(['\\\"]?)(.*?)(\\2)\\s*$\",\n param, re.M\n )\n if match is None: # pragma: no cover\n # the check exists to suppress mypy error\n continue\n key, _, value, _ = match.groups()\n\n link.add(key, value)\n\n key = link.get(\"rel\", url) # type: ignore\n\n link.add(\"url\", self.url.join(URL(url)))\n\n links.add(key, MultiDictProxy(link))\n\n return MultiDictProxy(links)\n\n async def start(self, connection: 'Connection') -> 'ClientResponse':\n \"\"\"Start response processing.\"\"\"\n self._closed = False\n self._protocol = connection.protocol\n self._connection = connection\n\n with self._timer:\n while True:\n # read response\n try:\n message, payload = await self._protocol.read() # type: ignore # noqa\n except http.HttpProcessingError as exc:\n raise ClientResponseError(\n self.request_info, self.history,\n status=exc.code,\n message=exc.message, headers=exc.headers) from exc\n\n if (message.code < 100 or\n message.code > 199 or message.code == 101):\n break\n\n if self._continue is not None:\n set_result(self._continue, True)\n self._continue = None\n\n # payload eof handler\n payload.on_eof(self._response_eof)\n\n # response status\n self.version = message.version\n self.status = message.code\n self.reason = message.reason\n\n # headers\n self._headers = message.headers # type is CIMultiDictProxy\n self._raw_headers = message.raw_headers # type is Tuple[bytes, bytes]\n\n # payload\n self.content = payload\n\n # cookies\n for hdr in self.headers.getall(hdrs.SET_COOKIE, ()):\n try:\n self.cookies.load(hdr)\n except CookieError as exc:\n client_logger.warning(\n 'Can not load response cookies: %s', exc)\n return self\n\n def _response_eof(self) -> None:\n if self._closed:\n return\n\n if self._connection is not None:\n # websocket, protocol could be None because\n # connection could be detached\n if (self._connection.protocol is not None and\n self._connection.protocol.upgraded):\n return\n\n self._connection.release()\n self._connection = None\n\n self._closed = True\n self._cleanup_writer()\n\n @property\n def closed(self) -> bool:\n return self._closed\n\n def close(self) -> None:\n if not self._released:\n self._notify_content()\n if self._closed:\n return\n\n self._closed = True\n if self._loop is None or self._loop.is_closed():\n return\n\n if self._connection is not None:\n self._connection.close()\n self._connection = None\n self._cleanup_writer()\n\n def release(self) -> Any:\n if not self._released:\n self._notify_content()\n if self._closed:\n return noop()\n\n self._closed = True\n if self._connection is not None:\n self._connection.release()\n self._connection = None\n\n self._cleanup_writer()\n return noop()\n\n def raise_for_status(self) -> None:\n if 400 <= self.status:\n # reason should always be not None for a started response\n assert self.reason is not None\n self.release()\n raise ClientResponseError(\n self.request_info,\n self.history,\n status=self.status,\n message=self.reason,\n headers=self.headers)\n\n def _cleanup_writer(self) -> None:\n if self._writer is not None:\n self._writer.cancel()\n self._writer = None\n self._session = None\n\n def _notify_content(self) -> None:\n content = self.content\n if content and content.exception() is None:\n content.set_exception(\n ClientConnectionError('Connection closed'))\n self._released = True\n\n async def wait_for_close(self) -> None:\n if self._writer is not None:\n try:\n await self._writer\n finally:\n self._writer = None\n self.release()\n\n async def read(self) -> bytes:\n \"\"\"Read response payload.\"\"\"\n if self._body is None:\n try:\n self._body = await self.content.read()\n for trace in self._traces:\n await trace.send_response_chunk_received(self._body)\n except BaseException:\n self.close()\n raise\n elif self._released:\n raise ClientConnectionError('Connection closed')\n\n return self._body\n\n def get_encoding(self) -> str:\n ctype = self.headers.get(hdrs.CONTENT_TYPE, '').lower()\n mimetype = helpers.parse_mimetype(ctype)\n\n encoding = mimetype.parameters.get('charset')\n if encoding:\n try:\n codecs.lookup(encoding)\n except LookupError:\n encoding = None\n if not encoding:\n if mimetype.type == 'application' and mimetype.subtype == 'json':\n # RFC 7159 states that the default encoding is UTF-8.\n encoding = 'utf-8'\n else:\n encoding = chardet.detect(self._body)['encoding']\n if not encoding:\n encoding = 'utf-8'\n\n return encoding\n\n async def text(self,\n encoding: Optional[str]=None, errors: str='strict') -> str:\n \"\"\"Read response payload and decode.\"\"\"\n if self._body is None:\n await self.read()\n\n if encoding is None:\n encoding = self.get_encoding()\n\n return self._body.decode(encoding, errors=errors) # type: ignore\n\n async def json(self, *, encoding: str=None,\n loads: JSONDecoder=DEFAULT_JSON_DECODER,\n content_type: Optional[str]='application/json') -> Any:\n \"\"\"Read and decodes JSON response.\"\"\"\n if self._body is None:\n await self.read()\n\n if content_type:\n ctype = self.headers.get(hdrs.CONTENT_TYPE, '').lower()\n if not is_expected_content_type(ctype, content_type):\n raise ContentTypeError(\n self.request_info,\n self.history,\n message=('Attempt to decode JSON with '\n 'unexpected mimetype: %s' % ctype),\n headers=self.headers)\n\n if encoding is None:\n encoding = self.get_encoding()\n\n return loads(self._body.decode(encoding)) # type: ignore\n\n async def __aenter__(self) -> 'ClientResponse':\n return self\n\n async def __aexit__(self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType]) -> None:\n # similar to _RequestContextManager, we do not need to check\n # for exceptions, response object can closes connection\n # is state is broken\n self.release()\n", "path": "aiohttp/client_reqrep.py"}]} |
gh_patches_debug_1504 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-2466 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot list all the rules
### CloudFormation Lint Version
0.70.0
### What operating system are you using?
Windows
### Describe the bug
`cfn-lint --list-rules` throws below Error.(username is masked.)
```
Traceback (most recent call last):
File "C:\Users\${username}\AppData\Local\Programs\Python\Python39\lib\runpy.py", line 197, in _run_module_as_main
return _run_code(code, main_globals, None,
File "C:\Users\${username}\AppData\Local\Programs\Python\Python39\lib\runpy.py", line 87, in _run_code
exec(code, run_globals)
File "C:\Users\${username}\AppData\Local\Programs\Python\Python39\Scripts\cfn-lint.exe\__main__.py", line 7, in <module>
File "C:\Users\${username}\AppData\Local\Programs\Python\Python39\lib\site-packages\cfnlint\__main__.py", line 38, in main
(args, filenames, formatter) = cfnlint.core.get_args_filenames(sys.argv[1:])
File "C:\Users\${username}\AppData\Local\Programs\Python\Python39\lib\site-packages\cfnlint\core.py", line 235, in get_args_filenames
print(rules)
File "C:\Users\${username}\AppData\Local\Programs\Python\Python39\lib\site-packages\cfnlint\rules\__init__.py", line 306, in __repr__
[rule.verbose() for rule in sorted(self.rules, key=lambda x: x.id)]
File "C:\Users\${username}\AppData\Local\Programs\Python\Python39\lib\site-packages\cfnlint\rules\__init__.py", line 306, in <lambda>
[rule.verbose() for rule in sorted(self.rules, key=lambda x: x.id)]
AttributeError: 'str' object has no attribute 'id'
```
### Expected behavior
show list all the rules.
### Reproduction template
This is not bug of linting.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/__init__.py`
Content:
```
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import os
6 import logging
7 from datetime import datetime
8 import importlib
9 import traceback
10 from typing import Any, Dict, List, Optional, Tuple, Union
11 from cfnlint.exceptions import DuplicateRuleError
12 import cfnlint.helpers
13 import cfnlint.rules.custom
14 from cfnlint.decode.node import TemplateAttributeError
15 from cfnlint.template import Template
16
17
18 LOGGER = logging.getLogger(__name__)
19
20
21 def matching(match_type: Any):
22 """Does Logging for match functions"""
23
24 def decorator(match_function):
25 """The Actual Decorator"""
26
27 def wrapper(self, filename, cfn, *args, **kwargs):
28 """Wrapper"""
29 matches = []
30
31 if not getattr(self, match_type):
32 return []
33
34 if match_type == 'match_resource_properties':
35 if args[1] not in self.resource_property_types:
36 return []
37 elif match_type == 'match_resource_sub_properties':
38 if args[1] not in self.resource_sub_property_types:
39 return []
40
41 start = datetime.now()
42 LOGGER.debug('Starting match function for rule %s at %s', self.id, start)
43 # pylint: disable=E1102
44 results = match_function(self, filename, cfn, *args, **kwargs)
45 LOGGER.debug(
46 'Complete match function for rule %s at %s. Ran in %s',
47 self.id,
48 datetime.now(),
49 datetime.now() - start,
50 )
51 LOGGER.debug('Results from rule %s are %s: ', self.id, results)
52
53 if results:
54 for result in results:
55 error_rule = self
56 if hasattr(result, 'rule'):
57 error_rule = result.rule
58 linenumbers: Union[Tuple[int, int, int, int], None] = None
59 if hasattr(result, 'location'):
60 linenumbers = result.location
61 else:
62 linenumbers = cfn.get_location_yaml(cfn.template, result.path)
63 if linenumbers:
64 matches.append(
65 Match(
66 linenumbers[0] + 1,
67 linenumbers[1] + 1,
68 linenumbers[2] + 1,
69 linenumbers[3] + 1,
70 filename,
71 error_rule,
72 result.message,
73 result,
74 )
75 )
76 else:
77 matches.append(
78 Match(
79 1, 1, 1, 1, filename, error_rule, result.message, result
80 )
81 )
82
83 return matches
84
85 return wrapper
86
87 return decorator
88
89
90 class CloudFormationLintRule:
91 """CloudFormation linter rules"""
92
93 id: str = ''
94 shortdesc: str = ''
95 description: str = ''
96 source_url: str = ''
97 tags: List[str] = []
98 experimental: bool = False
99 child_rules: Dict[str, Any] = {}
100
101 logger = logging.getLogger(__name__)
102
103 def __init__(self):
104 self.resource_property_types = []
105 self.resource_sub_property_types = []
106 self.config = {} # `-X E3012:strict=false`... Show more
107 self.config_definition = {}
108
109 def __repr__(self):
110 return f'{self.id}: {self.shortdesc}'
111
112 @property
113 def severity(self):
114 """Severity level"""
115 levels = {
116 'I': 'informational',
117 'E': 'error',
118 'W': 'warning',
119 }
120 return levels.get(self.id[0].upper(), 'unknown')
121
122 def verbose(self):
123 """Verbose output"""
124 return f'{self.id}: {self.shortdesc}\n{self.description}'
125
126 def initialize(self, cfn):
127 """Initialize the rule"""
128
129 def is_enabled(
130 self,
131 include_experimental=False,
132 ignore_rules=None,
133 include_rules=None,
134 mandatory_rules=None,
135 ):
136 """Is the rule enabled based on the configuration"""
137 ignore_rules = ignore_rules or []
138 include_rules = include_rules or []
139 mandatory_rules = mandatory_rules or []
140
141 # Evaluate experimental rules
142 if self.experimental and not include_experimental:
143 return False
144
145 # Evaluate includes first:
146 include_filter = False
147 for include_rule in include_rules:
148 if self.id.startswith(include_rule):
149 include_filter = True
150 if not include_filter:
151 return False
152
153 # Enable mandatory rules without checking for if they are ignored
154 for mandatory_rule in mandatory_rules:
155 if self.id.startswith(mandatory_rule):
156 return True
157
158 # Allowing ignoring of rules based on prefix to ignore checks
159 for ignore_rule in ignore_rules:
160 if self.id.startswith(ignore_rule) and ignore_rule:
161 return False
162
163 return True
164
165 def configure(self, configs=None):
166 """Set the configuration"""
167
168 # set defaults
169 if isinstance(self.config_definition, dict):
170 for config_name, config_values in self.config_definition.items():
171 self.config[config_name] = config_values['default']
172
173 if isinstance(configs, dict):
174 for key, value in configs.items():
175 if key in self.config_definition:
176 if self.config_definition[key]['type'] == 'boolean':
177 self.config[key] = cfnlint.helpers.bool_compare(value, True)
178 elif self.config_definition[key]['type'] == 'string':
179 self.config[key] = str(value)
180 elif self.config_definition[key]['type'] == 'integer':
181 self.config[key] = int(value)
182 elif self.config_definition[key]['type'] == 'list':
183 self.config[key] = []
184 for l_value in value:
185 if self.config_definition[key]['itemtype'] == 'boolean':
186 self.config[key].append(
187 cfnlint.helpers.bool_compare(l_value, True)
188 )
189 elif self.config_definition[key]['itemtype'] == 'string':
190 self.config[key].append(str(l_value))
191 elif self.config_definition[key]['itemtype'] == 'integer':
192 self.config[key].append(int(l_value))
193
194 match = None
195 match_resource_properties = None
196 match_resource_sub_properties = None
197
198 @matching('match')
199 # pylint: disable=W0613
200 def matchall(self, filename, cfn):
201 """Match the entire file"""
202 return self.match(cfn) # pylint: disable=E1102
203
204 @matching('match_resource_properties')
205 # pylint: disable=W0613
206 def matchall_resource_properties(
207 self, filename, cfn, resource_properties, property_type, path
208 ):
209 """Check for resource properties type"""
210 return self.match_resource_properties( # pylint: disable=E1102
211 resource_properties, property_type, path, cfn
212 )
213
214 @matching('match_resource_sub_properties')
215 # pylint: disable=W0613
216 def matchall_resource_sub_properties(
217 self, filename, cfn, resource_properties, property_type, path
218 ):
219 """Check for resource properties type"""
220 return self.match_resource_sub_properties( # pylint: disable=E1102
221 resource_properties, property_type, path, cfn
222 )
223
224
225 # pylint: disable=too-many-instance-attributes
226 class RulesCollection:
227 """Collection of rules"""
228
229 def __init__(
230 self,
231 ignore_rules=None,
232 include_rules=None,
233 configure_rules=None,
234 include_experimental=False,
235 mandatory_rules=None,
236 ):
237 self.rules: Dict[str, CloudFormationLintRule] = {}
238 self.all_rules: Dict[str, CloudFormationLintRule] = {}
239 self.used_rules = set()
240
241 self.configure(
242 ignore_rules=ignore_rules,
243 include_rules=include_rules,
244 configure_rules=configure_rules,
245 include_experimental=include_experimental,
246 mandatory_rules=mandatory_rules,
247 )
248
249 def configure(
250 self,
251 ignore_rules=None,
252 include_rules=None,
253 configure_rules=None,
254 include_experimental=False,
255 mandatory_rules=None,
256 ):
257 self.rules: Dict[str, CloudFormationLintRule] = {}
258 # Whether "experimental" rules should be added
259 self.include_experimental = include_experimental
260
261 # Make Ignore Rules not required
262 self.ignore_rules = ignore_rules or []
263 self.include_rules = include_rules or []
264 self.mandatory_rules = mandatory_rules or []
265 self.configure_rules = configure_rules or {}
266 # by default include 'W' and 'E'
267 # 'I' has to be included manually for backwards compabitility
268 # Have to add W, E here because integrations don't use config
269 for default_rule in ['W', 'E']:
270 if default_rule not in self.include_rules:
271 self.include_rules.extend([default_rule])
272
273 for rule in self.all_rules.values():
274 self.__register(rule)
275
276 def __register(self, rule: CloudFormationLintRule):
277 """Register and configure the rule"""
278 if self.is_rule_enabled(rule):
279 self.used_rules.add(rule.id)
280 self.rules[rule.id] = rule
281 rule.configure(self.configure_rules.get(rule.id, None))
282
283 def register(self, rule: CloudFormationLintRule):
284 """Register rules"""
285 # Some rules are inheritited to limit code re-use.
286 # These rules have no rule ID so we filter this out
287 if rule.id != '':
288 if rule.id in self.all_rules:
289 raise DuplicateRuleError(rule_id=rule.id)
290 self.all_rules[rule.id] = rule
291 self.__register(rule)
292
293 def __iter__(self):
294 return iter(self.rules.values())
295
296 def __len__(self):
297 return len(self.rules.keys())
298
299 def extend(self, more):
300 """Extend rules"""
301 for rule in more:
302 self.register(rule)
303
304 def __repr__(self):
305 return '\n'.join(
306 [rule.verbose() for rule in sorted(self.rules, key=lambda x: x.id)]
307 )
308
309 def is_rule_enabled(self, rule: CloudFormationLintRule):
310 """Checks if an individual rule is valid"""
311 return rule.is_enabled(
312 self.include_experimental,
313 self.ignore_rules,
314 self.include_rules,
315 self.mandatory_rules,
316 )
317
318 # pylint: disable=inconsistent-return-statements
319 def run_check(self, check, filename, rule_id, *args):
320 """Run a check"""
321 try:
322 return check(*args)
323 except TemplateAttributeError as err:
324 LOGGER.debug(str(err))
325 return []
326 except Exception as err: # pylint: disable=W0703
327 if self.is_rule_enabled(RuleError()):
328 # In debug mode, print the error include complete stack trace
329 if LOGGER.getEffectiveLevel() == logging.DEBUG:
330 error_message = traceback.format_exc()
331 else:
332 error_message = str(err)
333 message = 'Unknown exception while processing rule {}: {}'
334 return [
335 Match(
336 1,
337 1,
338 1,
339 1,
340 filename,
341 RuleError(),
342 message.format(rule_id, error_message),
343 )
344 ]
345
346 def resource_property(
347 self, filename, cfn, path, properties, resource_type, property_type
348 ):
349 """Run loops in resource checks for embedded properties"""
350 matches = []
351 property_spec = cfnlint.helpers.RESOURCE_SPECS['us-east-1'].get('PropertyTypes')
352 if property_type == 'Tag':
353 property_spec_name = 'Tag'
354 else:
355 property_spec_name = f'{resource_type}.{property_type}'
356
357 if property_spec_name in property_spec:
358 for rule in self.rules.values():
359 if isinstance(properties, dict):
360 if len(properties) == 1:
361 for k, _ in properties.items():
362 if k != 'Fn::If':
363 matches.extend(
364 self.run_check(
365 rule.matchall_resource_sub_properties,
366 filename,
367 rule.id,
368 filename,
369 cfn,
370 properties,
371 property_spec_name,
372 path,
373 )
374 )
375 else:
376 matches.extend(
377 self.run_check(
378 rule.matchall_resource_sub_properties,
379 filename,
380 rule.id,
381 filename,
382 cfn,
383 properties,
384 property_spec_name,
385 path,
386 )
387 )
388 else:
389 matches.extend(
390 self.run_check(
391 rule.matchall_resource_sub_properties,
392 filename,
393 rule.id,
394 filename,
395 cfn,
396 properties,
397 property_spec_name,
398 path,
399 )
400 )
401
402 resource_spec_properties = property_spec.get(property_spec_name, {}).get(
403 'Properties'
404 )
405 if not resource_spec_properties:
406 if property_spec.get(property_spec_name, {}).get('Type') == 'List':
407 if isinstance(properties, list):
408 property_type = property_spec.get(property_spec_name, {}).get(
409 'ItemType'
410 )
411 for index, item in enumerate(properties):
412 matches.extend(
413 self.resource_property(
414 filename,
415 cfn,
416 path[:] + [index],
417 item,
418 resource_type,
419 property_type,
420 )
421 )
422 return matches
423 if isinstance(properties, dict):
424 for resource_property, resource_property_value in properties.items():
425 property_path = path[:] + [resource_property]
426 resource_spec_property = resource_spec_properties.get(
427 resource_property, {}
428 )
429 if resource_property not in resource_spec_properties:
430 if resource_property == 'Fn::If':
431 if isinstance(resource_property_value, list):
432 if len(resource_property_value) == 3:
433 for index, c_value in enumerate(
434 resource_property_value[1:]
435 ):
436 if isinstance(c_value, list):
437 for s_i, c_l_value in enumerate(c_value):
438 matches.extend(
439 self.resource_property(
440 filename,
441 cfn,
442 property_path[:]
443 + [index + 1]
444 + [s_i],
445 c_l_value,
446 resource_type,
447 property_type,
448 )
449 )
450 else:
451 matches.extend(
452 self.resource_property(
453 filename,
454 cfn,
455 property_path[:] + [index + 1],
456 c_value,
457 resource_type,
458 property_type,
459 )
460 )
461 continue
462 if resource_spec_property.get(
463 'Type'
464 ) == 'List' and not resource_spec_properties.get(
465 'PrimitiveItemType'
466 ):
467 if isinstance(resource_property_value, (list)):
468 for index, value in enumerate(resource_property_value):
469 matches.extend(
470 self.resource_property(
471 filename,
472 cfn,
473 property_path[:] + [index],
474 value,
475 resource_type,
476 resource_spec_property.get('ItemType'),
477 )
478 )
479 elif resource_spec_property.get('Type'):
480 if isinstance(resource_property_value, (dict)):
481 matches.extend(
482 self.resource_property(
483 filename,
484 cfn,
485 property_path,
486 resource_property_value,
487 resource_type,
488 resource_spec_property.get('Type'),
489 )
490 )
491
492 return matches
493
494 def run_resource(self, filename, cfn, resource_type, resource_properties, path):
495 """Run loops in resource checks for embedded properties"""
496 matches = []
497 resource_spec = cfnlint.helpers.RESOURCE_SPECS['us-east-1'].get('ResourceTypes')
498 if resource_properties is not None and resource_type in resource_spec:
499 resource_spec_properties = resource_spec.get(resource_type, {}).get(
500 'Properties'
501 )
502 items_safe = resource_properties.items_safe(path, type_t=(dict))
503 for resource_properties_safe, path_safe in items_safe:
504 for (
505 resource_property,
506 resource_property_value,
507 ) in resource_properties_safe.items():
508 resource_spec_property = resource_spec_properties.get(
509 resource_property, {}
510 )
511 if resource_spec_property.get(
512 'Type'
513 ) == 'List' and not resource_spec_properties.get(
514 'PrimitiveItemType'
515 ):
516 if isinstance(resource_property_value, (list)):
517 for index, value in enumerate(resource_property_value):
518 matches.extend(
519 self.resource_property(
520 filename,
521 cfn,
522 path_safe[:] + [resource_property, index],
523 value,
524 resource_type,
525 resource_spec_property.get('ItemType'),
526 )
527 )
528 elif resource_spec_property.get('Type'):
529 if isinstance(resource_property_value, (dict)):
530 matches.extend(
531 self.resource_property(
532 filename,
533 cfn,
534 path_safe[:] + [resource_property],
535 resource_property_value,
536 resource_type,
537 resource_spec_property.get('Type'),
538 )
539 )
540
541 return matches
542
543 def run(self, filename: Optional[str], cfn: Template):
544 """Run rules"""
545 matches = []
546 for rule in self.rules.values():
547 rule.initialize(cfn)
548
549 for rule in self.rules.values():
550 for key in rule.child_rules.keys():
551 rule.child_rules[key] = self.rules.get(key)
552
553 for rule in self.rules.values():
554 matches.extend(
555 self.run_check(rule.matchall, filename, rule.id, filename, cfn)
556 )
557
558 for resource_name, resource_attributes in cfn.get_resources().items():
559 resource_type = resource_attributes.get('Type')
560 resource_properties = resource_attributes.get('Properties')
561 if isinstance(resource_type, str) and isinstance(resource_properties, dict):
562 path = ['Resources', resource_name, 'Properties']
563 for rule in self.rules.values():
564 matches.extend(
565 self.run_check(
566 rule.matchall_resource_properties,
567 filename,
568 rule.id,
569 filename,
570 cfn,
571 resource_properties,
572 resource_type,
573 path,
574 )
575 )
576
577 matches.extend(
578 self.run_resource(
579 filename, cfn, resource_type, resource_properties, path
580 )
581 )
582
583 return matches
584
585 def create_from_module(self, modpath):
586 """Create rules from a module import path"""
587 mod = importlib.import_module(modpath)
588 self.extend(cfnlint.helpers.create_rules(mod))
589
590 def create_from_directory(self, rulesdir):
591 """Create rules from directory"""
592 result = []
593 if rulesdir != '':
594 result = cfnlint.helpers.load_plugins(os.path.expanduser(rulesdir))
595 self.extend(result)
596
597 def create_from_custom_rules_file(self, custom_rules_file):
598 """Create rules from custom rules file"""
599 custom_rules = []
600 if custom_rules_file:
601 with open(custom_rules_file, encoding='utf-8') as customRules:
602 line_number = 1
603 for line in customRules:
604 LOGGER.debug('Processing Custom Rule Line %d', line_number)
605 custom_rule = cfnlint.rules.custom.make_rule(line, line_number)
606 if custom_rule:
607 custom_rules.append(custom_rule)
608 line_number += 1
609
610 self.extend(custom_rules)
611
612
613 class RuleMatch:
614 """Rules Error"""
615
616 def __init__(self, path, message, **kwargs):
617 """Init"""
618 self.path = path
619 self.path_string = '/'.join(map(str, path))
620 self.message = message
621 for k, v in kwargs.items():
622 setattr(self, k, v)
623
624 def __eq__(self, item):
625 """Override unique"""
626 return (self.path, self.message) == (item.path, item.message)
627
628 def __hash__(self):
629 """Hash for comparisons"""
630 return hash((self.path, self.message))
631
632
633 class Match: # pylint: disable=R0902
634 """Match Classes"""
635
636 def __init__(
637 self,
638 linenumber,
639 columnnumber,
640 linenumberend,
641 columnnumberend,
642 filename,
643 rule,
644 message=None,
645 rulematch_obj=None,
646 ):
647 """Init"""
648 self.linenumber = linenumber
649 '''Starting line number of the region this match spans'''
650 self.columnnumber = columnnumber
651 '''Starting line number of the region this match spans'''
652 self.linenumberend = linenumberend
653 '''Ending line number of the region this match spans'''
654 self.columnnumberend = columnnumberend
655 '''Ending column number of the region this match spans'''
656 self.filename = filename
657 '''Name of the filename associated with this match, or None if there is no such file'''
658 self.rule = rule
659 '''The rule of this match'''
660 self.message = message # or rule.shortdesc
661 '''The message of this match'''
662 if rulematch_obj:
663 for k, v in vars(rulematch_obj).items():
664 if not hasattr(self, k):
665 setattr(self, k, v)
666
667 def __repr__(self):
668 """Represent"""
669 file_str = self.filename + ':' if self.filename else ''
670 return f'[{self.rule}] ({self.message}) matched {file_str}{self.linenumber}'
671
672 def __eq__(self, item):
673 """Override equal to compare matches"""
674 return (self.linenumber, self.columnnumber, self.rule.id, self.message) == (
675 item.linenumber,
676 item.columnnumber,
677 item.rule.id,
678 item.message,
679 )
680
681
682 class ParseError(CloudFormationLintRule):
683 """Parse Lint Rule"""
684
685 id = 'E0000'
686 shortdesc = 'Parsing error found when parsing the template'
687 description = 'Checks for JSON/YAML formatting errors in your template'
688 source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'
689 tags = ['base']
690
691
692 class TransformError(CloudFormationLintRule):
693 """Transform Lint Rule"""
694
695 id = 'E0001'
696 shortdesc = 'Error found when transforming the template'
697 description = 'Errors found when performing transformation on the template'
698 source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'
699 tags = ['base', 'transform']
700
701
702 class RuleError(CloudFormationLintRule):
703 """Rule processing Error"""
704
705 id = 'E0002'
706 shortdesc = 'Error processing rule on the template'
707 description = 'Errors found when processing a rule on the template'
708 source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'
709 tags = ['base', 'rule']
710
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/__init__.py b/src/cfnlint/rules/__init__.py
--- a/src/cfnlint/rules/__init__.py
+++ b/src/cfnlint/rules/__init__.py
@@ -302,9 +302,7 @@
self.register(rule)
def __repr__(self):
- return '\n'.join(
- [rule.verbose() for rule in sorted(self.rules, key=lambda x: x.id)]
- )
+ return '\n'.join([self.rules[id].verbose() for id in sorted(self.rules)])
def is_rule_enabled(self, rule: CloudFormationLintRule):
"""Checks if an individual rule is valid"""
| {"golden_diff": "diff --git a/src/cfnlint/rules/__init__.py b/src/cfnlint/rules/__init__.py\n--- a/src/cfnlint/rules/__init__.py\n+++ b/src/cfnlint/rules/__init__.py\n@@ -302,9 +302,7 @@\n self.register(rule)\n \n def __repr__(self):\n- return '\\n'.join(\n- [rule.verbose() for rule in sorted(self.rules, key=lambda x: x.id)]\n- )\n+ return '\\n'.join([self.rules[id].verbose() for id in sorted(self.rules)])\n \n def is_rule_enabled(self, rule: CloudFormationLintRule):\n \"\"\"Checks if an individual rule is valid\"\"\"\n", "issue": "Cannot list all the rules\n### CloudFormation Lint Version\n\n0.70.0\n\n### What operating system are you using?\n\nWindows\n\n### Describe the bug\n\n`cfn-lint --list-rules` throws below Error.(username is masked.)\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\${username}\\AppData\\Local\\Programs\\Python\\Python39\\lib\\runpy.py\", line 197, in _run_module_as_main\r\n return _run_code(code, main_globals, None,\r\n File \"C:\\Users\\${username}\\AppData\\Local\\Programs\\Python\\Python39\\lib\\runpy.py\", line 87, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Users\\${username}\\AppData\\Local\\Programs\\Python\\Python39\\Scripts\\cfn-lint.exe\\__main__.py\", line 7, in <module>\r\n File \"C:\\Users\\${username}\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\cfnlint\\__main__.py\", line 38, in main\r\n (args, filenames, formatter) = cfnlint.core.get_args_filenames(sys.argv[1:])\r\n File \"C:\\Users\\${username}\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\cfnlint\\core.py\", line 235, in get_args_filenames\r\n print(rules)\r\n File \"C:\\Users\\${username}\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\cfnlint\\rules\\__init__.py\", line 306, in __repr__\r\n [rule.verbose() for rule in sorted(self.rules, key=lambda x: x.id)]\r\n File \"C:\\Users\\${username}\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\cfnlint\\rules\\__init__.py\", line 306, in <lambda>\r\n [rule.verbose() for rule in sorted(self.rules, key=lambda x: x.id)]\r\nAttributeError: 'str' object has no attribute 'id'\r\n```\r\n\n\n### Expected behavior\n\nshow list all the rules.\n\n### Reproduction template\n\nThis is not bug of linting.\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport os\nimport logging\nfrom datetime import datetime\nimport importlib\nimport traceback\nfrom typing import Any, Dict, List, Optional, Tuple, Union\nfrom cfnlint.exceptions import DuplicateRuleError\nimport cfnlint.helpers\nimport cfnlint.rules.custom\nfrom cfnlint.decode.node import TemplateAttributeError\nfrom cfnlint.template import Template\n\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef matching(match_type: Any):\n \"\"\"Does Logging for match functions\"\"\"\n\n def decorator(match_function):\n \"\"\"The Actual Decorator\"\"\"\n\n def wrapper(self, filename, cfn, *args, **kwargs):\n \"\"\"Wrapper\"\"\"\n matches = []\n\n if not getattr(self, match_type):\n return []\n\n if match_type == 'match_resource_properties':\n if args[1] not in self.resource_property_types:\n return []\n elif match_type == 'match_resource_sub_properties':\n if args[1] not in self.resource_sub_property_types:\n return []\n\n start = datetime.now()\n LOGGER.debug('Starting match function for rule %s at %s', self.id, start)\n # pylint: disable=E1102\n results = match_function(self, filename, cfn, *args, **kwargs)\n LOGGER.debug(\n 'Complete match function for rule %s at %s. Ran in %s',\n self.id,\n datetime.now(),\n datetime.now() - start,\n )\n LOGGER.debug('Results from rule %s are %s: ', self.id, results)\n\n if results:\n for result in results:\n error_rule = self\n if hasattr(result, 'rule'):\n error_rule = result.rule\n linenumbers: Union[Tuple[int, int, int, int], None] = None\n if hasattr(result, 'location'):\n linenumbers = result.location\n else:\n linenumbers = cfn.get_location_yaml(cfn.template, result.path)\n if linenumbers:\n matches.append(\n Match(\n linenumbers[0] + 1,\n linenumbers[1] + 1,\n linenumbers[2] + 1,\n linenumbers[3] + 1,\n filename,\n error_rule,\n result.message,\n result,\n )\n )\n else:\n matches.append(\n Match(\n 1, 1, 1, 1, filename, error_rule, result.message, result\n )\n )\n\n return matches\n\n return wrapper\n\n return decorator\n\n\nclass CloudFormationLintRule:\n \"\"\"CloudFormation linter rules\"\"\"\n\n id: str = ''\n shortdesc: str = ''\n description: str = ''\n source_url: str = ''\n tags: List[str] = []\n experimental: bool = False\n child_rules: Dict[str, Any] = {}\n\n logger = logging.getLogger(__name__)\n\n def __init__(self):\n self.resource_property_types = []\n self.resource_sub_property_types = []\n self.config = {} # `-X E3012:strict=false`... Show more\n self.config_definition = {}\n\n def __repr__(self):\n return f'{self.id}: {self.shortdesc}'\n\n @property\n def severity(self):\n \"\"\"Severity level\"\"\"\n levels = {\n 'I': 'informational',\n 'E': 'error',\n 'W': 'warning',\n }\n return levels.get(self.id[0].upper(), 'unknown')\n\n def verbose(self):\n \"\"\"Verbose output\"\"\"\n return f'{self.id}: {self.shortdesc}\\n{self.description}'\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n\n def is_enabled(\n self,\n include_experimental=False,\n ignore_rules=None,\n include_rules=None,\n mandatory_rules=None,\n ):\n \"\"\"Is the rule enabled based on the configuration\"\"\"\n ignore_rules = ignore_rules or []\n include_rules = include_rules or []\n mandatory_rules = mandatory_rules or []\n\n # Evaluate experimental rules\n if self.experimental and not include_experimental:\n return False\n\n # Evaluate includes first:\n include_filter = False\n for include_rule in include_rules:\n if self.id.startswith(include_rule):\n include_filter = True\n if not include_filter:\n return False\n\n # Enable mandatory rules without checking for if they are ignored\n for mandatory_rule in mandatory_rules:\n if self.id.startswith(mandatory_rule):\n return True\n\n # Allowing ignoring of rules based on prefix to ignore checks\n for ignore_rule in ignore_rules:\n if self.id.startswith(ignore_rule) and ignore_rule:\n return False\n\n return True\n\n def configure(self, configs=None):\n \"\"\"Set the configuration\"\"\"\n\n # set defaults\n if isinstance(self.config_definition, dict):\n for config_name, config_values in self.config_definition.items():\n self.config[config_name] = config_values['default']\n\n if isinstance(configs, dict):\n for key, value in configs.items():\n if key in self.config_definition:\n if self.config_definition[key]['type'] == 'boolean':\n self.config[key] = cfnlint.helpers.bool_compare(value, True)\n elif self.config_definition[key]['type'] == 'string':\n self.config[key] = str(value)\n elif self.config_definition[key]['type'] == 'integer':\n self.config[key] = int(value)\n elif self.config_definition[key]['type'] == 'list':\n self.config[key] = []\n for l_value in value:\n if self.config_definition[key]['itemtype'] == 'boolean':\n self.config[key].append(\n cfnlint.helpers.bool_compare(l_value, True)\n )\n elif self.config_definition[key]['itemtype'] == 'string':\n self.config[key].append(str(l_value))\n elif self.config_definition[key]['itemtype'] == 'integer':\n self.config[key].append(int(l_value))\n\n match = None\n match_resource_properties = None\n match_resource_sub_properties = None\n\n @matching('match')\n # pylint: disable=W0613\n def matchall(self, filename, cfn):\n \"\"\"Match the entire file\"\"\"\n return self.match(cfn) # pylint: disable=E1102\n\n @matching('match_resource_properties')\n # pylint: disable=W0613\n def matchall_resource_properties(\n self, filename, cfn, resource_properties, property_type, path\n ):\n \"\"\"Check for resource properties type\"\"\"\n return self.match_resource_properties( # pylint: disable=E1102\n resource_properties, property_type, path, cfn\n )\n\n @matching('match_resource_sub_properties')\n # pylint: disable=W0613\n def matchall_resource_sub_properties(\n self, filename, cfn, resource_properties, property_type, path\n ):\n \"\"\"Check for resource properties type\"\"\"\n return self.match_resource_sub_properties( # pylint: disable=E1102\n resource_properties, property_type, path, cfn\n )\n\n\n# pylint: disable=too-many-instance-attributes\nclass RulesCollection:\n \"\"\"Collection of rules\"\"\"\n\n def __init__(\n self,\n ignore_rules=None,\n include_rules=None,\n configure_rules=None,\n include_experimental=False,\n mandatory_rules=None,\n ):\n self.rules: Dict[str, CloudFormationLintRule] = {}\n self.all_rules: Dict[str, CloudFormationLintRule] = {}\n self.used_rules = set()\n\n self.configure(\n ignore_rules=ignore_rules,\n include_rules=include_rules,\n configure_rules=configure_rules,\n include_experimental=include_experimental,\n mandatory_rules=mandatory_rules,\n )\n\n def configure(\n self,\n ignore_rules=None,\n include_rules=None,\n configure_rules=None,\n include_experimental=False,\n mandatory_rules=None,\n ):\n self.rules: Dict[str, CloudFormationLintRule] = {}\n # Whether \"experimental\" rules should be added\n self.include_experimental = include_experimental\n\n # Make Ignore Rules not required\n self.ignore_rules = ignore_rules or []\n self.include_rules = include_rules or []\n self.mandatory_rules = mandatory_rules or []\n self.configure_rules = configure_rules or {}\n # by default include 'W' and 'E'\n # 'I' has to be included manually for backwards compabitility\n # Have to add W, E here because integrations don't use config\n for default_rule in ['W', 'E']:\n if default_rule not in self.include_rules:\n self.include_rules.extend([default_rule])\n\n for rule in self.all_rules.values():\n self.__register(rule)\n\n def __register(self, rule: CloudFormationLintRule):\n \"\"\"Register and configure the rule\"\"\"\n if self.is_rule_enabled(rule):\n self.used_rules.add(rule.id)\n self.rules[rule.id] = rule\n rule.configure(self.configure_rules.get(rule.id, None))\n\n def register(self, rule: CloudFormationLintRule):\n \"\"\"Register rules\"\"\"\n # Some rules are inheritited to limit code re-use.\n # These rules have no rule ID so we filter this out\n if rule.id != '':\n if rule.id in self.all_rules:\n raise DuplicateRuleError(rule_id=rule.id)\n self.all_rules[rule.id] = rule\n self.__register(rule)\n\n def __iter__(self):\n return iter(self.rules.values())\n\n def __len__(self):\n return len(self.rules.keys())\n\n def extend(self, more):\n \"\"\"Extend rules\"\"\"\n for rule in more:\n self.register(rule)\n\n def __repr__(self):\n return '\\n'.join(\n [rule.verbose() for rule in sorted(self.rules, key=lambda x: x.id)]\n )\n\n def is_rule_enabled(self, rule: CloudFormationLintRule):\n \"\"\"Checks if an individual rule is valid\"\"\"\n return rule.is_enabled(\n self.include_experimental,\n self.ignore_rules,\n self.include_rules,\n self.mandatory_rules,\n )\n\n # pylint: disable=inconsistent-return-statements\n def run_check(self, check, filename, rule_id, *args):\n \"\"\"Run a check\"\"\"\n try:\n return check(*args)\n except TemplateAttributeError as err:\n LOGGER.debug(str(err))\n return []\n except Exception as err: # pylint: disable=W0703\n if self.is_rule_enabled(RuleError()):\n # In debug mode, print the error include complete stack trace\n if LOGGER.getEffectiveLevel() == logging.DEBUG:\n error_message = traceback.format_exc()\n else:\n error_message = str(err)\n message = 'Unknown exception while processing rule {}: {}'\n return [\n Match(\n 1,\n 1,\n 1,\n 1,\n filename,\n RuleError(),\n message.format(rule_id, error_message),\n )\n ]\n\n def resource_property(\n self, filename, cfn, path, properties, resource_type, property_type\n ):\n \"\"\"Run loops in resource checks for embedded properties\"\"\"\n matches = []\n property_spec = cfnlint.helpers.RESOURCE_SPECS['us-east-1'].get('PropertyTypes')\n if property_type == 'Tag':\n property_spec_name = 'Tag'\n else:\n property_spec_name = f'{resource_type}.{property_type}'\n\n if property_spec_name in property_spec:\n for rule in self.rules.values():\n if isinstance(properties, dict):\n if len(properties) == 1:\n for k, _ in properties.items():\n if k != 'Fn::If':\n matches.extend(\n self.run_check(\n rule.matchall_resource_sub_properties,\n filename,\n rule.id,\n filename,\n cfn,\n properties,\n property_spec_name,\n path,\n )\n )\n else:\n matches.extend(\n self.run_check(\n rule.matchall_resource_sub_properties,\n filename,\n rule.id,\n filename,\n cfn,\n properties,\n property_spec_name,\n path,\n )\n )\n else:\n matches.extend(\n self.run_check(\n rule.matchall_resource_sub_properties,\n filename,\n rule.id,\n filename,\n cfn,\n properties,\n property_spec_name,\n path,\n )\n )\n\n resource_spec_properties = property_spec.get(property_spec_name, {}).get(\n 'Properties'\n )\n if not resource_spec_properties:\n if property_spec.get(property_spec_name, {}).get('Type') == 'List':\n if isinstance(properties, list):\n property_type = property_spec.get(property_spec_name, {}).get(\n 'ItemType'\n )\n for index, item in enumerate(properties):\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n path[:] + [index],\n item,\n resource_type,\n property_type,\n )\n )\n return matches\n if isinstance(properties, dict):\n for resource_property, resource_property_value in properties.items():\n property_path = path[:] + [resource_property]\n resource_spec_property = resource_spec_properties.get(\n resource_property, {}\n )\n if resource_property not in resource_spec_properties:\n if resource_property == 'Fn::If':\n if isinstance(resource_property_value, list):\n if len(resource_property_value) == 3:\n for index, c_value in enumerate(\n resource_property_value[1:]\n ):\n if isinstance(c_value, list):\n for s_i, c_l_value in enumerate(c_value):\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n property_path[:]\n + [index + 1]\n + [s_i],\n c_l_value,\n resource_type,\n property_type,\n )\n )\n else:\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n property_path[:] + [index + 1],\n c_value,\n resource_type,\n property_type,\n )\n )\n continue\n if resource_spec_property.get(\n 'Type'\n ) == 'List' and not resource_spec_properties.get(\n 'PrimitiveItemType'\n ):\n if isinstance(resource_property_value, (list)):\n for index, value in enumerate(resource_property_value):\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n property_path[:] + [index],\n value,\n resource_type,\n resource_spec_property.get('ItemType'),\n )\n )\n elif resource_spec_property.get('Type'):\n if isinstance(resource_property_value, (dict)):\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n property_path,\n resource_property_value,\n resource_type,\n resource_spec_property.get('Type'),\n )\n )\n\n return matches\n\n def run_resource(self, filename, cfn, resource_type, resource_properties, path):\n \"\"\"Run loops in resource checks for embedded properties\"\"\"\n matches = []\n resource_spec = cfnlint.helpers.RESOURCE_SPECS['us-east-1'].get('ResourceTypes')\n if resource_properties is not None and resource_type in resource_spec:\n resource_spec_properties = resource_spec.get(resource_type, {}).get(\n 'Properties'\n )\n items_safe = resource_properties.items_safe(path, type_t=(dict))\n for resource_properties_safe, path_safe in items_safe:\n for (\n resource_property,\n resource_property_value,\n ) in resource_properties_safe.items():\n resource_spec_property = resource_spec_properties.get(\n resource_property, {}\n )\n if resource_spec_property.get(\n 'Type'\n ) == 'List' and not resource_spec_properties.get(\n 'PrimitiveItemType'\n ):\n if isinstance(resource_property_value, (list)):\n for index, value in enumerate(resource_property_value):\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n path_safe[:] + [resource_property, index],\n value,\n resource_type,\n resource_spec_property.get('ItemType'),\n )\n )\n elif resource_spec_property.get('Type'):\n if isinstance(resource_property_value, (dict)):\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n path_safe[:] + [resource_property],\n resource_property_value,\n resource_type,\n resource_spec_property.get('Type'),\n )\n )\n\n return matches\n\n def run(self, filename: Optional[str], cfn: Template):\n \"\"\"Run rules\"\"\"\n matches = []\n for rule in self.rules.values():\n rule.initialize(cfn)\n\n for rule in self.rules.values():\n for key in rule.child_rules.keys():\n rule.child_rules[key] = self.rules.get(key)\n\n for rule in self.rules.values():\n matches.extend(\n self.run_check(rule.matchall, filename, rule.id, filename, cfn)\n )\n\n for resource_name, resource_attributes in cfn.get_resources().items():\n resource_type = resource_attributes.get('Type')\n resource_properties = resource_attributes.get('Properties')\n if isinstance(resource_type, str) and isinstance(resource_properties, dict):\n path = ['Resources', resource_name, 'Properties']\n for rule in self.rules.values():\n matches.extend(\n self.run_check(\n rule.matchall_resource_properties,\n filename,\n rule.id,\n filename,\n cfn,\n resource_properties,\n resource_type,\n path,\n )\n )\n\n matches.extend(\n self.run_resource(\n filename, cfn, resource_type, resource_properties, path\n )\n )\n\n return matches\n\n def create_from_module(self, modpath):\n \"\"\"Create rules from a module import path\"\"\"\n mod = importlib.import_module(modpath)\n self.extend(cfnlint.helpers.create_rules(mod))\n\n def create_from_directory(self, rulesdir):\n \"\"\"Create rules from directory\"\"\"\n result = []\n if rulesdir != '':\n result = cfnlint.helpers.load_plugins(os.path.expanduser(rulesdir))\n self.extend(result)\n\n def create_from_custom_rules_file(self, custom_rules_file):\n \"\"\"Create rules from custom rules file\"\"\"\n custom_rules = []\n if custom_rules_file:\n with open(custom_rules_file, encoding='utf-8') as customRules:\n line_number = 1\n for line in customRules:\n LOGGER.debug('Processing Custom Rule Line %d', line_number)\n custom_rule = cfnlint.rules.custom.make_rule(line, line_number)\n if custom_rule:\n custom_rules.append(custom_rule)\n line_number += 1\n\n self.extend(custom_rules)\n\n\nclass RuleMatch:\n \"\"\"Rules Error\"\"\"\n\n def __init__(self, path, message, **kwargs):\n \"\"\"Init\"\"\"\n self.path = path\n self.path_string = '/'.join(map(str, path))\n self.message = message\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n def __eq__(self, item):\n \"\"\"Override unique\"\"\"\n return (self.path, self.message) == (item.path, item.message)\n\n def __hash__(self):\n \"\"\"Hash for comparisons\"\"\"\n return hash((self.path, self.message))\n\n\nclass Match: # pylint: disable=R0902\n \"\"\"Match Classes\"\"\"\n\n def __init__(\n self,\n linenumber,\n columnnumber,\n linenumberend,\n columnnumberend,\n filename,\n rule,\n message=None,\n rulematch_obj=None,\n ):\n \"\"\"Init\"\"\"\n self.linenumber = linenumber\n '''Starting line number of the region this match spans'''\n self.columnnumber = columnnumber\n '''Starting line number of the region this match spans'''\n self.linenumberend = linenumberend\n '''Ending line number of the region this match spans'''\n self.columnnumberend = columnnumberend\n '''Ending column number of the region this match spans'''\n self.filename = filename\n '''Name of the filename associated with this match, or None if there is no such file'''\n self.rule = rule\n '''The rule of this match'''\n self.message = message # or rule.shortdesc\n '''The message of this match'''\n if rulematch_obj:\n for k, v in vars(rulematch_obj).items():\n if not hasattr(self, k):\n setattr(self, k, v)\n\n def __repr__(self):\n \"\"\"Represent\"\"\"\n file_str = self.filename + ':' if self.filename else ''\n return f'[{self.rule}] ({self.message}) matched {file_str}{self.linenumber}'\n\n def __eq__(self, item):\n \"\"\"Override equal to compare matches\"\"\"\n return (self.linenumber, self.columnnumber, self.rule.id, self.message) == (\n item.linenumber,\n item.columnnumber,\n item.rule.id,\n item.message,\n )\n\n\nclass ParseError(CloudFormationLintRule):\n \"\"\"Parse Lint Rule\"\"\"\n\n id = 'E0000'\n shortdesc = 'Parsing error found when parsing the template'\n description = 'Checks for JSON/YAML formatting errors in your template'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'\n tags = ['base']\n\n\nclass TransformError(CloudFormationLintRule):\n \"\"\"Transform Lint Rule\"\"\"\n\n id = 'E0001'\n shortdesc = 'Error found when transforming the template'\n description = 'Errors found when performing transformation on the template'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'\n tags = ['base', 'transform']\n\n\nclass RuleError(CloudFormationLintRule):\n \"\"\"Rule processing Error\"\"\"\n\n id = 'E0002'\n shortdesc = 'Error processing rule on the template'\n description = 'Errors found when processing a rule on the template'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'\n tags = ['base', 'rule']\n", "path": "src/cfnlint/rules/__init__.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport os\nimport logging\nfrom datetime import datetime\nimport importlib\nimport traceback\nfrom typing import Any, Dict, List, Optional, Tuple, Union\nfrom cfnlint.exceptions import DuplicateRuleError\nimport cfnlint.helpers\nimport cfnlint.rules.custom\nfrom cfnlint.decode.node import TemplateAttributeError\nfrom cfnlint.template import Template\n\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef matching(match_type: Any):\n \"\"\"Does Logging for match functions\"\"\"\n\n def decorator(match_function):\n \"\"\"The Actual Decorator\"\"\"\n\n def wrapper(self, filename, cfn, *args, **kwargs):\n \"\"\"Wrapper\"\"\"\n matches = []\n\n if not getattr(self, match_type):\n return []\n\n if match_type == 'match_resource_properties':\n if args[1] not in self.resource_property_types:\n return []\n elif match_type == 'match_resource_sub_properties':\n if args[1] not in self.resource_sub_property_types:\n return []\n\n start = datetime.now()\n LOGGER.debug('Starting match function for rule %s at %s', self.id, start)\n # pylint: disable=E1102\n results = match_function(self, filename, cfn, *args, **kwargs)\n LOGGER.debug(\n 'Complete match function for rule %s at %s. Ran in %s',\n self.id,\n datetime.now(),\n datetime.now() - start,\n )\n LOGGER.debug('Results from rule %s are %s: ', self.id, results)\n\n if results:\n for result in results:\n error_rule = self\n if hasattr(result, 'rule'):\n error_rule = result.rule\n linenumbers: Union[Tuple[int, int, int, int], None] = None\n if hasattr(result, 'location'):\n linenumbers = result.location\n else:\n linenumbers = cfn.get_location_yaml(cfn.template, result.path)\n if linenumbers:\n matches.append(\n Match(\n linenumbers[0] + 1,\n linenumbers[1] + 1,\n linenumbers[2] + 1,\n linenumbers[3] + 1,\n filename,\n error_rule,\n result.message,\n result,\n )\n )\n else:\n matches.append(\n Match(\n 1, 1, 1, 1, filename, error_rule, result.message, result\n )\n )\n\n return matches\n\n return wrapper\n\n return decorator\n\n\nclass CloudFormationLintRule:\n \"\"\"CloudFormation linter rules\"\"\"\n\n id: str = ''\n shortdesc: str = ''\n description: str = ''\n source_url: str = ''\n tags: List[str] = []\n experimental: bool = False\n child_rules: Dict[str, Any] = {}\n\n logger = logging.getLogger(__name__)\n\n def __init__(self):\n self.resource_property_types = []\n self.resource_sub_property_types = []\n self.config = {} # `-X E3012:strict=false`... Show more\n self.config_definition = {}\n\n def __repr__(self):\n return f'{self.id}: {self.shortdesc}'\n\n @property\n def severity(self):\n \"\"\"Severity level\"\"\"\n levels = {\n 'I': 'informational',\n 'E': 'error',\n 'W': 'warning',\n }\n return levels.get(self.id[0].upper(), 'unknown')\n\n def verbose(self):\n \"\"\"Verbose output\"\"\"\n return f'{self.id}: {self.shortdesc}\\n{self.description}'\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n\n def is_enabled(\n self,\n include_experimental=False,\n ignore_rules=None,\n include_rules=None,\n mandatory_rules=None,\n ):\n \"\"\"Is the rule enabled based on the configuration\"\"\"\n ignore_rules = ignore_rules or []\n include_rules = include_rules or []\n mandatory_rules = mandatory_rules or []\n\n # Evaluate experimental rules\n if self.experimental and not include_experimental:\n return False\n\n # Evaluate includes first:\n include_filter = False\n for include_rule in include_rules:\n if self.id.startswith(include_rule):\n include_filter = True\n if not include_filter:\n return False\n\n # Enable mandatory rules without checking for if they are ignored\n for mandatory_rule in mandatory_rules:\n if self.id.startswith(mandatory_rule):\n return True\n\n # Allowing ignoring of rules based on prefix to ignore checks\n for ignore_rule in ignore_rules:\n if self.id.startswith(ignore_rule) and ignore_rule:\n return False\n\n return True\n\n def configure(self, configs=None):\n \"\"\"Set the configuration\"\"\"\n\n # set defaults\n if isinstance(self.config_definition, dict):\n for config_name, config_values in self.config_definition.items():\n self.config[config_name] = config_values['default']\n\n if isinstance(configs, dict):\n for key, value in configs.items():\n if key in self.config_definition:\n if self.config_definition[key]['type'] == 'boolean':\n self.config[key] = cfnlint.helpers.bool_compare(value, True)\n elif self.config_definition[key]['type'] == 'string':\n self.config[key] = str(value)\n elif self.config_definition[key]['type'] == 'integer':\n self.config[key] = int(value)\n elif self.config_definition[key]['type'] == 'list':\n self.config[key] = []\n for l_value in value:\n if self.config_definition[key]['itemtype'] == 'boolean':\n self.config[key].append(\n cfnlint.helpers.bool_compare(l_value, True)\n )\n elif self.config_definition[key]['itemtype'] == 'string':\n self.config[key].append(str(l_value))\n elif self.config_definition[key]['itemtype'] == 'integer':\n self.config[key].append(int(l_value))\n\n match = None\n match_resource_properties = None\n match_resource_sub_properties = None\n\n @matching('match')\n # pylint: disable=W0613\n def matchall(self, filename, cfn):\n \"\"\"Match the entire file\"\"\"\n return self.match(cfn) # pylint: disable=E1102\n\n @matching('match_resource_properties')\n # pylint: disable=W0613\n def matchall_resource_properties(\n self, filename, cfn, resource_properties, property_type, path\n ):\n \"\"\"Check for resource properties type\"\"\"\n return self.match_resource_properties( # pylint: disable=E1102\n resource_properties, property_type, path, cfn\n )\n\n @matching('match_resource_sub_properties')\n # pylint: disable=W0613\n def matchall_resource_sub_properties(\n self, filename, cfn, resource_properties, property_type, path\n ):\n \"\"\"Check for resource properties type\"\"\"\n return self.match_resource_sub_properties( # pylint: disable=E1102\n resource_properties, property_type, path, cfn\n )\n\n\n# pylint: disable=too-many-instance-attributes\nclass RulesCollection:\n \"\"\"Collection of rules\"\"\"\n\n def __init__(\n self,\n ignore_rules=None,\n include_rules=None,\n configure_rules=None,\n include_experimental=False,\n mandatory_rules=None,\n ):\n self.rules: Dict[str, CloudFormationLintRule] = {}\n self.all_rules: Dict[str, CloudFormationLintRule] = {}\n self.used_rules = set()\n\n self.configure(\n ignore_rules=ignore_rules,\n include_rules=include_rules,\n configure_rules=configure_rules,\n include_experimental=include_experimental,\n mandatory_rules=mandatory_rules,\n )\n\n def configure(\n self,\n ignore_rules=None,\n include_rules=None,\n configure_rules=None,\n include_experimental=False,\n mandatory_rules=None,\n ):\n self.rules: Dict[str, CloudFormationLintRule] = {}\n # Whether \"experimental\" rules should be added\n self.include_experimental = include_experimental\n\n # Make Ignore Rules not required\n self.ignore_rules = ignore_rules or []\n self.include_rules = include_rules or []\n self.mandatory_rules = mandatory_rules or []\n self.configure_rules = configure_rules or {}\n # by default include 'W' and 'E'\n # 'I' has to be included manually for backwards compabitility\n # Have to add W, E here because integrations don't use config\n for default_rule in ['W', 'E']:\n if default_rule not in self.include_rules:\n self.include_rules.extend([default_rule])\n\n for rule in self.all_rules.values():\n self.__register(rule)\n\n def __register(self, rule: CloudFormationLintRule):\n \"\"\"Register and configure the rule\"\"\"\n if self.is_rule_enabled(rule):\n self.used_rules.add(rule.id)\n self.rules[rule.id] = rule\n rule.configure(self.configure_rules.get(rule.id, None))\n\n def register(self, rule: CloudFormationLintRule):\n \"\"\"Register rules\"\"\"\n # Some rules are inheritited to limit code re-use.\n # These rules have no rule ID so we filter this out\n if rule.id != '':\n if rule.id in self.all_rules:\n raise DuplicateRuleError(rule_id=rule.id)\n self.all_rules[rule.id] = rule\n self.__register(rule)\n\n def __iter__(self):\n return iter(self.rules.values())\n\n def __len__(self):\n return len(self.rules.keys())\n\n def extend(self, more):\n \"\"\"Extend rules\"\"\"\n for rule in more:\n self.register(rule)\n\n def __repr__(self):\n return '\\n'.join([self.rules[id].verbose() for id in sorted(self.rules)])\n\n def is_rule_enabled(self, rule: CloudFormationLintRule):\n \"\"\"Checks if an individual rule is valid\"\"\"\n return rule.is_enabled(\n self.include_experimental,\n self.ignore_rules,\n self.include_rules,\n self.mandatory_rules,\n )\n\n # pylint: disable=inconsistent-return-statements\n def run_check(self, check, filename, rule_id, *args):\n \"\"\"Run a check\"\"\"\n try:\n return check(*args)\n except TemplateAttributeError as err:\n LOGGER.debug(str(err))\n return []\n except Exception as err: # pylint: disable=W0703\n if self.is_rule_enabled(RuleError()):\n # In debug mode, print the error include complete stack trace\n if LOGGER.getEffectiveLevel() == logging.DEBUG:\n error_message = traceback.format_exc()\n else:\n error_message = str(err)\n message = 'Unknown exception while processing rule {}: {}'\n return [\n Match(\n 1,\n 1,\n 1,\n 1,\n filename,\n RuleError(),\n message.format(rule_id, error_message),\n )\n ]\n\n def resource_property(\n self, filename, cfn, path, properties, resource_type, property_type\n ):\n \"\"\"Run loops in resource checks for embedded properties\"\"\"\n matches = []\n property_spec = cfnlint.helpers.RESOURCE_SPECS['us-east-1'].get('PropertyTypes')\n if property_type == 'Tag':\n property_spec_name = 'Tag'\n else:\n property_spec_name = f'{resource_type}.{property_type}'\n\n if property_spec_name in property_spec:\n for rule in self.rules.values():\n if isinstance(properties, dict):\n if len(properties) == 1:\n for k, _ in properties.items():\n if k != 'Fn::If':\n matches.extend(\n self.run_check(\n rule.matchall_resource_sub_properties,\n filename,\n rule.id,\n filename,\n cfn,\n properties,\n property_spec_name,\n path,\n )\n )\n else:\n matches.extend(\n self.run_check(\n rule.matchall_resource_sub_properties,\n filename,\n rule.id,\n filename,\n cfn,\n properties,\n property_spec_name,\n path,\n )\n )\n else:\n matches.extend(\n self.run_check(\n rule.matchall_resource_sub_properties,\n filename,\n rule.id,\n filename,\n cfn,\n properties,\n property_spec_name,\n path,\n )\n )\n\n resource_spec_properties = property_spec.get(property_spec_name, {}).get(\n 'Properties'\n )\n if not resource_spec_properties:\n if property_spec.get(property_spec_name, {}).get('Type') == 'List':\n if isinstance(properties, list):\n property_type = property_spec.get(property_spec_name, {}).get(\n 'ItemType'\n )\n for index, item in enumerate(properties):\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n path[:] + [index],\n item,\n resource_type,\n property_type,\n )\n )\n return matches\n if isinstance(properties, dict):\n for resource_property, resource_property_value in properties.items():\n property_path = path[:] + [resource_property]\n resource_spec_property = resource_spec_properties.get(\n resource_property, {}\n )\n if resource_property not in resource_spec_properties:\n if resource_property == 'Fn::If':\n if isinstance(resource_property_value, list):\n if len(resource_property_value) == 3:\n for index, c_value in enumerate(\n resource_property_value[1:]\n ):\n if isinstance(c_value, list):\n for s_i, c_l_value in enumerate(c_value):\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n property_path[:]\n + [index + 1]\n + [s_i],\n c_l_value,\n resource_type,\n property_type,\n )\n )\n else:\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n property_path[:] + [index + 1],\n c_value,\n resource_type,\n property_type,\n )\n )\n continue\n if resource_spec_property.get(\n 'Type'\n ) == 'List' and not resource_spec_properties.get(\n 'PrimitiveItemType'\n ):\n if isinstance(resource_property_value, (list)):\n for index, value in enumerate(resource_property_value):\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n property_path[:] + [index],\n value,\n resource_type,\n resource_spec_property.get('ItemType'),\n )\n )\n elif resource_spec_property.get('Type'):\n if isinstance(resource_property_value, (dict)):\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n property_path,\n resource_property_value,\n resource_type,\n resource_spec_property.get('Type'),\n )\n )\n\n return matches\n\n def run_resource(self, filename, cfn, resource_type, resource_properties, path):\n \"\"\"Run loops in resource checks for embedded properties\"\"\"\n matches = []\n resource_spec = cfnlint.helpers.RESOURCE_SPECS['us-east-1'].get('ResourceTypes')\n if resource_properties is not None and resource_type in resource_spec:\n resource_spec_properties = resource_spec.get(resource_type, {}).get(\n 'Properties'\n )\n items_safe = resource_properties.items_safe(path, type_t=(dict))\n for resource_properties_safe, path_safe in items_safe:\n for (\n resource_property,\n resource_property_value,\n ) in resource_properties_safe.items():\n resource_spec_property = resource_spec_properties.get(\n resource_property, {}\n )\n if resource_spec_property.get(\n 'Type'\n ) == 'List' and not resource_spec_properties.get(\n 'PrimitiveItemType'\n ):\n if isinstance(resource_property_value, (list)):\n for index, value in enumerate(resource_property_value):\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n path_safe[:] + [resource_property, index],\n value,\n resource_type,\n resource_spec_property.get('ItemType'),\n )\n )\n elif resource_spec_property.get('Type'):\n if isinstance(resource_property_value, (dict)):\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n path_safe[:] + [resource_property],\n resource_property_value,\n resource_type,\n resource_spec_property.get('Type'),\n )\n )\n\n return matches\n\n def run(self, filename: Optional[str], cfn: Template):\n \"\"\"Run rules\"\"\"\n matches = []\n for rule in self.rules.values():\n rule.initialize(cfn)\n\n for rule in self.rules.values():\n for key in rule.child_rules.keys():\n rule.child_rules[key] = self.rules.get(key)\n\n for rule in self.rules.values():\n matches.extend(\n self.run_check(rule.matchall, filename, rule.id, filename, cfn)\n )\n\n for resource_name, resource_attributes in cfn.get_resources().items():\n resource_type = resource_attributes.get('Type')\n resource_properties = resource_attributes.get('Properties')\n if isinstance(resource_type, str) and isinstance(resource_properties, dict):\n path = ['Resources', resource_name, 'Properties']\n for rule in self.rules.values():\n matches.extend(\n self.run_check(\n rule.matchall_resource_properties,\n filename,\n rule.id,\n filename,\n cfn,\n resource_properties,\n resource_type,\n path,\n )\n )\n\n matches.extend(\n self.run_resource(\n filename, cfn, resource_type, resource_properties, path\n )\n )\n\n return matches\n\n def create_from_module(self, modpath):\n \"\"\"Create rules from a module import path\"\"\"\n mod = importlib.import_module(modpath)\n self.extend(cfnlint.helpers.create_rules(mod))\n\n def create_from_directory(self, rulesdir):\n \"\"\"Create rules from directory\"\"\"\n result = []\n if rulesdir != '':\n result = cfnlint.helpers.load_plugins(os.path.expanduser(rulesdir))\n self.extend(result)\n\n def create_from_custom_rules_file(self, custom_rules_file):\n \"\"\"Create rules from custom rules file\"\"\"\n custom_rules = []\n if custom_rules_file:\n with open(custom_rules_file, encoding='utf-8') as customRules:\n line_number = 1\n for line in customRules:\n LOGGER.debug('Processing Custom Rule Line %d', line_number)\n custom_rule = cfnlint.rules.custom.make_rule(line, line_number)\n if custom_rule:\n custom_rules.append(custom_rule)\n line_number += 1\n\n self.extend(custom_rules)\n\n\nclass RuleMatch:\n \"\"\"Rules Error\"\"\"\n\n def __init__(self, path, message, **kwargs):\n \"\"\"Init\"\"\"\n self.path = path\n self.path_string = '/'.join(map(str, path))\n self.message = message\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n def __eq__(self, item):\n \"\"\"Override unique\"\"\"\n return (self.path, self.message) == (item.path, item.message)\n\n def __hash__(self):\n \"\"\"Hash for comparisons\"\"\"\n return hash((self.path, self.message))\n\n\nclass Match: # pylint: disable=R0902\n \"\"\"Match Classes\"\"\"\n\n def __init__(\n self,\n linenumber,\n columnnumber,\n linenumberend,\n columnnumberend,\n filename,\n rule,\n message=None,\n rulematch_obj=None,\n ):\n \"\"\"Init\"\"\"\n self.linenumber = linenumber\n '''Starting line number of the region this match spans'''\n self.columnnumber = columnnumber\n '''Starting line number of the region this match spans'''\n self.linenumberend = linenumberend\n '''Ending line number of the region this match spans'''\n self.columnnumberend = columnnumberend\n '''Ending column number of the region this match spans'''\n self.filename = filename\n '''Name of the filename associated with this match, or None if there is no such file'''\n self.rule = rule\n '''The rule of this match'''\n self.message = message # or rule.shortdesc\n '''The message of this match'''\n if rulematch_obj:\n for k, v in vars(rulematch_obj).items():\n if not hasattr(self, k):\n setattr(self, k, v)\n\n def __repr__(self):\n \"\"\"Represent\"\"\"\n file_str = self.filename + ':' if self.filename else ''\n return f'[{self.rule}] ({self.message}) matched {file_str}{self.linenumber}'\n\n def __eq__(self, item):\n \"\"\"Override equal to compare matches\"\"\"\n return (self.linenumber, self.columnnumber, self.rule.id, self.message) == (\n item.linenumber,\n item.columnnumber,\n item.rule.id,\n item.message,\n )\n\n\nclass ParseError(CloudFormationLintRule):\n \"\"\"Parse Lint Rule\"\"\"\n\n id = 'E0000'\n shortdesc = 'Parsing error found when parsing the template'\n description = 'Checks for JSON/YAML formatting errors in your template'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'\n tags = ['base']\n\n\nclass TransformError(CloudFormationLintRule):\n \"\"\"Transform Lint Rule\"\"\"\n\n id = 'E0001'\n shortdesc = 'Error found when transforming the template'\n description = 'Errors found when performing transformation on the template'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'\n tags = ['base', 'transform']\n\n\nclass RuleError(CloudFormationLintRule):\n \"\"\"Rule processing Error\"\"\"\n\n id = 'E0002'\n shortdesc = 'Error processing rule on the template'\n description = 'Errors found when processing a rule on the template'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'\n tags = ['base', 'rule']\n", "path": "src/cfnlint/rules/__init__.py"}]} |
gh_patches_debug_1505 | rasdani/github-patches | git_diff | flask-admin__flask-admin-434 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
form_overrides does not work with Peewee model backend
I assume this is the same reason as the others -- that the Peewee backend's not quite as up-to-date?
Looking through the code, it appears that form_overrides is not even checked in the Peewee backend, so it shouldn't be too hard for me to fix this.
If you confirm that this is a bug and I'm not missing something, I can send through some code.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flask_admin/contrib/peewee/form.py`
Content:
```
1 from wtforms import fields
2
3 from peewee import (DateTimeField, DateField, TimeField,
4 PrimaryKeyField, ForeignKeyField, BaseModel)
5
6 from wtfpeewee.orm import ModelConverter, model_form
7
8 from flask.ext.admin import form
9 from flask.ext.admin._compat import iteritems, itervalues
10 from flask.ext.admin.model.form import InlineFormAdmin, InlineModelConverterBase
11 from flask.ext.admin.model.fields import InlineModelFormField, InlineFieldList, AjaxSelectField
12
13 from .tools import get_primary_key
14 from .ajax import create_ajax_loader
15
16
17 class InlineModelFormList(InlineFieldList):
18 """
19 Customized inline model form list field.
20 """
21
22 form_field_type = InlineModelFormField
23 """
24 Form field type. Override to use custom field for each inline form
25 """
26
27 def __init__(self, form, model, prop, inline_view, **kwargs):
28 self.form = form
29 self.model = model
30 self.prop = prop
31 self.inline_view = inline_view
32
33 self._pk = get_primary_key(model)
34 super(InlineModelFormList, self).__init__(self.form_field_type(form, self._pk), **kwargs)
35
36 def display_row_controls(self, field):
37 return field.get_pk() is not None
38
39 # *** bryhoyt removed def process() entirely, because I believe it was buggy
40 # (but worked because another part of the code had a complimentary bug)
41 # and I'm not sure why it was necessary anyway.
42 # If we want it back in, we need to fix the following bogus query:
43 # self.model.select().where(attr == data).execute() # `data` is not an ID, and only happened to be so because we patched it in in .contribute() below
44 #
45 # For reference:
46 # .process() introduced in https://github.com/mrjoes/flask-admin/commit/2845e4b28cb40b25e2bf544b327f6202dc7e5709
47 # Fixed, brokenly I think, in https://github.com/mrjoes/flask-admin/commit/4383eef3ce7eb01878f086928f8773adb9de79f8#diff-f87e7cd76fb9bc48c8681b24f238fb13R30
48
49 def populate_obj(self, obj, name):
50 pass
51
52 def save_related(self, obj):
53 model_id = getattr(obj, self._pk)
54
55 attr = getattr(self.model, self.prop)
56 values = self.model.select().where(attr == model_id).execute()
57
58 pk_map = dict((str(getattr(v, self._pk)), v) for v in values)
59
60 # Handle request data
61 for field in self.entries:
62 field_id = field.get_pk()
63
64 if field_id in pk_map:
65 model = pk_map[field_id]
66
67 if self.should_delete(field):
68 model.delete_instance(recursive=True)
69 continue
70 else:
71 model = self.model()
72
73 field.populate_obj(model, None)
74
75 # Force relation
76 setattr(model, self.prop, model_id)
77
78 self.inline_view.on_model_change(field, model)
79
80 model.save()
81
82
83 class CustomModelConverter(ModelConverter):
84 def __init__(self, view, additional=None):
85 super(CustomModelConverter, self).__init__(additional)
86 self.view = view
87
88 self.converters[PrimaryKeyField] = self.handle_pk
89 self.converters[DateTimeField] = self.handle_datetime
90 self.converters[DateField] = self.handle_date
91 self.converters[TimeField] = self.handle_time
92
93 def handle_foreign_key(self, model, field, **kwargs):
94 loader = getattr(self.view, '_form_ajax_refs', {}).get(field.name)
95
96 if loader:
97 if field.null:
98 kwargs['allow_blank'] = True
99
100 return field.name, AjaxSelectField(loader, **kwargs)
101
102 return super(CustomModelConverter, self).handle_foreign_key(model, field, **kwargs)
103
104 def handle_pk(self, model, field, **kwargs):
105 kwargs['validators'] = []
106 return field.name, fields.HiddenField(**kwargs)
107
108 def handle_date(self, model, field, **kwargs):
109 kwargs['widget'] = form.DatePickerWidget()
110 return field.name, fields.DateField(**kwargs)
111
112 def handle_datetime(self, model, field, **kwargs):
113 kwargs['widget'] = form.DateTimePickerWidget()
114 return field.name, fields.DateTimeField(**kwargs)
115
116 def handle_time(self, model, field, **kwargs):
117 return field.name, form.TimeField(**kwargs)
118
119
120 def get_form(model, converter,
121 base_class=form.BaseForm,
122 only=None,
123 exclude=None,
124 field_args=None,
125 allow_pk=False,
126 extra_fields=None):
127 """
128 Create form from peewee model and contribute extra fields, if necessary
129 """
130 result = model_form(model,
131 base_class=base_class,
132 only=only,
133 exclude=exclude,
134 field_args=field_args,
135 allow_pk=allow_pk,
136 converter=converter)
137
138 if extra_fields:
139 for name, field in iteritems(extra_fields):
140 setattr(result, name, form.recreate_field(field))
141
142 return result
143
144
145 class InlineModelConverter(InlineModelConverterBase):
146 """
147 Inline model form helper.
148 """
149
150 inline_field_list_type = InlineModelFormList
151 """
152 Used field list type.
153
154 If you want to do some custom rendering of inline field lists,
155 you can create your own wtforms field and use it instead
156 """
157
158 def get_info(self, p):
159 info = super(InlineModelConverter, self).get_info(p)
160
161 if info is None:
162 if isinstance(p, BaseModel):
163 info = InlineFormAdmin(p)
164 else:
165 model = getattr(p, 'model', None)
166 if model is None:
167 raise Exception('Unknown inline model admin: %s' % repr(p))
168
169 attrs = dict()
170
171 for attr in dir(p):
172 if not attr.startswith('_') and attr != model:
173 attrs[attr] = getattr(p, attr)
174
175 info = InlineFormAdmin(model, **attrs)
176
177 # Resolve AJAX FKs
178 info._form_ajax_refs = self.process_ajax_refs(info)
179
180 return info
181
182 def process_ajax_refs(self, info):
183 refs = getattr(info, 'form_ajax_refs', None)
184
185 result = {}
186
187 if refs:
188 for name, opts in iteritems(refs):
189 new_name = '%s.%s' % (info.model.__name__.lower(), name)
190
191 loader = None
192 if isinstance(opts, (list, tuple)):
193 loader = create_ajax_loader(info.model, new_name, name, opts)
194 else:
195 loader = opts
196
197 result[name] = loader
198 self.view._form_ajax_refs[new_name] = loader
199
200 return result
201
202 def contribute(self, converter, model, form_class, inline_model):
203 # Find property from target model to current model
204 reverse_field = None
205
206 info = self.get_info(inline_model)
207
208 for field in info.model._meta.get_fields():
209 field_type = type(field)
210
211 if field_type == ForeignKeyField:
212 if field.rel_model == model:
213 reverse_field = field
214 break
215 else:
216 raise Exception('Cannot find reverse relation for model %s' % info.model)
217
218 # Remove reverse property from the list
219 ignore = [reverse_field.name]
220
221 if info.form_excluded_columns:
222 exclude = ignore + info.form_excluded_columns
223 else:
224 exclude = ignore
225
226 # Create field
227 child_form = info.get_form()
228
229 if child_form is None:
230 child_form = model_form(info.model,
231 base_class=form.BaseForm,
232 only=info.form_columns,
233 exclude=exclude,
234 field_args=info.form_args,
235 allow_pk=True,
236 converter=converter)
237
238
239 prop_name = reverse_field.related_name
240
241 label = self.get_label(info, prop_name)
242
243 setattr(form_class,
244 prop_name,
245 self.inline_field_list_type(child_form,
246 info.model,
247 reverse_field.name,
248 info,
249 label=label or info.model.__name__))
250
251 return form_class
252
253
254 def save_inline(form, model):
255 for f in itervalues(form._fields):
256 if f.type == 'InlineModelFormList':
257 f.save_related(model)
258
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flask_admin/contrib/peewee/form.py b/flask_admin/contrib/peewee/form.py
--- a/flask_admin/contrib/peewee/form.py
+++ b/flask_admin/contrib/peewee/form.py
@@ -90,6 +90,8 @@
self.converters[DateField] = self.handle_date
self.converters[TimeField] = self.handle_time
+ self.overrides = getattr(self.view, 'form_overrides', None) or {}
+
def handle_foreign_key(self, model, field, **kwargs):
loader = getattr(self.view, '_form_ajax_refs', {}).get(field.name)
| {"golden_diff": "diff --git a/flask_admin/contrib/peewee/form.py b/flask_admin/contrib/peewee/form.py\n--- a/flask_admin/contrib/peewee/form.py\n+++ b/flask_admin/contrib/peewee/form.py\n@@ -90,6 +90,8 @@\n self.converters[DateField] = self.handle_date\n self.converters[TimeField] = self.handle_time\n \n+ self.overrides = getattr(self.view, 'form_overrides', None) or {}\n+\n def handle_foreign_key(self, model, field, **kwargs):\n loader = getattr(self.view, '_form_ajax_refs', {}).get(field.name)\n", "issue": "form_overrides does not work with Peewee model backend\nI assume this is the same reason as the others -- that the Peewee backend's not quite as up-to-date?\n\nLooking through the code, it appears that form_overrides is not even checked in the Peewee backend, so it shouldn't be too hard for me to fix this.\n\nIf you confirm that this is a bug and I'm not missing something, I can send through some code.\n\n", "before_files": [{"content": "from wtforms import fields\n\nfrom peewee import (DateTimeField, DateField, TimeField,\n PrimaryKeyField, ForeignKeyField, BaseModel)\n\nfrom wtfpeewee.orm import ModelConverter, model_form\n\nfrom flask.ext.admin import form\nfrom flask.ext.admin._compat import iteritems, itervalues\nfrom flask.ext.admin.model.form import InlineFormAdmin, InlineModelConverterBase\nfrom flask.ext.admin.model.fields import InlineModelFormField, InlineFieldList, AjaxSelectField\n\nfrom .tools import get_primary_key\nfrom .ajax import create_ajax_loader\n\n\nclass InlineModelFormList(InlineFieldList):\n \"\"\"\n Customized inline model form list field.\n \"\"\"\n\n form_field_type = InlineModelFormField\n \"\"\"\n Form field type. Override to use custom field for each inline form\n \"\"\"\n\n def __init__(self, form, model, prop, inline_view, **kwargs):\n self.form = form\n self.model = model\n self.prop = prop\n self.inline_view = inline_view\n\n self._pk = get_primary_key(model)\n super(InlineModelFormList, self).__init__(self.form_field_type(form, self._pk), **kwargs)\n\n def display_row_controls(self, field):\n return field.get_pk() is not None\n\n # *** bryhoyt removed def process() entirely, because I believe it was buggy\n # (but worked because another part of the code had a complimentary bug)\n # and I'm not sure why it was necessary anyway.\n # If we want it back in, we need to fix the following bogus query:\n # self.model.select().where(attr == data).execute() # `data` is not an ID, and only happened to be so because we patched it in in .contribute() below\n #\n # For reference:\n # .process() introduced in https://github.com/mrjoes/flask-admin/commit/2845e4b28cb40b25e2bf544b327f6202dc7e5709\n # Fixed, brokenly I think, in https://github.com/mrjoes/flask-admin/commit/4383eef3ce7eb01878f086928f8773adb9de79f8#diff-f87e7cd76fb9bc48c8681b24f238fb13R30\n\n def populate_obj(self, obj, name):\n pass\n\n def save_related(self, obj):\n model_id = getattr(obj, self._pk)\n\n attr = getattr(self.model, self.prop)\n values = self.model.select().where(attr == model_id).execute()\n\n pk_map = dict((str(getattr(v, self._pk)), v) for v in values)\n\n # Handle request data\n for field in self.entries:\n field_id = field.get_pk()\n\n if field_id in pk_map:\n model = pk_map[field_id]\n\n if self.should_delete(field):\n model.delete_instance(recursive=True)\n continue\n else:\n model = self.model()\n\n field.populate_obj(model, None)\n\n # Force relation\n setattr(model, self.prop, model_id)\n\n self.inline_view.on_model_change(field, model)\n\n model.save()\n\n\nclass CustomModelConverter(ModelConverter):\n def __init__(self, view, additional=None):\n super(CustomModelConverter, self).__init__(additional)\n self.view = view\n\n self.converters[PrimaryKeyField] = self.handle_pk\n self.converters[DateTimeField] = self.handle_datetime\n self.converters[DateField] = self.handle_date\n self.converters[TimeField] = self.handle_time\n\n def handle_foreign_key(self, model, field, **kwargs):\n loader = getattr(self.view, '_form_ajax_refs', {}).get(field.name)\n\n if loader:\n if field.null:\n kwargs['allow_blank'] = True\n\n return field.name, AjaxSelectField(loader, **kwargs)\n\n return super(CustomModelConverter, self).handle_foreign_key(model, field, **kwargs)\n\n def handle_pk(self, model, field, **kwargs):\n kwargs['validators'] = []\n return field.name, fields.HiddenField(**kwargs)\n\n def handle_date(self, model, field, **kwargs):\n kwargs['widget'] = form.DatePickerWidget()\n return field.name, fields.DateField(**kwargs)\n\n def handle_datetime(self, model, field, **kwargs):\n kwargs['widget'] = form.DateTimePickerWidget()\n return field.name, fields.DateTimeField(**kwargs)\n\n def handle_time(self, model, field, **kwargs):\n return field.name, form.TimeField(**kwargs)\n\n\ndef get_form(model, converter,\n base_class=form.BaseForm,\n only=None,\n exclude=None,\n field_args=None,\n allow_pk=False,\n extra_fields=None):\n \"\"\"\n Create form from peewee model and contribute extra fields, if necessary\n \"\"\"\n result = model_form(model,\n base_class=base_class,\n only=only,\n exclude=exclude,\n field_args=field_args,\n allow_pk=allow_pk,\n converter=converter)\n\n if extra_fields:\n for name, field in iteritems(extra_fields):\n setattr(result, name, form.recreate_field(field))\n\n return result\n\n\nclass InlineModelConverter(InlineModelConverterBase):\n \"\"\"\n Inline model form helper.\n \"\"\"\n\n inline_field_list_type = InlineModelFormList\n \"\"\"\n Used field list type.\n\n If you want to do some custom rendering of inline field lists,\n you can create your own wtforms field and use it instead\n \"\"\"\n\n def get_info(self, p):\n info = super(InlineModelConverter, self).get_info(p)\n\n if info is None:\n if isinstance(p, BaseModel):\n info = InlineFormAdmin(p)\n else:\n model = getattr(p, 'model', None)\n if model is None:\n raise Exception('Unknown inline model admin: %s' % repr(p))\n\n attrs = dict()\n\n for attr in dir(p):\n if not attr.startswith('_') and attr != model:\n attrs[attr] = getattr(p, attr)\n\n info = InlineFormAdmin(model, **attrs)\n\n # Resolve AJAX FKs\n info._form_ajax_refs = self.process_ajax_refs(info)\n\n return info\n\n def process_ajax_refs(self, info):\n refs = getattr(info, 'form_ajax_refs', None)\n\n result = {}\n\n if refs:\n for name, opts in iteritems(refs):\n new_name = '%s.%s' % (info.model.__name__.lower(), name)\n\n loader = None\n if isinstance(opts, (list, tuple)):\n loader = create_ajax_loader(info.model, new_name, name, opts)\n else:\n loader = opts\n\n result[name] = loader\n self.view._form_ajax_refs[new_name] = loader\n\n return result\n\n def contribute(self, converter, model, form_class, inline_model):\n # Find property from target model to current model\n reverse_field = None\n\n info = self.get_info(inline_model)\n\n for field in info.model._meta.get_fields():\n field_type = type(field)\n\n if field_type == ForeignKeyField:\n if field.rel_model == model:\n reverse_field = field\n break\n else:\n raise Exception('Cannot find reverse relation for model %s' % info.model)\n\n # Remove reverse property from the list\n ignore = [reverse_field.name]\n\n if info.form_excluded_columns:\n exclude = ignore + info.form_excluded_columns\n else:\n exclude = ignore\n\n # Create field\n child_form = info.get_form()\n\n if child_form is None:\n child_form = model_form(info.model,\n base_class=form.BaseForm,\n only=info.form_columns,\n exclude=exclude,\n field_args=info.form_args,\n allow_pk=True,\n converter=converter)\n\n\n prop_name = reverse_field.related_name\n\n label = self.get_label(info, prop_name)\n\n setattr(form_class,\n prop_name,\n self.inline_field_list_type(child_form,\n info.model,\n reverse_field.name,\n info,\n label=label or info.model.__name__))\n\n return form_class\n\n\ndef save_inline(form, model):\n for f in itervalues(form._fields):\n if f.type == 'InlineModelFormList':\n f.save_related(model)\n", "path": "flask_admin/contrib/peewee/form.py"}], "after_files": [{"content": "from wtforms import fields\n\nfrom peewee import (DateTimeField, DateField, TimeField,\n PrimaryKeyField, ForeignKeyField, BaseModel)\n\nfrom wtfpeewee.orm import ModelConverter, model_form\n\nfrom flask.ext.admin import form\nfrom flask.ext.admin._compat import iteritems, itervalues\nfrom flask.ext.admin.model.form import InlineFormAdmin, InlineModelConverterBase\nfrom flask.ext.admin.model.fields import InlineModelFormField, InlineFieldList, AjaxSelectField\n\nfrom .tools import get_primary_key\nfrom .ajax import create_ajax_loader\n\n\nclass InlineModelFormList(InlineFieldList):\n \"\"\"\n Customized inline model form list field.\n \"\"\"\n\n form_field_type = InlineModelFormField\n \"\"\"\n Form field type. Override to use custom field for each inline form\n \"\"\"\n\n def __init__(self, form, model, prop, inline_view, **kwargs):\n self.form = form\n self.model = model\n self.prop = prop\n self.inline_view = inline_view\n\n self._pk = get_primary_key(model)\n\n super(InlineModelFormList, self).__init__(self.form_field_type(form, self._pk, inline_view), **kwargs)\n\n def display_row_controls(self, field):\n return field.get_pk() is not None\n\n def process(self, formdata, data=None):\n if not formdata:\n attr = getattr(self.model, self.prop)\n data = self.model.select().where(attr == data).execute()\n else:\n data = None\n\n return super(InlineModelFormList, self).process(formdata, data)\n\n def populate_obj(self, obj, name):\n pass\n\n def save_related(self, obj):\n model_id = getattr(obj, self._pk)\n\n attr = getattr(self.model, self.prop)\n values = self.model.select().where(attr == model_id).execute()\n\n pk_map = dict((str(getattr(v, self._pk)), v) for v in values)\n\n # Handle request data\n for field in self.entries:\n field_id = field.get_pk()\n\n if field_id in pk_map:\n model = pk_map[field_id]\n\n if self.should_delete(field):\n model.delete_instance(recursive=True)\n continue\n else:\n model = self.model()\n\n field.populate_obj(model, None)\n\n # Force relation\n setattr(model, self.prop, model_id)\n\n self.inline_view.on_model_change(field, model)\n\n model.save()\n\n\nclass CustomModelConverter(ModelConverter):\n def __init__(self, view, additional=None):\n super(CustomModelConverter, self).__init__(additional)\n self.view = view\n\n self.converters[PrimaryKeyField] = self.handle_pk\n self.converters[DateTimeField] = self.handle_datetime\n self.converters[DateField] = self.handle_date\n self.converters[TimeField] = self.handle_time\n\n self.overrides = getattr(self.view, 'form_overrides', None) or {}\n\n def handle_foreign_key(self, model, field, **kwargs):\n loader = getattr(self.view, '_form_ajax_refs', {}).get(field.name)\n\n if loader:\n if field.null:\n kwargs['allow_blank'] = True\n\n return field.name, AjaxSelectField(loader, **kwargs)\n\n return super(CustomModelConverter, self).handle_foreign_key(model, field, **kwargs)\n\n def handle_pk(self, model, field, **kwargs):\n kwargs['validators'] = []\n return field.name, fields.HiddenField(**kwargs)\n\n def handle_date(self, model, field, **kwargs):\n kwargs['widget'] = form.DatePickerWidget()\n return field.name, fields.DateField(**kwargs)\n\n def handle_datetime(self, model, field, **kwargs):\n kwargs['widget'] = form.DateTimePickerWidget()\n return field.name, fields.DateTimeField(**kwargs)\n\n def handle_time(self, model, field, **kwargs):\n return field.name, form.TimeField(**kwargs)\n\n\ndef get_form(model, converter,\n base_class=form.BaseForm,\n only=None,\n exclude=None,\n field_args=None,\n allow_pk=False,\n extra_fields=None):\n \"\"\"\n Create form from peewee model and contribute extra fields, if necessary\n \"\"\"\n result = model_form(model,\n base_class=base_class,\n only=only,\n exclude=exclude,\n field_args=field_args,\n allow_pk=allow_pk,\n converter=converter)\n\n if extra_fields:\n for name, field in iteritems(extra_fields):\n setattr(result, name, form.recreate_field(field))\n\n return result\n\n\nclass InlineModelConverter(InlineModelConverterBase):\n \"\"\"\n Inline model form helper.\n \"\"\"\n\n inline_field_list_type = InlineModelFormList\n \"\"\"\n Used field list type.\n\n If you want to do some custom rendering of inline field lists,\n you can create your own wtforms field and use it instead\n \"\"\"\n\n def get_info(self, p):\n info = super(InlineModelConverter, self).get_info(p)\n\n if info is None:\n if isinstance(p, BaseModel):\n info = InlineFormAdmin(p)\n else:\n model = getattr(p, 'model', None)\n if model is None:\n raise Exception('Unknown inline model admin: %s' % repr(p))\n\n attrs = dict()\n\n for attr in dir(p):\n if not attr.startswith('_') and attr != model:\n attrs[attr] = getattr(p, attr)\n\n info = InlineFormAdmin(model, **attrs)\n\n # Resolve AJAX FKs\n info._form_ajax_refs = self.process_ajax_refs(info)\n\n return info\n\n def process_ajax_refs(self, info):\n refs = getattr(info, 'form_ajax_refs', None)\n\n result = {}\n\n if refs:\n for name, opts in iteritems(refs):\n new_name = '%s.%s' % (info.model.__name__.lower(), name)\n\n loader = None\n if isinstance(opts, (list, tuple)):\n loader = create_ajax_loader(info.model, new_name, name, opts)\n else:\n loader = opts\n\n result[name] = loader\n self.view._form_ajax_refs[new_name] = loader\n\n return result\n\n def contribute(self, converter, model, form_class, inline_model):\n # Find property from target model to current model\n reverse_field = None\n\n info = self.get_info(inline_model)\n\n for field in info.model._meta.get_fields():\n field_type = type(field)\n\n if field_type == ForeignKeyField:\n if field.rel_model == model:\n reverse_field = field\n break\n else:\n raise Exception('Cannot find reverse relation for model %s' % info.model)\n\n # Remove reverse property from the list\n ignore = [reverse_field.name]\n\n if info.form_excluded_columns:\n exclude = ignore + info.form_excluded_columns\n else:\n exclude = ignore\n\n # Create field\n child_form = info.get_form()\n\n if child_form is None:\n child_form = model_form(info.model,\n base_class=form.BaseForm,\n only=info.form_columns,\n exclude=exclude,\n field_args=info.form_args,\n allow_pk=True,\n converter=converter)\n\n prop_name = 'fa_%s' % model.__name__\n\n label = self.get_label(info, prop_name)\n\n setattr(form_class,\n prop_name,\n self.inline_field_list_type(child_form,\n info.model,\n reverse_field.name,\n info,\n label=label or info.model.__name__))\n\n setattr(field.rel_model,\n prop_name,\n property(lambda self: self.id))\n\n return form_class\n\n\ndef save_inline(form, model):\n for f in itervalues(form._fields):\n if f.type == 'InlineModelFormList':\n f.save_related(model)\n", "path": "flask_admin/contrib/peewee/form.py"}]} |
gh_patches_debug_1506 | rasdani/github-patches | git_diff | google__turbinia-323 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
turbiniactl -a status breaks on local installations
When running `turbiniactl -a status` with local tasks, there is an error, and a stack trace gets thrown here and `saved_paths` is incorrectly set to None:
https://github.com/google/turbinia/blob/b65bc0d26f635655ad03ece4d65fae2e2e224915/turbinia/client.py#L250
FYI @ericzinnikas
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `turbinia/client.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2017 Google Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Client objects for Turbinia."""
16
17 from __future__ import unicode_literals
18
19 from datetime import datetime
20 from datetime import timedelta
21 import json
22 import logging
23 import os
24 import stat
25 import time
26
27 from turbinia import config
28 from turbinia.config import logger
29 from turbinia import task_manager
30 from turbinia import workers
31 from turbinia import TurbiniaException
32 from turbinia.workers.artifact import FileArtifactExtractionTask
33 from turbinia.workers.analysis.wordpress import WordpressAccessLogAnalysisTask
34 from turbinia.workers.analysis.jenkins import JenkinsAnalysisTask
35 from turbinia.workers.grep import GrepTask
36 from turbinia.workers.hadoop import HadoopAnalysisTask
37 from turbinia.workers.plaso import PlasoTask
38 from turbinia.workers.psort import PsortTask
39 from turbinia.workers.sshd import SSHDAnalysisTask
40 from turbinia.workers.strings import StringsAsciiTask
41 from turbinia.workers.strings import StringsUnicodeTask
42 from turbinia.workers.tomcat import TomcatAnalysisTask
43 from turbinia.workers.worker_stat import StatTask
44
45 # TODO(aarontp): Remove this map after
46 # https://github.com/google/turbinia/issues/278 is fixed.
47 TASK_MAP = {
48 'fileartifactextractiontask': FileArtifactExtractionTask,
49 'wordpressaccessloganalysistask': WordpressAccessLogAnalysisTask,
50 'jenkinsanalysistask': JenkinsAnalysisTask,
51 'greptask': GrepTask,
52 'hadoopanalysistask': HadoopAnalysisTask,
53 'plasotask': PlasoTask,
54 'psorttask': PsortTask,
55 'sshdanalysistask': SSHDAnalysisTask,
56 'stringsasciitask': StringsAsciiTask,
57 'stringsunicodetask': StringsUnicodeTask,
58 'tomcatanalysistask': TomcatAnalysisTask,
59 'stattask': StatTask,
60 }
61
62 config.LoadConfig()
63 if config.TASK_MANAGER.lower() == 'psq':
64 import psq
65
66 from google.cloud import exceptions
67 from google.cloud import datastore
68 from google.cloud import pubsub
69
70 from turbinia.lib.google_cloud import GoogleCloudFunction
71 elif config.TASK_MANAGER.lower() == 'celery':
72 from turbinia.state_manager import RedisStateManager
73
74 log = logging.getLogger('turbinia')
75 logger.setup()
76
77
78 def check_directory(directory):
79 """Checks directory to make sure it exists and is writable.
80
81 Args:
82 directory (string): Path to directory
83
84 Raises:
85 TurbiniaException: When directory cannot be created or used.
86 """
87 if os.path.exists(directory) and not os.path.isdir(directory):
88 raise TurbiniaException(
89 'File {0:s} exists, but is not a directory'.format(directory))
90
91 if not os.path.exists(directory):
92 try:
93 os.makedirs(directory)
94 except OSError:
95 raise TurbiniaException(
96 'Can not create Directory {0:s}'.format(directory))
97
98 if not os.access(directory, os.W_OK):
99 try:
100 mode = os.stat(directory)[0]
101 os.chmod(directory, mode | stat.S_IWUSR)
102 except OSError:
103 raise TurbiniaException(
104 'Can not add write permissions to {0:s}'.format(directory))
105
106
107 class TurbiniaClient(object):
108 """Client class for Turbinia.
109
110 Attributes:
111 task_manager (TaskManager): Turbinia task manager
112 """
113
114 def __init__(self, run_local=False):
115 config.LoadConfig()
116 if run_local:
117 self.task_manager = None
118 else:
119 self.task_manager = task_manager.get_task_manager()
120 self.task_manager.setup(server=False)
121
122 def create_task(self, task_name):
123 """Creates a Turbinia Task by name.
124
125 Args:
126 task_name(string): Name of the Task we are going to run.
127
128 Returns:
129 TurbiniaTask: An instantiated Task object.
130
131 Raises:
132 TurbiniaException: When no Task object matching task_name is found.
133 """
134 task_obj = TASK_MAP.get(task_name.lower())
135 log.debug('Looking up Task {0:s} by name'.format(task_name))
136 if not task_obj:
137 raise TurbiniaException('No Task named {0:s} found'.format(task_name))
138 return task_obj()
139
140 def list_jobs(self):
141 """List the available jobs."""
142 # TODO(aarontp): Refactor this out so that we don't need to depend on
143 # the task manager from the client.
144 log.info('Available Jobs:')
145 for job in self.task_manager.jobs:
146 log.info('\t{0:s}'.format(job.name))
147
148 def wait_for_request(
149 self, instance, project, region, request_id=None, user=None,
150 poll_interval=60):
151 """Polls and waits for Turbinia Request to complete.
152
153 Args:
154 instance (string): The Turbinia instance name (by default the same as the
155 INSTANCE_ID in the config).
156 project (string): The name of the project.
157 region (string): The name of the region to execute in.
158 request_id (string): The Id of the request we want tasks for.
159 user (string): The user of the request we want tasks for.
160 poll_interval (int): Interval of seconds between polling cycles.
161 """
162 while True:
163 task_results = self.get_task_data(
164 instance, project, region, request_id=request_id, user=user)
165 completed_count = 0
166 uncompleted_count = 0
167 for task in task_results:
168 if task.get('successful') is not None:
169 completed_count += 1
170 else:
171 uncompleted_count += 1
172
173 if completed_count and completed_count == len(task_results):
174 break
175
176 log.info(
177 '{0:d} Tasks found, {1:d} completed. Waiting {2:d} seconds.'.format(
178 len(task_results), completed_count, poll_interval))
179 time.sleep(poll_interval)
180
181 log.info('All {0:d} Tasks completed'.format(len(task_results)))
182
183 def get_task_data(
184 self, instance, project, region, days=0, task_id=None, request_id=None,
185 user=None, function_name='gettasks'):
186 """Gets task data from Google Cloud Functions.
187
188 Args:
189 instance (string): The Turbinia instance name (by default the same as the
190 INSTANCE_ID in the config).
191 project (string): The name of the project.
192 region (string): The name of the region to execute in.
193 days (int): The number of days we want history for.
194 task_id (string): The Id of the task.
195 request_id (string): The Id of the request we want tasks for.
196 user (string): The user of the request we want tasks for.
197 function_name (string): The GCF function we want to call
198
199 Returns:
200 List of Task dict objects.
201 """
202 cloud_function = GoogleCloudFunction(project_id=project, region=region)
203 func_args = {'instance': instance, 'kind': 'TurbiniaTask'}
204
205 if days:
206 start_time = datetime.now() - timedelta(days=days)
207 # Format this like '1990-01-01T00:00:00z' so we can cast it directly to a
208 # javascript Date() object in the cloud function.
209 start_string = start_time.strftime('%Y-%m-%dT%H:%M:%S')
210 func_args.update({'start_time': start_string})
211 elif task_id:
212 func_args.update({'task_id': task_id})
213 elif request_id:
214 func_args.update({'request_id': request_id})
215
216 if user:
217 func_args.update({'user': user})
218
219 response = cloud_function.ExecuteFunction(function_name, func_args)
220 if 'result' not in response:
221 log.error('No results found')
222 if response.get('error', '{}') != '{}':
223 msg = 'Error executing Cloud Function: [{0!s}].'.format(
224 response.get('error'))
225 log.error(msg)
226 log.debug('GCF response: {0!s}'.format(response))
227 raise TurbiniaException(
228 'Cloud Function {0:s} returned no results.'.format(function_name))
229
230 try:
231 results = json.loads(response['result'])
232 except (TypeError, ValueError) as e:
233 raise TurbiniaException(
234 'Could not deserialize result from GCF: [{0!s}]'.format(e))
235
236 return results[0]
237
238 def format_task_status(
239 self, instance, project, region, days=0, task_id=None, request_id=None,
240 user=None, all_fields=False):
241 """Formats the recent history for Turbinia Tasks.
242
243 Args:
244 instance (string): The Turbinia instance name (by default the same as the
245 INSTANCE_ID in the config).
246 project (string): The name of the project.
247 region (string): The name of the zone to execute in.
248 days (int): The number of days we want history for.
249 task_id (string): The Id of the task.
250 request_id (string): The Id of the request we want tasks for.
251 user (string): The user of the request we want tasks for.
252 all_fields (bool): Include all fields for the task, including task,
253 request ids and saved file paths.
254
255 Returns:
256 String of task status
257 """
258 task_results = self.get_task_data(
259 instance, project, region, days, task_id, request_id, user)
260 num_results = len(task_results)
261 results = []
262 if not num_results:
263 msg = '\nNo Tasks found.'
264 log.info(msg)
265 return msg
266
267 results.append('\nRetrieved {0:d} Task results:'.format(num_results))
268 for task in task_results:
269 if task.get('successful'):
270 success = 'Successful'
271 elif task.get('successful') is None:
272 success = 'Running'
273 else:
274 success = 'Failed'
275
276 status = task.get('status', 'No task status')
277 if all_fields:
278 results.append(
279 '{0:s} request: {1:s} task: {2:s} {3:s} {4:s} {5:s} {6:s}: {7:s}'
280 .format(
281 task.get('last_update'), task.get('request_id'), task.get('id'),
282 task.get('name'), task.get('user'), task.get('worker_name'),
283 success, status))
284 saved_paths = task.get('saved_paths', [])
285 for path in saved_paths:
286 results.append('\t{0:s}'.format(path))
287 else:
288 results.append(
289 '{0:s} {1:s} {2:s}: {3:s}'.format(
290 task.get('last_update'), task.get('name'), success, status))
291
292 return '\n'.join(results)
293
294 def run_local_task(self, task_name, request):
295 """Runs a Turbinia Task locally.
296
297 Args:
298 task_name(string): Name of the Task we are going to run.
299 request (TurbiniaRequest): Object containing request and evidence info.
300
301 Returns:
302 TurbiniaTaskResult: The result returned by the Task Execution.
303 """
304 task = self.create_task(task_name)
305 task.request_id = request.request_id
306 task.base_output_dir = config.OUTPUT_DIR
307 task.run_local = True
308 if not request.evidence:
309 raise TurbiniaException('TurbiniaRequest does not contain evidence.')
310 log.info('Running Task {0:s} locally'.format(task_name))
311 result = task.run_wrapper(request.evidence[0])
312 return result
313
314 def send_request(self, request):
315 """Sends a TurbiniaRequest message.
316
317 Args:
318 request: A TurbiniaRequest object.
319 """
320 self.task_manager.server_pubsub.send_request(request)
321
322 def close_tasks(
323 self, instance, project, region, request_id=None, task_id=None, user=None,
324 requester=None):
325 """Close Turbinia Tasks based on Request ID.
326
327 Args:
328 instance (string): The Turbinia instance name (by default the same as the
329 INSTANCE_ID in the config).
330 project (string): The name of the project.
331 region (string): The name of the zone to execute in.
332 request_id (string): The Id of the request we want tasks for.
333 task_id (string): The Id of the request we want task for.
334 user (string): The user of the request we want tasks for.
335 requester (string): The user making the request to close tasks.
336
337 Returns: String of closed Task IDs.
338 """
339 cloud_function = GoogleCloudFunction(project_id=project, region=region)
340 func_args = {
341 'instance': instance,
342 'kind': 'TurbiniaTask',
343 'request_id': request_id,
344 'task_id': task_id,
345 'user': user,
346 'requester': requester
347 }
348 response = cloud_function.ExecuteFunction('closetasks', func_args)
349 return 'Closed Task IDs: %s' % response.get('result')
350
351
352 class TurbiniaCeleryClient(TurbiniaClient):
353 """Client class for Turbinia (Celery).
354
355 Overriding some things specific to Celery operation.
356
357 Attributes:
358 redis (RedisStateManager): Redis datastore object
359 """
360
361 def __init__(self, *_, **__):
362 super(TurbiniaCeleryClient, self).__init__()
363 self.redis = RedisStateManager()
364
365 def send_request(self, request):
366 """Sends a TurbiniaRequest message.
367
368 Args:
369 request: A TurbiniaRequest object.
370 """
371 self.task_manager.kombu.send_request(request)
372
373 # pylint: disable=arguments-differ
374 def get_task_data(
375 self, instance, _, __, days=0, task_id=None, request_id=None,
376 function_name=None):
377 """Gets task data from Redis.
378
379 We keep the same function signature, but ignore arguments passed for GCP.
380
381 Args:
382 instance (string): The Turbinia instance name (by default the same as the
383 INSTANCE_ID in the config).
384 days (int): The number of days we want history for.
385 task_id (string): The Id of the task.
386 request_id (string): The Id of the request we want tasks for.
387
388 Returns:
389 List of Task dict objects.
390 """
391 return self.redis.get_task_data(instance, days, task_id, request_id)
392
393
394 class TurbiniaServer(object):
395 """Turbinia Server class.
396
397 Attributes:
398 task_manager (TaskManager): An object to manage turbinia tasks.
399 """
400
401 def __init__(self, jobs_blacklist=None, jobs_whitelist=None):
402 """Initializes Turbinia Server.
403
404 Args:
405 jobs_blacklist (Optional[list[str]]): Jobs we will exclude from running
406 jobs_whitelist (Optional[list[str]]): The only Jobs we will include to run
407 """
408 config.LoadConfig()
409 self.task_manager = task_manager.get_task_manager()
410 self.task_manager.setup(jobs_blacklist, jobs_whitelist)
411
412 def start(self):
413 """Start Turbinia Server."""
414 log.info('Running Turbinia Server.')
415 self.task_manager.run()
416
417 def add_evidence(self, evidence_):
418 """Add evidence to be processed."""
419 self.task_manager.add_evidence(evidence_)
420
421
422 class TurbiniaCeleryWorker(TurbiniaClient):
423 """Turbinia Celery Worker class.
424
425 Attributes:
426 worker (celery.app): Celery worker app
427 """
428
429 def __init__(self, *_, **__):
430 """Initialization for Celery worker."""
431 super(TurbiniaCeleryWorker, self).__init__()
432 check_directory(config.MOUNT_DIR_PREFIX)
433 check_directory(config.OUTPUT_DIR)
434 check_directory(config.TMP_DIR)
435 self.worker = self.task_manager.celery.app
436
437 def start(self):
438 """Start Turbinia Celery Worker."""
439 log.info('Running Turbinia Celery Worker.')
440 argv = ['celery', 'worker', '--loglevel=info', '--pool=solo']
441 self.worker.start(argv)
442
443
444 class TurbiniaPsqWorker(object):
445 """Turbinia PSQ Worker class.
446
447 Attributes:
448 worker (psq.Worker): PSQ Worker object
449 psq (psq.Queue): A Task queue object
450
451 Raises:
452 TurbiniaException: When errors occur
453 """
454
455 def __init__(self, *_, **__):
456 """Initialization for PSQ Worker."""
457 config.LoadConfig()
458 psq_publisher = pubsub.PublisherClient()
459 psq_subscriber = pubsub.SubscriberClient()
460 datastore_client = datastore.Client(project=config.TURBINIA_PROJECT)
461 try:
462 self.psq = psq.Queue(
463 psq_publisher, psq_subscriber, config.TURBINIA_PROJECT,
464 name=config.PSQ_TOPIC, storage=psq.DatastoreStorage(datastore_client))
465 except exceptions.GoogleCloudError as e:
466 msg = 'Error creating PSQ Queue: {0:s}'.format(str(e))
467 log.error(msg)
468 raise TurbiniaException(msg)
469
470 check_directory(config.MOUNT_DIR_PREFIX)
471 check_directory(config.OUTPUT_DIR)
472 check_directory(config.TMP_DIR)
473
474 log.info('Starting PSQ listener on queue {0:s}'.format(self.psq.name))
475 self.worker = psq.Worker(queue=self.psq)
476
477 def start(self):
478 """Start Turbinia PSQ Worker."""
479 log.info('Running Turbinia PSQ Worker.')
480 self.worker.listen()
481
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/turbinia/client.py b/turbinia/client.py
--- a/turbinia/client.py
+++ b/turbinia/client.py
@@ -282,6 +282,8 @@
task.get('name'), task.get('user'), task.get('worker_name'),
success, status))
saved_paths = task.get('saved_paths', [])
+ if saved_paths is None:
+ saved_paths = []
for path in saved_paths:
results.append('\t{0:s}'.format(path))
else:
| {"golden_diff": "diff --git a/turbinia/client.py b/turbinia/client.py\n--- a/turbinia/client.py\n+++ b/turbinia/client.py\n@@ -282,6 +282,8 @@\n task.get('name'), task.get('user'), task.get('worker_name'),\n success, status))\n saved_paths = task.get('saved_paths', [])\n+ if saved_paths is None:\n+ saved_paths = []\n for path in saved_paths:\n results.append('\\t{0:s}'.format(path))\n else:\n", "issue": "turbiniactl -a status breaks on local installations\nWhen running `turbiniactl -a status` with local tasks, there is an error, and a stack trace gets thrown here and `saved_paths` is incorrectly set to None:\r\nhttps://github.com/google/turbinia/blob/b65bc0d26f635655ad03ece4d65fae2e2e224915/turbinia/client.py#L250\r\n\r\nFYI @ericzinnikas \r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Client objects for Turbinia.\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom datetime import datetime\nfrom datetime import timedelta\nimport json\nimport logging\nimport os\nimport stat\nimport time\n\nfrom turbinia import config\nfrom turbinia.config import logger\nfrom turbinia import task_manager\nfrom turbinia import workers\nfrom turbinia import TurbiniaException\nfrom turbinia.workers.artifact import FileArtifactExtractionTask\nfrom turbinia.workers.analysis.wordpress import WordpressAccessLogAnalysisTask\nfrom turbinia.workers.analysis.jenkins import JenkinsAnalysisTask\nfrom turbinia.workers.grep import GrepTask\nfrom turbinia.workers.hadoop import HadoopAnalysisTask\nfrom turbinia.workers.plaso import PlasoTask\nfrom turbinia.workers.psort import PsortTask\nfrom turbinia.workers.sshd import SSHDAnalysisTask\nfrom turbinia.workers.strings import StringsAsciiTask\nfrom turbinia.workers.strings import StringsUnicodeTask\nfrom turbinia.workers.tomcat import TomcatAnalysisTask\nfrom turbinia.workers.worker_stat import StatTask\n\n# TODO(aarontp): Remove this map after\n# https://github.com/google/turbinia/issues/278 is fixed.\nTASK_MAP = {\n 'fileartifactextractiontask': FileArtifactExtractionTask,\n 'wordpressaccessloganalysistask': WordpressAccessLogAnalysisTask,\n 'jenkinsanalysistask': JenkinsAnalysisTask,\n 'greptask': GrepTask,\n 'hadoopanalysistask': HadoopAnalysisTask,\n 'plasotask': PlasoTask,\n 'psorttask': PsortTask,\n 'sshdanalysistask': SSHDAnalysisTask,\n 'stringsasciitask': StringsAsciiTask,\n 'stringsunicodetask': StringsUnicodeTask,\n 'tomcatanalysistask': TomcatAnalysisTask,\n 'stattask': StatTask,\n}\n\nconfig.LoadConfig()\nif config.TASK_MANAGER.lower() == 'psq':\n import psq\n\n from google.cloud import exceptions\n from google.cloud import datastore\n from google.cloud import pubsub\n\n from turbinia.lib.google_cloud import GoogleCloudFunction\nelif config.TASK_MANAGER.lower() == 'celery':\n from turbinia.state_manager import RedisStateManager\n\nlog = logging.getLogger('turbinia')\nlogger.setup()\n\n\ndef check_directory(directory):\n \"\"\"Checks directory to make sure it exists and is writable.\n\n Args:\n directory (string): Path to directory\n\n Raises:\n TurbiniaException: When directory cannot be created or used.\n \"\"\"\n if os.path.exists(directory) and not os.path.isdir(directory):\n raise TurbiniaException(\n 'File {0:s} exists, but is not a directory'.format(directory))\n\n if not os.path.exists(directory):\n try:\n os.makedirs(directory)\n except OSError:\n raise TurbiniaException(\n 'Can not create Directory {0:s}'.format(directory))\n\n if not os.access(directory, os.W_OK):\n try:\n mode = os.stat(directory)[0]\n os.chmod(directory, mode | stat.S_IWUSR)\n except OSError:\n raise TurbiniaException(\n 'Can not add write permissions to {0:s}'.format(directory))\n\n\nclass TurbiniaClient(object):\n \"\"\"Client class for Turbinia.\n\n Attributes:\n task_manager (TaskManager): Turbinia task manager\n \"\"\"\n\n def __init__(self, run_local=False):\n config.LoadConfig()\n if run_local:\n self.task_manager = None\n else:\n self.task_manager = task_manager.get_task_manager()\n self.task_manager.setup(server=False)\n\n def create_task(self, task_name):\n \"\"\"Creates a Turbinia Task by name.\n\n Args:\n task_name(string): Name of the Task we are going to run.\n\n Returns:\n TurbiniaTask: An instantiated Task object.\n\n Raises:\n TurbiniaException: When no Task object matching task_name is found.\n \"\"\"\n task_obj = TASK_MAP.get(task_name.lower())\n log.debug('Looking up Task {0:s} by name'.format(task_name))\n if not task_obj:\n raise TurbiniaException('No Task named {0:s} found'.format(task_name))\n return task_obj()\n\n def list_jobs(self):\n \"\"\"List the available jobs.\"\"\"\n # TODO(aarontp): Refactor this out so that we don't need to depend on\n # the task manager from the client.\n log.info('Available Jobs:')\n for job in self.task_manager.jobs:\n log.info('\\t{0:s}'.format(job.name))\n\n def wait_for_request(\n self, instance, project, region, request_id=None, user=None,\n poll_interval=60):\n \"\"\"Polls and waits for Turbinia Request to complete.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the region to execute in.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n poll_interval (int): Interval of seconds between polling cycles.\n \"\"\"\n while True:\n task_results = self.get_task_data(\n instance, project, region, request_id=request_id, user=user)\n completed_count = 0\n uncompleted_count = 0\n for task in task_results:\n if task.get('successful') is not None:\n completed_count += 1\n else:\n uncompleted_count += 1\n\n if completed_count and completed_count == len(task_results):\n break\n\n log.info(\n '{0:d} Tasks found, {1:d} completed. Waiting {2:d} seconds.'.format(\n len(task_results), completed_count, poll_interval))\n time.sleep(poll_interval)\n\n log.info('All {0:d} Tasks completed'.format(len(task_results)))\n\n def get_task_data(\n self, instance, project, region, days=0, task_id=None, request_id=None,\n user=None, function_name='gettasks'):\n \"\"\"Gets task data from Google Cloud Functions.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the region to execute in.\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n function_name (string): The GCF function we want to call\n\n Returns:\n List of Task dict objects.\n \"\"\"\n cloud_function = GoogleCloudFunction(project_id=project, region=region)\n func_args = {'instance': instance, 'kind': 'TurbiniaTask'}\n\n if days:\n start_time = datetime.now() - timedelta(days=days)\n # Format this like '1990-01-01T00:00:00z' so we can cast it directly to a\n # javascript Date() object in the cloud function.\n start_string = start_time.strftime('%Y-%m-%dT%H:%M:%S')\n func_args.update({'start_time': start_string})\n elif task_id:\n func_args.update({'task_id': task_id})\n elif request_id:\n func_args.update({'request_id': request_id})\n\n if user:\n func_args.update({'user': user})\n\n response = cloud_function.ExecuteFunction(function_name, func_args)\n if 'result' not in response:\n log.error('No results found')\n if response.get('error', '{}') != '{}':\n msg = 'Error executing Cloud Function: [{0!s}].'.format(\n response.get('error'))\n log.error(msg)\n log.debug('GCF response: {0!s}'.format(response))\n raise TurbiniaException(\n 'Cloud Function {0:s} returned no results.'.format(function_name))\n\n try:\n results = json.loads(response['result'])\n except (TypeError, ValueError) as e:\n raise TurbiniaException(\n 'Could not deserialize result from GCF: [{0!s}]'.format(e))\n\n return results[0]\n\n def format_task_status(\n self, instance, project, region, days=0, task_id=None, request_id=None,\n user=None, all_fields=False):\n \"\"\"Formats the recent history for Turbinia Tasks.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n all_fields (bool): Include all fields for the task, including task,\n request ids and saved file paths.\n\n Returns:\n String of task status\n \"\"\"\n task_results = self.get_task_data(\n instance, project, region, days, task_id, request_id, user)\n num_results = len(task_results)\n results = []\n if not num_results:\n msg = '\\nNo Tasks found.'\n log.info(msg)\n return msg\n\n results.append('\\nRetrieved {0:d} Task results:'.format(num_results))\n for task in task_results:\n if task.get('successful'):\n success = 'Successful'\n elif task.get('successful') is None:\n success = 'Running'\n else:\n success = 'Failed'\n\n status = task.get('status', 'No task status')\n if all_fields:\n results.append(\n '{0:s} request: {1:s} task: {2:s} {3:s} {4:s} {5:s} {6:s}: {7:s}'\n .format(\n task.get('last_update'), task.get('request_id'), task.get('id'),\n task.get('name'), task.get('user'), task.get('worker_name'),\n success, status))\n saved_paths = task.get('saved_paths', [])\n for path in saved_paths:\n results.append('\\t{0:s}'.format(path))\n else:\n results.append(\n '{0:s} {1:s} {2:s}: {3:s}'.format(\n task.get('last_update'), task.get('name'), success, status))\n\n return '\\n'.join(results)\n\n def run_local_task(self, task_name, request):\n \"\"\"Runs a Turbinia Task locally.\n\n Args:\n task_name(string): Name of the Task we are going to run.\n request (TurbiniaRequest): Object containing request and evidence info.\n\n Returns:\n TurbiniaTaskResult: The result returned by the Task Execution.\n \"\"\"\n task = self.create_task(task_name)\n task.request_id = request.request_id\n task.base_output_dir = config.OUTPUT_DIR\n task.run_local = True\n if not request.evidence:\n raise TurbiniaException('TurbiniaRequest does not contain evidence.')\n log.info('Running Task {0:s} locally'.format(task_name))\n result = task.run_wrapper(request.evidence[0])\n return result\n\n def send_request(self, request):\n \"\"\"Sends a TurbiniaRequest message.\n\n Args:\n request: A TurbiniaRequest object.\n \"\"\"\n self.task_manager.server_pubsub.send_request(request)\n\n def close_tasks(\n self, instance, project, region, request_id=None, task_id=None, user=None,\n requester=None):\n \"\"\"Close Turbinia Tasks based on Request ID.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n request_id (string): The Id of the request we want tasks for.\n task_id (string): The Id of the request we want task for.\n user (string): The user of the request we want tasks for.\n requester (string): The user making the request to close tasks.\n\n Returns: String of closed Task IDs.\n \"\"\"\n cloud_function = GoogleCloudFunction(project_id=project, region=region)\n func_args = {\n 'instance': instance,\n 'kind': 'TurbiniaTask',\n 'request_id': request_id,\n 'task_id': task_id,\n 'user': user,\n 'requester': requester\n }\n response = cloud_function.ExecuteFunction('closetasks', func_args)\n return 'Closed Task IDs: %s' % response.get('result')\n\n\nclass TurbiniaCeleryClient(TurbiniaClient):\n \"\"\"Client class for Turbinia (Celery).\n\n Overriding some things specific to Celery operation.\n\n Attributes:\n redis (RedisStateManager): Redis datastore object\n \"\"\"\n\n def __init__(self, *_, **__):\n super(TurbiniaCeleryClient, self).__init__()\n self.redis = RedisStateManager()\n\n def send_request(self, request):\n \"\"\"Sends a TurbiniaRequest message.\n\n Args:\n request: A TurbiniaRequest object.\n \"\"\"\n self.task_manager.kombu.send_request(request)\n\n # pylint: disable=arguments-differ\n def get_task_data(\n self, instance, _, __, days=0, task_id=None, request_id=None,\n function_name=None):\n \"\"\"Gets task data from Redis.\n\n We keep the same function signature, but ignore arguments passed for GCP.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n\n Returns:\n List of Task dict objects.\n \"\"\"\n return self.redis.get_task_data(instance, days, task_id, request_id)\n\n\nclass TurbiniaServer(object):\n \"\"\"Turbinia Server class.\n\n Attributes:\n task_manager (TaskManager): An object to manage turbinia tasks.\n \"\"\"\n\n def __init__(self, jobs_blacklist=None, jobs_whitelist=None):\n \"\"\"Initializes Turbinia Server.\n\n Args:\n jobs_blacklist (Optional[list[str]]): Jobs we will exclude from running\n jobs_whitelist (Optional[list[str]]): The only Jobs we will include to run\n \"\"\"\n config.LoadConfig()\n self.task_manager = task_manager.get_task_manager()\n self.task_manager.setup(jobs_blacklist, jobs_whitelist)\n\n def start(self):\n \"\"\"Start Turbinia Server.\"\"\"\n log.info('Running Turbinia Server.')\n self.task_manager.run()\n\n def add_evidence(self, evidence_):\n \"\"\"Add evidence to be processed.\"\"\"\n self.task_manager.add_evidence(evidence_)\n\n\nclass TurbiniaCeleryWorker(TurbiniaClient):\n \"\"\"Turbinia Celery Worker class.\n\n Attributes:\n worker (celery.app): Celery worker app\n \"\"\"\n\n def __init__(self, *_, **__):\n \"\"\"Initialization for Celery worker.\"\"\"\n super(TurbiniaCeleryWorker, self).__init__()\n check_directory(config.MOUNT_DIR_PREFIX)\n check_directory(config.OUTPUT_DIR)\n check_directory(config.TMP_DIR)\n self.worker = self.task_manager.celery.app\n\n def start(self):\n \"\"\"Start Turbinia Celery Worker.\"\"\"\n log.info('Running Turbinia Celery Worker.')\n argv = ['celery', 'worker', '--loglevel=info', '--pool=solo']\n self.worker.start(argv)\n\n\nclass TurbiniaPsqWorker(object):\n \"\"\"Turbinia PSQ Worker class.\n\n Attributes:\n worker (psq.Worker): PSQ Worker object\n psq (psq.Queue): A Task queue object\n\n Raises:\n TurbiniaException: When errors occur\n \"\"\"\n\n def __init__(self, *_, **__):\n \"\"\"Initialization for PSQ Worker.\"\"\"\n config.LoadConfig()\n psq_publisher = pubsub.PublisherClient()\n psq_subscriber = pubsub.SubscriberClient()\n datastore_client = datastore.Client(project=config.TURBINIA_PROJECT)\n try:\n self.psq = psq.Queue(\n psq_publisher, psq_subscriber, config.TURBINIA_PROJECT,\n name=config.PSQ_TOPIC, storage=psq.DatastoreStorage(datastore_client))\n except exceptions.GoogleCloudError as e:\n msg = 'Error creating PSQ Queue: {0:s}'.format(str(e))\n log.error(msg)\n raise TurbiniaException(msg)\n\n check_directory(config.MOUNT_DIR_PREFIX)\n check_directory(config.OUTPUT_DIR)\n check_directory(config.TMP_DIR)\n\n log.info('Starting PSQ listener on queue {0:s}'.format(self.psq.name))\n self.worker = psq.Worker(queue=self.psq)\n\n def start(self):\n \"\"\"Start Turbinia PSQ Worker.\"\"\"\n log.info('Running Turbinia PSQ Worker.')\n self.worker.listen()\n", "path": "turbinia/client.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Client objects for Turbinia.\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom datetime import datetime\nfrom datetime import timedelta\nimport json\nimport logging\nimport os\nimport stat\nimport time\n\nfrom turbinia import config\nfrom turbinia.config import logger\nfrom turbinia import task_manager\nfrom turbinia import workers\nfrom turbinia import TurbiniaException\nfrom turbinia.workers.artifact import FileArtifactExtractionTask\nfrom turbinia.workers.analysis.wordpress import WordpressAccessLogAnalysisTask\nfrom turbinia.workers.analysis.jenkins import JenkinsAnalysisTask\nfrom turbinia.workers.grep import GrepTask\nfrom turbinia.workers.hadoop import HadoopAnalysisTask\nfrom turbinia.workers.plaso import PlasoTask\nfrom turbinia.workers.psort import PsortTask\nfrom turbinia.workers.sshd import SSHDAnalysisTask\nfrom turbinia.workers.strings import StringsAsciiTask\nfrom turbinia.workers.strings import StringsUnicodeTask\nfrom turbinia.workers.tomcat import TomcatAnalysisTask\nfrom turbinia.workers.worker_stat import StatTask\n\n# TODO(aarontp): Remove this map after\n# https://github.com/google/turbinia/issues/278 is fixed.\nTASK_MAP = {\n 'fileartifactextractiontask': FileArtifactExtractionTask,\n 'wordpressaccessloganalysistask': WordpressAccessLogAnalysisTask,\n 'jenkinsanalysistask': JenkinsAnalysisTask,\n 'greptask': GrepTask,\n 'hadoopanalysistask': HadoopAnalysisTask,\n 'plasotask': PlasoTask,\n 'psorttask': PsortTask,\n 'sshdanalysistask': SSHDAnalysisTask,\n 'stringsasciitask': StringsAsciiTask,\n 'stringsunicodetask': StringsUnicodeTask,\n 'tomcatanalysistask': TomcatAnalysisTask,\n 'stattask': StatTask,\n}\n\nconfig.LoadConfig()\nif config.TASK_MANAGER.lower() == 'psq':\n import psq\n\n from google.cloud import exceptions\n from google.cloud import datastore\n from google.cloud import pubsub\n\n from turbinia.lib.google_cloud import GoogleCloudFunction\nelif config.TASK_MANAGER.lower() == 'celery':\n from turbinia.state_manager import RedisStateManager\n\nlog = logging.getLogger('turbinia')\nlogger.setup()\n\n\ndef check_directory(directory):\n \"\"\"Checks directory to make sure it exists and is writable.\n\n Args:\n directory (string): Path to directory\n\n Raises:\n TurbiniaException: When directory cannot be created or used.\n \"\"\"\n if os.path.exists(directory) and not os.path.isdir(directory):\n raise TurbiniaException(\n 'File {0:s} exists, but is not a directory'.format(directory))\n\n if not os.path.exists(directory):\n try:\n os.makedirs(directory)\n except OSError:\n raise TurbiniaException(\n 'Can not create Directory {0:s}'.format(directory))\n\n if not os.access(directory, os.W_OK):\n try:\n mode = os.stat(directory)[0]\n os.chmod(directory, mode | stat.S_IWUSR)\n except OSError:\n raise TurbiniaException(\n 'Can not add write permissions to {0:s}'.format(directory))\n\n\nclass TurbiniaClient(object):\n \"\"\"Client class for Turbinia.\n\n Attributes:\n task_manager (TaskManager): Turbinia task manager\n \"\"\"\n\n def __init__(self, run_local=False):\n config.LoadConfig()\n if run_local:\n self.task_manager = None\n else:\n self.task_manager = task_manager.get_task_manager()\n self.task_manager.setup(server=False)\n\n def create_task(self, task_name):\n \"\"\"Creates a Turbinia Task by name.\n\n Args:\n task_name(string): Name of the Task we are going to run.\n\n Returns:\n TurbiniaTask: An instantiated Task object.\n\n Raises:\n TurbiniaException: When no Task object matching task_name is found.\n \"\"\"\n task_obj = TASK_MAP.get(task_name.lower())\n log.debug('Looking up Task {0:s} by name'.format(task_name))\n if not task_obj:\n raise TurbiniaException('No Task named {0:s} found'.format(task_name))\n return task_obj()\n\n def list_jobs(self):\n \"\"\"List the available jobs.\"\"\"\n # TODO(aarontp): Refactor this out so that we don't need to depend on\n # the task manager from the client.\n log.info('Available Jobs:')\n for job in self.task_manager.jobs:\n log.info('\\t{0:s}'.format(job.name))\n\n def wait_for_request(\n self, instance, project, region, request_id=None, user=None,\n poll_interval=60):\n \"\"\"Polls and waits for Turbinia Request to complete.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the region to execute in.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n poll_interval (int): Interval of seconds between polling cycles.\n \"\"\"\n while True:\n task_results = self.get_task_data(\n instance, project, region, request_id=request_id, user=user)\n completed_count = 0\n uncompleted_count = 0\n for task in task_results:\n if task.get('successful') is not None:\n completed_count += 1\n else:\n uncompleted_count += 1\n\n if completed_count and completed_count == len(task_results):\n break\n\n log.info(\n '{0:d} Tasks found, {1:d} completed. Waiting {2:d} seconds.'.format(\n len(task_results), completed_count, poll_interval))\n time.sleep(poll_interval)\n\n log.info('All {0:d} Tasks completed'.format(len(task_results)))\n\n def get_task_data(\n self, instance, project, region, days=0, task_id=None, request_id=None,\n user=None, function_name='gettasks'):\n \"\"\"Gets task data from Google Cloud Functions.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the region to execute in.\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n function_name (string): The GCF function we want to call\n\n Returns:\n List of Task dict objects.\n \"\"\"\n cloud_function = GoogleCloudFunction(project_id=project, region=region)\n func_args = {'instance': instance, 'kind': 'TurbiniaTask'}\n\n if days:\n start_time = datetime.now() - timedelta(days=days)\n # Format this like '1990-01-01T00:00:00z' so we can cast it directly to a\n # javascript Date() object in the cloud function.\n start_string = start_time.strftime('%Y-%m-%dT%H:%M:%S')\n func_args.update({'start_time': start_string})\n elif task_id:\n func_args.update({'task_id': task_id})\n elif request_id:\n func_args.update({'request_id': request_id})\n\n if user:\n func_args.update({'user': user})\n\n response = cloud_function.ExecuteFunction(function_name, func_args)\n if 'result' not in response:\n log.error('No results found')\n if response.get('error', '{}') != '{}':\n msg = 'Error executing Cloud Function: [{0!s}].'.format(\n response.get('error'))\n log.error(msg)\n log.debug('GCF response: {0!s}'.format(response))\n raise TurbiniaException(\n 'Cloud Function {0:s} returned no results.'.format(function_name))\n\n try:\n results = json.loads(response['result'])\n except (TypeError, ValueError) as e:\n raise TurbiniaException(\n 'Could not deserialize result from GCF: [{0!s}]'.format(e))\n\n return results[0]\n\n def format_task_status(\n self, instance, project, region, days=0, task_id=None, request_id=None,\n user=None, all_fields=False):\n \"\"\"Formats the recent history for Turbinia Tasks.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n all_fields (bool): Include all fields for the task, including task,\n request ids and saved file paths.\n\n Returns:\n String of task status\n \"\"\"\n task_results = self.get_task_data(\n instance, project, region, days, task_id, request_id, user)\n num_results = len(task_results)\n results = []\n if not num_results:\n msg = '\\nNo Tasks found.'\n log.info(msg)\n return msg\n\n results.append('\\nRetrieved {0:d} Task results:'.format(num_results))\n for task in task_results:\n if task.get('successful'):\n success = 'Successful'\n elif task.get('successful') is None:\n success = 'Running'\n else:\n success = 'Failed'\n\n status = task.get('status', 'No task status')\n if all_fields:\n results.append(\n '{0:s} request: {1:s} task: {2:s} {3:s} {4:s} {5:s} {6:s}: {7:s}'\n .format(\n task.get('last_update'), task.get('request_id'), task.get('id'),\n task.get('name'), task.get('user'), task.get('worker_name'),\n success, status))\n saved_paths = task.get('saved_paths', [])\n if saved_paths is None:\n saved_paths = []\n for path in saved_paths:\n results.append('\\t{0:s}'.format(path))\n else:\n results.append(\n '{0:s} {1:s} {2:s}: {3:s}'.format(\n task.get('last_update'), task.get('name'), success, status))\n\n return '\\n'.join(results)\n\n def run_local_task(self, task_name, request):\n \"\"\"Runs a Turbinia Task locally.\n\n Args:\n task_name(string): Name of the Task we are going to run.\n request (TurbiniaRequest): Object containing request and evidence info.\n\n Returns:\n TurbiniaTaskResult: The result returned by the Task Execution.\n \"\"\"\n task = self.create_task(task_name)\n task.request_id = request.request_id\n task.base_output_dir = config.OUTPUT_DIR\n task.run_local = True\n if not request.evidence:\n raise TurbiniaException('TurbiniaRequest does not contain evidence.')\n log.info('Running Task {0:s} locally'.format(task_name))\n result = task.run_wrapper(request.evidence[0])\n return result\n\n def send_request(self, request):\n \"\"\"Sends a TurbiniaRequest message.\n\n Args:\n request: A TurbiniaRequest object.\n \"\"\"\n self.task_manager.server_pubsub.send_request(request)\n\n def close_tasks(\n self, instance, project, region, request_id=None, task_id=None, user=None,\n requester=None):\n \"\"\"Close Turbinia Tasks based on Request ID.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n request_id (string): The Id of the request we want tasks for.\n task_id (string): The Id of the request we want task for.\n user (string): The user of the request we want tasks for.\n requester (string): The user making the request to close tasks.\n\n Returns: String of closed Task IDs.\n \"\"\"\n cloud_function = GoogleCloudFunction(project_id=project, region=region)\n func_args = {\n 'instance': instance,\n 'kind': 'TurbiniaTask',\n 'request_id': request_id,\n 'task_id': task_id,\n 'user': user,\n 'requester': requester\n }\n response = cloud_function.ExecuteFunction('closetasks', func_args)\n return 'Closed Task IDs: %s' % response.get('result')\n\n\nclass TurbiniaCeleryClient(TurbiniaClient):\n \"\"\"Client class for Turbinia (Celery).\n\n Overriding some things specific to Celery operation.\n\n Attributes:\n redis (RedisStateManager): Redis datastore object\n \"\"\"\n\n def __init__(self, *_, **__):\n super(TurbiniaCeleryClient, self).__init__()\n self.redis = RedisStateManager()\n\n def send_request(self, request):\n \"\"\"Sends a TurbiniaRequest message.\n\n Args:\n request: A TurbiniaRequest object.\n \"\"\"\n self.task_manager.kombu.send_request(request)\n\n # pylint: disable=arguments-differ\n def get_task_data(\n self, instance, _, __, days=0, task_id=None, request_id=None,\n function_name=None):\n \"\"\"Gets task data from Redis.\n\n We keep the same function signature, but ignore arguments passed for GCP.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n\n Returns:\n List of Task dict objects.\n \"\"\"\n return self.redis.get_task_data(instance, days, task_id, request_id)\n\n\nclass TurbiniaServer(object):\n \"\"\"Turbinia Server class.\n\n Attributes:\n task_manager (TaskManager): An object to manage turbinia tasks.\n \"\"\"\n\n def __init__(self, jobs_blacklist=None, jobs_whitelist=None):\n \"\"\"Initializes Turbinia Server.\n\n Args:\n jobs_blacklist (Optional[list[str]]): Jobs we will exclude from running\n jobs_whitelist (Optional[list[str]]): The only Jobs we will include to run\n \"\"\"\n config.LoadConfig()\n self.task_manager = task_manager.get_task_manager()\n self.task_manager.setup(jobs_blacklist, jobs_whitelist)\n\n def start(self):\n \"\"\"Start Turbinia Server.\"\"\"\n log.info('Running Turbinia Server.')\n self.task_manager.run()\n\n def add_evidence(self, evidence_):\n \"\"\"Add evidence to be processed.\"\"\"\n self.task_manager.add_evidence(evidence_)\n\n\nclass TurbiniaCeleryWorker(TurbiniaClient):\n \"\"\"Turbinia Celery Worker class.\n\n Attributes:\n worker (celery.app): Celery worker app\n \"\"\"\n\n def __init__(self, *_, **__):\n \"\"\"Initialization for Celery worker.\"\"\"\n super(TurbiniaCeleryWorker, self).__init__()\n check_directory(config.MOUNT_DIR_PREFIX)\n check_directory(config.OUTPUT_DIR)\n check_directory(config.TMP_DIR)\n self.worker = self.task_manager.celery.app\n\n def start(self):\n \"\"\"Start Turbinia Celery Worker.\"\"\"\n log.info('Running Turbinia Celery Worker.')\n argv = ['celery', 'worker', '--loglevel=info', '--pool=solo']\n self.worker.start(argv)\n\n\nclass TurbiniaPsqWorker(object):\n \"\"\"Turbinia PSQ Worker class.\n\n Attributes:\n worker (psq.Worker): PSQ Worker object\n psq (psq.Queue): A Task queue object\n\n Raises:\n TurbiniaException: When errors occur\n \"\"\"\n\n def __init__(self, *_, **__):\n \"\"\"Initialization for PSQ Worker.\"\"\"\n config.LoadConfig()\n psq_publisher = pubsub.PublisherClient()\n psq_subscriber = pubsub.SubscriberClient()\n datastore_client = datastore.Client(project=config.TURBINIA_PROJECT)\n try:\n self.psq = psq.Queue(\n psq_publisher, psq_subscriber, config.TURBINIA_PROJECT,\n name=config.PSQ_TOPIC, storage=psq.DatastoreStorage(datastore_client))\n except exceptions.GoogleCloudError as e:\n msg = 'Error creating PSQ Queue: {0:s}'.format(str(e))\n log.error(msg)\n raise TurbiniaException(msg)\n\n check_directory(config.MOUNT_DIR_PREFIX)\n check_directory(config.OUTPUT_DIR)\n check_directory(config.TMP_DIR)\n\n log.info('Starting PSQ listener on queue {0:s}'.format(self.psq.name))\n self.worker = psq.Worker(queue=self.psq)\n\n def start(self):\n \"\"\"Start Turbinia PSQ Worker.\"\"\"\n log.info('Running Turbinia PSQ Worker.')\n self.worker.listen()\n", "path": "turbinia/client.py"}]} |
gh_patches_debug_1507 | rasdani/github-patches | git_diff | akvo__akvo-rsr-1704 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
User permissions for deleting employments
Currently the organisation Admins and User Managers do not have the right to delete an employment. As they are managing these employments, they should be able to do so.
## Test plan
GIVEN the user management page
WHEN logged in as an Organisation Admin or User Manager
THEN the user should be able to delete employments
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rsr/models/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7
8 import logging
9 logger = logging.getLogger('akvo.rsr')
10
11 from django.conf import settings
12 from django.db.models.signals import pre_save, post_save, post_delete
13 from django.contrib.admin.models import LogEntry
14
15 from akvo.api.models import create_api_key
16
17 from ..signals import (
18 change_name_of_file_on_change, change_name_of_file_on_create,
19 create_publishing_status, create_organisation_account,
20 create_payment_gateway_selector, donation_completed, act_on_log_entry,
21 employment_post_save, employment_pre_save, update_project_budget,
22 update_project_funding, create_iati_file)
23
24 from .benchmark import Benchmark, Benchmarkname
25 from .budget_item import BudgetItem, BudgetItemLabel, CountryBudgetItem
26 from .country import Country, RecipientCountry
27 from .crs_add import CrsAdd, CrsAddOtherFlag
28 from .category import Category
29 from .employment import Employment
30 from .focus_area import FocusArea
31 from .fss import Fss, FssForecast
32 from .goal import Goal
33 from .iati_export import IatiExport
34 from .indicator import Indicator, IndicatorPeriod
35 from .invoice import Invoice
36 from .internal_organisation_id import InternalOrganisationID
37 from .keyword import Keyword
38 from .legacy_data import LegacyData
39 from .link import Link
40 from .location import (OrganisationLocation, ProjectLocation, ProjectUpdateLocation,
41 AdministrativeLocation)
42 from .organisation import Organisation
43 from .organisation_account import OrganisationAccount
44 from .partner_site import PartnerSite
45 from .partner_type import PartnerType
46 from .partnership import Partnership
47 from .payment_gateway import PayPalGateway, MollieGateway, PaymentGatewaySelector
48 from .planned_disbursement import PlannedDisbursement
49 from .policy_marker import PolicyMarker
50 from .project import Project
51 from .project_comment import ProjectComment
52 from .project_condition import ProjectCondition
53 from .project_contact import ProjectContact
54 from .project_document import ProjectDocument
55 from .project_update import ProjectUpdate
56 from .publishing_status import PublishingStatus
57 from .region import RecipientRegion
58 from .related_project import RelatedProject
59 from .result import Result
60 from .sector import Sector
61 from .transaction import Transaction, TransactionSector
62 from .user import User
63
64 __all__ = [
65 'Benchmark',
66 'Benchmarkname',
67 'BudgetItem',
68 'BudgetItemLabel',
69 'CountryBudgetItem',
70 'Country',
71 'RecipientCountry',
72 'Category',
73 'CrsAdd',
74 'CrsAddOtherFlag',
75 'Employment',
76 'FocusArea',
77 'Fss',
78 'FssForecast',
79 'Goal',
80 'IatiExport',
81 'Indicator',
82 'IndicatorPeriod',
83 'Invoice',
84 'InternalOrganisationID',
85 'Keyword',
86 'LegacyData',
87 'Link',
88 'OrganisationLocation',
89 'ProjectLocation',
90 'AdministrativeLocation',
91 'ProjectUpdateLocation',
92 'Organisation',
93 'OrganisationAccount',
94 'PartnerSite',
95 'PartnerType',
96 'Partnership',
97 'PayPalGateway',
98 'MollieGateway',
99 'PaymentGatewaySelector',
100 'PlannedDisbursement',
101 'PolicyMarker',
102 'Project',
103 'ProjectComment',
104 'ProjectCondition',
105 'ProjectContact',
106 'ProjectDocument',
107 'ProjectUpdate',
108 'PublishingStatus',
109 'RecipientRegion',
110 'RelatedProject',
111 'Result',
112 'Sector',
113 'Transaction',
114 'TransactionSector',
115 'User',
116 ]
117
118 # Permission rules
119 import rules
120 from ..permissions import (is_rsr_admin, is_org_admin, is_org_user_manager,
121 is_org_project_editor, is_org_user, is_self)
122
123 rules.add_perm('rsr', rules.always_allow)
124
125 rules.add_perm('rsr.add_benchmarkname', is_rsr_admin)
126 rules.add_perm('rsr.change_benchmarkname', is_rsr_admin)
127
128 rules.add_perm('rsr.add_country', is_rsr_admin)
129 rules.add_perm('rsr.change_country', is_rsr_admin)
130
131 rules.add_perm('rsr.add_budgetitemlabel', is_rsr_admin)
132 rules.add_perm('rsr.change_budgetitemlabel', is_rsr_admin)
133
134 rules.add_perm('rsr.add_category', is_rsr_admin)
135 rules.add_perm('rsr.change_category', is_rsr_admin)
136
137 rules.add_perm('rsr.add_focusarea', is_rsr_admin)
138 rules.add_perm('rsr.change_focusarea', is_rsr_admin)
139
140 rules.add_perm('rsr.add_indicator', is_rsr_admin | is_org_admin | is_org_project_editor)
141 rules.add_perm('rsr.change_indicator', is_rsr_admin | is_org_admin | is_org_project_editor)
142 rules.add_perm('rsr.delete_indicator', is_rsr_admin | is_org_admin | is_org_project_editor)
143
144 rules.add_perm('rsr.add_indicatorperiod', is_rsr_admin | is_org_admin | is_org_project_editor)
145 rules.add_perm('rsr.change_indicatorperiod', is_rsr_admin | is_org_admin | is_org_project_editor)
146 rules.add_perm('rsr.delete_indicatorperiod', is_rsr_admin | is_org_admin | is_org_project_editor)
147
148 rules.add_perm('rsr.add_keyword', is_rsr_admin)
149 rules.add_perm('rsr.change_keyword', is_rsr_admin)
150
151 rules.add_perm('rsr.add_partnersite', is_rsr_admin)
152 rules.add_perm('rsr.change_partnersite', is_rsr_admin | is_org_admin)
153
154 rules.add_perm('rsr.add_partnertype', is_rsr_admin)
155 rules.add_perm('rsr.change_partnertype', is_rsr_admin)
156
157 rules.add_perm('rsr.change_organisationaccount', is_rsr_admin)
158
159 rules.add_perm('rsr.add_projectupdate', is_rsr_admin | is_org_admin | is_org_user_manager |
160 is_org_project_editor | is_org_user)
161 rules.add_perm('rsr.change_projectupdate', is_rsr_admin)
162
163 rules.add_perm('rsr.add_projectupdatelocation', is_rsr_admin)
164 rules.add_perm('rsr.change_projectupdatelocation', is_rsr_admin)
165 rules.add_perm('rsr.delete_projectupdatelocation', is_rsr_admin)
166
167 rules.add_perm('rsr.add_relatedproject', is_rsr_admin | is_org_admin | is_org_project_editor)
168 rules.add_perm('rsr.change_relatedproject', is_rsr_admin | is_org_admin | is_org_project_editor)
169 rules.add_perm('rsr.delete_relatedproject', is_rsr_admin | is_org_admin | is_org_project_editor)
170
171 rules.add_perm('rsr.add_projectcomment', is_rsr_admin | is_org_admin | is_org_project_editor)
172 rules.add_perm('rsr.change_projectcomment', is_rsr_admin | is_org_admin | is_org_project_editor)
173
174 rules.add_perm('rsr.add_goal', is_rsr_admin | is_org_admin | is_org_project_editor)
175 rules.add_perm('rsr.change_goal', is_rsr_admin | is_org_admin | is_org_project_editor)
176 rules.add_perm('rsr.delete_goal', is_rsr_admin | is_org_admin | is_org_project_editor)
177
178 rules.add_perm('rsr.add_projectlocation', is_rsr_admin | is_org_admin | is_org_project_editor)
179 rules.add_perm('rsr.change_projectlocation', is_rsr_admin | is_org_admin | is_org_project_editor)
180 rules.add_perm('rsr.delete_projectlocation', is_rsr_admin | is_org_admin | is_org_project_editor)
181
182 rules.add_perm('rsr.add_budgetitem', is_rsr_admin | is_org_admin | is_org_project_editor)
183 rules.add_perm('rsr.change_budgetitem', is_rsr_admin | is_org_admin | is_org_project_editor)
184 rules.add_perm('rsr.delete_budgetitem', is_rsr_admin | is_org_admin | is_org_project_editor)
185
186 rules.add_perm('rsr.add_benchmark', is_rsr_admin | is_org_admin | is_org_project_editor)
187 rules.add_perm('rsr.change_benchmark', is_rsr_admin | is_org_admin | is_org_project_editor)
188 rules.add_perm('rsr.delete_benchmark', is_rsr_admin | is_org_admin | is_org_project_editor)
189
190 rules.add_perm('rsr.add_partnership', is_rsr_admin | is_org_admin | is_org_project_editor)
191 rules.add_perm('rsr.change_partnership', is_rsr_admin | is_org_admin | is_org_project_editor)
192 rules.add_perm('rsr.delete_partnership', is_rsr_admin | is_org_admin | is_org_project_editor)
193
194 rules.add_perm('rsr.add_link', is_rsr_admin | is_org_admin | is_org_project_editor)
195 rules.add_perm('rsr.change_link', is_rsr_admin | is_org_admin | is_org_project_editor)
196 rules.add_perm('rsr.delete_link', is_rsr_admin | is_org_admin | is_org_project_editor)
197
198 rules.add_perm('rsr.add_projectcondition', is_rsr_admin | is_org_admin | is_org_project_editor)
199 rules.add_perm('rsr.change_projectcondition', is_rsr_admin | is_org_admin | is_org_project_editor)
200 rules.add_perm('rsr.delete_projectcondition', is_rsr_admin | is_org_admin | is_org_project_editor)
201
202 rules.add_perm('rsr.add_projectcontact', is_rsr_admin | is_org_admin | is_org_project_editor)
203 rules.add_perm('rsr.change_projectcontact', is_rsr_admin | is_org_admin | is_org_project_editor)
204 rules.add_perm('rsr.delete_projectcontact', is_rsr_admin | is_org_admin | is_org_project_editor)
205
206 rules.add_perm('rsr.add_countrybudgetitem', is_rsr_admin | is_org_admin)
207 rules.add_perm('rsr.change_countrybudgetitem', is_rsr_admin | is_org_admin | is_org_project_editor)
208 rules.add_perm('rsr.delete_countrybudgetitem', is_rsr_admin | is_org_admin | is_org_project_editor)
209
210 rules.add_perm('rsr.add_planneddisbursement', is_rsr_admin | is_org_admin | is_org_project_editor)
211 rules.add_perm('rsr.change_planneddisbursement', is_rsr_admin | is_org_admin |
212 is_org_project_editor)
213 rules.add_perm('rsr.delete_planneddisbursement', is_rsr_admin | is_org_admin |
214 is_org_project_editor)
215
216 rules.add_perm('rsr.add_policymarker', is_rsr_admin | is_org_admin | is_org_project_editor)
217 rules.add_perm('rsr.change_policymarker', is_rsr_admin | is_org_admin | is_org_project_editor)
218 rules.add_perm('rsr.delete_policymarker', is_rsr_admin | is_org_admin | is_org_project_editor)
219
220 rules.add_perm('rsr.add_recipientcountry', is_rsr_admin | is_org_admin)
221 rules.add_perm('rsr.change_recipientcountry', is_rsr_admin | is_org_admin | is_org_project_editor)
222 rules.add_perm('rsr.delete_recipientcountry', is_rsr_admin | is_org_admin | is_org_project_editor)
223
224 rules.add_perm('rsr.add_recipientregion', is_rsr_admin | is_org_admin | is_org_project_editor)
225 rules.add_perm('rsr.change_recipientregion', is_rsr_admin | is_org_admin | is_org_project_editor)
226 rules.add_perm('rsr.delete_recipientregion', is_rsr_admin | is_org_admin | is_org_project_editor)
227
228 rules.add_perm('rsr.add_result', is_rsr_admin | is_org_admin | is_org_project_editor)
229 rules.add_perm('rsr.change_result', is_rsr_admin | is_org_admin | is_org_project_editor)
230 rules.add_perm('rsr.delete_result', is_rsr_admin | is_org_admin | is_org_project_editor)
231
232 rules.add_perm('rsr.add_sector', is_rsr_admin | is_org_admin | is_org_project_editor)
233 rules.add_perm('rsr.change_sector', is_rsr_admin | is_org_admin | is_org_project_editor)
234 rules.add_perm('rsr.delete_sector', is_rsr_admin | is_org_admin | is_org_project_editor)
235
236 rules.add_perm('rsr.add_transaction', is_rsr_admin | is_org_admin | is_org_project_editor)
237 rules.add_perm('rsr.change_transaction', is_rsr_admin | is_org_admin | is_org_project_editor)
238 rules.add_perm('rsr.delete_transaction', is_rsr_admin | is_org_admin | is_org_project_editor)
239
240 rules.add_perm('rsr.add_legacydata', is_rsr_admin | is_org_admin | is_org_project_editor)
241 rules.add_perm('rsr.change_legacydata', is_rsr_admin | is_org_admin | is_org_project_editor)
242 rules.add_perm('rsr.delete_legacydata', is_rsr_admin | is_org_admin | is_org_project_editor)
243
244 rules.add_perm('rsr.add_projectdocument', is_rsr_admin | is_org_admin | is_org_project_editor)
245 rules.add_perm('rsr.change_projectdocument', is_rsr_admin | is_org_admin | is_org_project_editor)
246 rules.add_perm('rsr.delete_projectdocument', is_rsr_admin | is_org_admin | is_org_project_editor)
247
248 rules.add_perm('rsr.add_organisation', is_rsr_admin)
249 rules.add_perm('rsr.change_organisation', is_rsr_admin | is_org_admin)
250
251 rules.add_perm('rsr.add_organisationlocation', is_rsr_admin | is_org_admin)
252 rules.add_perm('rsr.change_organisationlocation', is_rsr_admin | is_org_admin)
253 rules.add_perm('rsr.delete_organisationlocation', is_rsr_admin | is_org_admin)
254
255 rules.add_perm('rsr.add_project', is_rsr_admin | is_org_admin | is_org_project_editor)
256 rules.add_perm('rsr.change_project', is_rsr_admin | is_org_admin | is_org_project_editor)
257
258 rules.add_perm('rsr.change_publishingstatus', is_rsr_admin | is_org_admin)
259
260 rules.add_perm('rsr.add_user', is_rsr_admin)
261 rules.add_perm('rsr.change_user', is_rsr_admin | is_org_admin | is_org_user_manager | is_self)
262
263 rules.add_perm('tastypie.change_apikey', is_rsr_admin | is_org_admin | is_org_user_manager |
264 is_org_project_editor)
265
266 rules.add_perm('rsr.add_employment', is_rsr_admin)
267 rules.add_perm('rsr.change_employment', is_rsr_admin | is_org_admin | is_org_user_manager)
268
269 rules.add_perm('rsr.iati_management', is_rsr_admin | is_org_admin | is_org_project_editor)
270
271 rules.add_perm('rsr.user_management', is_rsr_admin | is_org_admin | is_org_user_manager)
272
273 rules.add_perm('rsr.post_updates', is_rsr_admin | is_org_admin | is_org_user_manager |
274 is_org_project_editor | is_org_user)
275
276
277 # Signals
278 pre_save.connect(employment_pre_save, sender=Employment)
279 post_save.connect(employment_post_save, sender=Employment)
280
281 post_save.connect(create_organisation_account, sender=Organisation)
282
283 post_save.connect(create_publishing_status, sender=Project)
284 post_save.connect(create_payment_gateway_selector, sender=Project)
285
286 if getattr(settings, "DONATION_NOTIFICATION_EMAILS", True):
287 post_save.connect(donation_completed, sender=Invoice)
288
289 post_save.connect(change_name_of_file_on_create, sender=Organisation)
290 post_save.connect(change_name_of_file_on_create, sender=Project)
291 post_save.connect(change_name_of_file_on_create, sender=ProjectUpdate)
292 post_save.connect(act_on_log_entry, sender=LogEntry)
293
294 pre_save.connect(change_name_of_file_on_change, sender=Organisation)
295 pre_save.connect(change_name_of_file_on_change, sender=Project)
296 pre_save.connect(change_name_of_file_on_change, sender=ProjectUpdate)
297
298 post_save.connect(update_project_budget, sender=BudgetItem)
299 post_save.connect(update_project_funding, sender=Invoice)
300 post_save.connect(update_project_funding, sender=Partnership)
301
302 post_delete.connect(update_project_budget, sender=BudgetItem)
303 post_delete.connect(update_project_funding, sender=Invoice)
304 post_delete.connect(update_project_funding, sender=Partnership)
305
306 post_save.connect(create_api_key, sender=User)
307
308 post_save.connect(create_iati_file, sender=IatiExport)
309
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rsr/models/__init__.py b/akvo/rsr/models/__init__.py
--- a/akvo/rsr/models/__init__.py
+++ b/akvo/rsr/models/__init__.py
@@ -265,6 +265,7 @@
rules.add_perm('rsr.add_employment', is_rsr_admin)
rules.add_perm('rsr.change_employment', is_rsr_admin | is_org_admin | is_org_user_manager)
+rules.add_perm('rsr.delete_employment', is_rsr_admin | is_org_admin | is_org_user_manager)
rules.add_perm('rsr.iati_management', is_rsr_admin | is_org_admin | is_org_project_editor)
| {"golden_diff": "diff --git a/akvo/rsr/models/__init__.py b/akvo/rsr/models/__init__.py\n--- a/akvo/rsr/models/__init__.py\n+++ b/akvo/rsr/models/__init__.py\n@@ -265,6 +265,7 @@\n \n rules.add_perm('rsr.add_employment', is_rsr_admin)\n rules.add_perm('rsr.change_employment', is_rsr_admin | is_org_admin | is_org_user_manager)\n+rules.add_perm('rsr.delete_employment', is_rsr_admin | is_org_admin | is_org_user_manager)\n \n rules.add_perm('rsr.iati_management', is_rsr_admin | is_org_admin | is_org_project_editor)\n", "issue": "User permissions for deleting employments\nCurrently the organisation Admins and User Managers do not have the right to delete an employment. As they are managing these employments, they should be able to do so.\n## Test plan\n\nGIVEN the user management page\nWHEN logged in as an Organisation Admin or User Manager\nTHEN the user should be able to delete employments\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nimport logging\nlogger = logging.getLogger('akvo.rsr')\n\nfrom django.conf import settings\nfrom django.db.models.signals import pre_save, post_save, post_delete\nfrom django.contrib.admin.models import LogEntry\n\nfrom akvo.api.models import create_api_key\n\nfrom ..signals import (\n change_name_of_file_on_change, change_name_of_file_on_create,\n create_publishing_status, create_organisation_account,\n create_payment_gateway_selector, donation_completed, act_on_log_entry,\n employment_post_save, employment_pre_save, update_project_budget,\n update_project_funding, create_iati_file)\n\nfrom .benchmark import Benchmark, Benchmarkname\nfrom .budget_item import BudgetItem, BudgetItemLabel, CountryBudgetItem\nfrom .country import Country, RecipientCountry\nfrom .crs_add import CrsAdd, CrsAddOtherFlag\nfrom .category import Category\nfrom .employment import Employment\nfrom .focus_area import FocusArea\nfrom .fss import Fss, FssForecast\nfrom .goal import Goal\nfrom .iati_export import IatiExport\nfrom .indicator import Indicator, IndicatorPeriod\nfrom .invoice import Invoice\nfrom .internal_organisation_id import InternalOrganisationID\nfrom .keyword import Keyword\nfrom .legacy_data import LegacyData\nfrom .link import Link\nfrom .location import (OrganisationLocation, ProjectLocation, ProjectUpdateLocation,\n AdministrativeLocation)\nfrom .organisation import Organisation\nfrom .organisation_account import OrganisationAccount\nfrom .partner_site import PartnerSite\nfrom .partner_type import PartnerType\nfrom .partnership import Partnership\nfrom .payment_gateway import PayPalGateway, MollieGateway, PaymentGatewaySelector\nfrom .planned_disbursement import PlannedDisbursement\nfrom .policy_marker import PolicyMarker\nfrom .project import Project\nfrom .project_comment import ProjectComment\nfrom .project_condition import ProjectCondition\nfrom .project_contact import ProjectContact\nfrom .project_document import ProjectDocument\nfrom .project_update import ProjectUpdate\nfrom .publishing_status import PublishingStatus\nfrom .region import RecipientRegion\nfrom .related_project import RelatedProject\nfrom .result import Result\nfrom .sector import Sector\nfrom .transaction import Transaction, TransactionSector\nfrom .user import User\n\n__all__ = [\n 'Benchmark',\n 'Benchmarkname',\n 'BudgetItem',\n 'BudgetItemLabel',\n 'CountryBudgetItem',\n 'Country',\n 'RecipientCountry',\n 'Category',\n 'CrsAdd',\n 'CrsAddOtherFlag',\n 'Employment',\n 'FocusArea',\n 'Fss',\n 'FssForecast',\n 'Goal',\n 'IatiExport',\n 'Indicator',\n 'IndicatorPeriod',\n 'Invoice',\n 'InternalOrganisationID',\n 'Keyword',\n 'LegacyData',\n 'Link',\n 'OrganisationLocation',\n 'ProjectLocation',\n 'AdministrativeLocation',\n 'ProjectUpdateLocation',\n 'Organisation',\n 'OrganisationAccount',\n 'PartnerSite',\n 'PartnerType',\n 'Partnership',\n 'PayPalGateway',\n 'MollieGateway',\n 'PaymentGatewaySelector',\n 'PlannedDisbursement',\n 'PolicyMarker',\n 'Project',\n 'ProjectComment',\n 'ProjectCondition',\n 'ProjectContact',\n 'ProjectDocument',\n 'ProjectUpdate',\n 'PublishingStatus',\n 'RecipientRegion',\n 'RelatedProject',\n 'Result',\n 'Sector',\n 'Transaction',\n 'TransactionSector',\n 'User',\n]\n\n# Permission rules\nimport rules\nfrom ..permissions import (is_rsr_admin, is_org_admin, is_org_user_manager,\n is_org_project_editor, is_org_user, is_self)\n\nrules.add_perm('rsr', rules.always_allow)\n\nrules.add_perm('rsr.add_benchmarkname', is_rsr_admin)\nrules.add_perm('rsr.change_benchmarkname', is_rsr_admin)\n\nrules.add_perm('rsr.add_country', is_rsr_admin)\nrules.add_perm('rsr.change_country', is_rsr_admin)\n\nrules.add_perm('rsr.add_budgetitemlabel', is_rsr_admin)\nrules.add_perm('rsr.change_budgetitemlabel', is_rsr_admin)\n\nrules.add_perm('rsr.add_category', is_rsr_admin)\nrules.add_perm('rsr.change_category', is_rsr_admin)\n\nrules.add_perm('rsr.add_focusarea', is_rsr_admin)\nrules.add_perm('rsr.change_focusarea', is_rsr_admin)\n\nrules.add_perm('rsr.add_indicator', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_indicator', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_indicator', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_indicatorperiod', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_indicatorperiod', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_indicatorperiod', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_keyword', is_rsr_admin)\nrules.add_perm('rsr.change_keyword', is_rsr_admin)\n\nrules.add_perm('rsr.add_partnersite', is_rsr_admin)\nrules.add_perm('rsr.change_partnersite', is_rsr_admin | is_org_admin)\n\nrules.add_perm('rsr.add_partnertype', is_rsr_admin)\nrules.add_perm('rsr.change_partnertype', is_rsr_admin)\n\nrules.add_perm('rsr.change_organisationaccount', is_rsr_admin)\n\nrules.add_perm('rsr.add_projectupdate', is_rsr_admin | is_org_admin | is_org_user_manager |\n is_org_project_editor | is_org_user)\nrules.add_perm('rsr.change_projectupdate', is_rsr_admin)\n\nrules.add_perm('rsr.add_projectupdatelocation', is_rsr_admin)\nrules.add_perm('rsr.change_projectupdatelocation', is_rsr_admin)\nrules.add_perm('rsr.delete_projectupdatelocation', is_rsr_admin)\n\nrules.add_perm('rsr.add_relatedproject', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_relatedproject', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_relatedproject', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_projectcomment', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_projectcomment', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_goal', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_goal', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_goal', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_projectlocation', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_projectlocation', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_projectlocation', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_budgetitem', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_budgetitem', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_budgetitem', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_benchmark', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_benchmark', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_benchmark', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_partnership', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_partnership', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_partnership', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_link', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_link', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_link', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_projectcondition', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_projectcondition', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_projectcondition', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_projectcontact', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_projectcontact', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_projectcontact', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_countrybudgetitem', is_rsr_admin | is_org_admin)\nrules.add_perm('rsr.change_countrybudgetitem', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_countrybudgetitem', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_planneddisbursement', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_planneddisbursement', is_rsr_admin | is_org_admin |\n is_org_project_editor)\nrules.add_perm('rsr.delete_planneddisbursement', is_rsr_admin | is_org_admin |\n is_org_project_editor)\n\nrules.add_perm('rsr.add_policymarker', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_policymarker', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_policymarker', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_recipientcountry', is_rsr_admin | is_org_admin)\nrules.add_perm('rsr.change_recipientcountry', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_recipientcountry', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_recipientregion', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_recipientregion', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_recipientregion', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_result', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_result', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_result', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_sector', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_sector', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_sector', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_transaction', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_transaction', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_transaction', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_legacydata', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_legacydata', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_legacydata', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_projectdocument', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_projectdocument', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_projectdocument', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_organisation', is_rsr_admin)\nrules.add_perm('rsr.change_organisation', is_rsr_admin | is_org_admin)\n\nrules.add_perm('rsr.add_organisationlocation', is_rsr_admin | is_org_admin)\nrules.add_perm('rsr.change_organisationlocation', is_rsr_admin | is_org_admin)\nrules.add_perm('rsr.delete_organisationlocation', is_rsr_admin | is_org_admin)\n\nrules.add_perm('rsr.add_project', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_project', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.change_publishingstatus', is_rsr_admin | is_org_admin)\n\nrules.add_perm('rsr.add_user', is_rsr_admin)\nrules.add_perm('rsr.change_user', is_rsr_admin | is_org_admin | is_org_user_manager | is_self)\n\nrules.add_perm('tastypie.change_apikey', is_rsr_admin | is_org_admin | is_org_user_manager |\n is_org_project_editor)\n\nrules.add_perm('rsr.add_employment', is_rsr_admin)\nrules.add_perm('rsr.change_employment', is_rsr_admin | is_org_admin | is_org_user_manager)\n\nrules.add_perm('rsr.iati_management', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.user_management', is_rsr_admin | is_org_admin | is_org_user_manager)\n\nrules.add_perm('rsr.post_updates', is_rsr_admin | is_org_admin | is_org_user_manager |\n is_org_project_editor | is_org_user)\n\n\n# Signals\npre_save.connect(employment_pre_save, sender=Employment)\npost_save.connect(employment_post_save, sender=Employment)\n\npost_save.connect(create_organisation_account, sender=Organisation)\n\npost_save.connect(create_publishing_status, sender=Project)\npost_save.connect(create_payment_gateway_selector, sender=Project)\n\nif getattr(settings, \"DONATION_NOTIFICATION_EMAILS\", True):\n post_save.connect(donation_completed, sender=Invoice)\n\npost_save.connect(change_name_of_file_on_create, sender=Organisation)\npost_save.connect(change_name_of_file_on_create, sender=Project)\npost_save.connect(change_name_of_file_on_create, sender=ProjectUpdate)\npost_save.connect(act_on_log_entry, sender=LogEntry)\n\npre_save.connect(change_name_of_file_on_change, sender=Organisation)\npre_save.connect(change_name_of_file_on_change, sender=Project)\npre_save.connect(change_name_of_file_on_change, sender=ProjectUpdate)\n\npost_save.connect(update_project_budget, sender=BudgetItem)\npost_save.connect(update_project_funding, sender=Invoice)\npost_save.connect(update_project_funding, sender=Partnership)\n\npost_delete.connect(update_project_budget, sender=BudgetItem)\npost_delete.connect(update_project_funding, sender=Invoice)\npost_delete.connect(update_project_funding, sender=Partnership)\n\npost_save.connect(create_api_key, sender=User)\n\npost_save.connect(create_iati_file, sender=IatiExport)\n", "path": "akvo/rsr/models/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nimport logging\nlogger = logging.getLogger('akvo.rsr')\n\nfrom django.conf import settings\nfrom django.db.models.signals import pre_save, post_save, post_delete\nfrom django.contrib.admin.models import LogEntry\n\nfrom akvo.api.models import create_api_key\n\nfrom ..signals import (\n change_name_of_file_on_change, change_name_of_file_on_create,\n create_publishing_status, create_organisation_account,\n create_payment_gateway_selector, donation_completed, act_on_log_entry,\n employment_post_save, employment_pre_save, update_project_budget,\n update_project_funding, create_iati_file)\n\nfrom .benchmark import Benchmark, Benchmarkname\nfrom .budget_item import BudgetItem, BudgetItemLabel, CountryBudgetItem\nfrom .country import Country, RecipientCountry\nfrom .crs_add import CrsAdd, CrsAddOtherFlag\nfrom .category import Category\nfrom .employment import Employment\nfrom .focus_area import FocusArea\nfrom .fss import Fss, FssForecast\nfrom .goal import Goal\nfrom .iati_export import IatiExport\nfrom .indicator import Indicator, IndicatorPeriod\nfrom .invoice import Invoice\nfrom .internal_organisation_id import InternalOrganisationID\nfrom .keyword import Keyword\nfrom .legacy_data import LegacyData\nfrom .link import Link\nfrom .location import (OrganisationLocation, ProjectLocation, ProjectUpdateLocation,\n AdministrativeLocation)\nfrom .organisation import Organisation\nfrom .organisation_account import OrganisationAccount\nfrom .partner_site import PartnerSite\nfrom .partner_type import PartnerType\nfrom .partnership import Partnership\nfrom .payment_gateway import PayPalGateway, MollieGateway, PaymentGatewaySelector\nfrom .planned_disbursement import PlannedDisbursement\nfrom .policy_marker import PolicyMarker\nfrom .project import Project\nfrom .project_comment import ProjectComment\nfrom .project_condition import ProjectCondition\nfrom .project_contact import ProjectContact\nfrom .project_document import ProjectDocument\nfrom .project_update import ProjectUpdate\nfrom .publishing_status import PublishingStatus\nfrom .region import RecipientRegion\nfrom .related_project import RelatedProject\nfrom .result import Result\nfrom .sector import Sector\nfrom .transaction import Transaction, TransactionSector\nfrom .user import User\n\n__all__ = [\n 'Benchmark',\n 'Benchmarkname',\n 'BudgetItem',\n 'BudgetItemLabel',\n 'CountryBudgetItem',\n 'Country',\n 'RecipientCountry',\n 'Category',\n 'CrsAdd',\n 'CrsAddOtherFlag',\n 'Employment',\n 'FocusArea',\n 'Fss',\n 'FssForecast',\n 'Goal',\n 'IatiExport',\n 'Indicator',\n 'IndicatorPeriod',\n 'Invoice',\n 'InternalOrganisationID',\n 'Keyword',\n 'LegacyData',\n 'Link',\n 'OrganisationLocation',\n 'ProjectLocation',\n 'AdministrativeLocation',\n 'ProjectUpdateLocation',\n 'Organisation',\n 'OrganisationAccount',\n 'PartnerSite',\n 'PartnerType',\n 'Partnership',\n 'PayPalGateway',\n 'MollieGateway',\n 'PaymentGatewaySelector',\n 'PlannedDisbursement',\n 'PolicyMarker',\n 'Project',\n 'ProjectComment',\n 'ProjectCondition',\n 'ProjectContact',\n 'ProjectDocument',\n 'ProjectUpdate',\n 'PublishingStatus',\n 'RecipientRegion',\n 'RelatedProject',\n 'Result',\n 'Sector',\n 'Transaction',\n 'TransactionSector',\n 'User',\n]\n\n# Permission rules\nimport rules\nfrom ..permissions import (is_rsr_admin, is_org_admin, is_org_user_manager,\n is_org_project_editor, is_org_user, is_self)\n\nrules.add_perm('rsr', rules.always_allow)\n\nrules.add_perm('rsr.add_benchmarkname', is_rsr_admin)\nrules.add_perm('rsr.change_benchmarkname', is_rsr_admin)\n\nrules.add_perm('rsr.add_country', is_rsr_admin)\nrules.add_perm('rsr.change_country', is_rsr_admin)\n\nrules.add_perm('rsr.add_budgetitemlabel', is_rsr_admin)\nrules.add_perm('rsr.change_budgetitemlabel', is_rsr_admin)\n\nrules.add_perm('rsr.add_category', is_rsr_admin)\nrules.add_perm('rsr.change_category', is_rsr_admin)\n\nrules.add_perm('rsr.add_focusarea', is_rsr_admin)\nrules.add_perm('rsr.change_focusarea', is_rsr_admin)\n\nrules.add_perm('rsr.add_indicator', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_indicator', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_indicator', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_indicatorperiod', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_indicatorperiod', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_indicatorperiod', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_keyword', is_rsr_admin)\nrules.add_perm('rsr.change_keyword', is_rsr_admin)\n\nrules.add_perm('rsr.add_partnersite', is_rsr_admin)\nrules.add_perm('rsr.change_partnersite', is_rsr_admin | is_org_admin)\n\nrules.add_perm('rsr.add_partnertype', is_rsr_admin)\nrules.add_perm('rsr.change_partnertype', is_rsr_admin)\n\nrules.add_perm('rsr.change_organisationaccount', is_rsr_admin)\n\nrules.add_perm('rsr.add_projectupdate', is_rsr_admin | is_org_admin | is_org_user_manager |\n is_org_project_editor | is_org_user)\nrules.add_perm('rsr.change_projectupdate', is_rsr_admin)\n\nrules.add_perm('rsr.add_projectupdatelocation', is_rsr_admin)\nrules.add_perm('rsr.change_projectupdatelocation', is_rsr_admin)\nrules.add_perm('rsr.delete_projectupdatelocation', is_rsr_admin)\n\nrules.add_perm('rsr.add_relatedproject', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_relatedproject', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_relatedproject', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_projectcomment', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_projectcomment', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_goal', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_goal', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_goal', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_projectlocation', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_projectlocation', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_projectlocation', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_budgetitem', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_budgetitem', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_budgetitem', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_benchmark', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_benchmark', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_benchmark', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_partnership', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_partnership', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_partnership', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_link', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_link', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_link', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_projectcondition', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_projectcondition', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_projectcondition', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_projectcontact', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_projectcontact', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_projectcontact', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_countrybudgetitem', is_rsr_admin | is_org_admin)\nrules.add_perm('rsr.change_countrybudgetitem', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_countrybudgetitem', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_planneddisbursement', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_planneddisbursement', is_rsr_admin | is_org_admin |\n is_org_project_editor)\nrules.add_perm('rsr.delete_planneddisbursement', is_rsr_admin | is_org_admin |\n is_org_project_editor)\n\nrules.add_perm('rsr.add_policymarker', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_policymarker', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_policymarker', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_recipientcountry', is_rsr_admin | is_org_admin)\nrules.add_perm('rsr.change_recipientcountry', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_recipientcountry', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_recipientregion', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_recipientregion', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_recipientregion', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_result', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_result', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_result', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_sector', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_sector', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_sector', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_transaction', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_transaction', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_transaction', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_legacydata', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_legacydata', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_legacydata', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_projectdocument', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_projectdocument', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_projectdocument', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_organisation', is_rsr_admin)\nrules.add_perm('rsr.change_organisation', is_rsr_admin | is_org_admin)\n\nrules.add_perm('rsr.add_organisationlocation', is_rsr_admin | is_org_admin)\nrules.add_perm('rsr.change_organisationlocation', is_rsr_admin | is_org_admin)\nrules.add_perm('rsr.delete_organisationlocation', is_rsr_admin | is_org_admin)\n\nrules.add_perm('rsr.add_project', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_project', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.change_publishingstatus', is_rsr_admin | is_org_admin)\n\nrules.add_perm('rsr.add_user', is_rsr_admin)\nrules.add_perm('rsr.change_user', is_rsr_admin | is_org_admin | is_org_user_manager | is_self)\n\nrules.add_perm('tastypie.change_apikey', is_rsr_admin | is_org_admin | is_org_user_manager |\n is_org_project_editor)\n\nrules.add_perm('rsr.add_employment', is_rsr_admin)\nrules.add_perm('rsr.change_employment', is_rsr_admin | is_org_admin | is_org_user_manager)\nrules.add_perm('rsr.delete_employment', is_rsr_admin | is_org_admin | is_org_user_manager)\n\nrules.add_perm('rsr.iati_management', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.user_management', is_rsr_admin | is_org_admin | is_org_user_manager)\n\nrules.add_perm('rsr.post_updates', is_rsr_admin | is_org_admin | is_org_user_manager |\n is_org_project_editor | is_org_user)\n\n\n# Signals\npre_save.connect(employment_pre_save, sender=Employment)\npost_save.connect(employment_post_save, sender=Employment)\n\npost_save.connect(create_organisation_account, sender=Organisation)\n\npost_save.connect(create_publishing_status, sender=Project)\npost_save.connect(create_payment_gateway_selector, sender=Project)\n\nif getattr(settings, \"DONATION_NOTIFICATION_EMAILS\", True):\n post_save.connect(donation_completed, sender=Invoice)\n\npost_save.connect(change_name_of_file_on_create, sender=Organisation)\npost_save.connect(change_name_of_file_on_create, sender=Project)\npost_save.connect(change_name_of_file_on_create, sender=ProjectUpdate)\npost_save.connect(act_on_log_entry, sender=LogEntry)\n\npre_save.connect(change_name_of_file_on_change, sender=Organisation)\npre_save.connect(change_name_of_file_on_change, sender=Project)\npre_save.connect(change_name_of_file_on_change, sender=ProjectUpdate)\n\npost_save.connect(update_project_budget, sender=BudgetItem)\npost_save.connect(update_project_funding, sender=Invoice)\npost_save.connect(update_project_funding, sender=Partnership)\n\npost_delete.connect(update_project_budget, sender=BudgetItem)\npost_delete.connect(update_project_funding, sender=Invoice)\npost_delete.connect(update_project_funding, sender=Partnership)\n\npost_save.connect(create_api_key, sender=User)\n\npost_save.connect(create_iati_file, sender=IatiExport)\n", "path": "akvo/rsr/models/__init__.py"}]} |
gh_patches_debug_1508 | rasdani/github-patches | git_diff | deis__deis-1332 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
deis auth:cancel doesn't delete the account
steps to reproduce the issue
```
deis register http://deis.54.193.35.8.xip.io --username=Test --password=asdf1234 [email protected]
Registered Test
Logged in as Test
sivarams-MacBook-Pro:integration ram$ deis auth:cancel
Please log in again in order to cancel this account
username: Test
password:
Logged in as Test
Cancel account "Test" at http://deis.54.193.35.8.xip.io? (y/n) y
Account cancelled
sivarams-MacBook-Pro:integration ram$ deis register http://deis.54.193.35.8.xip.io --username=Test --password=asdf1234 [email protected]
Registration failed {"username": ["User with this Username already exists."]}
```
auth:cancel says account cancelled but when I try to register with the same account it's not succeeding .
controller logs
```
2014-07-01 14:12:00 [119] [DEBUG] POST /api/auth/register
2014-07-01 14:12:01 [119] [DEBUG] GET /api/auth/login/
2014-07-01 14:12:01 [120] [DEBUG] POST /api/auth/login/
2014-07-01 14:12:14 [120] [DEBUG] GET /api/auth/login/
2014-07-01 14:12:14 [120] [DEBUG] POST /api/auth/login/
2014-07-01 14:12:16 [121] [DEBUG] DELETE /api/auth/cancel
ERROR Internal Server Error: /api/auth/cancel
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/django/core/handlers/base.py", line 114, in get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/usr/local/lib/python2.7/dist-packages/rest_framework/viewsets.py", line 78, in view
return self.dispatch(request, *args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/django/views/decorators/csrf.py", line 57, in wrapped_view
return view_func(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/rest_framework/views.py", line 400, in dispatch
response = self.handle_exception(exc)
File "/usr/local/lib/python2.7/dist-packages/rest_framework/views.py", line 397, in dispatch
response = handler(request, *args, **kwargs)
File "/app/api/views.py", line 162, in destroy
obj.delete()
File "/usr/local/lib/python2.7/dist-packages/django/db/models/base.py", line 695, in delete
collector.delete()
File "/usr/local/lib/python2.7/dist-packages/django/db/models/deletion.py", line 282, in delete
sender=model, instance=obj, using=self.using
File "/usr/local/lib/python2.7/dist-packages/django/dispatch/dispatcher.py", line 185, in send
response = receiver(signal=self, sender=sender, **named)
File "/app/api/models.py", line 613, in _etcd_purge_user
_etcd_client.delete('/deis/builder/users/{}'.format(username), dir=True, recursive=True)
File "/usr/local/lib/python2.7/dist-packages/etcd/client.py", line 343, in delete
self.key_endpoint + key, self._MDELETE, kwds)
File "/usr/local/lib/python2.7/dist-packages/etcd/client.py", line 533, in api_execute
return self._handle_server_response(response)
File "/usr/local/lib/python2.7/dist-packages/etcd/client.py", line 549, in _handle_server_response
etcd.EtcdError.handle(**r)
File "/usr/local/lib/python2.7/dist-packages/etcd/__init__.py", line 110, in handle
raise exc(msg)
KeyError: 'Key not found : /deis/builder/users/Test'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `controller/api/models.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """
4 Data models for the Deis API.
5 """
6
7 from __future__ import unicode_literals
8 import etcd
9 import importlib
10 import logging
11 import os
12 import subprocess
13
14 from celery.canvas import group
15 from django.conf import settings
16 from django.contrib.auth.models import User
17 from django.db import models, connections
18 from django.db.models import Max
19 from django.db.models.signals import post_delete
20 from django.db.models.signals import post_save
21 from django.utils.encoding import python_2_unicode_compatible
22 from django_fsm import FSMField, transition
23 from django_fsm.signals import post_transition
24 from json_field.fields import JSONField
25
26 from api import fields, tasks
27 from registry import publish_release
28 from utils import dict_diff, fingerprint
29
30
31 logger = logging.getLogger(__name__)
32
33
34 def log_event(app, msg, level=logging.INFO):
35 msg = "{}: {}".format(app.id, msg)
36 logger.log(level, msg)
37
38
39 def close_db_connections(func, *args, **kwargs):
40 """
41 Decorator to close db connections during threaded execution
42
43 Note this is necessary to work around:
44 https://code.djangoproject.com/ticket/22420
45 """
46 def _inner(*args, **kwargs):
47 func(*args, **kwargs)
48 for conn in connections.all():
49 conn.close()
50 return _inner
51
52
53 class AuditedModel(models.Model):
54 """Add created and updated fields to a model."""
55
56 created = models.DateTimeField(auto_now_add=True)
57 updated = models.DateTimeField(auto_now=True)
58
59 class Meta:
60 """Mark :class:`AuditedModel` as abstract."""
61 abstract = True
62
63
64 class UuidAuditedModel(AuditedModel):
65 """Add a UUID primary key to an :class:`AuditedModel`."""
66
67 uuid = fields.UuidField('UUID', primary_key=True)
68
69 class Meta:
70 """Mark :class:`UuidAuditedModel` as abstract."""
71 abstract = True
72
73
74 @python_2_unicode_compatible
75 class Cluster(UuidAuditedModel):
76 """
77 Cluster used to run jobs
78 """
79
80 CLUSTER_TYPES = (('mock', 'Mock Cluster'),
81 ('coreos', 'CoreOS Cluster'),
82 ('faulty', 'Faulty Cluster'))
83
84 owner = models.ForeignKey(settings.AUTH_USER_MODEL)
85 id = models.CharField(max_length=128, unique=True)
86 type = models.CharField(max_length=16, choices=CLUSTER_TYPES, default='coreos')
87
88 domain = models.CharField(max_length=128)
89 hosts = models.CharField(max_length=256)
90 auth = models.TextField()
91 options = JSONField(default='{}', blank=True)
92
93 def __str__(self):
94 return self.id
95
96 def _get_scheduler(self, *args, **kwargs):
97 module_name = 'scheduler.' + self.type
98 mod = importlib.import_module(module_name)
99 return mod.SchedulerClient(self.id, self.hosts, self.auth,
100 self.domain, self.options)
101
102 _scheduler = property(_get_scheduler)
103
104 def create(self):
105 """
106 Initialize a cluster's router and log aggregator
107 """
108 return tasks.create_cluster.delay(self).get()
109
110 def destroy(self):
111 """
112 Destroy a cluster's router and log aggregator
113 """
114 return tasks.destroy_cluster.delay(self).get()
115
116
117 @python_2_unicode_compatible
118 class App(UuidAuditedModel):
119 """
120 Application used to service requests on behalf of end-users
121 """
122
123 owner = models.ForeignKey(settings.AUTH_USER_MODEL)
124 id = models.SlugField(max_length=64, unique=True)
125 cluster = models.ForeignKey('Cluster')
126 structure = JSONField(default='{}', blank=True)
127
128 class Meta:
129 permissions = (('use_app', 'Can use app'),)
130
131 def __str__(self):
132 return self.id
133
134 def create(self, *args, **kwargs):
135 config = Config.objects.create(owner=self.owner, app=self, values={})
136 build = Build.objects.create(owner=self.owner, app=self, image=settings.DEFAULT_BUILD)
137 Release.objects.create(version=1, owner=self.owner, app=self, config=config, build=build)
138
139 def delete(self, *args, **kwargs):
140 for c in self.container_set.all():
141 c.destroy()
142 return super(App, self).delete(*args, **kwargs)
143
144 def deploy(self, release, initial=False):
145 tasks.deploy_release.delay(self, release).get()
146 if initial:
147 # if there is no SHA, assume a docker image is being promoted
148 if not release.build.sha:
149 self.structure = {'cmd': 1}
150 # if a dockerfile exists without a procfile, assume docker workflow
151 elif release.build.dockerfile and not release.build.procfile:
152 self.structure = {'cmd': 1}
153 # if a procfile exists without a web entry, assume docker workflow
154 elif release.build.procfile and not 'web' in release.build.procfile:
155 self.structure = {'cmd': 1}
156 # default to heroku workflow
157 else:
158 self.structure = {'web': 1}
159 self.save()
160 self.scale()
161
162 def destroy(self, *args, **kwargs):
163 return self.delete(*args, **kwargs)
164
165 def scale(self, **kwargs): # noqa
166 """Scale containers up or down to match requested."""
167 requested_containers = self.structure.copy()
168 release = self.release_set.latest()
169 # test for available process types
170 available_process_types = release.build.procfile or {}
171 for container_type in requested_containers.keys():
172 if container_type == 'cmd':
173 continue # allow docker cmd types in case we don't have the image source
174 if not container_type in available_process_types:
175 raise EnvironmentError(
176 'Container type {} does not exist in application'.format(container_type))
177 msg = 'Containers scaled ' + ' '.join(
178 "{}={}".format(k, v) for k, v in requested_containers.items())
179 # iterate and scale by container type (web, worker, etc)
180 changed = False
181 to_add, to_remove = [], []
182 for container_type in requested_containers.keys():
183 containers = list(self.container_set.filter(type=container_type).order_by('created'))
184 # increment new container nums off the most recent container
185 results = self.container_set.filter(type=container_type).aggregate(Max('num'))
186 container_num = (results.get('num__max') or 0) + 1
187 requested = requested_containers.pop(container_type)
188 diff = requested - len(containers)
189 if diff == 0:
190 continue
191 changed = True
192 while diff < 0:
193 c = containers.pop()
194 to_remove.append(c)
195 diff += 1
196 while diff > 0:
197 c = Container.objects.create(owner=self.owner,
198 app=self,
199 release=release,
200 type=container_type,
201 num=container_num)
202 to_add.append(c)
203 container_num += 1
204 diff -= 1
205 if changed:
206 subtasks = []
207 if to_add:
208 subtasks.append(tasks.start_containers.s(to_add))
209 if to_remove:
210 subtasks.append(tasks.stop_containers.s(to_remove))
211 group(*subtasks).apply_async().join()
212 log_event(self, msg)
213 return changed
214
215 def logs(self):
216 """Return aggregated log data for this application."""
217 path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
218 if not os.path.exists(path):
219 raise EnvironmentError('Could not locate logs')
220 data = subprocess.check_output(['tail', '-n', str(settings.LOG_LINES), path])
221 return data
222
223 def run(self, command):
224 """Run a one-off command in an ephemeral app container."""
225 # TODO: add support for interactive shell
226 log_event(self, "deis run '{}'".format(command))
227 c_num = max([c.num for c in self.container_set.filter(type='admin')] or [0]) + 1
228 c = Container.objects.create(owner=self.owner,
229 app=self,
230 release=self.release_set.latest(),
231 type='admin',
232 num=c_num)
233 rc, output = tasks.run_command.delay(c, command).get()
234 return rc, output
235
236
237 @python_2_unicode_compatible
238 class Container(UuidAuditedModel):
239 """
240 Docker container used to securely host an application process.
241 """
242 INITIALIZED = 'initialized'
243 CREATED = 'created'
244 UP = 'up'
245 DOWN = 'down'
246 DESTROYED = 'destroyed'
247 STATE_CHOICES = (
248 (INITIALIZED, 'initialized'),
249 (CREATED, 'created'),
250 (UP, 'up'),
251 (DOWN, 'down'),
252 (DESTROYED, 'destroyed')
253 )
254
255 owner = models.ForeignKey(settings.AUTH_USER_MODEL)
256 app = models.ForeignKey('App')
257 release = models.ForeignKey('Release')
258 type = models.CharField(max_length=128, blank=True)
259 num = models.PositiveIntegerField()
260 state = FSMField(default=INITIALIZED, choices=STATE_CHOICES, protected=True)
261
262 def short_name(self):
263 if self.type:
264 return "{}.{}.{}".format(self.release.app.id, self.type, self.num)
265 return "{}.{}".format(self.release.app.id, self.num)
266 short_name.short_description = 'Name'
267
268 def __str__(self):
269 return self.short_name()
270
271 class Meta:
272 get_latest_by = '-created'
273 ordering = ['created']
274
275 def _get_job_id(self):
276 app = self.app.id
277 release = self.release
278 version = "v{}".format(release.version)
279 num = self.num
280 c_type = self.type
281 if not c_type:
282 job_id = "{app}_{version}.{num}".format(**locals())
283 else:
284 job_id = "{app}_{version}.{c_type}.{num}".format(**locals())
285 return job_id
286
287 _job_id = property(_get_job_id)
288
289 def _get_scheduler(self):
290 return self.app.cluster._scheduler
291
292 _scheduler = property(_get_scheduler)
293
294 def _get_command(self):
295 c_type = self.type
296 if c_type:
297 # handle special case for Dockerfile deployments
298 if c_type == 'cmd':
299 return ''
300 else:
301 return "start {}".format(c_type)
302 else:
303 return ''
304
305 _command = property(_get_command)
306
307 def _command_announceable(self):
308 return self._command.lower() in ['start web', '']
309
310 @close_db_connections
311 @transition(field=state, source=INITIALIZED, target=CREATED)
312 def create(self):
313 image = self.release.image
314 self._scheduler.create(name=self._job_id,
315 image=image,
316 command=self._command,
317 use_announcer=self._command_announceable())
318
319 @close_db_connections
320 @transition(field=state,
321 source=[CREATED, UP, DOWN],
322 target=UP, crashed=DOWN)
323 def start(self):
324 self._scheduler.start(self._job_id, self._command_announceable())
325
326 @close_db_connections
327 @transition(field=state,
328 source=[INITIALIZED, CREATED, UP, DOWN],
329 target=UP,
330 crashed=DOWN)
331 def deploy(self, release):
332 old_job_id = self._job_id
333 # update release
334 self.release = release
335 self.save()
336 # deploy new container
337 new_job_id = self._job_id
338 image = self.release.image
339 c_type = self.type
340 self._scheduler.create(name=new_job_id,
341 image=image,
342 command=self._command.format(**locals()),
343 use_announcer=self._command_announceable())
344 self._scheduler.start(new_job_id, self._command_announceable())
345 # destroy old container
346 self._scheduler.destroy(old_job_id, self._command_announceable())
347
348 @close_db_connections
349 @transition(field=state, source=UP, target=DOWN)
350 def stop(self):
351 self._scheduler.stop(self._job_id, self._command_announceable())
352
353 @close_db_connections
354 @transition(field=state,
355 source=[INITIALIZED, CREATED, UP, DOWN],
356 target=DESTROYED)
357 def destroy(self):
358 # TODO: add check for active connections before killing
359 self._scheduler.destroy(self._job_id, self._command_announceable())
360
361 @transition(field=state,
362 source=[INITIALIZED, CREATED, DESTROYED],
363 target=DESTROYED)
364 def run(self, command):
365 """Run a one-off command"""
366 rc, output = self._scheduler.run(self._job_id, self.release.image, command)
367 return rc, output
368
369
370 @python_2_unicode_compatible
371 class Push(UuidAuditedModel):
372 """
373 Instance of a push used to trigger an application build
374 """
375 owner = models.ForeignKey(settings.AUTH_USER_MODEL)
376 app = models.ForeignKey('App')
377 sha = models.CharField(max_length=40)
378
379 fingerprint = models.CharField(max_length=255)
380 receive_user = models.CharField(max_length=255)
381 receive_repo = models.CharField(max_length=255)
382
383 ssh_connection = models.CharField(max_length=255)
384 ssh_original_command = models.CharField(max_length=255)
385
386 class Meta:
387 get_latest_by = 'created'
388 ordering = ['-created']
389 unique_together = (('app', 'uuid'),)
390
391 def __str__(self):
392 return "{0}-{1}".format(self.app.id, self.sha[:7])
393
394
395 @python_2_unicode_compatible
396 class Build(UuidAuditedModel):
397 """
398 Instance of a software build used by runtime nodes
399 """
400
401 owner = models.ForeignKey(settings.AUTH_USER_MODEL)
402 app = models.ForeignKey('App')
403 image = models.CharField(max_length=256)
404
405 # optional fields populated by builder
406 sha = models.CharField(max_length=40, blank=True)
407 procfile = JSONField(default='{}', blank=True)
408 dockerfile = models.TextField(blank=True)
409
410 class Meta:
411 get_latest_by = 'created'
412 ordering = ['-created']
413 unique_together = (('app', 'uuid'),)
414
415 def __str__(self):
416 return "{0}-{1}".format(self.app.id, self.uuid[:7])
417
418
419 @python_2_unicode_compatible
420 class Config(UuidAuditedModel):
421 """
422 Set of configuration values applied as environment variables
423 during runtime execution of the Application.
424 """
425
426 owner = models.ForeignKey(settings.AUTH_USER_MODEL)
427 app = models.ForeignKey('App')
428 values = JSONField(default='{}', blank=True)
429
430 class Meta:
431 get_latest_by = 'created'
432 ordering = ['-created']
433 unique_together = (('app', 'uuid'),)
434
435 def __str__(self):
436 return "{}-{}".format(self.app.id, self.uuid[:7])
437
438
439 @python_2_unicode_compatible
440 class Release(UuidAuditedModel):
441 """
442 Software release deployed by the application platform
443
444 Releases contain a :class:`Build` and a :class:`Config`.
445 """
446
447 owner = models.ForeignKey(settings.AUTH_USER_MODEL)
448 app = models.ForeignKey('App')
449 version = models.PositiveIntegerField()
450 summary = models.TextField(blank=True, null=True)
451
452 config = models.ForeignKey('Config')
453 build = models.ForeignKey('Build')
454 # NOTE: image contains combined build + config, ready to run
455 image = models.CharField(max_length=256)
456
457 class Meta:
458 get_latest_by = 'created'
459 ordering = ['-created']
460 unique_together = (('app', 'version'),)
461
462 def __str__(self):
463 return "{0}-v{1}".format(self.app.id, self.version)
464
465 def new(self, user, config=None, build=None, summary=None, source_version=None):
466 """
467 Create a new application release using the provided Build and Config
468 on behalf of a user.
469
470 Releases start at v1 and auto-increment.
471 """
472 if not config:
473 config = self.config
474 if not build:
475 build = self.build
476 if not source_version:
477 source_version = 'latest'
478 else:
479 source_version = 'v{}'.format(source_version)
480 # prepare release tag
481 new_version = self.version + 1
482 tag = 'v{}'.format(new_version)
483 image = build.image + ':{tag}'.format(**locals())
484 # create new release and auto-increment version
485 release = Release.objects.create(
486 owner=user, app=self.app, config=config,
487 build=build, version=new_version, image=image, summary=summary)
488 # publish release to registry as new docker image
489 repository_path = self.app.id
490 publish_release(repository_path,
491 config.values,
492 tag,
493 source_tag=source_version)
494 return release
495
496 def previous(self):
497 """
498 Return the previous Release to this one.
499
500 :return: the previous :class:`Release`, or None
501 """
502 releases = self.app.release_set
503 if self.pk:
504 releases = releases.exclude(pk=self.pk)
505 try:
506 # Get the Release previous to this one
507 prev_release = releases.latest()
508 except Release.DoesNotExist:
509 prev_release = None
510 return prev_release
511
512 def save(self, *args, **kwargs):
513 if not self.summary:
514 self.summary = ''
515 prev_release = self.previous()
516 # compare this build to the previous build
517 old_build = prev_release.build if prev_release else None
518 # if the build changed, log it and who pushed it
519 if self.build != old_build:
520 if self.build.sha:
521 self.summary += "{} deployed {}".format(self.build.owner, self.build.sha[:7])
522 else:
523 self.summary += "{} deployed {}".format(self.build.owner, self.build.image)
524 # compare this config to the previous config
525 old_config = prev_release.config if prev_release else None
526 # if the config data changed, log the dict diff
527 if self.config != old_config:
528 dict1 = self.config.values
529 dict2 = old_config.values if old_config else {}
530 diff = dict_diff(dict1, dict2)
531 # try to be as succinct as possible
532 added = ', '.join(k for k in diff.get('added', {}))
533 added = 'added ' + added if added else ''
534 changed = ', '.join(k for k in diff.get('changed', {}))
535 changed = 'changed ' + changed if changed else ''
536 deleted = ', '.join(k for k in diff.get('deleted', {}))
537 deleted = 'deleted ' + deleted if deleted else ''
538 changes = ', '.join(i for i in (added, changed, deleted) if i)
539 if changes:
540 if self.summary:
541 self.summary += ' and '
542 self.summary += "{} {}".format(self.config.owner, changes)
543 if not self.summary:
544 if self.version == 1:
545 self.summary = "{} created the initial release".format(self.owner)
546 else:
547 self.summary = "{} changed nothing".format(self.owner)
548 super(Release, self).save(*args, **kwargs)
549
550
551 @python_2_unicode_compatible
552 class Domain(AuditedModel):
553 owner = models.ForeignKey(settings.AUTH_USER_MODEL)
554 app = models.ForeignKey('App')
555 domain = models.TextField(blank=False, null=False, unique=True)
556
557 def __str__(self):
558 return self.domain
559
560
561 @python_2_unicode_compatible
562 class Key(UuidAuditedModel):
563 """An SSH public key."""
564
565 owner = models.ForeignKey(settings.AUTH_USER_MODEL)
566 id = models.CharField(max_length=128)
567 public = models.TextField(unique=True)
568
569 class Meta:
570 verbose_name = 'SSH Key'
571 unique_together = (('owner', 'id'))
572
573 def __str__(self):
574 return "{}...{}".format(self.public[:18], self.public[-31:])
575
576
577 # define update/delete callbacks for synchronizing
578 # models with the configuration management backend
579
580 def _log_build_created(**kwargs):
581 if kwargs.get('created'):
582 build = kwargs['instance']
583 log_event(build.app, "Build {} created".format(build))
584
585
586 def _log_release_created(**kwargs):
587 if kwargs.get('created'):
588 release = kwargs['instance']
589 log_event(release.app, "Release {} created".format(release))
590
591
592 def _log_config_updated(**kwargs):
593 config = kwargs['instance']
594 log_event(config.app, "Config {} updated".format(config))
595
596
597 def _log_domain_added(**kwargs):
598 domain = kwargs['instance']
599 log_event(domain.app, "Domain {} added".format(domain))
600
601
602 def _log_domain_removed(**kwargs):
603 domain = kwargs['instance']
604 log_event(domain.app, "Domain {} removed".format(domain))
605
606
607 def _etcd_publish_key(**kwargs):
608 key = kwargs['instance']
609 _etcd_client.write('/deis/builder/users/{}/{}'.format(
610 key.owner.username, fingerprint(key.public)), key.public)
611
612
613 def _etcd_purge_key(**kwargs):
614 key = kwargs['instance']
615 _etcd_client.delete('/deis/builder/users/{}/{}'.format(
616 key.owner.username, fingerprint(key.public)))
617
618
619 def _etcd_purge_user(**kwargs):
620 username = kwargs['instance'].username
621 _etcd_client.delete('/deis/builder/users/{}'.format(username), dir=True, recursive=True)
622
623
624 def _etcd_publish_domains(**kwargs):
625 app = kwargs['instance'].app
626 app_domains = app.domain_set.all()
627 if app_domains:
628 _etcd_client.write('/deis/domains/{}'.format(app),
629 ' '.join(str(d.domain) for d in app_domains))
630 else:
631 _etcd_client.delete('/deis/domains/{}'.format(app))
632
633
634 # Log significant app-related events
635 post_save.connect(_log_build_created, sender=Build, dispatch_uid='api.models.log')
636 post_save.connect(_log_release_created, sender=Release, dispatch_uid='api.models.log')
637 post_save.connect(_log_config_updated, sender=Config, dispatch_uid='api.models.log')
638 post_save.connect(_log_domain_added, sender=Domain, dispatch_uid='api.models.log')
639 post_delete.connect(_log_domain_removed, sender=Domain, dispatch_uid='api.models.log')
640
641
642 # save FSM transitions as they happen
643 def _save_transition(**kwargs):
644 kwargs['instance'].save()
645
646 post_transition.connect(_save_transition)
647
648 # wire up etcd publishing if we can connect
649 try:
650 _etcd_client = etcd.Client(host=settings.ETCD_HOST, port=int(settings.ETCD_PORT))
651 _etcd_client.get('/deis')
652 except etcd.EtcdException:
653 logger.log(logging.WARNING, 'Cannot synchronize with etcd cluster')
654 _etcd_client = None
655
656 if _etcd_client:
657 post_save.connect(_etcd_publish_key, sender=Key, dispatch_uid='api.models')
658 post_delete.connect(_etcd_purge_key, sender=Key, dispatch_uid='api.models')
659 post_delete.connect(_etcd_purge_user, sender=User, dispatch_uid='api.models')
660 post_save.connect(_etcd_publish_domains, sender=Domain, dispatch_uid='api.models')
661 post_delete.connect(_etcd_publish_domains, sender=Domain, dispatch_uid='api.models')
662
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/controller/api/models.py b/controller/api/models.py
--- a/controller/api/models.py
+++ b/controller/api/models.py
@@ -618,7 +618,12 @@
def _etcd_purge_user(**kwargs):
username = kwargs['instance'].username
- _etcd_client.delete('/deis/builder/users/{}'.format(username), dir=True, recursive=True)
+ try:
+ _etcd_client.delete(
+ '/deis/builder/users/{}'.format(username), dir=True, recursive=True)
+ except KeyError:
+ # If _etcd_publish_key() wasn't called, there is no user dir to delete.
+ pass
def _etcd_publish_domains(**kwargs):
| {"golden_diff": "diff --git a/controller/api/models.py b/controller/api/models.py\n--- a/controller/api/models.py\n+++ b/controller/api/models.py\n@@ -618,7 +618,12 @@\n \n def _etcd_purge_user(**kwargs):\n username = kwargs['instance'].username\n- _etcd_client.delete('/deis/builder/users/{}'.format(username), dir=True, recursive=True)\n+ try:\n+ _etcd_client.delete(\n+ '/deis/builder/users/{}'.format(username), dir=True, recursive=True)\n+ except KeyError:\n+ # If _etcd_publish_key() wasn't called, there is no user dir to delete.\n+ pass\n \n \n def _etcd_publish_domains(**kwargs):\n", "issue": "deis auth:cancel doesn't delete the account \nsteps to reproduce the issue \n\n```\ndeis register http://deis.54.193.35.8.xip.io --username=Test --password=asdf1234 [email protected]\nRegistered Test\nLogged in as Test\nsivarams-MacBook-Pro:integration ram$ deis auth:cancel\nPlease log in again in order to cancel this account\nusername: Test\npassword: \nLogged in as Test\nCancel account \"Test\" at http://deis.54.193.35.8.xip.io? (y/n) y\nAccount cancelled\nsivarams-MacBook-Pro:integration ram$ deis register http://deis.54.193.35.8.xip.io --username=Test --password=asdf1234 [email protected]\nRegistration failed {\"username\": [\"User with this Username already exists.\"]}\n```\n\nauth:cancel says account cancelled but when I try to register with the same account it's not succeeding . \ncontroller logs\n\n```\n2014-07-01 14:12:00 [119] [DEBUG] POST /api/auth/register\n2014-07-01 14:12:01 [119] [DEBUG] GET /api/auth/login/\n2014-07-01 14:12:01 [120] [DEBUG] POST /api/auth/login/\n2014-07-01 14:12:14 [120] [DEBUG] GET /api/auth/login/\n2014-07-01 14:12:14 [120] [DEBUG] POST /api/auth/login/\n2014-07-01 14:12:16 [121] [DEBUG] DELETE /api/auth/cancel\nERROR Internal Server Error: /api/auth/cancel\nTraceback (most recent call last):\n File \"/usr/local/lib/python2.7/dist-packages/django/core/handlers/base.py\", line 114, in get_response\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\n File \"/usr/local/lib/python2.7/dist-packages/rest_framework/viewsets.py\", line 78, in view\n return self.dispatch(request, *args, **kwargs)\n File \"/usr/local/lib/python2.7/dist-packages/django/views/decorators/csrf.py\", line 57, in wrapped_view\n return view_func(*args, **kwargs)\n File \"/usr/local/lib/python2.7/dist-packages/rest_framework/views.py\", line 400, in dispatch\n response = self.handle_exception(exc)\n File \"/usr/local/lib/python2.7/dist-packages/rest_framework/views.py\", line 397, in dispatch\n response = handler(request, *args, **kwargs)\n File \"/app/api/views.py\", line 162, in destroy\n obj.delete()\n File \"/usr/local/lib/python2.7/dist-packages/django/db/models/base.py\", line 695, in delete\n collector.delete()\n File \"/usr/local/lib/python2.7/dist-packages/django/db/models/deletion.py\", line 282, in delete\n sender=model, instance=obj, using=self.using\n File \"/usr/local/lib/python2.7/dist-packages/django/dispatch/dispatcher.py\", line 185, in send\n response = receiver(signal=self, sender=sender, **named)\n File \"/app/api/models.py\", line 613, in _etcd_purge_user\n _etcd_client.delete('/deis/builder/users/{}'.format(username), dir=True, recursive=True)\n File \"/usr/local/lib/python2.7/dist-packages/etcd/client.py\", line 343, in delete\n self.key_endpoint + key, self._MDELETE, kwds)\n File \"/usr/local/lib/python2.7/dist-packages/etcd/client.py\", line 533, in api_execute\n return self._handle_server_response(response)\n File \"/usr/local/lib/python2.7/dist-packages/etcd/client.py\", line 549, in _handle_server_response\n etcd.EtcdError.handle(**r)\n File \"/usr/local/lib/python2.7/dist-packages/etcd/__init__.py\", line 110, in handle\n raise exc(msg)\nKeyError: 'Key not found : /deis/builder/users/Test'\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nData models for the Deis API.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport etcd\nimport importlib\nimport logging\nimport os\nimport subprocess\n\nfrom celery.canvas import group\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.db import models, connections\nfrom django.db.models import Max\nfrom django.db.models.signals import post_delete\nfrom django.db.models.signals import post_save\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django_fsm import FSMField, transition\nfrom django_fsm.signals import post_transition\nfrom json_field.fields import JSONField\n\nfrom api import fields, tasks\nfrom registry import publish_release\nfrom utils import dict_diff, fingerprint\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef log_event(app, msg, level=logging.INFO):\n msg = \"{}: {}\".format(app.id, msg)\n logger.log(level, msg)\n\n\ndef close_db_connections(func, *args, **kwargs):\n \"\"\"\n Decorator to close db connections during threaded execution\n\n Note this is necessary to work around:\n https://code.djangoproject.com/ticket/22420\n \"\"\"\n def _inner(*args, **kwargs):\n func(*args, **kwargs)\n for conn in connections.all():\n conn.close()\n return _inner\n\n\nclass AuditedModel(models.Model):\n \"\"\"Add created and updated fields to a model.\"\"\"\n\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n class Meta:\n \"\"\"Mark :class:`AuditedModel` as abstract.\"\"\"\n abstract = True\n\n\nclass UuidAuditedModel(AuditedModel):\n \"\"\"Add a UUID primary key to an :class:`AuditedModel`.\"\"\"\n\n uuid = fields.UuidField('UUID', primary_key=True)\n\n class Meta:\n \"\"\"Mark :class:`UuidAuditedModel` as abstract.\"\"\"\n abstract = True\n\n\n@python_2_unicode_compatible\nclass Cluster(UuidAuditedModel):\n \"\"\"\n Cluster used to run jobs\n \"\"\"\n\n CLUSTER_TYPES = (('mock', 'Mock Cluster'),\n ('coreos', 'CoreOS Cluster'),\n ('faulty', 'Faulty Cluster'))\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n id = models.CharField(max_length=128, unique=True)\n type = models.CharField(max_length=16, choices=CLUSTER_TYPES, default='coreos')\n\n domain = models.CharField(max_length=128)\n hosts = models.CharField(max_length=256)\n auth = models.TextField()\n options = JSONField(default='{}', blank=True)\n\n def __str__(self):\n return self.id\n\n def _get_scheduler(self, *args, **kwargs):\n module_name = 'scheduler.' + self.type\n mod = importlib.import_module(module_name)\n return mod.SchedulerClient(self.id, self.hosts, self.auth,\n self.domain, self.options)\n\n _scheduler = property(_get_scheduler)\n\n def create(self):\n \"\"\"\n Initialize a cluster's router and log aggregator\n \"\"\"\n return tasks.create_cluster.delay(self).get()\n\n def destroy(self):\n \"\"\"\n Destroy a cluster's router and log aggregator\n \"\"\"\n return tasks.destroy_cluster.delay(self).get()\n\n\n@python_2_unicode_compatible\nclass App(UuidAuditedModel):\n \"\"\"\n Application used to service requests on behalf of end-users\n \"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n id = models.SlugField(max_length=64, unique=True)\n cluster = models.ForeignKey('Cluster')\n structure = JSONField(default='{}', blank=True)\n\n class Meta:\n permissions = (('use_app', 'Can use app'),)\n\n def __str__(self):\n return self.id\n\n def create(self, *args, **kwargs):\n config = Config.objects.create(owner=self.owner, app=self, values={})\n build = Build.objects.create(owner=self.owner, app=self, image=settings.DEFAULT_BUILD)\n Release.objects.create(version=1, owner=self.owner, app=self, config=config, build=build)\n\n def delete(self, *args, **kwargs):\n for c in self.container_set.all():\n c.destroy()\n return super(App, self).delete(*args, **kwargs)\n\n def deploy(self, release, initial=False):\n tasks.deploy_release.delay(self, release).get()\n if initial:\n # if there is no SHA, assume a docker image is being promoted\n if not release.build.sha:\n self.structure = {'cmd': 1}\n # if a dockerfile exists without a procfile, assume docker workflow\n elif release.build.dockerfile and not release.build.procfile:\n self.structure = {'cmd': 1}\n # if a procfile exists without a web entry, assume docker workflow\n elif release.build.procfile and not 'web' in release.build.procfile:\n self.structure = {'cmd': 1}\n # default to heroku workflow\n else:\n self.structure = {'web': 1}\n self.save()\n self.scale()\n\n def destroy(self, *args, **kwargs):\n return self.delete(*args, **kwargs)\n\n def scale(self, **kwargs): # noqa\n \"\"\"Scale containers up or down to match requested.\"\"\"\n requested_containers = self.structure.copy()\n release = self.release_set.latest()\n # test for available process types\n available_process_types = release.build.procfile or {}\n for container_type in requested_containers.keys():\n if container_type == 'cmd':\n continue # allow docker cmd types in case we don't have the image source\n if not container_type in available_process_types:\n raise EnvironmentError(\n 'Container type {} does not exist in application'.format(container_type))\n msg = 'Containers scaled ' + ' '.join(\n \"{}={}\".format(k, v) for k, v in requested_containers.items())\n # iterate and scale by container type (web, worker, etc)\n changed = False\n to_add, to_remove = [], []\n for container_type in requested_containers.keys():\n containers = list(self.container_set.filter(type=container_type).order_by('created'))\n # increment new container nums off the most recent container\n results = self.container_set.filter(type=container_type).aggregate(Max('num'))\n container_num = (results.get('num__max') or 0) + 1\n requested = requested_containers.pop(container_type)\n diff = requested - len(containers)\n if diff == 0:\n continue\n changed = True\n while diff < 0:\n c = containers.pop()\n to_remove.append(c)\n diff += 1\n while diff > 0:\n c = Container.objects.create(owner=self.owner,\n app=self,\n release=release,\n type=container_type,\n num=container_num)\n to_add.append(c)\n container_num += 1\n diff -= 1\n if changed:\n subtasks = []\n if to_add:\n subtasks.append(tasks.start_containers.s(to_add))\n if to_remove:\n subtasks.append(tasks.stop_containers.s(to_remove))\n group(*subtasks).apply_async().join()\n log_event(self, msg)\n return changed\n\n def logs(self):\n \"\"\"Return aggregated log data for this application.\"\"\"\n path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')\n if not os.path.exists(path):\n raise EnvironmentError('Could not locate logs')\n data = subprocess.check_output(['tail', '-n', str(settings.LOG_LINES), path])\n return data\n\n def run(self, command):\n \"\"\"Run a one-off command in an ephemeral app container.\"\"\"\n # TODO: add support for interactive shell\n log_event(self, \"deis run '{}'\".format(command))\n c_num = max([c.num for c in self.container_set.filter(type='admin')] or [0]) + 1\n c = Container.objects.create(owner=self.owner,\n app=self,\n release=self.release_set.latest(),\n type='admin',\n num=c_num)\n rc, output = tasks.run_command.delay(c, command).get()\n return rc, output\n\n\n@python_2_unicode_compatible\nclass Container(UuidAuditedModel):\n \"\"\"\n Docker container used to securely host an application process.\n \"\"\"\n INITIALIZED = 'initialized'\n CREATED = 'created'\n UP = 'up'\n DOWN = 'down'\n DESTROYED = 'destroyed'\n STATE_CHOICES = (\n (INITIALIZED, 'initialized'),\n (CREATED, 'created'),\n (UP, 'up'),\n (DOWN, 'down'),\n (DESTROYED, 'destroyed')\n )\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n release = models.ForeignKey('Release')\n type = models.CharField(max_length=128, blank=True)\n num = models.PositiveIntegerField()\n state = FSMField(default=INITIALIZED, choices=STATE_CHOICES, protected=True)\n\n def short_name(self):\n if self.type:\n return \"{}.{}.{}\".format(self.release.app.id, self.type, self.num)\n return \"{}.{}\".format(self.release.app.id, self.num)\n short_name.short_description = 'Name'\n\n def __str__(self):\n return self.short_name()\n\n class Meta:\n get_latest_by = '-created'\n ordering = ['created']\n\n def _get_job_id(self):\n app = self.app.id\n release = self.release\n version = \"v{}\".format(release.version)\n num = self.num\n c_type = self.type\n if not c_type:\n job_id = \"{app}_{version}.{num}\".format(**locals())\n else:\n job_id = \"{app}_{version}.{c_type}.{num}\".format(**locals())\n return job_id\n\n _job_id = property(_get_job_id)\n\n def _get_scheduler(self):\n return self.app.cluster._scheduler\n\n _scheduler = property(_get_scheduler)\n\n def _get_command(self):\n c_type = self.type\n if c_type:\n # handle special case for Dockerfile deployments\n if c_type == 'cmd':\n return ''\n else:\n return \"start {}\".format(c_type)\n else:\n return ''\n\n _command = property(_get_command)\n\n def _command_announceable(self):\n return self._command.lower() in ['start web', '']\n\n @close_db_connections\n @transition(field=state, source=INITIALIZED, target=CREATED)\n def create(self):\n image = self.release.image\n self._scheduler.create(name=self._job_id,\n image=image,\n command=self._command,\n use_announcer=self._command_announceable())\n\n @close_db_connections\n @transition(field=state,\n source=[CREATED, UP, DOWN],\n target=UP, crashed=DOWN)\n def start(self):\n self._scheduler.start(self._job_id, self._command_announceable())\n\n @close_db_connections\n @transition(field=state,\n source=[INITIALIZED, CREATED, UP, DOWN],\n target=UP,\n crashed=DOWN)\n def deploy(self, release):\n old_job_id = self._job_id\n # update release\n self.release = release\n self.save()\n # deploy new container\n new_job_id = self._job_id\n image = self.release.image\n c_type = self.type\n self._scheduler.create(name=new_job_id,\n image=image,\n command=self._command.format(**locals()),\n use_announcer=self._command_announceable())\n self._scheduler.start(new_job_id, self._command_announceable())\n # destroy old container\n self._scheduler.destroy(old_job_id, self._command_announceable())\n\n @close_db_connections\n @transition(field=state, source=UP, target=DOWN)\n def stop(self):\n self._scheduler.stop(self._job_id, self._command_announceable())\n\n @close_db_connections\n @transition(field=state,\n source=[INITIALIZED, CREATED, UP, DOWN],\n target=DESTROYED)\n def destroy(self):\n # TODO: add check for active connections before killing\n self._scheduler.destroy(self._job_id, self._command_announceable())\n\n @transition(field=state,\n source=[INITIALIZED, CREATED, DESTROYED],\n target=DESTROYED)\n def run(self, command):\n \"\"\"Run a one-off command\"\"\"\n rc, output = self._scheduler.run(self._job_id, self.release.image, command)\n return rc, output\n\n\n@python_2_unicode_compatible\nclass Push(UuidAuditedModel):\n \"\"\"\n Instance of a push used to trigger an application build\n \"\"\"\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n sha = models.CharField(max_length=40)\n\n fingerprint = models.CharField(max_length=255)\n receive_user = models.CharField(max_length=255)\n receive_repo = models.CharField(max_length=255)\n\n ssh_connection = models.CharField(max_length=255)\n ssh_original_command = models.CharField(max_length=255)\n\n class Meta:\n get_latest_by = 'created'\n ordering = ['-created']\n unique_together = (('app', 'uuid'),)\n\n def __str__(self):\n return \"{0}-{1}\".format(self.app.id, self.sha[:7])\n\n\n@python_2_unicode_compatible\nclass Build(UuidAuditedModel):\n \"\"\"\n Instance of a software build used by runtime nodes\n \"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n image = models.CharField(max_length=256)\n\n # optional fields populated by builder\n sha = models.CharField(max_length=40, blank=True)\n procfile = JSONField(default='{}', blank=True)\n dockerfile = models.TextField(blank=True)\n\n class Meta:\n get_latest_by = 'created'\n ordering = ['-created']\n unique_together = (('app', 'uuid'),)\n\n def __str__(self):\n return \"{0}-{1}\".format(self.app.id, self.uuid[:7])\n\n\n@python_2_unicode_compatible\nclass Config(UuidAuditedModel):\n \"\"\"\n Set of configuration values applied as environment variables\n during runtime execution of the Application.\n \"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n values = JSONField(default='{}', blank=True)\n\n class Meta:\n get_latest_by = 'created'\n ordering = ['-created']\n unique_together = (('app', 'uuid'),)\n\n def __str__(self):\n return \"{}-{}\".format(self.app.id, self.uuid[:7])\n\n\n@python_2_unicode_compatible\nclass Release(UuidAuditedModel):\n \"\"\"\n Software release deployed by the application platform\n\n Releases contain a :class:`Build` and a :class:`Config`.\n \"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n version = models.PositiveIntegerField()\n summary = models.TextField(blank=True, null=True)\n\n config = models.ForeignKey('Config')\n build = models.ForeignKey('Build')\n # NOTE: image contains combined build + config, ready to run\n image = models.CharField(max_length=256)\n\n class Meta:\n get_latest_by = 'created'\n ordering = ['-created']\n unique_together = (('app', 'version'),)\n\n def __str__(self):\n return \"{0}-v{1}\".format(self.app.id, self.version)\n\n def new(self, user, config=None, build=None, summary=None, source_version=None):\n \"\"\"\n Create a new application release using the provided Build and Config\n on behalf of a user.\n\n Releases start at v1 and auto-increment.\n \"\"\"\n if not config:\n config = self.config\n if not build:\n build = self.build\n if not source_version:\n source_version = 'latest'\n else:\n source_version = 'v{}'.format(source_version)\n # prepare release tag\n new_version = self.version + 1\n tag = 'v{}'.format(new_version)\n image = build.image + ':{tag}'.format(**locals())\n # create new release and auto-increment version\n release = Release.objects.create(\n owner=user, app=self.app, config=config,\n build=build, version=new_version, image=image, summary=summary)\n # publish release to registry as new docker image\n repository_path = self.app.id\n publish_release(repository_path,\n config.values,\n tag,\n source_tag=source_version)\n return release\n\n def previous(self):\n \"\"\"\n Return the previous Release to this one.\n\n :return: the previous :class:`Release`, or None\n \"\"\"\n releases = self.app.release_set\n if self.pk:\n releases = releases.exclude(pk=self.pk)\n try:\n # Get the Release previous to this one\n prev_release = releases.latest()\n except Release.DoesNotExist:\n prev_release = None\n return prev_release\n\n def save(self, *args, **kwargs):\n if not self.summary:\n self.summary = ''\n prev_release = self.previous()\n # compare this build to the previous build\n old_build = prev_release.build if prev_release else None\n # if the build changed, log it and who pushed it\n if self.build != old_build:\n if self.build.sha:\n self.summary += \"{} deployed {}\".format(self.build.owner, self.build.sha[:7])\n else:\n self.summary += \"{} deployed {}\".format(self.build.owner, self.build.image)\n # compare this config to the previous config\n old_config = prev_release.config if prev_release else None\n # if the config data changed, log the dict diff\n if self.config != old_config:\n dict1 = self.config.values\n dict2 = old_config.values if old_config else {}\n diff = dict_diff(dict1, dict2)\n # try to be as succinct as possible\n added = ', '.join(k for k in diff.get('added', {}))\n added = 'added ' + added if added else ''\n changed = ', '.join(k for k in diff.get('changed', {}))\n changed = 'changed ' + changed if changed else ''\n deleted = ', '.join(k for k in diff.get('deleted', {}))\n deleted = 'deleted ' + deleted if deleted else ''\n changes = ', '.join(i for i in (added, changed, deleted) if i)\n if changes:\n if self.summary:\n self.summary += ' and '\n self.summary += \"{} {}\".format(self.config.owner, changes)\n if not self.summary:\n if self.version == 1:\n self.summary = \"{} created the initial release\".format(self.owner)\n else:\n self.summary = \"{} changed nothing\".format(self.owner)\n super(Release, self).save(*args, **kwargs)\n\n\n@python_2_unicode_compatible\nclass Domain(AuditedModel):\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n domain = models.TextField(blank=False, null=False, unique=True)\n\n def __str__(self):\n return self.domain\n\n\n@python_2_unicode_compatible\nclass Key(UuidAuditedModel):\n \"\"\"An SSH public key.\"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n id = models.CharField(max_length=128)\n public = models.TextField(unique=True)\n\n class Meta:\n verbose_name = 'SSH Key'\n unique_together = (('owner', 'id'))\n\n def __str__(self):\n return \"{}...{}\".format(self.public[:18], self.public[-31:])\n\n\n# define update/delete callbacks for synchronizing\n# models with the configuration management backend\n\ndef _log_build_created(**kwargs):\n if kwargs.get('created'):\n build = kwargs['instance']\n log_event(build.app, \"Build {} created\".format(build))\n\n\ndef _log_release_created(**kwargs):\n if kwargs.get('created'):\n release = kwargs['instance']\n log_event(release.app, \"Release {} created\".format(release))\n\n\ndef _log_config_updated(**kwargs):\n config = kwargs['instance']\n log_event(config.app, \"Config {} updated\".format(config))\n\n\ndef _log_domain_added(**kwargs):\n domain = kwargs['instance']\n log_event(domain.app, \"Domain {} added\".format(domain))\n\n\ndef _log_domain_removed(**kwargs):\n domain = kwargs['instance']\n log_event(domain.app, \"Domain {} removed\".format(domain))\n\n\ndef _etcd_publish_key(**kwargs):\n key = kwargs['instance']\n _etcd_client.write('/deis/builder/users/{}/{}'.format(\n key.owner.username, fingerprint(key.public)), key.public)\n\n\ndef _etcd_purge_key(**kwargs):\n key = kwargs['instance']\n _etcd_client.delete('/deis/builder/users/{}/{}'.format(\n key.owner.username, fingerprint(key.public)))\n\n\ndef _etcd_purge_user(**kwargs):\n username = kwargs['instance'].username\n _etcd_client.delete('/deis/builder/users/{}'.format(username), dir=True, recursive=True)\n\n\ndef _etcd_publish_domains(**kwargs):\n app = kwargs['instance'].app\n app_domains = app.domain_set.all()\n if app_domains:\n _etcd_client.write('/deis/domains/{}'.format(app),\n ' '.join(str(d.domain) for d in app_domains))\n else:\n _etcd_client.delete('/deis/domains/{}'.format(app))\n\n\n# Log significant app-related events\npost_save.connect(_log_build_created, sender=Build, dispatch_uid='api.models.log')\npost_save.connect(_log_release_created, sender=Release, dispatch_uid='api.models.log')\npost_save.connect(_log_config_updated, sender=Config, dispatch_uid='api.models.log')\npost_save.connect(_log_domain_added, sender=Domain, dispatch_uid='api.models.log')\npost_delete.connect(_log_domain_removed, sender=Domain, dispatch_uid='api.models.log')\n\n\n# save FSM transitions as they happen\ndef _save_transition(**kwargs):\n kwargs['instance'].save()\n\npost_transition.connect(_save_transition)\n\n# wire up etcd publishing if we can connect\ntry:\n _etcd_client = etcd.Client(host=settings.ETCD_HOST, port=int(settings.ETCD_PORT))\n _etcd_client.get('/deis')\nexcept etcd.EtcdException:\n logger.log(logging.WARNING, 'Cannot synchronize with etcd cluster')\n _etcd_client = None\n\nif _etcd_client:\n post_save.connect(_etcd_publish_key, sender=Key, dispatch_uid='api.models')\n post_delete.connect(_etcd_purge_key, sender=Key, dispatch_uid='api.models')\n post_delete.connect(_etcd_purge_user, sender=User, dispatch_uid='api.models')\n post_save.connect(_etcd_publish_domains, sender=Domain, dispatch_uid='api.models')\n post_delete.connect(_etcd_publish_domains, sender=Domain, dispatch_uid='api.models')\n", "path": "controller/api/models.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nData models for the Deis API.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport etcd\nimport importlib\nimport logging\nimport os\nimport subprocess\n\nfrom celery.canvas import group\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.db import models, connections\nfrom django.db.models import Max\nfrom django.db.models.signals import post_delete\nfrom django.db.models.signals import post_save\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django_fsm import FSMField, transition\nfrom django_fsm.signals import post_transition\nfrom json_field.fields import JSONField\n\nfrom api import fields, tasks\nfrom registry import publish_release\nfrom utils import dict_diff, fingerprint\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef log_event(app, msg, level=logging.INFO):\n msg = \"{}: {}\".format(app.id, msg)\n logger.log(level, msg)\n\n\ndef close_db_connections(func, *args, **kwargs):\n \"\"\"\n Decorator to close db connections during threaded execution\n\n Note this is necessary to work around:\n https://code.djangoproject.com/ticket/22420\n \"\"\"\n def _inner(*args, **kwargs):\n func(*args, **kwargs)\n for conn in connections.all():\n conn.close()\n return _inner\n\n\nclass AuditedModel(models.Model):\n \"\"\"Add created and updated fields to a model.\"\"\"\n\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n class Meta:\n \"\"\"Mark :class:`AuditedModel` as abstract.\"\"\"\n abstract = True\n\n\nclass UuidAuditedModel(AuditedModel):\n \"\"\"Add a UUID primary key to an :class:`AuditedModel`.\"\"\"\n\n uuid = fields.UuidField('UUID', primary_key=True)\n\n class Meta:\n \"\"\"Mark :class:`UuidAuditedModel` as abstract.\"\"\"\n abstract = True\n\n\n@python_2_unicode_compatible\nclass Cluster(UuidAuditedModel):\n \"\"\"\n Cluster used to run jobs\n \"\"\"\n\n CLUSTER_TYPES = (('mock', 'Mock Cluster'),\n ('coreos', 'CoreOS Cluster'),\n ('faulty', 'Faulty Cluster'))\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n id = models.CharField(max_length=128, unique=True)\n type = models.CharField(max_length=16, choices=CLUSTER_TYPES, default='coreos')\n\n domain = models.CharField(max_length=128)\n hosts = models.CharField(max_length=256)\n auth = models.TextField()\n options = JSONField(default='{}', blank=True)\n\n def __str__(self):\n return self.id\n\n def _get_scheduler(self, *args, **kwargs):\n module_name = 'scheduler.' + self.type\n mod = importlib.import_module(module_name)\n return mod.SchedulerClient(self.id, self.hosts, self.auth,\n self.domain, self.options)\n\n _scheduler = property(_get_scheduler)\n\n def create(self):\n \"\"\"\n Initialize a cluster's router and log aggregator\n \"\"\"\n return tasks.create_cluster.delay(self).get()\n\n def destroy(self):\n \"\"\"\n Destroy a cluster's router and log aggregator\n \"\"\"\n return tasks.destroy_cluster.delay(self).get()\n\n\n@python_2_unicode_compatible\nclass App(UuidAuditedModel):\n \"\"\"\n Application used to service requests on behalf of end-users\n \"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n id = models.SlugField(max_length=64, unique=True)\n cluster = models.ForeignKey('Cluster')\n structure = JSONField(default='{}', blank=True)\n\n class Meta:\n permissions = (('use_app', 'Can use app'),)\n\n def __str__(self):\n return self.id\n\n def create(self, *args, **kwargs):\n config = Config.objects.create(owner=self.owner, app=self, values={})\n build = Build.objects.create(owner=self.owner, app=self, image=settings.DEFAULT_BUILD)\n Release.objects.create(version=1, owner=self.owner, app=self, config=config, build=build)\n\n def delete(self, *args, **kwargs):\n for c in self.container_set.all():\n c.destroy()\n return super(App, self).delete(*args, **kwargs)\n\n def deploy(self, release, initial=False):\n tasks.deploy_release.delay(self, release).get()\n if initial:\n # if there is no SHA, assume a docker image is being promoted\n if not release.build.sha:\n self.structure = {'cmd': 1}\n # if a dockerfile exists without a procfile, assume docker workflow\n elif release.build.dockerfile and not release.build.procfile:\n self.structure = {'cmd': 1}\n # if a procfile exists without a web entry, assume docker workflow\n elif release.build.procfile and not 'web' in release.build.procfile:\n self.structure = {'cmd': 1}\n # default to heroku workflow\n else:\n self.structure = {'web': 1}\n self.save()\n self.scale()\n\n def destroy(self, *args, **kwargs):\n return self.delete(*args, **kwargs)\n\n def scale(self, **kwargs): # noqa\n \"\"\"Scale containers up or down to match requested.\"\"\"\n requested_containers = self.structure.copy()\n release = self.release_set.latest()\n # test for available process types\n available_process_types = release.build.procfile or {}\n for container_type in requested_containers.keys():\n if container_type == 'cmd':\n continue # allow docker cmd types in case we don't have the image source\n if not container_type in available_process_types:\n raise EnvironmentError(\n 'Container type {} does not exist in application'.format(container_type))\n msg = 'Containers scaled ' + ' '.join(\n \"{}={}\".format(k, v) for k, v in requested_containers.items())\n # iterate and scale by container type (web, worker, etc)\n changed = False\n to_add, to_remove = [], []\n for container_type in requested_containers.keys():\n containers = list(self.container_set.filter(type=container_type).order_by('created'))\n # increment new container nums off the most recent container\n results = self.container_set.filter(type=container_type).aggregate(Max('num'))\n container_num = (results.get('num__max') or 0) + 1\n requested = requested_containers.pop(container_type)\n diff = requested - len(containers)\n if diff == 0:\n continue\n changed = True\n while diff < 0:\n c = containers.pop()\n to_remove.append(c)\n diff += 1\n while diff > 0:\n c = Container.objects.create(owner=self.owner,\n app=self,\n release=release,\n type=container_type,\n num=container_num)\n to_add.append(c)\n container_num += 1\n diff -= 1\n if changed:\n subtasks = []\n if to_add:\n subtasks.append(tasks.start_containers.s(to_add))\n if to_remove:\n subtasks.append(tasks.stop_containers.s(to_remove))\n group(*subtasks).apply_async().join()\n log_event(self, msg)\n return changed\n\n def logs(self):\n \"\"\"Return aggregated log data for this application.\"\"\"\n path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')\n if not os.path.exists(path):\n raise EnvironmentError('Could not locate logs')\n data = subprocess.check_output(['tail', '-n', str(settings.LOG_LINES), path])\n return data\n\n def run(self, command):\n \"\"\"Run a one-off command in an ephemeral app container.\"\"\"\n # TODO: add support for interactive shell\n log_event(self, \"deis run '{}'\".format(command))\n c_num = max([c.num for c in self.container_set.filter(type='admin')] or [0]) + 1\n c = Container.objects.create(owner=self.owner,\n app=self,\n release=self.release_set.latest(),\n type='admin',\n num=c_num)\n rc, output = tasks.run_command.delay(c, command).get()\n return rc, output\n\n\n@python_2_unicode_compatible\nclass Container(UuidAuditedModel):\n \"\"\"\n Docker container used to securely host an application process.\n \"\"\"\n INITIALIZED = 'initialized'\n CREATED = 'created'\n UP = 'up'\n DOWN = 'down'\n DESTROYED = 'destroyed'\n STATE_CHOICES = (\n (INITIALIZED, 'initialized'),\n (CREATED, 'created'),\n (UP, 'up'),\n (DOWN, 'down'),\n (DESTROYED, 'destroyed')\n )\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n release = models.ForeignKey('Release')\n type = models.CharField(max_length=128, blank=True)\n num = models.PositiveIntegerField()\n state = FSMField(default=INITIALIZED, choices=STATE_CHOICES, protected=True)\n\n def short_name(self):\n if self.type:\n return \"{}.{}.{}\".format(self.release.app.id, self.type, self.num)\n return \"{}.{}\".format(self.release.app.id, self.num)\n short_name.short_description = 'Name'\n\n def __str__(self):\n return self.short_name()\n\n class Meta:\n get_latest_by = '-created'\n ordering = ['created']\n\n def _get_job_id(self):\n app = self.app.id\n release = self.release\n version = \"v{}\".format(release.version)\n num = self.num\n c_type = self.type\n if not c_type:\n job_id = \"{app}_{version}.{num}\".format(**locals())\n else:\n job_id = \"{app}_{version}.{c_type}.{num}\".format(**locals())\n return job_id\n\n _job_id = property(_get_job_id)\n\n def _get_scheduler(self):\n return self.app.cluster._scheduler\n\n _scheduler = property(_get_scheduler)\n\n def _get_command(self):\n c_type = self.type\n if c_type:\n # handle special case for Dockerfile deployments\n if c_type == 'cmd':\n return ''\n else:\n return \"start {}\".format(c_type)\n else:\n return ''\n\n _command = property(_get_command)\n\n def _command_announceable(self):\n return self._command.lower() in ['start web', '']\n\n @close_db_connections\n @transition(field=state, source=INITIALIZED, target=CREATED)\n def create(self):\n image = self.release.image\n self._scheduler.create(name=self._job_id,\n image=image,\n command=self._command,\n use_announcer=self._command_announceable())\n\n @close_db_connections\n @transition(field=state,\n source=[CREATED, UP, DOWN],\n target=UP, crashed=DOWN)\n def start(self):\n self._scheduler.start(self._job_id, self._command_announceable())\n\n @close_db_connections\n @transition(field=state,\n source=[INITIALIZED, CREATED, UP, DOWN],\n target=UP,\n crashed=DOWN)\n def deploy(self, release):\n old_job_id = self._job_id\n # update release\n self.release = release\n self.save()\n # deploy new container\n new_job_id = self._job_id\n image = self.release.image\n c_type = self.type\n self._scheduler.create(name=new_job_id,\n image=image,\n command=self._command.format(**locals()),\n use_announcer=self._command_announceable())\n self._scheduler.start(new_job_id, self._command_announceable())\n # destroy old container\n self._scheduler.destroy(old_job_id, self._command_announceable())\n\n @close_db_connections\n @transition(field=state, source=UP, target=DOWN)\n def stop(self):\n self._scheduler.stop(self._job_id, self._command_announceable())\n\n @close_db_connections\n @transition(field=state,\n source=[INITIALIZED, CREATED, UP, DOWN],\n target=DESTROYED)\n def destroy(self):\n # TODO: add check for active connections before killing\n self._scheduler.destroy(self._job_id, self._command_announceable())\n\n @transition(field=state,\n source=[INITIALIZED, CREATED, DESTROYED],\n target=DESTROYED)\n def run(self, command):\n \"\"\"Run a one-off command\"\"\"\n rc, output = self._scheduler.run(self._job_id, self.release.image, command)\n return rc, output\n\n\n@python_2_unicode_compatible\nclass Push(UuidAuditedModel):\n \"\"\"\n Instance of a push used to trigger an application build\n \"\"\"\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n sha = models.CharField(max_length=40)\n\n fingerprint = models.CharField(max_length=255)\n receive_user = models.CharField(max_length=255)\n receive_repo = models.CharField(max_length=255)\n\n ssh_connection = models.CharField(max_length=255)\n ssh_original_command = models.CharField(max_length=255)\n\n class Meta:\n get_latest_by = 'created'\n ordering = ['-created']\n unique_together = (('app', 'uuid'),)\n\n def __str__(self):\n return \"{0}-{1}\".format(self.app.id, self.sha[:7])\n\n\n@python_2_unicode_compatible\nclass Build(UuidAuditedModel):\n \"\"\"\n Instance of a software build used by runtime nodes\n \"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n image = models.CharField(max_length=256)\n\n # optional fields populated by builder\n sha = models.CharField(max_length=40, blank=True)\n procfile = JSONField(default='{}', blank=True)\n dockerfile = models.TextField(blank=True)\n\n class Meta:\n get_latest_by = 'created'\n ordering = ['-created']\n unique_together = (('app', 'uuid'),)\n\n def __str__(self):\n return \"{0}-{1}\".format(self.app.id, self.uuid[:7])\n\n\n@python_2_unicode_compatible\nclass Config(UuidAuditedModel):\n \"\"\"\n Set of configuration values applied as environment variables\n during runtime execution of the Application.\n \"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n values = JSONField(default='{}', blank=True)\n\n class Meta:\n get_latest_by = 'created'\n ordering = ['-created']\n unique_together = (('app', 'uuid'),)\n\n def __str__(self):\n return \"{}-{}\".format(self.app.id, self.uuid[:7])\n\n\n@python_2_unicode_compatible\nclass Release(UuidAuditedModel):\n \"\"\"\n Software release deployed by the application platform\n\n Releases contain a :class:`Build` and a :class:`Config`.\n \"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n version = models.PositiveIntegerField()\n summary = models.TextField(blank=True, null=True)\n\n config = models.ForeignKey('Config')\n build = models.ForeignKey('Build')\n # NOTE: image contains combined build + config, ready to run\n image = models.CharField(max_length=256)\n\n class Meta:\n get_latest_by = 'created'\n ordering = ['-created']\n unique_together = (('app', 'version'),)\n\n def __str__(self):\n return \"{0}-v{1}\".format(self.app.id, self.version)\n\n def new(self, user, config=None, build=None, summary=None, source_version=None):\n \"\"\"\n Create a new application release using the provided Build and Config\n on behalf of a user.\n\n Releases start at v1 and auto-increment.\n \"\"\"\n if not config:\n config = self.config\n if not build:\n build = self.build\n if not source_version:\n source_version = 'latest'\n else:\n source_version = 'v{}'.format(source_version)\n # prepare release tag\n new_version = self.version + 1\n tag = 'v{}'.format(new_version)\n image = build.image + ':{tag}'.format(**locals())\n # create new release and auto-increment version\n release = Release.objects.create(\n owner=user, app=self.app, config=config,\n build=build, version=new_version, image=image, summary=summary)\n # publish release to registry as new docker image\n repository_path = self.app.id\n publish_release(repository_path,\n config.values,\n tag,\n source_tag=source_version)\n return release\n\n def previous(self):\n \"\"\"\n Return the previous Release to this one.\n\n :return: the previous :class:`Release`, or None\n \"\"\"\n releases = self.app.release_set\n if self.pk:\n releases = releases.exclude(pk=self.pk)\n try:\n # Get the Release previous to this one\n prev_release = releases.latest()\n except Release.DoesNotExist:\n prev_release = None\n return prev_release\n\n def save(self, *args, **kwargs):\n if not self.summary:\n self.summary = ''\n prev_release = self.previous()\n # compare this build to the previous build\n old_build = prev_release.build if prev_release else None\n # if the build changed, log it and who pushed it\n if self.build != old_build:\n if self.build.sha:\n self.summary += \"{} deployed {}\".format(self.build.owner, self.build.sha[:7])\n else:\n self.summary += \"{} deployed {}\".format(self.build.owner, self.build.image)\n # compare this config to the previous config\n old_config = prev_release.config if prev_release else None\n # if the config data changed, log the dict diff\n if self.config != old_config:\n dict1 = self.config.values\n dict2 = old_config.values if old_config else {}\n diff = dict_diff(dict1, dict2)\n # try to be as succinct as possible\n added = ', '.join(k for k in diff.get('added', {}))\n added = 'added ' + added if added else ''\n changed = ', '.join(k for k in diff.get('changed', {}))\n changed = 'changed ' + changed if changed else ''\n deleted = ', '.join(k for k in diff.get('deleted', {}))\n deleted = 'deleted ' + deleted if deleted else ''\n changes = ', '.join(i for i in (added, changed, deleted) if i)\n if changes:\n if self.summary:\n self.summary += ' and '\n self.summary += \"{} {}\".format(self.config.owner, changes)\n if not self.summary:\n if self.version == 1:\n self.summary = \"{} created the initial release\".format(self.owner)\n else:\n self.summary = \"{} changed nothing\".format(self.owner)\n super(Release, self).save(*args, **kwargs)\n\n\n@python_2_unicode_compatible\nclass Domain(AuditedModel):\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n domain = models.TextField(blank=False, null=False, unique=True)\n\n def __str__(self):\n return self.domain\n\n\n@python_2_unicode_compatible\nclass Key(UuidAuditedModel):\n \"\"\"An SSH public key.\"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n id = models.CharField(max_length=128)\n public = models.TextField(unique=True)\n\n class Meta:\n verbose_name = 'SSH Key'\n unique_together = (('owner', 'id'))\n\n def __str__(self):\n return \"{}...{}\".format(self.public[:18], self.public[-31:])\n\n\n# define update/delete callbacks for synchronizing\n# models with the configuration management backend\n\ndef _log_build_created(**kwargs):\n if kwargs.get('created'):\n build = kwargs['instance']\n log_event(build.app, \"Build {} created\".format(build))\n\n\ndef _log_release_created(**kwargs):\n if kwargs.get('created'):\n release = kwargs['instance']\n log_event(release.app, \"Release {} created\".format(release))\n\n\ndef _log_config_updated(**kwargs):\n config = kwargs['instance']\n log_event(config.app, \"Config {} updated\".format(config))\n\n\ndef _log_domain_added(**kwargs):\n domain = kwargs['instance']\n log_event(domain.app, \"Domain {} added\".format(domain))\n\n\ndef _log_domain_removed(**kwargs):\n domain = kwargs['instance']\n log_event(domain.app, \"Domain {} removed\".format(domain))\n\n\ndef _etcd_publish_key(**kwargs):\n key = kwargs['instance']\n _etcd_client.write('/deis/builder/users/{}/{}'.format(\n key.owner.username, fingerprint(key.public)), key.public)\n\n\ndef _etcd_purge_key(**kwargs):\n key = kwargs['instance']\n _etcd_client.delete('/deis/builder/users/{}/{}'.format(\n key.owner.username, fingerprint(key.public)))\n\n\ndef _etcd_purge_user(**kwargs):\n username = kwargs['instance'].username\n try:\n _etcd_client.delete(\n '/deis/builder/users/{}'.format(username), dir=True, recursive=True)\n except KeyError:\n # If _etcd_publish_key() wasn't called, there is no user dir to delete.\n pass\n\n\ndef _etcd_publish_domains(**kwargs):\n app = kwargs['instance'].app\n app_domains = app.domain_set.all()\n if app_domains:\n _etcd_client.write('/deis/domains/{}'.format(app),\n ' '.join(str(d.domain) for d in app_domains))\n else:\n _etcd_client.delete('/deis/domains/{}'.format(app))\n\n\n# Log significant app-related events\npost_save.connect(_log_build_created, sender=Build, dispatch_uid='api.models.log')\npost_save.connect(_log_release_created, sender=Release, dispatch_uid='api.models.log')\npost_save.connect(_log_config_updated, sender=Config, dispatch_uid='api.models.log')\npost_save.connect(_log_domain_added, sender=Domain, dispatch_uid='api.models.log')\npost_delete.connect(_log_domain_removed, sender=Domain, dispatch_uid='api.models.log')\n\n\n# save FSM transitions as they happen\ndef _save_transition(**kwargs):\n kwargs['instance'].save()\n\npost_transition.connect(_save_transition)\n\n# wire up etcd publishing if we can connect\ntry:\n _etcd_client = etcd.Client(host=settings.ETCD_HOST, port=int(settings.ETCD_PORT))\n _etcd_client.get('/deis')\nexcept etcd.EtcdException:\n logger.log(logging.WARNING, 'Cannot synchronize with etcd cluster')\n _etcd_client = None\n\nif _etcd_client:\n post_save.connect(_etcd_publish_key, sender=Key, dispatch_uid='api.models')\n post_delete.connect(_etcd_purge_key, sender=Key, dispatch_uid='api.models')\n post_delete.connect(_etcd_purge_user, sender=User, dispatch_uid='api.models')\n post_save.connect(_etcd_publish_domains, sender=Domain, dispatch_uid='api.models')\n post_delete.connect(_etcd_publish_domains, sender=Domain, dispatch_uid='api.models')\n", "path": "controller/api/models.py"}]} |
gh_patches_debug_1509 | rasdani/github-patches | git_diff | xonsh__xonsh-5328 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unexpected exception while updating completions
<!--- Provide a general summary of the issue in the Title above -->
When I set $UPDATE_COMPLETIONS_ON_KEYPRESS = True and type for instance /usr/bin/ls -a in terminal, following exception is thrown: "Exception [Errno 13] Permission denied: '/usr/bin/ls.json'"
<!--- If you have a question along the lines of "How do I do this Bash command in xonsh"
please first look over the Bash to Xonsh translation guide: https://xon.sh/bash_to_xsh.html
If you don't find an answer there, please do open an issue! -->
## xonfig
<details>
```
+------------------+----------------------+
| xonsh | 0.13.4 |
| Python | 3.8.10 |
| PLY | 3.11 |
| have readline | True |
| prompt toolkit | 3.0.36 |
| shell type | prompt_toolkit |
| history backend | json |
| pygments | 2.14.0 |
| on posix | True |
| on linux | True |
| distro | ubuntu |
| on wsl | False |
| on darwin | False |
| on windows | False |
| on cygwin | False |
| on msys2 | False |
| is superuser | False |
| default encoding | utf-8 |
| xonsh encoding | utf-8 |
| encoding errors | surrogateescape |
| xontrib | [] |
| RC file 1 | /home/ralis/.xonshrc |
+------------------+----------------------+
```
</details>
## Expected Behavior
<!--- Tell us what should happen -->
The warning should be either more subtle or no completion suggestions should be shown.
## Current Behavior
<!--- Tell us what happens instead of the expected behavior -->
Huge multi-line error is printed.
<!--- If part of your bug report is a traceback, please first enter debug mode before triggering the error
To enter debug mode, set the environment variable `XONSH_DEBUG=1` _before_ starting `xonsh`.
On Linux and OSX, an easy way to to do this is to run `env XONSH_DEBUG=1 xonsh` -->
### Traceback (if applicable)
<details>
```
Unhandled exception in event loop:
File "/home/ralis/.local/lib/python3.8/site-packages/prompt_toolkit/buffer.py", line 1939, in new_coroutine
await coroutine(*a, **kw)
File "/home/ralis/.local/lib/python3.8/site-packages/prompt_toolkit/buffer.py", line 1763, in async_completer
async for completion in async_generator:
File "/home/ralis/.local/lib/python3.8/site-packages/prompt_toolkit/completion/base.py", line 326, in get_completions_async
async for completion in completer.get_completions_async(
File "/home/ralis/.local/lib/python3.8/site-packages/prompt_toolkit/completion/base.py", line 202, in get_completions_async
for item in self.get_completions(document, complete_event):
File "/usr/local/lib/python3.8/dist-packages/xonsh/ptk_shell/completer.py", line 58, in get_completions
completions, plen = self.completer.complete(
File "/usr/local/lib/python3.8/dist-packages/xonsh/completer.py", line 121, in complete
return self.complete_from_context(
File "/usr/local/lib/python3.8/dist-packages/xonsh/completer.py", line 272, in complete_from_context
for comp in self.generate_completions(
File "/usr/local/lib/python3.8/dist-packages/xonsh/completer.py", line 233, in generate_completions
for comp in res:
File "/usr/local/lib/python3.8/dist-packages/xonsh/completers/man.py", line 137, in completions
for desc, opts in _parse_man_page_options(cmd).items():
File "/usr/local/lib/python3.8/dist-packages/xonsh/completers/man.py", line 121, in _parse_man_page_options
path.write_text(json.dumps(options))
File "/usr/lib/python3.8/pathlib.py", line 1255, in write_text
with self.open(mode='w', encoding=encoding, errors=errors) as f:
File "/usr/lib/python3.8/pathlib.py", line 1222, in open
return io.open(self, mode, buffering, encoding, errors, newline,
File "/usr/lib/python3.8/pathlib.py", line 1078, in _opener
return self._accessor.open(self, flags, mode)
Exception [Errno 13] Permission denied: '/usr/bin/ls.json'
```
</details>
## Steps to Reproduce
<!--- Please try to write out a minimal reproducible snippet to trigger the bug, it will help us fix it! -->
```xsh
$UPDATE_COMPLETIONS_ON_KEYPRESS = True
/usr/bin/ls - # exception after typing
```
## For community
⬇️ **Please click the 👍 reaction instead of leaving a `+1` or 👍 comment**
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xonsh/completers/man.py`
Content:
```
1 import functools
2 import json
3 import re
4 import shutil
5 import subprocess
6 import textwrap
7 from pathlib import Path
8
9 from xonsh.built_ins import XSH
10 from xonsh.completers.tools import RichCompletion, contextual_command_completer
11 from xonsh.parsers.completion_context import CommandContext
12
13
14 @functools.cache
15 def get_man_completions_path() -> Path:
16 env = XSH.env or {}
17 datadir = Path(env["XONSH_DATA_DIR"]) / "generated_completions" / "man"
18 if datadir.exists() and (not datadir.is_dir()):
19 shutil.move(datadir, datadir.with_suffix(".bkp"))
20 if not datadir.exists():
21 datadir.mkdir(exist_ok=True, parents=True)
22 return datadir
23
24
25 def _get_man_page(cmd: str):
26 """without control characters"""
27 env = XSH.env.detype()
28 manpage = subprocess.Popen(
29 ["man", cmd], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, env=env
30 )
31 # This is a trick to get rid of reverse line feeds
32 return subprocess.check_output(["col", "-b"], stdin=manpage.stdout, env=env)
33
34
35 @functools.cache
36 def _man_option_string_regex():
37 return re.compile(
38 r"(?:(,\s?)|^|(\sor\s))(?P<option>-[\w]|--[\w-]+)(?=\[?(\s|,|=\w+|$))"
39 )
40
41
42 def generate_options_of(cmd: str):
43 out = _get_man_page(cmd)
44 if not out:
45 return
46
47 def get_headers(text: str):
48 """split as header-body based on indent"""
49 if not text:
50 return
51 header = ""
52 body = []
53 for line in textwrap.dedent(text.replace("\n\t", "\n ")).splitlines():
54 if not line.strip():
55 continue
56 if line.startswith((" ", "\t")):
57 body.append(line)
58 else:
59 if header or body:
60 yield header, body
61
62 # found new section
63 header = line.strip()
64 body = []
65 if header or body:
66 yield header, body
67
68 def split_options_string(text: str):
69 text = text.strip()
70 regex = _man_option_string_regex()
71
72 regex.findall(text)
73 options = []
74 for match in regex.finditer(text):
75 option = match.groupdict().pop("option", None)
76 if option:
77 options.append(option)
78 text = text[match.end() :]
79 return options, text.strip()
80
81 def get_option_section():
82 option_sect = dict(get_headers(out.decode()))
83 small_names = {k.lower(): k for k in option_sect}
84 for head in (
85 "options",
86 "command options",
87 "description",
88 ): # prefer sections in this order
89 if head in small_names:
90 title = small_names[head]
91 return "\n".join(option_sect[title])
92
93 def get_options(text):
94 """finally get the options"""
95 # return old section if
96 for opt, lines in get_headers(text):
97 # todo: some have [+-] or such vague notations
98 if opt.startswith("-"):
99 # sometime a single line will have both desc and options
100 option_strings, rest = split_options_string(opt)
101 descs = []
102 if rest:
103 descs.append(rest)
104 if lines:
105 descs.append(textwrap.dedent("\n".join(lines)))
106 if option_strings:
107 yield ". ".join(descs), tuple(option_strings)
108 elif lines:
109 # sometimes the options are nested inside subheaders
110 yield from get_options("\n".join(lines))
111
112 yield from get_options(get_option_section())
113
114
115 @functools.lru_cache(maxsize=10)
116 def _parse_man_page_options(cmd: str) -> "dict[str, tuple[str, ...]]":
117 path = get_man_completions_path() / f"{cmd}.json"
118 if path.exists():
119 return json.loads(path.read_text())
120 options = dict(generate_options_of(cmd))
121 path.write_text(json.dumps(options))
122 return options
123
124
125 @contextual_command_completer
126 def complete_from_man(context: CommandContext):
127 """
128 Completes an option name, based on the contents of the associated man
129 page.
130 """
131
132 if context.arg_index == 0 or not context.prefix.startswith("-"):
133 return
134 cmd = context.args[0].value
135
136 def completions():
137 for desc, opts in _parse_man_page_options(cmd).items():
138 yield RichCompletion(
139 value=opts[-1], display=", ".join(opts), description=desc
140 )
141
142 return completions(), False
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/xonsh/completers/man.py b/xonsh/completers/man.py
--- a/xonsh/completers/man.py
+++ b/xonsh/completers/man.py
@@ -114,7 +114,7 @@
@functools.lru_cache(maxsize=10)
def _parse_man_page_options(cmd: str) -> "dict[str, tuple[str, ...]]":
- path = get_man_completions_path() / f"{cmd}.json"
+ path = get_man_completions_path() / Path(cmd).with_suffix(".json").name
if path.exists():
return json.loads(path.read_text())
options = dict(generate_options_of(cmd))
| {"golden_diff": "diff --git a/xonsh/completers/man.py b/xonsh/completers/man.py\n--- a/xonsh/completers/man.py\n+++ b/xonsh/completers/man.py\n@@ -114,7 +114,7 @@\n \n @functools.lru_cache(maxsize=10)\n def _parse_man_page_options(cmd: str) -> \"dict[str, tuple[str, ...]]\":\n- path = get_man_completions_path() / f\"{cmd}.json\"\n+ path = get_man_completions_path() / Path(cmd).with_suffix(\".json\").name\n if path.exists():\n return json.loads(path.read_text())\n options = dict(generate_options_of(cmd))\n", "issue": "Unexpected exception while updating completions\n<!--- Provide a general summary of the issue in the Title above -->\r\nWhen I set $UPDATE_COMPLETIONS_ON_KEYPRESS = True and type for instance /usr/bin/ls -a in terminal, following exception is thrown: \"Exception [Errno 13] Permission denied: '/usr/bin/ls.json'\"\r\n<!--- If you have a question along the lines of \"How do I do this Bash command in xonsh\"\r\nplease first look over the Bash to Xonsh translation guide: https://xon.sh/bash_to_xsh.html\r\nIf you don't find an answer there, please do open an issue! -->\r\n\r\n## xonfig\r\n\r\n<details>\r\n\r\n```\r\n+------------------+----------------------+\r\n| xonsh | 0.13.4 |\r\n| Python | 3.8.10 |\r\n| PLY | 3.11 |\r\n| have readline | True |\r\n| prompt toolkit | 3.0.36 |\r\n| shell type | prompt_toolkit |\r\n| history backend | json |\r\n| pygments | 2.14.0 |\r\n| on posix | True |\r\n| on linux | True |\r\n| distro | ubuntu |\r\n| on wsl | False |\r\n| on darwin | False |\r\n| on windows | False |\r\n| on cygwin | False |\r\n| on msys2 | False |\r\n| is superuser | False |\r\n| default encoding | utf-8 |\r\n| xonsh encoding | utf-8 |\r\n| encoding errors | surrogateescape |\r\n| xontrib | [] |\r\n| RC file 1 | /home/ralis/.xonshrc |\r\n+------------------+----------------------+\r\n\r\n```\r\n\r\n</details>\r\n\r\n## Expected Behavior\r\n<!--- Tell us what should happen -->\r\nThe warning should be either more subtle or no completion suggestions should be shown.\r\n## Current Behavior\r\n<!--- Tell us what happens instead of the expected behavior -->\r\nHuge multi-line error is printed.\r\n<!--- If part of your bug report is a traceback, please first enter debug mode before triggering the error\r\nTo enter debug mode, set the environment variable `XONSH_DEBUG=1` _before_ starting `xonsh`.\r\nOn Linux and OSX, an easy way to to do this is to run `env XONSH_DEBUG=1 xonsh` -->\r\n\r\n### Traceback (if applicable)\r\n\r\n<details>\r\n\r\n```\r\n\r\nUnhandled exception in event loop:\r\n File \"/home/ralis/.local/lib/python3.8/site-packages/prompt_toolkit/buffer.py\", line 1939, in new_coroutine\r\n await coroutine(*a, **kw)\r\n File \"/home/ralis/.local/lib/python3.8/site-packages/prompt_toolkit/buffer.py\", line 1763, in async_completer\r\n async for completion in async_generator:\r\n File \"/home/ralis/.local/lib/python3.8/site-packages/prompt_toolkit/completion/base.py\", line 326, in get_completions_async\r\n async for completion in completer.get_completions_async(\r\n File \"/home/ralis/.local/lib/python3.8/site-packages/prompt_toolkit/completion/base.py\", line 202, in get_completions_async\r\n for item in self.get_completions(document, complete_event):\r\n File \"/usr/local/lib/python3.8/dist-packages/xonsh/ptk_shell/completer.py\", line 58, in get_completions\r\n completions, plen = self.completer.complete(\r\n File \"/usr/local/lib/python3.8/dist-packages/xonsh/completer.py\", line 121, in complete\r\n return self.complete_from_context(\r\n File \"/usr/local/lib/python3.8/dist-packages/xonsh/completer.py\", line 272, in complete_from_context\r\n for comp in self.generate_completions(\r\n File \"/usr/local/lib/python3.8/dist-packages/xonsh/completer.py\", line 233, in generate_completions\r\n for comp in res:\r\n File \"/usr/local/lib/python3.8/dist-packages/xonsh/completers/man.py\", line 137, in completions\r\n for desc, opts in _parse_man_page_options(cmd).items():\r\n File \"/usr/local/lib/python3.8/dist-packages/xonsh/completers/man.py\", line 121, in _parse_man_page_options\r\n path.write_text(json.dumps(options))\r\n File \"/usr/lib/python3.8/pathlib.py\", line 1255, in write_text\r\n with self.open(mode='w', encoding=encoding, errors=errors) as f:\r\n File \"/usr/lib/python3.8/pathlib.py\", line 1222, in open\r\n return io.open(self, mode, buffering, encoding, errors, newline,\r\n File \"/usr/lib/python3.8/pathlib.py\", line 1078, in _opener\r\n return self._accessor.open(self, flags, mode)\r\n\r\nException [Errno 13] Permission denied: '/usr/bin/ls.json'\r\n\r\n```\r\n\r\n</details>\r\n\r\n## Steps to Reproduce\r\n<!--- Please try to write out a minimal reproducible snippet to trigger the bug, it will help us fix it! -->\r\n```xsh\r\n$UPDATE_COMPLETIONS_ON_KEYPRESS = True\r\n/usr/bin/ls - # exception after typing\r\n```\r\n## For community\r\n\u2b07\ufe0f **Please click the \ud83d\udc4d reaction instead of leaving a `+1` or \ud83d\udc4d comment**\r\n\n", "before_files": [{"content": "import functools\nimport json\nimport re\nimport shutil\nimport subprocess\nimport textwrap\nfrom pathlib import Path\n\nfrom xonsh.built_ins import XSH\nfrom xonsh.completers.tools import RichCompletion, contextual_command_completer\nfrom xonsh.parsers.completion_context import CommandContext\n\n\[email protected]\ndef get_man_completions_path() -> Path:\n env = XSH.env or {}\n datadir = Path(env[\"XONSH_DATA_DIR\"]) / \"generated_completions\" / \"man\"\n if datadir.exists() and (not datadir.is_dir()):\n shutil.move(datadir, datadir.with_suffix(\".bkp\"))\n if not datadir.exists():\n datadir.mkdir(exist_ok=True, parents=True)\n return datadir\n\n\ndef _get_man_page(cmd: str):\n \"\"\"without control characters\"\"\"\n env = XSH.env.detype()\n manpage = subprocess.Popen(\n [\"man\", cmd], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, env=env\n )\n # This is a trick to get rid of reverse line feeds\n return subprocess.check_output([\"col\", \"-b\"], stdin=manpage.stdout, env=env)\n\n\[email protected]\ndef _man_option_string_regex():\n return re.compile(\n r\"(?:(,\\s?)|^|(\\sor\\s))(?P<option>-[\\w]|--[\\w-]+)(?=\\[?(\\s|,|=\\w+|$))\"\n )\n\n\ndef generate_options_of(cmd: str):\n out = _get_man_page(cmd)\n if not out:\n return\n\n def get_headers(text: str):\n \"\"\"split as header-body based on indent\"\"\"\n if not text:\n return\n header = \"\"\n body = []\n for line in textwrap.dedent(text.replace(\"\\n\\t\", \"\\n \")).splitlines():\n if not line.strip():\n continue\n if line.startswith((\" \", \"\\t\")):\n body.append(line)\n else:\n if header or body:\n yield header, body\n\n # found new section\n header = line.strip()\n body = []\n if header or body:\n yield header, body\n\n def split_options_string(text: str):\n text = text.strip()\n regex = _man_option_string_regex()\n\n regex.findall(text)\n options = []\n for match in regex.finditer(text):\n option = match.groupdict().pop(\"option\", None)\n if option:\n options.append(option)\n text = text[match.end() :]\n return options, text.strip()\n\n def get_option_section():\n option_sect = dict(get_headers(out.decode()))\n small_names = {k.lower(): k for k in option_sect}\n for head in (\n \"options\",\n \"command options\",\n \"description\",\n ): # prefer sections in this order\n if head in small_names:\n title = small_names[head]\n return \"\\n\".join(option_sect[title])\n\n def get_options(text):\n \"\"\"finally get the options\"\"\"\n # return old section if\n for opt, lines in get_headers(text):\n # todo: some have [+-] or such vague notations\n if opt.startswith(\"-\"):\n # sometime a single line will have both desc and options\n option_strings, rest = split_options_string(opt)\n descs = []\n if rest:\n descs.append(rest)\n if lines:\n descs.append(textwrap.dedent(\"\\n\".join(lines)))\n if option_strings:\n yield \". \".join(descs), tuple(option_strings)\n elif lines:\n # sometimes the options are nested inside subheaders\n yield from get_options(\"\\n\".join(lines))\n\n yield from get_options(get_option_section())\n\n\[email protected]_cache(maxsize=10)\ndef _parse_man_page_options(cmd: str) -> \"dict[str, tuple[str, ...]]\":\n path = get_man_completions_path() / f\"{cmd}.json\"\n if path.exists():\n return json.loads(path.read_text())\n options = dict(generate_options_of(cmd))\n path.write_text(json.dumps(options))\n return options\n\n\n@contextual_command_completer\ndef complete_from_man(context: CommandContext):\n \"\"\"\n Completes an option name, based on the contents of the associated man\n page.\n \"\"\"\n\n if context.arg_index == 0 or not context.prefix.startswith(\"-\"):\n return\n cmd = context.args[0].value\n\n def completions():\n for desc, opts in _parse_man_page_options(cmd).items():\n yield RichCompletion(\n value=opts[-1], display=\", \".join(opts), description=desc\n )\n\n return completions(), False\n", "path": "xonsh/completers/man.py"}], "after_files": [{"content": "import functools\nimport json\nimport re\nimport shutil\nimport subprocess\nimport textwrap\nfrom pathlib import Path\n\nfrom xonsh.built_ins import XSH\nfrom xonsh.completers.tools import RichCompletion, contextual_command_completer\nfrom xonsh.parsers.completion_context import CommandContext\n\n\[email protected]\ndef get_man_completions_path() -> Path:\n env = XSH.env or {}\n datadir = Path(env[\"XONSH_DATA_DIR\"]) / \"generated_completions\" / \"man\"\n if datadir.exists() and (not datadir.is_dir()):\n shutil.move(datadir, datadir.with_suffix(\".bkp\"))\n if not datadir.exists():\n datadir.mkdir(exist_ok=True, parents=True)\n return datadir\n\n\ndef _get_man_page(cmd: str):\n \"\"\"without control characters\"\"\"\n env = XSH.env.detype()\n manpage = subprocess.Popen(\n [\"man\", cmd], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, env=env\n )\n # This is a trick to get rid of reverse line feeds\n return subprocess.check_output([\"col\", \"-b\"], stdin=manpage.stdout, env=env)\n\n\[email protected]\ndef _man_option_string_regex():\n return re.compile(\n r\"(?:(,\\s?)|^|(\\sor\\s))(?P<option>-[\\w]|--[\\w-]+)(?=\\[?(\\s|,|=\\w+|$))\"\n )\n\n\ndef generate_options_of(cmd: str):\n out = _get_man_page(cmd)\n if not out:\n return\n\n def get_headers(text: str):\n \"\"\"split as header-body based on indent\"\"\"\n if not text:\n return\n header = \"\"\n body = []\n for line in textwrap.dedent(text.replace(\"\\n\\t\", \"\\n \")).splitlines():\n if not line.strip():\n continue\n if line.startswith((\" \", \"\\t\")):\n body.append(line)\n else:\n if header or body:\n yield header, body\n\n # found new section\n header = line.strip()\n body = []\n if header or body:\n yield header, body\n\n def split_options_string(text: str):\n text = text.strip()\n regex = _man_option_string_regex()\n\n regex.findall(text)\n options = []\n for match in regex.finditer(text):\n option = match.groupdict().pop(\"option\", None)\n if option:\n options.append(option)\n text = text[match.end() :]\n return options, text.strip()\n\n def get_option_section():\n option_sect = dict(get_headers(out.decode()))\n small_names = {k.lower(): k for k in option_sect}\n for head in (\n \"options\",\n \"command options\",\n \"description\",\n ): # prefer sections in this order\n if head in small_names:\n title = small_names[head]\n return \"\\n\".join(option_sect[title])\n\n def get_options(text):\n \"\"\"finally get the options\"\"\"\n # return old section if\n for opt, lines in get_headers(text):\n # todo: some have [+-] or such vague notations\n if opt.startswith(\"-\"):\n # sometime a single line will have both desc and options\n option_strings, rest = split_options_string(opt)\n descs = []\n if rest:\n descs.append(rest)\n if lines:\n descs.append(textwrap.dedent(\"\\n\".join(lines)))\n if option_strings:\n yield \". \".join(descs), tuple(option_strings)\n elif lines:\n # sometimes the options are nested inside subheaders\n yield from get_options(\"\\n\".join(lines))\n\n yield from get_options(get_option_section())\n\n\[email protected]_cache(maxsize=10)\ndef _parse_man_page_options(cmd: str) -> \"dict[str, tuple[str, ...]]\":\n path = get_man_completions_path() / Path(cmd).with_suffix(\".json\").name\n if path.exists():\n return json.loads(path.read_text())\n options = dict(generate_options_of(cmd))\n path.write_text(json.dumps(options))\n return options\n\n\n@contextual_command_completer\ndef complete_from_man(context: CommandContext):\n \"\"\"\n Completes an option name, based on the contents of the associated man\n page.\n \"\"\"\n\n if context.arg_index == 0 or not context.prefix.startswith(\"-\"):\n return\n cmd = context.args[0].value\n\n def completions():\n for desc, opts in _parse_man_page_options(cmd).items():\n yield RichCompletion(\n value=opts[-1], display=\", \".join(opts), description=desc\n )\n\n return completions(), False\n", "path": "xonsh/completers/man.py"}]} |
gh_patches_debug_1510 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-3351 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'NoneType' object has no attribute 'job_label'
I am trying to configure the bigquery connection profile. Somehow get the following error message after "dbt debug".
>'NoneType' object has no attribute 'job_label'
Not sure how to debug this. Can someone give me a hint please?

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/bigquery/dbt/adapters/bigquery/connections.py`
Content:
```
1 import json
2 import re
3 from contextlib import contextmanager
4 from dataclasses import dataclass
5 from functools import lru_cache
6 import agate
7 from requests.exceptions import ConnectionError
8 from typing import Optional, Any, Dict, Tuple
9
10 import google.auth
11 import google.auth.exceptions
12 import google.cloud.bigquery
13 import google.cloud.exceptions
14 from google.api_core import retry, client_info
15 from google.auth import impersonated_credentials
16 from google.oauth2 import (
17 credentials as GoogleCredentials,
18 service_account as GoogleServiceAccountCredentials
19 )
20
21 from dbt.utils import format_bytes, format_rows_number
22 from dbt.clients import agate_helper, gcloud
23 from dbt.tracking import active_user
24 from dbt.contracts.connection import ConnectionState, AdapterResponse
25 from dbt.exceptions import (
26 FailedToConnectException, RuntimeException, DatabaseException
27 )
28 from dbt.adapters.base import BaseConnectionManager, Credentials
29 from dbt.logger import GLOBAL_LOGGER as logger
30 from dbt.version import __version__ as dbt_version
31
32 from dbt.dataclass_schema import StrEnum
33
34
35 BQ_QUERY_JOB_SPLIT = '-----Query Job SQL Follows-----'
36
37 WRITE_TRUNCATE = google.cloud.bigquery.job.WriteDisposition.WRITE_TRUNCATE
38
39 REOPENABLE_ERRORS = (
40 ConnectionResetError,
41 ConnectionError,
42 )
43
44 RETRYABLE_ERRORS = (
45 google.cloud.exceptions.ServerError,
46 google.cloud.exceptions.BadRequest,
47 ConnectionResetError,
48 ConnectionError,
49 )
50
51
52 @lru_cache()
53 def get_bigquery_defaults(scopes=None) -> Tuple[Any, Optional[str]]:
54 """
55 Returns (credentials, project_id)
56
57 project_id is returned available from the environment; otherwise None
58 """
59 # Cached, because the underlying implementation shells out, taking ~1s
60 return google.auth.default(scopes=scopes)
61
62
63 class Priority(StrEnum):
64 Interactive = 'interactive'
65 Batch = 'batch'
66
67
68 class BigQueryConnectionMethod(StrEnum):
69 OAUTH = 'oauth'
70 SERVICE_ACCOUNT = 'service-account'
71 SERVICE_ACCOUNT_JSON = 'service-account-json'
72 OAUTH_SECRETS = 'oauth-secrets'
73
74
75 @dataclass
76 class BigQueryAdapterResponse(AdapterResponse):
77 bytes_processed: Optional[int] = None
78
79
80 @dataclass
81 class BigQueryCredentials(Credentials):
82 method: BigQueryConnectionMethod
83 # BigQuery allows an empty database / project, where it defers to the
84 # environment for the project
85 database: Optional[str]
86 timeout_seconds: Optional[int] = 300
87 location: Optional[str] = None
88 priority: Optional[Priority] = None
89 retries: Optional[int] = 1
90 maximum_bytes_billed: Optional[int] = None
91 impersonate_service_account: Optional[str] = None
92
93 # Keyfile json creds
94 keyfile: Optional[str] = None
95 keyfile_json: Optional[Dict[str, Any]] = None
96
97 # oauth-secrets
98 token: Optional[str] = None
99 refresh_token: Optional[str] = None
100 client_id: Optional[str] = None
101 client_secret: Optional[str] = None
102 token_uri: Optional[str] = None
103
104 _ALIASES = {
105 'project': 'database',
106 'dataset': 'schema',
107 }
108
109 @property
110 def type(self):
111 return 'bigquery'
112
113 def _connection_keys(self):
114 return ('method', 'database', 'schema', 'location', 'priority',
115 'timeout_seconds', 'maximum_bytes_billed')
116
117 @classmethod
118 def __pre_deserialize__(cls, d: Dict[Any, Any]) -> Dict[Any, Any]:
119 # We need to inject the correct value of the database (aka project) at
120 # this stage, ref
121 # https://github.com/fishtown-analytics/dbt/pull/2908#discussion_r532927436.
122
123 # `database` is an alias of `project` in BigQuery
124 if 'database' not in d:
125 _, database = get_bigquery_defaults()
126 d['database'] = database
127 return d
128
129
130 class BigQueryConnectionManager(BaseConnectionManager):
131 TYPE = 'bigquery'
132
133 SCOPE = ('https://www.googleapis.com/auth/bigquery',
134 'https://www.googleapis.com/auth/cloud-platform',
135 'https://www.googleapis.com/auth/drive')
136
137 QUERY_TIMEOUT = 300
138 RETRIES = 1
139 DEFAULT_INITIAL_DELAY = 1.0 # Seconds
140 DEFAULT_MAXIMUM_DELAY = 1.0 # Seconds
141
142 @classmethod
143 def handle_error(cls, error, message):
144 error_msg = "\n".join([item['message'] for item in error.errors])
145 raise DatabaseException(error_msg)
146
147 def clear_transaction(self):
148 pass
149
150 @contextmanager
151 def exception_handler(self, sql):
152 try:
153 yield
154
155 except google.cloud.exceptions.BadRequest as e:
156 message = "Bad request while running query"
157 self.handle_error(e, message)
158
159 except google.cloud.exceptions.Forbidden as e:
160 message = "Access denied while running query"
161 self.handle_error(e, message)
162
163 except google.auth.exceptions.RefreshError as e:
164 message = "Unable to generate access token, if you're using " \
165 "impersonate_service_account, make sure your " \
166 'initial account has the "roles/' \
167 'iam.serviceAccountTokenCreator" role on the ' \
168 'account you are trying to impersonate.\n\n' \
169 f'{str(e)}'
170 raise RuntimeException(message)
171
172 except Exception as e:
173 logger.debug("Unhandled error while running:\n{}".format(sql))
174 logger.debug(e)
175 if isinstance(e, RuntimeException):
176 # during a sql query, an internal to dbt exception was raised.
177 # this sounds a lot like a signal handler and probably has
178 # useful information, so raise it without modification.
179 raise
180 exc_message = str(e)
181 # the google bigquery library likes to add the query log, which we
182 # don't want to log. Hopefully they never change this!
183 if BQ_QUERY_JOB_SPLIT in exc_message:
184 exc_message = exc_message.split(BQ_QUERY_JOB_SPLIT)[0].strip()
185 raise RuntimeException(exc_message)
186
187 def cancel_open(self) -> None:
188 pass
189
190 @classmethod
191 def close(cls, connection):
192 connection.state = ConnectionState.CLOSED
193
194 return connection
195
196 def begin(self):
197 pass
198
199 def commit(self):
200 pass
201
202 @classmethod
203 def get_bigquery_credentials(cls, profile_credentials):
204 method = profile_credentials.method
205 creds = GoogleServiceAccountCredentials.Credentials
206
207 if method == BigQueryConnectionMethod.OAUTH:
208 credentials, _ = get_bigquery_defaults(scopes=cls.SCOPE)
209 return credentials
210
211 elif method == BigQueryConnectionMethod.SERVICE_ACCOUNT:
212 keyfile = profile_credentials.keyfile
213 return creds.from_service_account_file(keyfile, scopes=cls.SCOPE)
214
215 elif method == BigQueryConnectionMethod.SERVICE_ACCOUNT_JSON:
216 details = profile_credentials.keyfile_json
217 return creds.from_service_account_info(details, scopes=cls.SCOPE)
218
219 elif method == BigQueryConnectionMethod.OAUTH_SECRETS:
220 return GoogleCredentials.Credentials(
221 token=profile_credentials.token,
222 refresh_token=profile_credentials.refresh_token,
223 client_id=profile_credentials.client_id,
224 client_secret=profile_credentials.client_secret,
225 token_uri=profile_credentials.token_uri,
226 scopes=cls.SCOPE
227 )
228
229 error = ('Invalid `method` in profile: "{}"'.format(method))
230 raise FailedToConnectException(error)
231
232 @classmethod
233 def get_impersonated_bigquery_credentials(cls, profile_credentials):
234 source_credentials = cls.get_bigquery_credentials(profile_credentials)
235 return impersonated_credentials.Credentials(
236 source_credentials=source_credentials,
237 target_principal=profile_credentials.impersonate_service_account,
238 target_scopes=list(cls.SCOPE),
239 lifetime=profile_credentials.timeout_seconds,
240 )
241
242 @classmethod
243 def get_bigquery_client(cls, profile_credentials):
244 if profile_credentials.impersonate_service_account:
245 creds =\
246 cls.get_impersonated_bigquery_credentials(profile_credentials)
247 else:
248 creds = cls.get_bigquery_credentials(profile_credentials)
249 database = profile_credentials.database
250 location = getattr(profile_credentials, 'location', None)
251
252 info = client_info.ClientInfo(user_agent=f'dbt-{dbt_version}')
253 return google.cloud.bigquery.Client(
254 database,
255 creds,
256 location=location,
257 client_info=info,
258 )
259
260 @classmethod
261 def open(cls, connection):
262 if connection.state == 'open':
263 logger.debug('Connection is already open, skipping open.')
264 return connection
265
266 try:
267 handle = cls.get_bigquery_client(connection.credentials)
268
269 except google.auth.exceptions.DefaultCredentialsError:
270 logger.info("Please log into GCP to continue")
271 gcloud.setup_default_credentials()
272
273 handle = cls.get_bigquery_client(connection.credentials)
274
275 except Exception as e:
276 logger.debug("Got an error when attempting to create a bigquery "
277 "client: '{}'".format(e))
278
279 connection.handle = None
280 connection.state = 'fail'
281
282 raise FailedToConnectException(str(e))
283
284 connection.handle = handle
285 connection.state = 'open'
286 return connection
287
288 @classmethod
289 def get_timeout(cls, conn):
290 credentials = conn.credentials
291 return credentials.timeout_seconds
292
293 @classmethod
294 def get_retries(cls, conn) -> int:
295 credentials = conn.credentials
296 if credentials.retries is not None:
297 return credentials.retries
298 else:
299 return 1
300
301 @classmethod
302 def get_table_from_response(cls, resp):
303 column_names = [field.name for field in resp.schema]
304 return agate_helper.table_from_data_flat(resp, column_names)
305
306 def raw_execute(self, sql, fetch=False, *, use_legacy_sql=False):
307 conn = self.get_thread_connection()
308 client = conn.handle
309
310 logger.debug('On {}: {}', conn.name, sql)
311
312 if self.profile.query_comment.job_label:
313 query_comment = self.query_header.comment.query_comment
314 labels = self._labels_from_query_comment(query_comment)
315 else:
316 labels = {}
317
318 if active_user:
319 labels['dbt_invocation_id'] = active_user.invocation_id
320
321 job_params = {'use_legacy_sql': use_legacy_sql, 'labels': labels}
322
323 priority = conn.credentials.priority
324 if priority == Priority.Batch:
325 job_params['priority'] = google.cloud.bigquery.QueryPriority.BATCH
326 else:
327 job_params[
328 'priority'] = google.cloud.bigquery.QueryPriority.INTERACTIVE
329
330 maximum_bytes_billed = conn.credentials.maximum_bytes_billed
331 if maximum_bytes_billed is not None and maximum_bytes_billed != 0:
332 job_params['maximum_bytes_billed'] = maximum_bytes_billed
333
334 def fn():
335 return self._query_and_results(client, sql, conn, job_params)
336
337 query_job, iterator = self._retry_and_handle(msg=sql, conn=conn, fn=fn)
338
339 return query_job, iterator
340
341 def execute(
342 self, sql, auto_begin=False, fetch=None
343 ) -> Tuple[BigQueryAdapterResponse, agate.Table]:
344 sql = self._add_query_comment(sql)
345 # auto_begin is ignored on bigquery, and only included for consistency
346 query_job, iterator = self.raw_execute(sql, fetch=fetch)
347
348 if fetch:
349 table = self.get_table_from_response(iterator)
350 else:
351 table = agate_helper.empty_table()
352
353 message = 'OK'
354 code = None
355 num_rows = None
356 bytes_processed = None
357
358 if query_job.statement_type == 'CREATE_VIEW':
359 code = 'CREATE VIEW'
360
361 elif query_job.statement_type == 'CREATE_TABLE_AS_SELECT':
362 conn = self.get_thread_connection()
363 client = conn.handle
364 query_table = client.get_table(query_job.destination)
365 code = 'CREATE TABLE'
366 num_rows = query_table.num_rows
367 bytes_processed = query_job.total_bytes_processed
368 message = '{} ({} rows, {} processed)'.format(
369 code,
370 format_rows_number(num_rows),
371 format_bytes(bytes_processed)
372 )
373
374 elif query_job.statement_type == 'SCRIPT':
375 code = 'SCRIPT'
376 bytes_processed = query_job.total_bytes_processed
377 message = f'{code} ({format_bytes(bytes_processed)} processed)'
378
379 elif query_job.statement_type in ['INSERT', 'DELETE', 'MERGE']:
380 code = query_job.statement_type
381 num_rows = query_job.num_dml_affected_rows
382 bytes_processed = query_job.total_bytes_processed
383 message = '{} ({} rows, {} processed)'.format(
384 code,
385 format_rows_number(num_rows),
386 format_bytes(bytes_processed),
387 )
388
389 response = BigQueryAdapterResponse(
390 _message=message,
391 rows_affected=num_rows,
392 code=code,
393 bytes_processed=bytes_processed
394 )
395
396 return response, table
397
398 def get_partitions_metadata(self, table):
399 def standard_to_legacy(table):
400 return table.project + ':' + table.dataset + '.' + table.identifier
401
402 legacy_sql = 'SELECT * FROM ['\
403 + standard_to_legacy(table) + '$__PARTITIONS_SUMMARY__]'
404
405 sql = self._add_query_comment(legacy_sql)
406 # auto_begin is ignored on bigquery, and only included for consistency
407 _, iterator =\
408 self.raw_execute(sql, fetch='fetch_result', use_legacy_sql=True)
409 return self.get_table_from_response(iterator)
410
411 def create_bigquery_table(self, database, schema, table_name, callback,
412 sql):
413 """Create a bigquery table. The caller must supply a callback
414 that takes one argument, a `google.cloud.bigquery.Table`, and mutates
415 it.
416 """
417 conn = self.get_thread_connection()
418 client = conn.handle
419
420 view_ref = self.table_ref(database, schema, table_name, conn)
421 view = google.cloud.bigquery.Table(view_ref)
422 callback(view)
423
424 def fn():
425 return client.create_table(view)
426 self._retry_and_handle(msg=sql, conn=conn, fn=fn)
427
428 def create_view(self, database, schema, table_name, sql):
429 def callback(table):
430 table.view_query = sql
431 table.view_use_legacy_sql = False
432
433 self.create_bigquery_table(database, schema, table_name, callback, sql)
434
435 def create_table(self, database, schema, table_name, sql):
436 conn = self.get_thread_connection()
437 client = conn.handle
438
439 table_ref = self.table_ref(database, schema, table_name, conn)
440 job_params = {'destination': table_ref,
441 'write_disposition': WRITE_TRUNCATE}
442
443 timeout = self.get_timeout(conn)
444
445 def fn():
446 return self._query_and_results(client, sql, conn, job_params,
447 timeout=timeout)
448 self._retry_and_handle(msg=sql, conn=conn, fn=fn)
449
450 def create_date_partitioned_table(self, database, schema, table_name):
451 def callback(table):
452 table.partitioning_type = 'DAY'
453
454 self.create_bigquery_table(database, schema, table_name, callback,
455 'CREATE DAY PARTITIONED TABLE')
456
457 def copy_bq_table(self, source, destination, write_disposition):
458 conn = self.get_thread_connection()
459 client = conn.handle
460
461 source_ref = self.table_ref(
462 source.database, source.schema, source.table, conn)
463 destination_ref = self.table_ref(
464 destination.database, destination.schema, destination.table, conn)
465
466 logger.debug(
467 'Copying table "{}" to "{}" with disposition: "{}"',
468 source_ref.path, destination_ref.path, write_disposition)
469
470 def copy_and_results():
471 job_config = google.cloud.bigquery.CopyJobConfig(
472 write_disposition=write_disposition)
473 copy_job = client.copy_table(
474 source_ref, destination_ref, job_config=job_config)
475 iterator = copy_job.result(timeout=self.get_timeout(conn))
476 return copy_job, iterator
477
478 self._retry_and_handle(
479 msg='copy table "{}" to "{}"'.format(
480 source_ref.path, destination_ref.path),
481 conn=conn, fn=copy_and_results)
482
483 @staticmethod
484 def dataset(database, schema, conn):
485 dataset_ref = conn.handle.dataset(schema, database)
486 return google.cloud.bigquery.Dataset(dataset_ref)
487
488 @staticmethod
489 def dataset_from_id(dataset_id):
490 return google.cloud.bigquery.Dataset.from_string(dataset_id)
491
492 def table_ref(self, database, schema, table_name, conn):
493 dataset = self.dataset(database, schema, conn)
494 return dataset.table(table_name)
495
496 def get_bq_table(self, database, schema, identifier):
497 """Get a bigquery table for a schema/model."""
498 conn = self.get_thread_connection()
499 table_ref = self.table_ref(database, schema, identifier, conn)
500 return conn.handle.get_table(table_ref)
501
502 def drop_dataset(self, database, schema):
503 conn = self.get_thread_connection()
504 dataset = self.dataset(database, schema, conn)
505 client = conn.handle
506
507 def fn():
508 return client.delete_dataset(
509 dataset, delete_contents=True, not_found_ok=True)
510
511 self._retry_and_handle(
512 msg='drop dataset', conn=conn, fn=fn)
513
514 def create_dataset(self, database, schema):
515 conn = self.get_thread_connection()
516 client = conn.handle
517 dataset = self.dataset(database, schema, conn)
518
519 def fn():
520 return client.create_dataset(dataset, exists_ok=True)
521 self._retry_and_handle(msg='create dataset', conn=conn, fn=fn)
522
523 def _query_and_results(self, client, sql, conn, job_params, timeout=None):
524 """Query the client and wait for results."""
525 # Cannot reuse job_config if destination is set and ddl is used
526 job_config = google.cloud.bigquery.QueryJobConfig(**job_params)
527 query_job = client.query(sql, job_config=job_config)
528 iterator = query_job.result(timeout=timeout)
529
530 return query_job, iterator
531
532 def _retry_and_handle(self, msg, conn, fn):
533 """retry a function call within the context of exception_handler."""
534 def reopen_conn_on_error(error):
535 if isinstance(error, REOPENABLE_ERRORS):
536 logger.warning('Reopening connection after {!r}', error)
537 self.close(conn)
538 self.open(conn)
539 return
540
541 with self.exception_handler(msg):
542 return retry.retry_target(
543 target=fn,
544 predicate=_ErrorCounter(self.get_retries(conn)).count_error,
545 sleep_generator=self._retry_generator(),
546 deadline=None,
547 on_error=reopen_conn_on_error)
548
549 def _retry_generator(self):
550 """Generates retry intervals that exponentially back off."""
551 return retry.exponential_sleep_generator(
552 initial=self.DEFAULT_INITIAL_DELAY,
553 maximum=self.DEFAULT_MAXIMUM_DELAY)
554
555 def _labels_from_query_comment(self, comment: str) -> Dict:
556 try:
557 comment_labels = json.loads(comment)
558 except (TypeError, ValueError):
559 return {'query_comment': _sanitize_label(comment)}
560 return {
561 _sanitize_label(key): _sanitize_label(str(value))
562 for key, value in comment_labels.items()
563 }
564
565
566 class _ErrorCounter(object):
567 """Counts errors seen up to a threshold then raises the next error."""
568
569 def __init__(self, retries):
570 self.retries = retries
571 self.error_count = 0
572
573 def count_error(self, error):
574 if self.retries == 0:
575 return False # Don't log
576 self.error_count += 1
577 if _is_retryable(error) and self.error_count <= self.retries:
578 logger.debug(
579 'Retry attempt {} of {} after error: {}',
580 self.error_count, self.retries, repr(error))
581 return True
582 else:
583 return False
584
585
586 def _is_retryable(error):
587 """Return true for errors that are unlikely to occur again if retried."""
588 if isinstance(error, RETRYABLE_ERRORS):
589 return True
590 elif isinstance(error, google.api_core.exceptions.Forbidden) and any(
591 e['reason'] == 'rateLimitExceeded' for e in error.errors):
592 return True
593 return False
594
595
596 _SANITIZE_LABEL_PATTERN = re.compile(r"[^a-z0-9_-]")
597
598
599 def _sanitize_label(value: str) -> str:
600 """Return a legal value for a BigQuery label."""
601 value = value.strip().lower()
602 value = _SANITIZE_LABEL_PATTERN.sub("_", value)
603 return value
604
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/bigquery/dbt/adapters/bigquery/connections.py b/plugins/bigquery/dbt/adapters/bigquery/connections.py
--- a/plugins/bigquery/dbt/adapters/bigquery/connections.py
+++ b/plugins/bigquery/dbt/adapters/bigquery/connections.py
@@ -309,7 +309,7 @@
logger.debug('On {}: {}', conn.name, sql)
- if self.profile.query_comment.job_label:
+ if self.profile.query_comment and self.profile.query_comment.job_label:
query_comment = self.query_header.comment.query_comment
labels = self._labels_from_query_comment(query_comment)
else:
| {"golden_diff": "diff --git a/plugins/bigquery/dbt/adapters/bigquery/connections.py b/plugins/bigquery/dbt/adapters/bigquery/connections.py\n--- a/plugins/bigquery/dbt/adapters/bigquery/connections.py\n+++ b/plugins/bigquery/dbt/adapters/bigquery/connections.py\n@@ -309,7 +309,7 @@\n \n logger.debug('On {}: {}', conn.name, sql)\n \n- if self.profile.query_comment.job_label:\n+ if self.profile.query_comment and self.profile.query_comment.job_label:\n query_comment = self.query_header.comment.query_comment\n labels = self._labels_from_query_comment(query_comment)\n else:\n", "issue": "'NoneType' object has no attribute 'job_label'\nI am trying to configure the bigquery connection profile. Somehow get the following error message after \"dbt debug\". \r\n\r\n>'NoneType' object has no attribute 'job_label'\r\n\r\nNot sure how to debug this. Can someone give me a hint please?\r\n\r\n\n", "before_files": [{"content": "import json\nimport re\nfrom contextlib import contextmanager\nfrom dataclasses import dataclass\nfrom functools import lru_cache\nimport agate\nfrom requests.exceptions import ConnectionError\nfrom typing import Optional, Any, Dict, Tuple\n\nimport google.auth\nimport google.auth.exceptions\nimport google.cloud.bigquery\nimport google.cloud.exceptions\nfrom google.api_core import retry, client_info\nfrom google.auth import impersonated_credentials\nfrom google.oauth2 import (\n credentials as GoogleCredentials,\n service_account as GoogleServiceAccountCredentials\n)\n\nfrom dbt.utils import format_bytes, format_rows_number\nfrom dbt.clients import agate_helper, gcloud\nfrom dbt.tracking import active_user\nfrom dbt.contracts.connection import ConnectionState, AdapterResponse\nfrom dbt.exceptions import (\n FailedToConnectException, RuntimeException, DatabaseException\n)\nfrom dbt.adapters.base import BaseConnectionManager, Credentials\nfrom dbt.logger import GLOBAL_LOGGER as logger\nfrom dbt.version import __version__ as dbt_version\n\nfrom dbt.dataclass_schema import StrEnum\n\n\nBQ_QUERY_JOB_SPLIT = '-----Query Job SQL Follows-----'\n\nWRITE_TRUNCATE = google.cloud.bigquery.job.WriteDisposition.WRITE_TRUNCATE\n\nREOPENABLE_ERRORS = (\n ConnectionResetError,\n ConnectionError,\n)\n\nRETRYABLE_ERRORS = (\n google.cloud.exceptions.ServerError,\n google.cloud.exceptions.BadRequest,\n ConnectionResetError,\n ConnectionError,\n)\n\n\n@lru_cache()\ndef get_bigquery_defaults(scopes=None) -> Tuple[Any, Optional[str]]:\n \"\"\"\n Returns (credentials, project_id)\n\n project_id is returned available from the environment; otherwise None\n \"\"\"\n # Cached, because the underlying implementation shells out, taking ~1s\n return google.auth.default(scopes=scopes)\n\n\nclass Priority(StrEnum):\n Interactive = 'interactive'\n Batch = 'batch'\n\n\nclass BigQueryConnectionMethod(StrEnum):\n OAUTH = 'oauth'\n SERVICE_ACCOUNT = 'service-account'\n SERVICE_ACCOUNT_JSON = 'service-account-json'\n OAUTH_SECRETS = 'oauth-secrets'\n\n\n@dataclass\nclass BigQueryAdapterResponse(AdapterResponse):\n bytes_processed: Optional[int] = None\n\n\n@dataclass\nclass BigQueryCredentials(Credentials):\n method: BigQueryConnectionMethod\n # BigQuery allows an empty database / project, where it defers to the\n # environment for the project\n database: Optional[str]\n timeout_seconds: Optional[int] = 300\n location: Optional[str] = None\n priority: Optional[Priority] = None\n retries: Optional[int] = 1\n maximum_bytes_billed: Optional[int] = None\n impersonate_service_account: Optional[str] = None\n\n # Keyfile json creds\n keyfile: Optional[str] = None\n keyfile_json: Optional[Dict[str, Any]] = None\n\n # oauth-secrets\n token: Optional[str] = None\n refresh_token: Optional[str] = None\n client_id: Optional[str] = None\n client_secret: Optional[str] = None\n token_uri: Optional[str] = None\n\n _ALIASES = {\n 'project': 'database',\n 'dataset': 'schema',\n }\n\n @property\n def type(self):\n return 'bigquery'\n\n def _connection_keys(self):\n return ('method', 'database', 'schema', 'location', 'priority',\n 'timeout_seconds', 'maximum_bytes_billed')\n\n @classmethod\n def __pre_deserialize__(cls, d: Dict[Any, Any]) -> Dict[Any, Any]:\n # We need to inject the correct value of the database (aka project) at\n # this stage, ref\n # https://github.com/fishtown-analytics/dbt/pull/2908#discussion_r532927436.\n\n # `database` is an alias of `project` in BigQuery\n if 'database' not in d:\n _, database = get_bigquery_defaults()\n d['database'] = database\n return d\n\n\nclass BigQueryConnectionManager(BaseConnectionManager):\n TYPE = 'bigquery'\n\n SCOPE = ('https://www.googleapis.com/auth/bigquery',\n 'https://www.googleapis.com/auth/cloud-platform',\n 'https://www.googleapis.com/auth/drive')\n\n QUERY_TIMEOUT = 300\n RETRIES = 1\n DEFAULT_INITIAL_DELAY = 1.0 # Seconds\n DEFAULT_MAXIMUM_DELAY = 1.0 # Seconds\n\n @classmethod\n def handle_error(cls, error, message):\n error_msg = \"\\n\".join([item['message'] for item in error.errors])\n raise DatabaseException(error_msg)\n\n def clear_transaction(self):\n pass\n\n @contextmanager\n def exception_handler(self, sql):\n try:\n yield\n\n except google.cloud.exceptions.BadRequest as e:\n message = \"Bad request while running query\"\n self.handle_error(e, message)\n\n except google.cloud.exceptions.Forbidden as e:\n message = \"Access denied while running query\"\n self.handle_error(e, message)\n\n except google.auth.exceptions.RefreshError as e:\n message = \"Unable to generate access token, if you're using \" \\\n \"impersonate_service_account, make sure your \" \\\n 'initial account has the \"roles/' \\\n 'iam.serviceAccountTokenCreator\" role on the ' \\\n 'account you are trying to impersonate.\\n\\n' \\\n f'{str(e)}'\n raise RuntimeException(message)\n\n except Exception as e:\n logger.debug(\"Unhandled error while running:\\n{}\".format(sql))\n logger.debug(e)\n if isinstance(e, RuntimeException):\n # during a sql query, an internal to dbt exception was raised.\n # this sounds a lot like a signal handler and probably has\n # useful information, so raise it without modification.\n raise\n exc_message = str(e)\n # the google bigquery library likes to add the query log, which we\n # don't want to log. Hopefully they never change this!\n if BQ_QUERY_JOB_SPLIT in exc_message:\n exc_message = exc_message.split(BQ_QUERY_JOB_SPLIT)[0].strip()\n raise RuntimeException(exc_message)\n\n def cancel_open(self) -> None:\n pass\n\n @classmethod\n def close(cls, connection):\n connection.state = ConnectionState.CLOSED\n\n return connection\n\n def begin(self):\n pass\n\n def commit(self):\n pass\n\n @classmethod\n def get_bigquery_credentials(cls, profile_credentials):\n method = profile_credentials.method\n creds = GoogleServiceAccountCredentials.Credentials\n\n if method == BigQueryConnectionMethod.OAUTH:\n credentials, _ = get_bigquery_defaults(scopes=cls.SCOPE)\n return credentials\n\n elif method == BigQueryConnectionMethod.SERVICE_ACCOUNT:\n keyfile = profile_credentials.keyfile\n return creds.from_service_account_file(keyfile, scopes=cls.SCOPE)\n\n elif method == BigQueryConnectionMethod.SERVICE_ACCOUNT_JSON:\n details = profile_credentials.keyfile_json\n return creds.from_service_account_info(details, scopes=cls.SCOPE)\n\n elif method == BigQueryConnectionMethod.OAUTH_SECRETS:\n return GoogleCredentials.Credentials(\n token=profile_credentials.token,\n refresh_token=profile_credentials.refresh_token,\n client_id=profile_credentials.client_id,\n client_secret=profile_credentials.client_secret,\n token_uri=profile_credentials.token_uri,\n scopes=cls.SCOPE\n )\n\n error = ('Invalid `method` in profile: \"{}\"'.format(method))\n raise FailedToConnectException(error)\n\n @classmethod\n def get_impersonated_bigquery_credentials(cls, profile_credentials):\n source_credentials = cls.get_bigquery_credentials(profile_credentials)\n return impersonated_credentials.Credentials(\n source_credentials=source_credentials,\n target_principal=profile_credentials.impersonate_service_account,\n target_scopes=list(cls.SCOPE),\n lifetime=profile_credentials.timeout_seconds,\n )\n\n @classmethod\n def get_bigquery_client(cls, profile_credentials):\n if profile_credentials.impersonate_service_account:\n creds =\\\n cls.get_impersonated_bigquery_credentials(profile_credentials)\n else:\n creds = cls.get_bigquery_credentials(profile_credentials)\n database = profile_credentials.database\n location = getattr(profile_credentials, 'location', None)\n\n info = client_info.ClientInfo(user_agent=f'dbt-{dbt_version}')\n return google.cloud.bigquery.Client(\n database,\n creds,\n location=location,\n client_info=info,\n )\n\n @classmethod\n def open(cls, connection):\n if connection.state == 'open':\n logger.debug('Connection is already open, skipping open.')\n return connection\n\n try:\n handle = cls.get_bigquery_client(connection.credentials)\n\n except google.auth.exceptions.DefaultCredentialsError:\n logger.info(\"Please log into GCP to continue\")\n gcloud.setup_default_credentials()\n\n handle = cls.get_bigquery_client(connection.credentials)\n\n except Exception as e:\n logger.debug(\"Got an error when attempting to create a bigquery \"\n \"client: '{}'\".format(e))\n\n connection.handle = None\n connection.state = 'fail'\n\n raise FailedToConnectException(str(e))\n\n connection.handle = handle\n connection.state = 'open'\n return connection\n\n @classmethod\n def get_timeout(cls, conn):\n credentials = conn.credentials\n return credentials.timeout_seconds\n\n @classmethod\n def get_retries(cls, conn) -> int:\n credentials = conn.credentials\n if credentials.retries is not None:\n return credentials.retries\n else:\n return 1\n\n @classmethod\n def get_table_from_response(cls, resp):\n column_names = [field.name for field in resp.schema]\n return agate_helper.table_from_data_flat(resp, column_names)\n\n def raw_execute(self, sql, fetch=False, *, use_legacy_sql=False):\n conn = self.get_thread_connection()\n client = conn.handle\n\n logger.debug('On {}: {}', conn.name, sql)\n\n if self.profile.query_comment.job_label:\n query_comment = self.query_header.comment.query_comment\n labels = self._labels_from_query_comment(query_comment)\n else:\n labels = {}\n\n if active_user:\n labels['dbt_invocation_id'] = active_user.invocation_id\n\n job_params = {'use_legacy_sql': use_legacy_sql, 'labels': labels}\n\n priority = conn.credentials.priority\n if priority == Priority.Batch:\n job_params['priority'] = google.cloud.bigquery.QueryPriority.BATCH\n else:\n job_params[\n 'priority'] = google.cloud.bigquery.QueryPriority.INTERACTIVE\n\n maximum_bytes_billed = conn.credentials.maximum_bytes_billed\n if maximum_bytes_billed is not None and maximum_bytes_billed != 0:\n job_params['maximum_bytes_billed'] = maximum_bytes_billed\n\n def fn():\n return self._query_and_results(client, sql, conn, job_params)\n\n query_job, iterator = self._retry_and_handle(msg=sql, conn=conn, fn=fn)\n\n return query_job, iterator\n\n def execute(\n self, sql, auto_begin=False, fetch=None\n ) -> Tuple[BigQueryAdapterResponse, agate.Table]:\n sql = self._add_query_comment(sql)\n # auto_begin is ignored on bigquery, and only included for consistency\n query_job, iterator = self.raw_execute(sql, fetch=fetch)\n\n if fetch:\n table = self.get_table_from_response(iterator)\n else:\n table = agate_helper.empty_table()\n\n message = 'OK'\n code = None\n num_rows = None\n bytes_processed = None\n\n if query_job.statement_type == 'CREATE_VIEW':\n code = 'CREATE VIEW'\n\n elif query_job.statement_type == 'CREATE_TABLE_AS_SELECT':\n conn = self.get_thread_connection()\n client = conn.handle\n query_table = client.get_table(query_job.destination)\n code = 'CREATE TABLE'\n num_rows = query_table.num_rows\n bytes_processed = query_job.total_bytes_processed\n message = '{} ({} rows, {} processed)'.format(\n code,\n format_rows_number(num_rows),\n format_bytes(bytes_processed)\n )\n\n elif query_job.statement_type == 'SCRIPT':\n code = 'SCRIPT'\n bytes_processed = query_job.total_bytes_processed\n message = f'{code} ({format_bytes(bytes_processed)} processed)'\n\n elif query_job.statement_type in ['INSERT', 'DELETE', 'MERGE']:\n code = query_job.statement_type\n num_rows = query_job.num_dml_affected_rows\n bytes_processed = query_job.total_bytes_processed\n message = '{} ({} rows, {} processed)'.format(\n code,\n format_rows_number(num_rows),\n format_bytes(bytes_processed),\n )\n\n response = BigQueryAdapterResponse(\n _message=message,\n rows_affected=num_rows,\n code=code,\n bytes_processed=bytes_processed\n )\n\n return response, table\n\n def get_partitions_metadata(self, table):\n def standard_to_legacy(table):\n return table.project + ':' + table.dataset + '.' + table.identifier\n\n legacy_sql = 'SELECT * FROM ['\\\n + standard_to_legacy(table) + '$__PARTITIONS_SUMMARY__]'\n\n sql = self._add_query_comment(legacy_sql)\n # auto_begin is ignored on bigquery, and only included for consistency\n _, iterator =\\\n self.raw_execute(sql, fetch='fetch_result', use_legacy_sql=True)\n return self.get_table_from_response(iterator)\n\n def create_bigquery_table(self, database, schema, table_name, callback,\n sql):\n \"\"\"Create a bigquery table. The caller must supply a callback\n that takes one argument, a `google.cloud.bigquery.Table`, and mutates\n it.\n \"\"\"\n conn = self.get_thread_connection()\n client = conn.handle\n\n view_ref = self.table_ref(database, schema, table_name, conn)\n view = google.cloud.bigquery.Table(view_ref)\n callback(view)\n\n def fn():\n return client.create_table(view)\n self._retry_and_handle(msg=sql, conn=conn, fn=fn)\n\n def create_view(self, database, schema, table_name, sql):\n def callback(table):\n table.view_query = sql\n table.view_use_legacy_sql = False\n\n self.create_bigquery_table(database, schema, table_name, callback, sql)\n\n def create_table(self, database, schema, table_name, sql):\n conn = self.get_thread_connection()\n client = conn.handle\n\n table_ref = self.table_ref(database, schema, table_name, conn)\n job_params = {'destination': table_ref,\n 'write_disposition': WRITE_TRUNCATE}\n\n timeout = self.get_timeout(conn)\n\n def fn():\n return self._query_and_results(client, sql, conn, job_params,\n timeout=timeout)\n self._retry_and_handle(msg=sql, conn=conn, fn=fn)\n\n def create_date_partitioned_table(self, database, schema, table_name):\n def callback(table):\n table.partitioning_type = 'DAY'\n\n self.create_bigquery_table(database, schema, table_name, callback,\n 'CREATE DAY PARTITIONED TABLE')\n\n def copy_bq_table(self, source, destination, write_disposition):\n conn = self.get_thread_connection()\n client = conn.handle\n\n source_ref = self.table_ref(\n source.database, source.schema, source.table, conn)\n destination_ref = self.table_ref(\n destination.database, destination.schema, destination.table, conn)\n\n logger.debug(\n 'Copying table \"{}\" to \"{}\" with disposition: \"{}\"',\n source_ref.path, destination_ref.path, write_disposition)\n\n def copy_and_results():\n job_config = google.cloud.bigquery.CopyJobConfig(\n write_disposition=write_disposition)\n copy_job = client.copy_table(\n source_ref, destination_ref, job_config=job_config)\n iterator = copy_job.result(timeout=self.get_timeout(conn))\n return copy_job, iterator\n\n self._retry_and_handle(\n msg='copy table \"{}\" to \"{}\"'.format(\n source_ref.path, destination_ref.path),\n conn=conn, fn=copy_and_results)\n\n @staticmethod\n def dataset(database, schema, conn):\n dataset_ref = conn.handle.dataset(schema, database)\n return google.cloud.bigquery.Dataset(dataset_ref)\n\n @staticmethod\n def dataset_from_id(dataset_id):\n return google.cloud.bigquery.Dataset.from_string(dataset_id)\n\n def table_ref(self, database, schema, table_name, conn):\n dataset = self.dataset(database, schema, conn)\n return dataset.table(table_name)\n\n def get_bq_table(self, database, schema, identifier):\n \"\"\"Get a bigquery table for a schema/model.\"\"\"\n conn = self.get_thread_connection()\n table_ref = self.table_ref(database, schema, identifier, conn)\n return conn.handle.get_table(table_ref)\n\n def drop_dataset(self, database, schema):\n conn = self.get_thread_connection()\n dataset = self.dataset(database, schema, conn)\n client = conn.handle\n\n def fn():\n return client.delete_dataset(\n dataset, delete_contents=True, not_found_ok=True)\n\n self._retry_and_handle(\n msg='drop dataset', conn=conn, fn=fn)\n\n def create_dataset(self, database, schema):\n conn = self.get_thread_connection()\n client = conn.handle\n dataset = self.dataset(database, schema, conn)\n\n def fn():\n return client.create_dataset(dataset, exists_ok=True)\n self._retry_and_handle(msg='create dataset', conn=conn, fn=fn)\n\n def _query_and_results(self, client, sql, conn, job_params, timeout=None):\n \"\"\"Query the client and wait for results.\"\"\"\n # Cannot reuse job_config if destination is set and ddl is used\n job_config = google.cloud.bigquery.QueryJobConfig(**job_params)\n query_job = client.query(sql, job_config=job_config)\n iterator = query_job.result(timeout=timeout)\n\n return query_job, iterator\n\n def _retry_and_handle(self, msg, conn, fn):\n \"\"\"retry a function call within the context of exception_handler.\"\"\"\n def reopen_conn_on_error(error):\n if isinstance(error, REOPENABLE_ERRORS):\n logger.warning('Reopening connection after {!r}', error)\n self.close(conn)\n self.open(conn)\n return\n\n with self.exception_handler(msg):\n return retry.retry_target(\n target=fn,\n predicate=_ErrorCounter(self.get_retries(conn)).count_error,\n sleep_generator=self._retry_generator(),\n deadline=None,\n on_error=reopen_conn_on_error)\n\n def _retry_generator(self):\n \"\"\"Generates retry intervals that exponentially back off.\"\"\"\n return retry.exponential_sleep_generator(\n initial=self.DEFAULT_INITIAL_DELAY,\n maximum=self.DEFAULT_MAXIMUM_DELAY)\n\n def _labels_from_query_comment(self, comment: str) -> Dict:\n try:\n comment_labels = json.loads(comment)\n except (TypeError, ValueError):\n return {'query_comment': _sanitize_label(comment)}\n return {\n _sanitize_label(key): _sanitize_label(str(value))\n for key, value in comment_labels.items()\n }\n\n\nclass _ErrorCounter(object):\n \"\"\"Counts errors seen up to a threshold then raises the next error.\"\"\"\n\n def __init__(self, retries):\n self.retries = retries\n self.error_count = 0\n\n def count_error(self, error):\n if self.retries == 0:\n return False # Don't log\n self.error_count += 1\n if _is_retryable(error) and self.error_count <= self.retries:\n logger.debug(\n 'Retry attempt {} of {} after error: {}',\n self.error_count, self.retries, repr(error))\n return True\n else:\n return False\n\n\ndef _is_retryable(error):\n \"\"\"Return true for errors that are unlikely to occur again if retried.\"\"\"\n if isinstance(error, RETRYABLE_ERRORS):\n return True\n elif isinstance(error, google.api_core.exceptions.Forbidden) and any(\n e['reason'] == 'rateLimitExceeded' for e in error.errors):\n return True\n return False\n\n\n_SANITIZE_LABEL_PATTERN = re.compile(r\"[^a-z0-9_-]\")\n\n\ndef _sanitize_label(value: str) -> str:\n \"\"\"Return a legal value for a BigQuery label.\"\"\"\n value = value.strip().lower()\n value = _SANITIZE_LABEL_PATTERN.sub(\"_\", value)\n return value\n", "path": "plugins/bigquery/dbt/adapters/bigquery/connections.py"}], "after_files": [{"content": "import json\nimport re\nfrom contextlib import contextmanager\nfrom dataclasses import dataclass\nfrom functools import lru_cache\nimport agate\nfrom requests.exceptions import ConnectionError\nfrom typing import Optional, Any, Dict, Tuple\n\nimport google.auth\nimport google.auth.exceptions\nimport google.cloud.bigquery\nimport google.cloud.exceptions\nfrom google.api_core import retry, client_info\nfrom google.auth import impersonated_credentials\nfrom google.oauth2 import (\n credentials as GoogleCredentials,\n service_account as GoogleServiceAccountCredentials\n)\n\nfrom dbt.utils import format_bytes, format_rows_number\nfrom dbt.clients import agate_helper, gcloud\nfrom dbt.tracking import active_user\nfrom dbt.contracts.connection import ConnectionState, AdapterResponse\nfrom dbt.exceptions import (\n FailedToConnectException, RuntimeException, DatabaseException\n)\nfrom dbt.adapters.base import BaseConnectionManager, Credentials\nfrom dbt.logger import GLOBAL_LOGGER as logger\nfrom dbt.version import __version__ as dbt_version\n\nfrom dbt.dataclass_schema import StrEnum\n\n\nBQ_QUERY_JOB_SPLIT = '-----Query Job SQL Follows-----'\n\nWRITE_TRUNCATE = google.cloud.bigquery.job.WriteDisposition.WRITE_TRUNCATE\n\nREOPENABLE_ERRORS = (\n ConnectionResetError,\n ConnectionError,\n)\n\nRETRYABLE_ERRORS = (\n google.cloud.exceptions.ServerError,\n google.cloud.exceptions.BadRequest,\n ConnectionResetError,\n ConnectionError,\n)\n\n\n@lru_cache()\ndef get_bigquery_defaults(scopes=None) -> Tuple[Any, Optional[str]]:\n \"\"\"\n Returns (credentials, project_id)\n\n project_id is returned available from the environment; otherwise None\n \"\"\"\n # Cached, because the underlying implementation shells out, taking ~1s\n return google.auth.default(scopes=scopes)\n\n\nclass Priority(StrEnum):\n Interactive = 'interactive'\n Batch = 'batch'\n\n\nclass BigQueryConnectionMethod(StrEnum):\n OAUTH = 'oauth'\n SERVICE_ACCOUNT = 'service-account'\n SERVICE_ACCOUNT_JSON = 'service-account-json'\n OAUTH_SECRETS = 'oauth-secrets'\n\n\n@dataclass\nclass BigQueryAdapterResponse(AdapterResponse):\n bytes_processed: Optional[int] = None\n\n\n@dataclass\nclass BigQueryCredentials(Credentials):\n method: BigQueryConnectionMethod\n # BigQuery allows an empty database / project, where it defers to the\n # environment for the project\n database: Optional[str]\n timeout_seconds: Optional[int] = 300\n location: Optional[str] = None\n priority: Optional[Priority] = None\n retries: Optional[int] = 1\n maximum_bytes_billed: Optional[int] = None\n impersonate_service_account: Optional[str] = None\n\n # Keyfile json creds\n keyfile: Optional[str] = None\n keyfile_json: Optional[Dict[str, Any]] = None\n\n # oauth-secrets\n token: Optional[str] = None\n refresh_token: Optional[str] = None\n client_id: Optional[str] = None\n client_secret: Optional[str] = None\n token_uri: Optional[str] = None\n\n _ALIASES = {\n 'project': 'database',\n 'dataset': 'schema',\n }\n\n @property\n def type(self):\n return 'bigquery'\n\n def _connection_keys(self):\n return ('method', 'database', 'schema', 'location', 'priority',\n 'timeout_seconds', 'maximum_bytes_billed')\n\n @classmethod\n def __pre_deserialize__(cls, d: Dict[Any, Any]) -> Dict[Any, Any]:\n # We need to inject the correct value of the database (aka project) at\n # this stage, ref\n # https://github.com/fishtown-analytics/dbt/pull/2908#discussion_r532927436.\n\n # `database` is an alias of `project` in BigQuery\n if 'database' not in d:\n _, database = get_bigquery_defaults()\n d['database'] = database\n return d\n\n\nclass BigQueryConnectionManager(BaseConnectionManager):\n TYPE = 'bigquery'\n\n SCOPE = ('https://www.googleapis.com/auth/bigquery',\n 'https://www.googleapis.com/auth/cloud-platform',\n 'https://www.googleapis.com/auth/drive')\n\n QUERY_TIMEOUT = 300\n RETRIES = 1\n DEFAULT_INITIAL_DELAY = 1.0 # Seconds\n DEFAULT_MAXIMUM_DELAY = 1.0 # Seconds\n\n @classmethod\n def handle_error(cls, error, message):\n error_msg = \"\\n\".join([item['message'] for item in error.errors])\n raise DatabaseException(error_msg)\n\n def clear_transaction(self):\n pass\n\n @contextmanager\n def exception_handler(self, sql):\n try:\n yield\n\n except google.cloud.exceptions.BadRequest as e:\n message = \"Bad request while running query\"\n self.handle_error(e, message)\n\n except google.cloud.exceptions.Forbidden as e:\n message = \"Access denied while running query\"\n self.handle_error(e, message)\n\n except google.auth.exceptions.RefreshError as e:\n message = \"Unable to generate access token, if you're using \" \\\n \"impersonate_service_account, make sure your \" \\\n 'initial account has the \"roles/' \\\n 'iam.serviceAccountTokenCreator\" role on the ' \\\n 'account you are trying to impersonate.\\n\\n' \\\n f'{str(e)}'\n raise RuntimeException(message)\n\n except Exception as e:\n logger.debug(\"Unhandled error while running:\\n{}\".format(sql))\n logger.debug(e)\n if isinstance(e, RuntimeException):\n # during a sql query, an internal to dbt exception was raised.\n # this sounds a lot like a signal handler and probably has\n # useful information, so raise it without modification.\n raise\n exc_message = str(e)\n # the google bigquery library likes to add the query log, which we\n # don't want to log. Hopefully they never change this!\n if BQ_QUERY_JOB_SPLIT in exc_message:\n exc_message = exc_message.split(BQ_QUERY_JOB_SPLIT)[0].strip()\n raise RuntimeException(exc_message)\n\n def cancel_open(self) -> None:\n pass\n\n @classmethod\n def close(cls, connection):\n connection.state = ConnectionState.CLOSED\n\n return connection\n\n def begin(self):\n pass\n\n def commit(self):\n pass\n\n @classmethod\n def get_bigquery_credentials(cls, profile_credentials):\n method = profile_credentials.method\n creds = GoogleServiceAccountCredentials.Credentials\n\n if method == BigQueryConnectionMethod.OAUTH:\n credentials, _ = get_bigquery_defaults(scopes=cls.SCOPE)\n return credentials\n\n elif method == BigQueryConnectionMethod.SERVICE_ACCOUNT:\n keyfile = profile_credentials.keyfile\n return creds.from_service_account_file(keyfile, scopes=cls.SCOPE)\n\n elif method == BigQueryConnectionMethod.SERVICE_ACCOUNT_JSON:\n details = profile_credentials.keyfile_json\n return creds.from_service_account_info(details, scopes=cls.SCOPE)\n\n elif method == BigQueryConnectionMethod.OAUTH_SECRETS:\n return GoogleCredentials.Credentials(\n token=profile_credentials.token,\n refresh_token=profile_credentials.refresh_token,\n client_id=profile_credentials.client_id,\n client_secret=profile_credentials.client_secret,\n token_uri=profile_credentials.token_uri,\n scopes=cls.SCOPE\n )\n\n error = ('Invalid `method` in profile: \"{}\"'.format(method))\n raise FailedToConnectException(error)\n\n @classmethod\n def get_impersonated_bigquery_credentials(cls, profile_credentials):\n source_credentials = cls.get_bigquery_credentials(profile_credentials)\n return impersonated_credentials.Credentials(\n source_credentials=source_credentials,\n target_principal=profile_credentials.impersonate_service_account,\n target_scopes=list(cls.SCOPE),\n lifetime=profile_credentials.timeout_seconds,\n )\n\n @classmethod\n def get_bigquery_client(cls, profile_credentials):\n if profile_credentials.impersonate_service_account:\n creds =\\\n cls.get_impersonated_bigquery_credentials(profile_credentials)\n else:\n creds = cls.get_bigquery_credentials(profile_credentials)\n database = profile_credentials.database\n location = getattr(profile_credentials, 'location', None)\n\n info = client_info.ClientInfo(user_agent=f'dbt-{dbt_version}')\n return google.cloud.bigquery.Client(\n database,\n creds,\n location=location,\n client_info=info,\n )\n\n @classmethod\n def open(cls, connection):\n if connection.state == 'open':\n logger.debug('Connection is already open, skipping open.')\n return connection\n\n try:\n handle = cls.get_bigquery_client(connection.credentials)\n\n except google.auth.exceptions.DefaultCredentialsError:\n logger.info(\"Please log into GCP to continue\")\n gcloud.setup_default_credentials()\n\n handle = cls.get_bigquery_client(connection.credentials)\n\n except Exception as e:\n logger.debug(\"Got an error when attempting to create a bigquery \"\n \"client: '{}'\".format(e))\n\n connection.handle = None\n connection.state = 'fail'\n\n raise FailedToConnectException(str(e))\n\n connection.handle = handle\n connection.state = 'open'\n return connection\n\n @classmethod\n def get_timeout(cls, conn):\n credentials = conn.credentials\n return credentials.timeout_seconds\n\n @classmethod\n def get_retries(cls, conn) -> int:\n credentials = conn.credentials\n if credentials.retries is not None:\n return credentials.retries\n else:\n return 1\n\n @classmethod\n def get_table_from_response(cls, resp):\n column_names = [field.name for field in resp.schema]\n return agate_helper.table_from_data_flat(resp, column_names)\n\n def raw_execute(self, sql, fetch=False, *, use_legacy_sql=False):\n conn = self.get_thread_connection()\n client = conn.handle\n\n logger.debug('On {}: {}', conn.name, sql)\n\n if self.profile.query_comment and self.profile.query_comment.job_label:\n query_comment = self.query_header.comment.query_comment\n labels = self._labels_from_query_comment(query_comment)\n else:\n labels = {}\n\n if active_user:\n labels['dbt_invocation_id'] = active_user.invocation_id\n\n job_params = {'use_legacy_sql': use_legacy_sql, 'labels': labels}\n\n priority = conn.credentials.priority\n if priority == Priority.Batch:\n job_params['priority'] = google.cloud.bigquery.QueryPriority.BATCH\n else:\n job_params[\n 'priority'] = google.cloud.bigquery.QueryPriority.INTERACTIVE\n\n maximum_bytes_billed = conn.credentials.maximum_bytes_billed\n if maximum_bytes_billed is not None and maximum_bytes_billed != 0:\n job_params['maximum_bytes_billed'] = maximum_bytes_billed\n\n def fn():\n return self._query_and_results(client, sql, conn, job_params)\n\n query_job, iterator = self._retry_and_handle(msg=sql, conn=conn, fn=fn)\n\n return query_job, iterator\n\n def execute(\n self, sql, auto_begin=False, fetch=None\n ) -> Tuple[BigQueryAdapterResponse, agate.Table]:\n sql = self._add_query_comment(sql)\n # auto_begin is ignored on bigquery, and only included for consistency\n query_job, iterator = self.raw_execute(sql, fetch=fetch)\n\n if fetch:\n table = self.get_table_from_response(iterator)\n else:\n table = agate_helper.empty_table()\n\n message = 'OK'\n code = None\n num_rows = None\n bytes_processed = None\n\n if query_job.statement_type == 'CREATE_VIEW':\n code = 'CREATE VIEW'\n\n elif query_job.statement_type == 'CREATE_TABLE_AS_SELECT':\n conn = self.get_thread_connection()\n client = conn.handle\n query_table = client.get_table(query_job.destination)\n code = 'CREATE TABLE'\n num_rows = query_table.num_rows\n bytes_processed = query_job.total_bytes_processed\n message = '{} ({} rows, {} processed)'.format(\n code,\n format_rows_number(num_rows),\n format_bytes(bytes_processed)\n )\n\n elif query_job.statement_type == 'SCRIPT':\n code = 'SCRIPT'\n bytes_processed = query_job.total_bytes_processed\n message = f'{code} ({format_bytes(bytes_processed)} processed)'\n\n elif query_job.statement_type in ['INSERT', 'DELETE', 'MERGE']:\n code = query_job.statement_type\n num_rows = query_job.num_dml_affected_rows\n bytes_processed = query_job.total_bytes_processed\n message = '{} ({} rows, {} processed)'.format(\n code,\n format_rows_number(num_rows),\n format_bytes(bytes_processed),\n )\n\n response = BigQueryAdapterResponse(\n _message=message,\n rows_affected=num_rows,\n code=code,\n bytes_processed=bytes_processed\n )\n\n return response, table\n\n def get_partitions_metadata(self, table):\n def standard_to_legacy(table):\n return table.project + ':' + table.dataset + '.' + table.identifier\n\n legacy_sql = 'SELECT * FROM ['\\\n + standard_to_legacy(table) + '$__PARTITIONS_SUMMARY__]'\n\n sql = self._add_query_comment(legacy_sql)\n # auto_begin is ignored on bigquery, and only included for consistency\n _, iterator =\\\n self.raw_execute(sql, fetch='fetch_result', use_legacy_sql=True)\n return self.get_table_from_response(iterator)\n\n def create_bigquery_table(self, database, schema, table_name, callback,\n sql):\n \"\"\"Create a bigquery table. The caller must supply a callback\n that takes one argument, a `google.cloud.bigquery.Table`, and mutates\n it.\n \"\"\"\n conn = self.get_thread_connection()\n client = conn.handle\n\n view_ref = self.table_ref(database, schema, table_name, conn)\n view = google.cloud.bigquery.Table(view_ref)\n callback(view)\n\n def fn():\n return client.create_table(view)\n self._retry_and_handle(msg=sql, conn=conn, fn=fn)\n\n def create_view(self, database, schema, table_name, sql):\n def callback(table):\n table.view_query = sql\n table.view_use_legacy_sql = False\n\n self.create_bigquery_table(database, schema, table_name, callback, sql)\n\n def create_table(self, database, schema, table_name, sql):\n conn = self.get_thread_connection()\n client = conn.handle\n\n table_ref = self.table_ref(database, schema, table_name, conn)\n job_params = {'destination': table_ref,\n 'write_disposition': WRITE_TRUNCATE}\n\n timeout = self.get_timeout(conn)\n\n def fn():\n return self._query_and_results(client, sql, conn, job_params,\n timeout=timeout)\n self._retry_and_handle(msg=sql, conn=conn, fn=fn)\n\n def create_date_partitioned_table(self, database, schema, table_name):\n def callback(table):\n table.partitioning_type = 'DAY'\n\n self.create_bigquery_table(database, schema, table_name, callback,\n 'CREATE DAY PARTITIONED TABLE')\n\n def copy_bq_table(self, source, destination, write_disposition):\n conn = self.get_thread_connection()\n client = conn.handle\n\n source_ref = self.table_ref(\n source.database, source.schema, source.table, conn)\n destination_ref = self.table_ref(\n destination.database, destination.schema, destination.table, conn)\n\n logger.debug(\n 'Copying table \"{}\" to \"{}\" with disposition: \"{}\"',\n source_ref.path, destination_ref.path, write_disposition)\n\n def copy_and_results():\n job_config = google.cloud.bigquery.CopyJobConfig(\n write_disposition=write_disposition)\n copy_job = client.copy_table(\n source_ref, destination_ref, job_config=job_config)\n iterator = copy_job.result(timeout=self.get_timeout(conn))\n return copy_job, iterator\n\n self._retry_and_handle(\n msg='copy table \"{}\" to \"{}\"'.format(\n source_ref.path, destination_ref.path),\n conn=conn, fn=copy_and_results)\n\n @staticmethod\n def dataset(database, schema, conn):\n dataset_ref = conn.handle.dataset(schema, database)\n return google.cloud.bigquery.Dataset(dataset_ref)\n\n @staticmethod\n def dataset_from_id(dataset_id):\n return google.cloud.bigquery.Dataset.from_string(dataset_id)\n\n def table_ref(self, database, schema, table_name, conn):\n dataset = self.dataset(database, schema, conn)\n return dataset.table(table_name)\n\n def get_bq_table(self, database, schema, identifier):\n \"\"\"Get a bigquery table for a schema/model.\"\"\"\n conn = self.get_thread_connection()\n table_ref = self.table_ref(database, schema, identifier, conn)\n return conn.handle.get_table(table_ref)\n\n def drop_dataset(self, database, schema):\n conn = self.get_thread_connection()\n dataset = self.dataset(database, schema, conn)\n client = conn.handle\n\n def fn():\n return client.delete_dataset(\n dataset, delete_contents=True, not_found_ok=True)\n\n self._retry_and_handle(\n msg='drop dataset', conn=conn, fn=fn)\n\n def create_dataset(self, database, schema):\n conn = self.get_thread_connection()\n client = conn.handle\n dataset = self.dataset(database, schema, conn)\n\n def fn():\n return client.create_dataset(dataset, exists_ok=True)\n self._retry_and_handle(msg='create dataset', conn=conn, fn=fn)\n\n def _query_and_results(self, client, sql, conn, job_params, timeout=None):\n \"\"\"Query the client and wait for results.\"\"\"\n # Cannot reuse job_config if destination is set and ddl is used\n job_config = google.cloud.bigquery.QueryJobConfig(**job_params)\n query_job = client.query(sql, job_config=job_config)\n iterator = query_job.result(timeout=timeout)\n\n return query_job, iterator\n\n def _retry_and_handle(self, msg, conn, fn):\n \"\"\"retry a function call within the context of exception_handler.\"\"\"\n def reopen_conn_on_error(error):\n if isinstance(error, REOPENABLE_ERRORS):\n logger.warning('Reopening connection after {!r}', error)\n self.close(conn)\n self.open(conn)\n return\n\n with self.exception_handler(msg):\n return retry.retry_target(\n target=fn,\n predicate=_ErrorCounter(self.get_retries(conn)).count_error,\n sleep_generator=self._retry_generator(),\n deadline=None,\n on_error=reopen_conn_on_error)\n\n def _retry_generator(self):\n \"\"\"Generates retry intervals that exponentially back off.\"\"\"\n return retry.exponential_sleep_generator(\n initial=self.DEFAULT_INITIAL_DELAY,\n maximum=self.DEFAULT_MAXIMUM_DELAY)\n\n def _labels_from_query_comment(self, comment: str) -> Dict:\n try:\n comment_labels = json.loads(comment)\n except (TypeError, ValueError):\n return {'query_comment': _sanitize_label(comment)}\n return {\n _sanitize_label(key): _sanitize_label(str(value))\n for key, value in comment_labels.items()\n }\n\n\nclass _ErrorCounter(object):\n \"\"\"Counts errors seen up to a threshold then raises the next error.\"\"\"\n\n def __init__(self, retries):\n self.retries = retries\n self.error_count = 0\n\n def count_error(self, error):\n if self.retries == 0:\n return False # Don't log\n self.error_count += 1\n if _is_retryable(error) and self.error_count <= self.retries:\n logger.debug(\n 'Retry attempt {} of {} after error: {}',\n self.error_count, self.retries, repr(error))\n return True\n else:\n return False\n\n\ndef _is_retryable(error):\n \"\"\"Return true for errors that are unlikely to occur again if retried.\"\"\"\n if isinstance(error, RETRYABLE_ERRORS):\n return True\n elif isinstance(error, google.api_core.exceptions.Forbidden) and any(\n e['reason'] == 'rateLimitExceeded' for e in error.errors):\n return True\n return False\n\n\n_SANITIZE_LABEL_PATTERN = re.compile(r\"[^a-z0-9_-]\")\n\n\ndef _sanitize_label(value: str) -> str:\n \"\"\"Return a legal value for a BigQuery label.\"\"\"\n value = value.strip().lower()\n value = _SANITIZE_LABEL_PATTERN.sub(\"_\", value)\n return value\n", "path": "plugins/bigquery/dbt/adapters/bigquery/connections.py"}]} |
gh_patches_debug_1511 | rasdani/github-patches | git_diff | dj-stripe__dj-stripe-980 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PaymentMethod detachment doesn't work because it doesn't unset customer
**Describe the bug**
I'm using 4a828a48092a3904094917776ce725ec9aa3fce5 (after the #914 merge) and trying to set up an SCA complaint flow.
As I said in #941, the events don't sync this model. Though even if I sync it manually (using sync_from_stripe_data), it doesn't work correctly for the detached payment_method case. The customer isn't actually detached (the content of the stripe event is `customer=null`)
**To Reproduce**
Steps to reproduce the behavior:
1. Detach a payment method from the stripe api
2. Try to sync the payment method object: `payment_method.sync_from_stripe_data(payment_method.api_retrieve())`
3. Observe that the customer association isn't removed even though stripe returns `customer=null`.
**Expected behavior**
The payment method should get detached from the customer.
Example event:
```
{
"object": {
"id": "pm_1F8uMTHa6wE0PhFmIrAlCMuB",
"object": "payment_method",
"billing_details": {
"address": {
"city": null,
"country": "GB",
"line1": null,
"line2": null,
"postal_code": "W1",
"state": null
},
"email": "[email protected]",
"name": "Jeo",
"phone": null
},
"card": {
"brand": "visa",
"checks": {
"address_line1_check": null,
"address_postal_code_check": "pass",
"cvc_check": "pass"
},
"country": "US",
"exp_month": 2,
"exp_year": 2022,
"fingerprint": "No6qZ6uMjc9xCthT",
"funding": "credit",
"generated_from": null,
"last4": "4242",
"three_d_secure_usage": {
"supported": true
},
"wallet": null
},
"created": 1566157701,
"customer": null,
"livemode": false,
"metadata": {
},
"type": "card"
},
"previous_attributes": {
"customer": "cus_FXgo0IlbgH2cQS"
}
}```
**Environment**
- dj-stripe version: master at 4a828a48092a3904094917776ce725ec9aa3fce5
- Your Stripe account's default API version: [e.g. 2019-02-19 - shown as "default" on https://dashboard.stripe.com/developers]
- Database: Postgres
- Python version: 3.7.4
- Django version: 2.2.4
**Can you reproduce the issue with the latest version of master?**
Yes
**Additional context**
Add any other context about the problem here.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `djstripe/models/payment_methods.py`
Content:
```
1 import stripe
2 from django.db import models, transaction
3 from stripe.error import InvalidRequestError
4
5 from .. import enums
6 from .. import settings as djstripe_settings
7 from ..exceptions import StripeObjectManipulationException
8 from ..fields import (
9 JSONField,
10 StripeCurrencyCodeField,
11 StripeDecimalCurrencyAmountField,
12 StripeEnumField,
13 )
14 from .base import StripeModel, logger
15 from .core import Customer
16
17
18 class DjstripePaymentMethod(models.Model):
19 """
20 An internal model that abstracts the legacy Card and BankAccount
21 objects with Source objects.
22
23 Contains two fields: `id` and `type`:
24 - `id` is the id of the Stripe object.
25 - `type` can be `card`, `bank_account` or `source`.
26 """
27
28 id = models.CharField(max_length=255, primary_key=True)
29 type = models.CharField(max_length=12, db_index=True)
30
31 @classmethod
32 def from_stripe_object(cls, data):
33 source_type = data["object"]
34 model = cls._model_for_type(source_type)
35
36 with transaction.atomic():
37 model.sync_from_stripe_data(data)
38 instance, _ = cls.objects.get_or_create(
39 id=data["id"], defaults={"type": source_type}
40 )
41
42 return instance
43
44 @classmethod
45 def _get_or_create_source(cls, data, source_type):
46 try:
47 model = cls._model_for_type(source_type)
48 model._get_or_create_from_stripe_object(data)
49 except ValueError as e:
50 # This may happen if we have source types we don't know about.
51 # Let's not make dj-stripe entirely unusable if that happens.
52 logger.warning("Could not sync source of type %r: %s", source_type, e)
53
54 return cls.objects.get_or_create(id=data["id"], defaults={"type": source_type})
55
56 @classmethod
57 def _model_for_type(cls, type):
58 if type == "card":
59 return Card
60 elif type == "source":
61 return Source
62 elif type == "bank_account":
63 return BankAccount
64
65 raise ValueError("Unknown source type: {}".format(type))
66
67 @property
68 def object_model(self):
69 return self._model_for_type(self.type)
70
71 def resolve(self):
72 return self.object_model.objects.get(id=self.id)
73
74
75 class LegacySourceMixin:
76 """
77 Mixin for functionality shared between the legacy Card & BankAccount sources
78 """
79
80 @classmethod
81 def _get_customer_from_kwargs(cls, **kwargs):
82 if "customer" not in kwargs or not isinstance(kwargs["customer"], Customer):
83 raise StripeObjectManipulationException(
84 "{}s must be manipulated through a Customer. "
85 "Pass a Customer object into this call.".format(cls.__name__)
86 )
87
88 customer = kwargs["customer"]
89 del kwargs["customer"]
90
91 return customer, kwargs
92
93 @classmethod
94 def _api_create(cls, api_key=djstripe_settings.STRIPE_SECRET_KEY, **kwargs):
95 # OVERRIDING the parent version of this function
96 # Cards & Bank Accounts must be manipulated through a customer or account.
97 # TODO: When managed accounts are supported, this method needs to
98 # check if either a customer or account is supplied to determine
99 # the correct object to use.
100
101 customer, clean_kwargs = cls._get_customer_from_kwargs(**kwargs)
102
103 return customer.api_retrieve().sources.create(api_key=api_key, **clean_kwargs)
104
105 @classmethod
106 def api_list(cls, api_key=djstripe_settings.STRIPE_SECRET_KEY, **kwargs):
107 # OVERRIDING the parent version of this function
108 # Cards & Bank Accounts must be manipulated through a customer or account.
109 # TODO: When managed accounts are supported, this method needs to
110 # check if either a customer or account is supplied to determine
111 # the correct object to use.
112
113 customer, clean_kwargs = cls._get_customer_from_kwargs(**kwargs)
114
115 return (
116 customer.api_retrieve(api_key=api_key)
117 .sources.list(object=cls.stripe_class.OBJECT_NAME, **clean_kwargs)
118 .auto_paging_iter()
119 )
120
121 def get_stripe_dashboard_url(self):
122 return self.customer.get_stripe_dashboard_url()
123
124 def remove(self):
125 """
126 Removes a legacy source from this customer's account.
127 """
128
129 # First, wipe default source on all customers that use this card.
130 Customer.objects.filter(default_source=self.id).update(default_source=None)
131
132 try:
133 self._api_delete()
134 except InvalidRequestError as exc:
135 if "No such source:" in str(exc) or "No such customer:" in str(exc):
136 # The exception was thrown because the stripe customer or card
137 # was already deleted on the stripe side, ignore the exception
138 pass
139 else:
140 # The exception was raised for another reason, re-raise it
141 raise
142
143 self.delete()
144
145 def api_retrieve(self, api_key=None):
146 # OVERRIDING the parent version of this function
147 # Cards & Banks Accounts must be manipulated through a customer or account.
148 # TODO: When managed accounts are supported, this method needs to check if
149 # either a customer or account is supplied to determine the
150 # correct object to use.
151 api_key = api_key or self.default_api_key
152 customer = self.customer.api_retrieve(api_key=api_key)
153
154 # If the customer is deleted, the sources attribute will be absent.
155 # eg. {"id": "cus_XXXXXXXX", "deleted": True}
156 if "sources" not in customer:
157 # We fake a native stripe InvalidRequestError so that it's caught
158 # like an invalid ID error.
159 raise InvalidRequestError("No such source: %s" % (self.id), "id")
160
161 return customer.sources.retrieve(self.id, expand=self.expand_fields)
162
163
164 class BankAccount(LegacySourceMixin, StripeModel):
165 stripe_class = stripe.BankAccount
166
167 account = models.ForeignKey(
168 "Account",
169 on_delete=models.PROTECT,
170 null=True,
171 blank=True,
172 related_name="bank_account",
173 help_text="The account the charge was made on behalf of. Null here indicates "
174 "that this value was never set.",
175 )
176 account_holder_name = models.TextField(
177 max_length=5000,
178 default="",
179 blank=True,
180 help_text="The name of the person or business that owns the bank account.",
181 )
182 account_holder_type = StripeEnumField(
183 enum=enums.BankAccountHolderType,
184 help_text="The type of entity that holds the account.",
185 )
186 bank_name = models.CharField(
187 max_length=255,
188 help_text="Name of the bank associated with the routing number "
189 "(e.g., `WELLS FARGO`).",
190 )
191 country = models.CharField(
192 max_length=2,
193 help_text="Two-letter ISO code representing the country the bank account "
194 "is located in.",
195 )
196 currency = StripeCurrencyCodeField()
197 customer = models.ForeignKey(
198 "Customer", on_delete=models.SET_NULL, null=True, related_name="bank_account"
199 )
200 default_for_currency = models.NullBooleanField(
201 help_text="Whether this external account is the default account for "
202 "its currency."
203 )
204 fingerprint = models.CharField(
205 max_length=16,
206 help_text=(
207 "Uniquely identifies this particular bank account. "
208 "You can use this attribute to check whether two bank accounts are "
209 "the same."
210 ),
211 )
212 last4 = models.CharField(max_length=4)
213 routing_number = models.CharField(
214 max_length=255, help_text="The routing transit number for the bank account."
215 )
216 status = StripeEnumField(enum=enums.BankAccountStatus)
217
218
219 class Card(LegacySourceMixin, StripeModel):
220 """
221 You can store multiple cards on a customer in order to charge the customer later.
222
223 This is a legacy model which only applies to the "v2" Stripe API (eg. Checkout.js).
224 You should strive to use the Stripe "v3" API (eg. Stripe Elements).
225 Also see: https://stripe.com/docs/stripe-js/elements/migrating
226 When using Elements, you will not be using Card objects. Instead, you will use
227 Source objects.
228 A Source object of type "card" is equivalent to a Card object. However, Card
229 objects cannot be converted into Source objects by Stripe at this time.
230
231 Stripe documentation: https://stripe.com/docs/api/python#cards
232 """
233
234 stripe_class = stripe.Card
235
236 address_city = models.TextField(
237 max_length=5000,
238 blank=True,
239 default="",
240 help_text="City/District/Suburb/Town/Village.",
241 )
242 address_country = models.TextField(
243 max_length=5000, blank=True, default="", help_text="Billing address country."
244 )
245 address_line1 = models.TextField(
246 max_length=5000,
247 blank=True,
248 default="",
249 help_text="Street address/PO Box/Company name.",
250 )
251 address_line1_check = StripeEnumField(
252 enum=enums.CardCheckResult,
253 blank=True,
254 default="",
255 help_text="If `address_line1` was provided, results of the check.",
256 )
257 address_line2 = models.TextField(
258 max_length=5000,
259 blank=True,
260 default="",
261 help_text="Apartment/Suite/Unit/Building.",
262 )
263 address_state = models.TextField(
264 max_length=5000,
265 blank=True,
266 default="",
267 help_text="State/County/Province/Region.",
268 )
269 address_zip = models.TextField(
270 max_length=5000, blank=True, default="", help_text="ZIP or postal code."
271 )
272 address_zip_check = StripeEnumField(
273 enum=enums.CardCheckResult,
274 blank=True,
275 default="",
276 help_text="If `address_zip` was provided, results of the check.",
277 )
278 brand = StripeEnumField(enum=enums.CardBrand, help_text="Card brand.")
279 country = models.CharField(
280 max_length=2,
281 default="",
282 blank=True,
283 help_text="Two-letter ISO code representing the country of the card.",
284 )
285 customer = models.ForeignKey(
286 "Customer", on_delete=models.SET_NULL, null=True, related_name="legacy_cards"
287 )
288 cvc_check = StripeEnumField(
289 enum=enums.CardCheckResult,
290 default="",
291 blank=True,
292 help_text="If a CVC was provided, results of the check.",
293 )
294 dynamic_last4 = models.CharField(
295 max_length=4,
296 default="",
297 blank=True,
298 help_text="(For tokenized numbers only.) The last four digits of the device "
299 "account number.",
300 )
301 exp_month = models.IntegerField(help_text="Card expiration month.")
302 exp_year = models.IntegerField(help_text="Card expiration year.")
303 fingerprint = models.CharField(
304 default="",
305 blank=True,
306 max_length=16,
307 help_text="Uniquely identifies this particular card number.",
308 )
309 funding = StripeEnumField(
310 enum=enums.CardFundingType, help_text="Card funding type."
311 )
312 last4 = models.CharField(max_length=4, help_text="Last four digits of Card number.")
313 name = models.TextField(
314 max_length=5000, default="", blank=True, help_text="Cardholder name."
315 )
316 tokenization_method = StripeEnumField(
317 enum=enums.CardTokenizationMethod,
318 default="",
319 blank=True,
320 help_text="If the card number is tokenized, this is the method that was used.",
321 )
322
323 def str_parts(self):
324 return [
325 "brand={brand}".format(brand=self.brand),
326 "last4={last4}".format(last4=self.last4),
327 "exp_month={exp_month}".format(exp_month=self.exp_month),
328 "exp_year={exp_year}".format(exp_year=self.exp_year),
329 ] + super().str_parts()
330
331 @classmethod
332 def create_token(
333 cls,
334 number,
335 exp_month,
336 exp_year,
337 cvc,
338 api_key=djstripe_settings.STRIPE_SECRET_KEY,
339 **kwargs
340 ):
341 """
342 Creates a single use token that wraps the details of a credit card.
343 This token can be used in place of a credit card dictionary with any API method.
344 These tokens can only be used once: by creating a new charge object,
345 or attaching them to a customer.
346 (Source: https://stripe.com/docs/api/python#create_card_token)
347
348 :param exp_month: The card's expiration month.
349 :type exp_month: Two digit int
350 :param exp_year: The card's expiration year.
351 :type exp_year: Two or Four digit int
352 :param number: The card number
353 :type number: string without any separators (no spaces)
354 :param cvc: Card security code.
355 :type cvc: string
356 """
357
358 card = {
359 "number": number,
360 "exp_month": exp_month,
361 "exp_year": exp_year,
362 "cvc": cvc,
363 }
364 card.update(kwargs)
365
366 return stripe.Token.create(api_key=api_key, card=card)
367
368
369 class Source(StripeModel):
370 """
371 Stripe documentation: https://stripe.com/docs/api#sources
372 """
373
374 amount = StripeDecimalCurrencyAmountField(
375 null=True,
376 blank=True,
377 help_text=(
378 "Amount associated with the source. "
379 "This is the amount for which the source will be chargeable once ready. "
380 "Required for `single_use` sources."
381 ),
382 )
383 client_secret = models.CharField(
384 max_length=255,
385 help_text=(
386 "The client secret of the source. "
387 "Used for client-side retrieval using a publishable key."
388 ),
389 )
390 currency = StripeCurrencyCodeField(default="", blank=True)
391 flow = StripeEnumField(
392 enum=enums.SourceFlow, help_text="The authentication flow of the source."
393 )
394 owner = JSONField(
395 help_text=(
396 "Information about the owner of the payment instrument that may be "
397 "used or required by particular source types."
398 )
399 )
400 statement_descriptor = models.CharField(
401 max_length=255,
402 default="",
403 blank=True,
404 help_text="Extra information about a source. This will appear on your "
405 "customer's statement every time you charge the source.",
406 )
407 status = StripeEnumField(
408 enum=enums.SourceStatus,
409 help_text="The status of the source. Only `chargeable` sources can be used "
410 "to create a charge.",
411 )
412 type = StripeEnumField(enum=enums.SourceType, help_text="The type of the source.")
413 usage = StripeEnumField(
414 enum=enums.SourceUsage,
415 help_text="Whether this source should be reusable or not. "
416 "Some source types may or may not be reusable by construction, "
417 "while other may leave the option at creation.",
418 )
419
420 # Flows
421 code_verification = JSONField(
422 null=True,
423 blank=True,
424 help_text="Information related to the code verification flow. "
425 "Present if the source is authenticated by a verification code "
426 "(`flow` is `code_verification`).",
427 )
428 receiver = JSONField(
429 null=True,
430 blank=True,
431 help_text="Information related to the receiver flow. "
432 "Present if the source is a receiver (`flow` is `receiver`).",
433 )
434 redirect = JSONField(
435 null=True,
436 blank=True,
437 help_text="Information related to the redirect flow. "
438 "Present if the source is authenticated by a redirect (`flow` is `redirect`).",
439 )
440
441 source_data = JSONField(help_text="The data corresponding to the source type.")
442
443 customer = models.ForeignKey(
444 "Customer",
445 on_delete=models.SET_NULL,
446 null=True,
447 blank=True,
448 related_name="sources",
449 )
450
451 stripe_class = stripe.Source
452 stripe_dashboard_item_name = "sources"
453
454 @classmethod
455 def _manipulate_stripe_object_hook(cls, data):
456 # The source_data dict is an alias of all the source types
457 data["source_data"] = data[data["type"]]
458 return data
459
460 def _attach_objects_hook(self, cls, data):
461 customer = cls._stripe_object_to_customer(target_cls=Customer, data=data)
462 if customer:
463 self.customer = customer
464 else:
465 self.customer = None
466
467 def detach(self):
468 """
469 Detach the source from its customer.
470 """
471
472 # First, wipe default source on all customers that use this.
473 Customer.objects.filter(default_source=self.id).update(default_source=None)
474
475 try:
476 # TODO - we could use the return value of sync_from_stripe_data
477 # or call its internals - self._sync/_attach_objects_hook etc here
478 # to update `self` at this point?
479 self.sync_from_stripe_data(self.api_retrieve().detach())
480 return True
481 except (InvalidRequestError, NotImplementedError):
482 # The source was already detached. Resyncing.
483 # NotImplementedError is an artifact of stripe-python<2.0
484 # https://github.com/stripe/stripe-python/issues/376
485 self.sync_from_stripe_data(self.api_retrieve())
486 return False
487
488
489 class PaymentMethod(StripeModel):
490 """
491 Stripe documentation: https://stripe.com/docs/api#payment_methods
492 """
493
494 billing_details = JSONField(
495 help_text=(
496 "Billing information associated with the PaymentMethod that may be used or "
497 "required by particular types of payment methods."
498 )
499 )
500 card = JSONField(
501 help_text="If this is a card PaymentMethod, this hash contains details "
502 "about the card."
503 )
504 card_present = JSONField(
505 null=True,
506 blank=True,
507 help_text="If this is an card_present PaymentMethod, this hash contains "
508 "details about the Card Present payment method.",
509 )
510 customer = models.ForeignKey(
511 "Customer",
512 on_delete=models.SET_NULL,
513 null=True,
514 blank=True,
515 related_name="payment_methods",
516 help_text="Customer to which this PaymentMethod is saved."
517 "This will not be set when the PaymentMethod has not been saved to a Customer.",
518 )
519 type = models.CharField(
520 max_length=255,
521 null=True,
522 blank=True,
523 help_text="The type of the PaymentMethod. An additional hash is included "
524 "on the PaymentMethod with a name matching this value. It contains additional "
525 "information specific to the PaymentMethod type.",
526 )
527
528 stripe_class = stripe.PaymentMethod
529
530 @classmethod
531 def attach(
532 cls, payment_method, customer, api_key=djstripe_settings.STRIPE_SECRET_KEY
533 ):
534 """
535 Attach a payment method to a customer
536 :param payment_method:
537 :type payment_method: str, PaymentMethod
538 :param customer:
539 :type customer: str, Customer
540 :param api_key:
541 :return:
542 """
543
544 if isinstance(payment_method, StripeModel):
545 payment_method = payment_method.id
546
547 if isinstance(customer, StripeModel):
548 customer = customer.id
549
550 extra_kwargs = {}
551 if not isinstance(payment_method, stripe.PaymentMethod):
552 # send api_key if we're not passing in a Stripe object
553 # avoids "Received unknown parameter: api_key" since api uses the
554 # key cached in the Stripe object
555 extra_kwargs = {"api_key": api_key}
556
557 stripe_payment_method = stripe.PaymentMethod.attach(
558 payment_method, customer=customer, **extra_kwargs
559 )
560 return cls.sync_from_stripe_data(stripe_payment_method)
561
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/djstripe/models/payment_methods.py b/djstripe/models/payment_methods.py
--- a/djstripe/models/payment_methods.py
+++ b/djstripe/models/payment_methods.py
@@ -527,6 +527,13 @@
stripe_class = stripe.PaymentMethod
+ def _attach_objects_hook(self, cls, data):
+ customer = cls._stripe_object_to_customer(target_cls=Customer, data=data)
+ if customer:
+ self.customer = customer
+ else:
+ self.customer = None
+
@classmethod
def attach(
cls, payment_method, customer, api_key=djstripe_settings.STRIPE_SECRET_KEY
| {"golden_diff": "diff --git a/djstripe/models/payment_methods.py b/djstripe/models/payment_methods.py\n--- a/djstripe/models/payment_methods.py\n+++ b/djstripe/models/payment_methods.py\n@@ -527,6 +527,13 @@\n \n stripe_class = stripe.PaymentMethod\n \n+ def _attach_objects_hook(self, cls, data):\n+ customer = cls._stripe_object_to_customer(target_cls=Customer, data=data)\n+ if customer:\n+ self.customer = customer\n+ else:\n+ self.customer = None\n+\n @classmethod\n def attach(\n cls, payment_method, customer, api_key=djstripe_settings.STRIPE_SECRET_KEY\n", "issue": "PaymentMethod detachment doesn't work because it doesn't unset customer\n**Describe the bug**\r\nI'm using 4a828a48092a3904094917776ce725ec9aa3fce5 (after the #914 merge) and trying to set up an SCA complaint flow.\r\nAs I said in #941, the events don't sync this model. Though even if I sync it manually (using sync_from_stripe_data), it doesn't work correctly for the detached payment_method case. The customer isn't actually detached (the content of the stripe event is `customer=null`)\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Detach a payment method from the stripe api\r\n2. Try to sync the payment method object: `payment_method.sync_from_stripe_data(payment_method.api_retrieve())`\r\n3. Observe that the customer association isn't removed even though stripe returns `customer=null`.\r\n\r\n**Expected behavior**\r\nThe payment method should get detached from the customer.\r\n\r\nExample event:\r\n```\r\n{\r\n \"object\": {\r\n \"id\": \"pm_1F8uMTHa6wE0PhFmIrAlCMuB\",\r\n \"object\": \"payment_method\",\r\n \"billing_details\": {\r\n \"address\": {\r\n \"city\": null,\r\n \"country\": \"GB\",\r\n \"line1\": null,\r\n \"line2\": null,\r\n \"postal_code\": \"W1\",\r\n \"state\": null\r\n },\r\n \"email\": \"[email protected]\",\r\n \"name\": \"Jeo\",\r\n \"phone\": null\r\n },\r\n \"card\": {\r\n \"brand\": \"visa\",\r\n \"checks\": {\r\n \"address_line1_check\": null,\r\n \"address_postal_code_check\": \"pass\",\r\n \"cvc_check\": \"pass\"\r\n },\r\n \"country\": \"US\",\r\n \"exp_month\": 2,\r\n \"exp_year\": 2022,\r\n \"fingerprint\": \"No6qZ6uMjc9xCthT\",\r\n \"funding\": \"credit\",\r\n \"generated_from\": null,\r\n \"last4\": \"4242\",\r\n \"three_d_secure_usage\": {\r\n \"supported\": true\r\n },\r\n \"wallet\": null\r\n },\r\n \"created\": 1566157701,\r\n \"customer\": null,\r\n \"livemode\": false,\r\n \"metadata\": {\r\n },\r\n \"type\": \"card\"\r\n },\r\n \"previous_attributes\": {\r\n \"customer\": \"cus_FXgo0IlbgH2cQS\"\r\n }\r\n}```\r\n\r\n**Environment**\r\n- dj-stripe version: master at 4a828a48092a3904094917776ce725ec9aa3fce5\r\n- Your Stripe account's default API version: [e.g. 2019-02-19 - shown as \"default\" on https://dashboard.stripe.com/developers]\r\n- Database: Postgres\r\n- Python version: 3.7.4\r\n- Django version: 2.2.4\r\n\r\n**Can you reproduce the issue with the latest version of master?**\r\n\r\nYes\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "import stripe\nfrom django.db import models, transaction\nfrom stripe.error import InvalidRequestError\n\nfrom .. import enums\nfrom .. import settings as djstripe_settings\nfrom ..exceptions import StripeObjectManipulationException\nfrom ..fields import (\n JSONField,\n StripeCurrencyCodeField,\n StripeDecimalCurrencyAmountField,\n StripeEnumField,\n)\nfrom .base import StripeModel, logger\nfrom .core import Customer\n\n\nclass DjstripePaymentMethod(models.Model):\n \"\"\"\n An internal model that abstracts the legacy Card and BankAccount\n objects with Source objects.\n\n Contains two fields: `id` and `type`:\n - `id` is the id of the Stripe object.\n - `type` can be `card`, `bank_account` or `source`.\n \"\"\"\n\n id = models.CharField(max_length=255, primary_key=True)\n type = models.CharField(max_length=12, db_index=True)\n\n @classmethod\n def from_stripe_object(cls, data):\n source_type = data[\"object\"]\n model = cls._model_for_type(source_type)\n\n with transaction.atomic():\n model.sync_from_stripe_data(data)\n instance, _ = cls.objects.get_or_create(\n id=data[\"id\"], defaults={\"type\": source_type}\n )\n\n return instance\n\n @classmethod\n def _get_or_create_source(cls, data, source_type):\n try:\n model = cls._model_for_type(source_type)\n model._get_or_create_from_stripe_object(data)\n except ValueError as e:\n # This may happen if we have source types we don't know about.\n # Let's not make dj-stripe entirely unusable if that happens.\n logger.warning(\"Could not sync source of type %r: %s\", source_type, e)\n\n return cls.objects.get_or_create(id=data[\"id\"], defaults={\"type\": source_type})\n\n @classmethod\n def _model_for_type(cls, type):\n if type == \"card\":\n return Card\n elif type == \"source\":\n return Source\n elif type == \"bank_account\":\n return BankAccount\n\n raise ValueError(\"Unknown source type: {}\".format(type))\n\n @property\n def object_model(self):\n return self._model_for_type(self.type)\n\n def resolve(self):\n return self.object_model.objects.get(id=self.id)\n\n\nclass LegacySourceMixin:\n \"\"\"\n Mixin for functionality shared between the legacy Card & BankAccount sources\n \"\"\"\n\n @classmethod\n def _get_customer_from_kwargs(cls, **kwargs):\n if \"customer\" not in kwargs or not isinstance(kwargs[\"customer\"], Customer):\n raise StripeObjectManipulationException(\n \"{}s must be manipulated through a Customer. \"\n \"Pass a Customer object into this call.\".format(cls.__name__)\n )\n\n customer = kwargs[\"customer\"]\n del kwargs[\"customer\"]\n\n return customer, kwargs\n\n @classmethod\n def _api_create(cls, api_key=djstripe_settings.STRIPE_SECRET_KEY, **kwargs):\n # OVERRIDING the parent version of this function\n # Cards & Bank Accounts must be manipulated through a customer or account.\n # TODO: When managed accounts are supported, this method needs to\n # check if either a customer or account is supplied to determine\n # the correct object to use.\n\n customer, clean_kwargs = cls._get_customer_from_kwargs(**kwargs)\n\n return customer.api_retrieve().sources.create(api_key=api_key, **clean_kwargs)\n\n @classmethod\n def api_list(cls, api_key=djstripe_settings.STRIPE_SECRET_KEY, **kwargs):\n # OVERRIDING the parent version of this function\n # Cards & Bank Accounts must be manipulated through a customer or account.\n # TODO: When managed accounts are supported, this method needs to\n # check if either a customer or account is supplied to determine\n # the correct object to use.\n\n customer, clean_kwargs = cls._get_customer_from_kwargs(**kwargs)\n\n return (\n customer.api_retrieve(api_key=api_key)\n .sources.list(object=cls.stripe_class.OBJECT_NAME, **clean_kwargs)\n .auto_paging_iter()\n )\n\n def get_stripe_dashboard_url(self):\n return self.customer.get_stripe_dashboard_url()\n\n def remove(self):\n \"\"\"\n Removes a legacy source from this customer's account.\n \"\"\"\n\n # First, wipe default source on all customers that use this card.\n Customer.objects.filter(default_source=self.id).update(default_source=None)\n\n try:\n self._api_delete()\n except InvalidRequestError as exc:\n if \"No such source:\" in str(exc) or \"No such customer:\" in str(exc):\n # The exception was thrown because the stripe customer or card\n # was already deleted on the stripe side, ignore the exception\n pass\n else:\n # The exception was raised for another reason, re-raise it\n raise\n\n self.delete()\n\n def api_retrieve(self, api_key=None):\n # OVERRIDING the parent version of this function\n # Cards & Banks Accounts must be manipulated through a customer or account.\n # TODO: When managed accounts are supported, this method needs to check if\n # either a customer or account is supplied to determine the\n # correct object to use.\n api_key = api_key or self.default_api_key\n customer = self.customer.api_retrieve(api_key=api_key)\n\n # If the customer is deleted, the sources attribute will be absent.\n # eg. {\"id\": \"cus_XXXXXXXX\", \"deleted\": True}\n if \"sources\" not in customer:\n # We fake a native stripe InvalidRequestError so that it's caught\n # like an invalid ID error.\n raise InvalidRequestError(\"No such source: %s\" % (self.id), \"id\")\n\n return customer.sources.retrieve(self.id, expand=self.expand_fields)\n\n\nclass BankAccount(LegacySourceMixin, StripeModel):\n stripe_class = stripe.BankAccount\n\n account = models.ForeignKey(\n \"Account\",\n on_delete=models.PROTECT,\n null=True,\n blank=True,\n related_name=\"bank_account\",\n help_text=\"The account the charge was made on behalf of. Null here indicates \"\n \"that this value was never set.\",\n )\n account_holder_name = models.TextField(\n max_length=5000,\n default=\"\",\n blank=True,\n help_text=\"The name of the person or business that owns the bank account.\",\n )\n account_holder_type = StripeEnumField(\n enum=enums.BankAccountHolderType,\n help_text=\"The type of entity that holds the account.\",\n )\n bank_name = models.CharField(\n max_length=255,\n help_text=\"Name of the bank associated with the routing number \"\n \"(e.g., `WELLS FARGO`).\",\n )\n country = models.CharField(\n max_length=2,\n help_text=\"Two-letter ISO code representing the country the bank account \"\n \"is located in.\",\n )\n currency = StripeCurrencyCodeField()\n customer = models.ForeignKey(\n \"Customer\", on_delete=models.SET_NULL, null=True, related_name=\"bank_account\"\n )\n default_for_currency = models.NullBooleanField(\n help_text=\"Whether this external account is the default account for \"\n \"its currency.\"\n )\n fingerprint = models.CharField(\n max_length=16,\n help_text=(\n \"Uniquely identifies this particular bank account. \"\n \"You can use this attribute to check whether two bank accounts are \"\n \"the same.\"\n ),\n )\n last4 = models.CharField(max_length=4)\n routing_number = models.CharField(\n max_length=255, help_text=\"The routing transit number for the bank account.\"\n )\n status = StripeEnumField(enum=enums.BankAccountStatus)\n\n\nclass Card(LegacySourceMixin, StripeModel):\n \"\"\"\n You can store multiple cards on a customer in order to charge the customer later.\n\n This is a legacy model which only applies to the \"v2\" Stripe API (eg. Checkout.js).\n You should strive to use the Stripe \"v3\" API (eg. Stripe Elements).\n Also see: https://stripe.com/docs/stripe-js/elements/migrating\n When using Elements, you will not be using Card objects. Instead, you will use\n Source objects.\n A Source object of type \"card\" is equivalent to a Card object. However, Card\n objects cannot be converted into Source objects by Stripe at this time.\n\n Stripe documentation: https://stripe.com/docs/api/python#cards\n \"\"\"\n\n stripe_class = stripe.Card\n\n address_city = models.TextField(\n max_length=5000,\n blank=True,\n default=\"\",\n help_text=\"City/District/Suburb/Town/Village.\",\n )\n address_country = models.TextField(\n max_length=5000, blank=True, default=\"\", help_text=\"Billing address country.\"\n )\n address_line1 = models.TextField(\n max_length=5000,\n blank=True,\n default=\"\",\n help_text=\"Street address/PO Box/Company name.\",\n )\n address_line1_check = StripeEnumField(\n enum=enums.CardCheckResult,\n blank=True,\n default=\"\",\n help_text=\"If `address_line1` was provided, results of the check.\",\n )\n address_line2 = models.TextField(\n max_length=5000,\n blank=True,\n default=\"\",\n help_text=\"Apartment/Suite/Unit/Building.\",\n )\n address_state = models.TextField(\n max_length=5000,\n blank=True,\n default=\"\",\n help_text=\"State/County/Province/Region.\",\n )\n address_zip = models.TextField(\n max_length=5000, blank=True, default=\"\", help_text=\"ZIP or postal code.\"\n )\n address_zip_check = StripeEnumField(\n enum=enums.CardCheckResult,\n blank=True,\n default=\"\",\n help_text=\"If `address_zip` was provided, results of the check.\",\n )\n brand = StripeEnumField(enum=enums.CardBrand, help_text=\"Card brand.\")\n country = models.CharField(\n max_length=2,\n default=\"\",\n blank=True,\n help_text=\"Two-letter ISO code representing the country of the card.\",\n )\n customer = models.ForeignKey(\n \"Customer\", on_delete=models.SET_NULL, null=True, related_name=\"legacy_cards\"\n )\n cvc_check = StripeEnumField(\n enum=enums.CardCheckResult,\n default=\"\",\n blank=True,\n help_text=\"If a CVC was provided, results of the check.\",\n )\n dynamic_last4 = models.CharField(\n max_length=4,\n default=\"\",\n blank=True,\n help_text=\"(For tokenized numbers only.) The last four digits of the device \"\n \"account number.\",\n )\n exp_month = models.IntegerField(help_text=\"Card expiration month.\")\n exp_year = models.IntegerField(help_text=\"Card expiration year.\")\n fingerprint = models.CharField(\n default=\"\",\n blank=True,\n max_length=16,\n help_text=\"Uniquely identifies this particular card number.\",\n )\n funding = StripeEnumField(\n enum=enums.CardFundingType, help_text=\"Card funding type.\"\n )\n last4 = models.CharField(max_length=4, help_text=\"Last four digits of Card number.\")\n name = models.TextField(\n max_length=5000, default=\"\", blank=True, help_text=\"Cardholder name.\"\n )\n tokenization_method = StripeEnumField(\n enum=enums.CardTokenizationMethod,\n default=\"\",\n blank=True,\n help_text=\"If the card number is tokenized, this is the method that was used.\",\n )\n\n def str_parts(self):\n return [\n \"brand={brand}\".format(brand=self.brand),\n \"last4={last4}\".format(last4=self.last4),\n \"exp_month={exp_month}\".format(exp_month=self.exp_month),\n \"exp_year={exp_year}\".format(exp_year=self.exp_year),\n ] + super().str_parts()\n\n @classmethod\n def create_token(\n cls,\n number,\n exp_month,\n exp_year,\n cvc,\n api_key=djstripe_settings.STRIPE_SECRET_KEY,\n **kwargs\n ):\n \"\"\"\n Creates a single use token that wraps the details of a credit card.\n This token can be used in place of a credit card dictionary with any API method.\n These tokens can only be used once: by creating a new charge object,\n or attaching them to a customer.\n (Source: https://stripe.com/docs/api/python#create_card_token)\n\n :param exp_month: The card's expiration month.\n :type exp_month: Two digit int\n :param exp_year: The card's expiration year.\n :type exp_year: Two or Four digit int\n :param number: The card number\n :type number: string without any separators (no spaces)\n :param cvc: Card security code.\n :type cvc: string\n \"\"\"\n\n card = {\n \"number\": number,\n \"exp_month\": exp_month,\n \"exp_year\": exp_year,\n \"cvc\": cvc,\n }\n card.update(kwargs)\n\n return stripe.Token.create(api_key=api_key, card=card)\n\n\nclass Source(StripeModel):\n \"\"\"\n Stripe documentation: https://stripe.com/docs/api#sources\n \"\"\"\n\n amount = StripeDecimalCurrencyAmountField(\n null=True,\n blank=True,\n help_text=(\n \"Amount associated with the source. \"\n \"This is the amount for which the source will be chargeable once ready. \"\n \"Required for `single_use` sources.\"\n ),\n )\n client_secret = models.CharField(\n max_length=255,\n help_text=(\n \"The client secret of the source. \"\n \"Used for client-side retrieval using a publishable key.\"\n ),\n )\n currency = StripeCurrencyCodeField(default=\"\", blank=True)\n flow = StripeEnumField(\n enum=enums.SourceFlow, help_text=\"The authentication flow of the source.\"\n )\n owner = JSONField(\n help_text=(\n \"Information about the owner of the payment instrument that may be \"\n \"used or required by particular source types.\"\n )\n )\n statement_descriptor = models.CharField(\n max_length=255,\n default=\"\",\n blank=True,\n help_text=\"Extra information about a source. This will appear on your \"\n \"customer's statement every time you charge the source.\",\n )\n status = StripeEnumField(\n enum=enums.SourceStatus,\n help_text=\"The status of the source. Only `chargeable` sources can be used \"\n \"to create a charge.\",\n )\n type = StripeEnumField(enum=enums.SourceType, help_text=\"The type of the source.\")\n usage = StripeEnumField(\n enum=enums.SourceUsage,\n help_text=\"Whether this source should be reusable or not. \"\n \"Some source types may or may not be reusable by construction, \"\n \"while other may leave the option at creation.\",\n )\n\n # Flows\n code_verification = JSONField(\n null=True,\n blank=True,\n help_text=\"Information related to the code verification flow. \"\n \"Present if the source is authenticated by a verification code \"\n \"(`flow` is `code_verification`).\",\n )\n receiver = JSONField(\n null=True,\n blank=True,\n help_text=\"Information related to the receiver flow. \"\n \"Present if the source is a receiver (`flow` is `receiver`).\",\n )\n redirect = JSONField(\n null=True,\n blank=True,\n help_text=\"Information related to the redirect flow. \"\n \"Present if the source is authenticated by a redirect (`flow` is `redirect`).\",\n )\n\n source_data = JSONField(help_text=\"The data corresponding to the source type.\")\n\n customer = models.ForeignKey(\n \"Customer\",\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n related_name=\"sources\",\n )\n\n stripe_class = stripe.Source\n stripe_dashboard_item_name = \"sources\"\n\n @classmethod\n def _manipulate_stripe_object_hook(cls, data):\n # The source_data dict is an alias of all the source types\n data[\"source_data\"] = data[data[\"type\"]]\n return data\n\n def _attach_objects_hook(self, cls, data):\n customer = cls._stripe_object_to_customer(target_cls=Customer, data=data)\n if customer:\n self.customer = customer\n else:\n self.customer = None\n\n def detach(self):\n \"\"\"\n Detach the source from its customer.\n \"\"\"\n\n # First, wipe default source on all customers that use this.\n Customer.objects.filter(default_source=self.id).update(default_source=None)\n\n try:\n # TODO - we could use the return value of sync_from_stripe_data\n # or call its internals - self._sync/_attach_objects_hook etc here\n # to update `self` at this point?\n self.sync_from_stripe_data(self.api_retrieve().detach())\n return True\n except (InvalidRequestError, NotImplementedError):\n # The source was already detached. Resyncing.\n # NotImplementedError is an artifact of stripe-python<2.0\n # https://github.com/stripe/stripe-python/issues/376\n self.sync_from_stripe_data(self.api_retrieve())\n return False\n\n\nclass PaymentMethod(StripeModel):\n \"\"\"\n Stripe documentation: https://stripe.com/docs/api#payment_methods\n \"\"\"\n\n billing_details = JSONField(\n help_text=(\n \"Billing information associated with the PaymentMethod that may be used or \"\n \"required by particular types of payment methods.\"\n )\n )\n card = JSONField(\n help_text=\"If this is a card PaymentMethod, this hash contains details \"\n \"about the card.\"\n )\n card_present = JSONField(\n null=True,\n blank=True,\n help_text=\"If this is an card_present PaymentMethod, this hash contains \"\n \"details about the Card Present payment method.\",\n )\n customer = models.ForeignKey(\n \"Customer\",\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n related_name=\"payment_methods\",\n help_text=\"Customer to which this PaymentMethod is saved.\"\n \"This will not be set when the PaymentMethod has not been saved to a Customer.\",\n )\n type = models.CharField(\n max_length=255,\n null=True,\n blank=True,\n help_text=\"The type of the PaymentMethod. An additional hash is included \"\n \"on the PaymentMethod with a name matching this value. It contains additional \"\n \"information specific to the PaymentMethod type.\",\n )\n\n stripe_class = stripe.PaymentMethod\n\n @classmethod\n def attach(\n cls, payment_method, customer, api_key=djstripe_settings.STRIPE_SECRET_KEY\n ):\n \"\"\"\n Attach a payment method to a customer\n :param payment_method:\n :type payment_method: str, PaymentMethod\n :param customer:\n :type customer: str, Customer\n :param api_key:\n :return:\n \"\"\"\n\n if isinstance(payment_method, StripeModel):\n payment_method = payment_method.id\n\n if isinstance(customer, StripeModel):\n customer = customer.id\n\n extra_kwargs = {}\n if not isinstance(payment_method, stripe.PaymentMethod):\n # send api_key if we're not passing in a Stripe object\n # avoids \"Received unknown parameter: api_key\" since api uses the\n # key cached in the Stripe object\n extra_kwargs = {\"api_key\": api_key}\n\n stripe_payment_method = stripe.PaymentMethod.attach(\n payment_method, customer=customer, **extra_kwargs\n )\n return cls.sync_from_stripe_data(stripe_payment_method)\n", "path": "djstripe/models/payment_methods.py"}], "after_files": [{"content": "import stripe\nfrom django.db import models, transaction\nfrom stripe.error import InvalidRequestError\n\nfrom .. import enums\nfrom .. import settings as djstripe_settings\nfrom ..exceptions import StripeObjectManipulationException\nfrom ..fields import (\n JSONField,\n StripeCurrencyCodeField,\n StripeDecimalCurrencyAmountField,\n StripeEnumField,\n)\nfrom .base import StripeModel, logger\nfrom .core import Customer\n\n\nclass DjstripePaymentMethod(models.Model):\n \"\"\"\n An internal model that abstracts the legacy Card and BankAccount\n objects with Source objects.\n\n Contains two fields: `id` and `type`:\n - `id` is the id of the Stripe object.\n - `type` can be `card`, `bank_account` or `source`.\n \"\"\"\n\n id = models.CharField(max_length=255, primary_key=True)\n type = models.CharField(max_length=12, db_index=True)\n\n @classmethod\n def from_stripe_object(cls, data):\n source_type = data[\"object\"]\n model = cls._model_for_type(source_type)\n\n with transaction.atomic():\n model.sync_from_stripe_data(data)\n instance, _ = cls.objects.get_or_create(\n id=data[\"id\"], defaults={\"type\": source_type}\n )\n\n return instance\n\n @classmethod\n def _get_or_create_source(cls, data, source_type):\n try:\n model = cls._model_for_type(source_type)\n model._get_or_create_from_stripe_object(data)\n except ValueError as e:\n # This may happen if we have source types we don't know about.\n # Let's not make dj-stripe entirely unusable if that happens.\n logger.warning(\"Could not sync source of type %r: %s\", source_type, e)\n\n return cls.objects.get_or_create(id=data[\"id\"], defaults={\"type\": source_type})\n\n @classmethod\n def _model_for_type(cls, type):\n if type == \"card\":\n return Card\n elif type == \"source\":\n return Source\n elif type == \"bank_account\":\n return BankAccount\n\n raise ValueError(\"Unknown source type: {}\".format(type))\n\n @property\n def object_model(self):\n return self._model_for_type(self.type)\n\n def resolve(self):\n return self.object_model.objects.get(id=self.id)\n\n\nclass LegacySourceMixin:\n \"\"\"\n Mixin for functionality shared between the legacy Card & BankAccount sources\n \"\"\"\n\n @classmethod\n def _get_customer_from_kwargs(cls, **kwargs):\n if \"customer\" not in kwargs or not isinstance(kwargs[\"customer\"], Customer):\n raise StripeObjectManipulationException(\n \"{}s must be manipulated through a Customer. \"\n \"Pass a Customer object into this call.\".format(cls.__name__)\n )\n\n customer = kwargs[\"customer\"]\n del kwargs[\"customer\"]\n\n return customer, kwargs\n\n @classmethod\n def _api_create(cls, api_key=djstripe_settings.STRIPE_SECRET_KEY, **kwargs):\n # OVERRIDING the parent version of this function\n # Cards & Bank Accounts must be manipulated through a customer or account.\n # TODO: When managed accounts are supported, this method needs to\n # check if either a customer or account is supplied to determine\n # the correct object to use.\n\n customer, clean_kwargs = cls._get_customer_from_kwargs(**kwargs)\n\n return customer.api_retrieve().sources.create(api_key=api_key, **clean_kwargs)\n\n @classmethod\n def api_list(cls, api_key=djstripe_settings.STRIPE_SECRET_KEY, **kwargs):\n # OVERRIDING the parent version of this function\n # Cards & Bank Accounts must be manipulated through a customer or account.\n # TODO: When managed accounts are supported, this method needs to\n # check if either a customer or account is supplied to determine\n # the correct object to use.\n\n customer, clean_kwargs = cls._get_customer_from_kwargs(**kwargs)\n\n return (\n customer.api_retrieve(api_key=api_key)\n .sources.list(object=cls.stripe_class.OBJECT_NAME, **clean_kwargs)\n .auto_paging_iter()\n )\n\n def get_stripe_dashboard_url(self):\n return self.customer.get_stripe_dashboard_url()\n\n def remove(self):\n \"\"\"\n Removes a legacy source from this customer's account.\n \"\"\"\n\n # First, wipe default source on all customers that use this card.\n Customer.objects.filter(default_source=self.id).update(default_source=None)\n\n try:\n self._api_delete()\n except InvalidRequestError as exc:\n if \"No such source:\" in str(exc) or \"No such customer:\" in str(exc):\n # The exception was thrown because the stripe customer or card\n # was already deleted on the stripe side, ignore the exception\n pass\n else:\n # The exception was raised for another reason, re-raise it\n raise\n\n self.delete()\n\n def api_retrieve(self, api_key=None):\n # OVERRIDING the parent version of this function\n # Cards & Banks Accounts must be manipulated through a customer or account.\n # TODO: When managed accounts are supported, this method needs to check if\n # either a customer or account is supplied to determine the\n # correct object to use.\n api_key = api_key or self.default_api_key\n customer = self.customer.api_retrieve(api_key=api_key)\n\n # If the customer is deleted, the sources attribute will be absent.\n # eg. {\"id\": \"cus_XXXXXXXX\", \"deleted\": True}\n if \"sources\" not in customer:\n # We fake a native stripe InvalidRequestError so that it's caught\n # like an invalid ID error.\n raise InvalidRequestError(\"No such source: %s\" % (self.id), \"id\")\n\n return customer.sources.retrieve(self.id, expand=self.expand_fields)\n\n\nclass BankAccount(LegacySourceMixin, StripeModel):\n stripe_class = stripe.BankAccount\n\n account = models.ForeignKey(\n \"Account\",\n on_delete=models.PROTECT,\n null=True,\n blank=True,\n related_name=\"bank_account\",\n help_text=\"The account the charge was made on behalf of. Null here indicates \"\n \"that this value was never set.\",\n )\n account_holder_name = models.TextField(\n max_length=5000,\n default=\"\",\n blank=True,\n help_text=\"The name of the person or business that owns the bank account.\",\n )\n account_holder_type = StripeEnumField(\n enum=enums.BankAccountHolderType,\n help_text=\"The type of entity that holds the account.\",\n )\n bank_name = models.CharField(\n max_length=255,\n help_text=\"Name of the bank associated with the routing number \"\n \"(e.g., `WELLS FARGO`).\",\n )\n country = models.CharField(\n max_length=2,\n help_text=\"Two-letter ISO code representing the country the bank account \"\n \"is located in.\",\n )\n currency = StripeCurrencyCodeField()\n customer = models.ForeignKey(\n \"Customer\", on_delete=models.SET_NULL, null=True, related_name=\"bank_account\"\n )\n default_for_currency = models.NullBooleanField(\n help_text=\"Whether this external account is the default account for \"\n \"its currency.\"\n )\n fingerprint = models.CharField(\n max_length=16,\n help_text=(\n \"Uniquely identifies this particular bank account. \"\n \"You can use this attribute to check whether two bank accounts are \"\n \"the same.\"\n ),\n )\n last4 = models.CharField(max_length=4)\n routing_number = models.CharField(\n max_length=255, help_text=\"The routing transit number for the bank account.\"\n )\n status = StripeEnumField(enum=enums.BankAccountStatus)\n\n\nclass Card(LegacySourceMixin, StripeModel):\n \"\"\"\n You can store multiple cards on a customer in order to charge the customer later.\n\n This is a legacy model which only applies to the \"v2\" Stripe API (eg. Checkout.js).\n You should strive to use the Stripe \"v3\" API (eg. Stripe Elements).\n Also see: https://stripe.com/docs/stripe-js/elements/migrating\n When using Elements, you will not be using Card objects. Instead, you will use\n Source objects.\n A Source object of type \"card\" is equivalent to a Card object. However, Card\n objects cannot be converted into Source objects by Stripe at this time.\n\n Stripe documentation: https://stripe.com/docs/api/python#cards\n \"\"\"\n\n stripe_class = stripe.Card\n\n address_city = models.TextField(\n max_length=5000,\n blank=True,\n default=\"\",\n help_text=\"City/District/Suburb/Town/Village.\",\n )\n address_country = models.TextField(\n max_length=5000, blank=True, default=\"\", help_text=\"Billing address country.\"\n )\n address_line1 = models.TextField(\n max_length=5000,\n blank=True,\n default=\"\",\n help_text=\"Street address/PO Box/Company name.\",\n )\n address_line1_check = StripeEnumField(\n enum=enums.CardCheckResult,\n blank=True,\n default=\"\",\n help_text=\"If `address_line1` was provided, results of the check.\",\n )\n address_line2 = models.TextField(\n max_length=5000,\n blank=True,\n default=\"\",\n help_text=\"Apartment/Suite/Unit/Building.\",\n )\n address_state = models.TextField(\n max_length=5000,\n blank=True,\n default=\"\",\n help_text=\"State/County/Province/Region.\",\n )\n address_zip = models.TextField(\n max_length=5000, blank=True, default=\"\", help_text=\"ZIP or postal code.\"\n )\n address_zip_check = StripeEnumField(\n enum=enums.CardCheckResult,\n blank=True,\n default=\"\",\n help_text=\"If `address_zip` was provided, results of the check.\",\n )\n brand = StripeEnumField(enum=enums.CardBrand, help_text=\"Card brand.\")\n country = models.CharField(\n max_length=2,\n default=\"\",\n blank=True,\n help_text=\"Two-letter ISO code representing the country of the card.\",\n )\n customer = models.ForeignKey(\n \"Customer\", on_delete=models.SET_NULL, null=True, related_name=\"legacy_cards\"\n )\n cvc_check = StripeEnumField(\n enum=enums.CardCheckResult,\n default=\"\",\n blank=True,\n help_text=\"If a CVC was provided, results of the check.\",\n )\n dynamic_last4 = models.CharField(\n max_length=4,\n default=\"\",\n blank=True,\n help_text=\"(For tokenized numbers only.) The last four digits of the device \"\n \"account number.\",\n )\n exp_month = models.IntegerField(help_text=\"Card expiration month.\")\n exp_year = models.IntegerField(help_text=\"Card expiration year.\")\n fingerprint = models.CharField(\n default=\"\",\n blank=True,\n max_length=16,\n help_text=\"Uniquely identifies this particular card number.\",\n )\n funding = StripeEnumField(\n enum=enums.CardFundingType, help_text=\"Card funding type.\"\n )\n last4 = models.CharField(max_length=4, help_text=\"Last four digits of Card number.\")\n name = models.TextField(\n max_length=5000, default=\"\", blank=True, help_text=\"Cardholder name.\"\n )\n tokenization_method = StripeEnumField(\n enum=enums.CardTokenizationMethod,\n default=\"\",\n blank=True,\n help_text=\"If the card number is tokenized, this is the method that was used.\",\n )\n\n def str_parts(self):\n return [\n \"brand={brand}\".format(brand=self.brand),\n \"last4={last4}\".format(last4=self.last4),\n \"exp_month={exp_month}\".format(exp_month=self.exp_month),\n \"exp_year={exp_year}\".format(exp_year=self.exp_year),\n ] + super().str_parts()\n\n @classmethod\n def create_token(\n cls,\n number,\n exp_month,\n exp_year,\n cvc,\n api_key=djstripe_settings.STRIPE_SECRET_KEY,\n **kwargs\n ):\n \"\"\"\n Creates a single use token that wraps the details of a credit card.\n This token can be used in place of a credit card dictionary with any API method.\n These tokens can only be used once: by creating a new charge object,\n or attaching them to a customer.\n (Source: https://stripe.com/docs/api/python#create_card_token)\n\n :param exp_month: The card's expiration month.\n :type exp_month: Two digit int\n :param exp_year: The card's expiration year.\n :type exp_year: Two or Four digit int\n :param number: The card number\n :type number: string without any separators (no spaces)\n :param cvc: Card security code.\n :type cvc: string\n \"\"\"\n\n card = {\n \"number\": number,\n \"exp_month\": exp_month,\n \"exp_year\": exp_year,\n \"cvc\": cvc,\n }\n card.update(kwargs)\n\n return stripe.Token.create(api_key=api_key, card=card)\n\n\nclass Source(StripeModel):\n \"\"\"\n Stripe documentation: https://stripe.com/docs/api#sources\n \"\"\"\n\n amount = StripeDecimalCurrencyAmountField(\n null=True,\n blank=True,\n help_text=(\n \"Amount associated with the source. \"\n \"This is the amount for which the source will be chargeable once ready. \"\n \"Required for `single_use` sources.\"\n ),\n )\n client_secret = models.CharField(\n max_length=255,\n help_text=(\n \"The client secret of the source. \"\n \"Used for client-side retrieval using a publishable key.\"\n ),\n )\n currency = StripeCurrencyCodeField(default=\"\", blank=True)\n flow = StripeEnumField(\n enum=enums.SourceFlow, help_text=\"The authentication flow of the source.\"\n )\n owner = JSONField(\n help_text=(\n \"Information about the owner of the payment instrument that may be \"\n \"used or required by particular source types.\"\n )\n )\n statement_descriptor = models.CharField(\n max_length=255,\n default=\"\",\n blank=True,\n help_text=\"Extra information about a source. This will appear on your \"\n \"customer's statement every time you charge the source.\",\n )\n status = StripeEnumField(\n enum=enums.SourceStatus,\n help_text=\"The status of the source. Only `chargeable` sources can be used \"\n \"to create a charge.\",\n )\n type = StripeEnumField(enum=enums.SourceType, help_text=\"The type of the source.\")\n usage = StripeEnumField(\n enum=enums.SourceUsage,\n help_text=\"Whether this source should be reusable or not. \"\n \"Some source types may or may not be reusable by construction, \"\n \"while other may leave the option at creation.\",\n )\n\n # Flows\n code_verification = JSONField(\n null=True,\n blank=True,\n help_text=\"Information related to the code verification flow. \"\n \"Present if the source is authenticated by a verification code \"\n \"(`flow` is `code_verification`).\",\n )\n receiver = JSONField(\n null=True,\n blank=True,\n help_text=\"Information related to the receiver flow. \"\n \"Present if the source is a receiver (`flow` is `receiver`).\",\n )\n redirect = JSONField(\n null=True,\n blank=True,\n help_text=\"Information related to the redirect flow. \"\n \"Present if the source is authenticated by a redirect (`flow` is `redirect`).\",\n )\n\n source_data = JSONField(help_text=\"The data corresponding to the source type.\")\n\n customer = models.ForeignKey(\n \"Customer\",\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n related_name=\"sources\",\n )\n\n stripe_class = stripe.Source\n stripe_dashboard_item_name = \"sources\"\n\n @classmethod\n def _manipulate_stripe_object_hook(cls, data):\n # The source_data dict is an alias of all the source types\n data[\"source_data\"] = data[data[\"type\"]]\n return data\n\n def _attach_objects_hook(self, cls, data):\n customer = cls._stripe_object_to_customer(target_cls=Customer, data=data)\n if customer:\n self.customer = customer\n else:\n self.customer = None\n\n def detach(self):\n \"\"\"\n Detach the source from its customer.\n \"\"\"\n\n # First, wipe default source on all customers that use this.\n Customer.objects.filter(default_source=self.id).update(default_source=None)\n\n try:\n # TODO - we could use the return value of sync_from_stripe_data\n # or call its internals - self._sync/_attach_objects_hook etc here\n # to update `self` at this point?\n self.sync_from_stripe_data(self.api_retrieve().detach())\n return True\n except (InvalidRequestError, NotImplementedError):\n # The source was already detached. Resyncing.\n # NotImplementedError is an artifact of stripe-python<2.0\n # https://github.com/stripe/stripe-python/issues/376\n self.sync_from_stripe_data(self.api_retrieve())\n return False\n\n\nclass PaymentMethod(StripeModel):\n \"\"\"\n Stripe documentation: https://stripe.com/docs/api#payment_methods\n \"\"\"\n\n billing_details = JSONField(\n help_text=(\n \"Billing information associated with the PaymentMethod that may be used or \"\n \"required by particular types of payment methods.\"\n )\n )\n card = JSONField(\n help_text=\"If this is a card PaymentMethod, this hash contains details \"\n \"about the card.\"\n )\n card_present = JSONField(\n null=True,\n blank=True,\n help_text=\"If this is an card_present PaymentMethod, this hash contains \"\n \"details about the Card Present payment method.\",\n )\n customer = models.ForeignKey(\n \"Customer\",\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n related_name=\"payment_methods\",\n help_text=\"Customer to which this PaymentMethod is saved.\"\n \"This will not be set when the PaymentMethod has not been saved to a Customer.\",\n )\n type = models.CharField(\n max_length=255,\n null=True,\n blank=True,\n help_text=\"The type of the PaymentMethod. An additional hash is included \"\n \"on the PaymentMethod with a name matching this value. It contains additional \"\n \"information specific to the PaymentMethod type.\",\n )\n\n stripe_class = stripe.PaymentMethod\n\n def _attach_objects_hook(self, cls, data):\n customer = cls._stripe_object_to_customer(target_cls=Customer, data=data)\n if customer:\n self.customer = customer\n else:\n self.customer = None\n\n @classmethod\n def attach(\n cls, payment_method, customer, api_key=djstripe_settings.STRIPE_SECRET_KEY\n ):\n \"\"\"\n Attach a payment method to a customer\n :param payment_method:\n :type payment_method: str, PaymentMethod\n :param customer:\n :type customer: str, Customer\n :param api_key:\n :return:\n \"\"\"\n\n if isinstance(payment_method, StripeModel):\n payment_method = payment_method.id\n\n if isinstance(customer, StripeModel):\n customer = customer.id\n\n extra_kwargs = {}\n if not isinstance(payment_method, stripe.PaymentMethod):\n # send api_key if we're not passing in a Stripe object\n # avoids \"Received unknown parameter: api_key\" since api uses the\n # key cached in the Stripe object\n extra_kwargs = {\"api_key\": api_key}\n\n stripe_payment_method = stripe.PaymentMethod.attach(\n payment_method, customer=customer, **extra_kwargs\n )\n return cls.sync_from_stripe_data(stripe_payment_method)\n", "path": "djstripe/models/payment_methods.py"}]} |
gh_patches_debug_1512 | rasdani/github-patches | git_diff | ansible__ansible-modules-extras-3417 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ec2_lc_find not returning AssociatePublicIpAddress
##### ISSUE TYPE
Bug Report
##### COMPONENT NAME
ec2_lc_find
##### ANSIBLE VERSION
```
ansible 2.2.0.0
config file = /home/centos/ansiblebase/ansible.cfg
configured module search path = Default w/o overrides
```
##### CONFIGURATION
No significant changes
##### OS / ENVIRONMENT
Started with Ansible Tower 3.0.3 on CentOS 7 x86_64
Did a yum update on ansible to 2.2.
Did pip install boto3.
##### SUMMARY
Running ec2_lc_find fails with a missing key for AssociatePublicIpAddress
##### STEPS TO REPRODUCE
```
- ec2_lc_find:
region: "{{ region }}"
name_regex: lc_name-*"
sort_order: ascending
limit: 3
register: old_lc_result
```
<!--- You can also paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
Correctly returns load configurations matching regex.
##### ACTUAL RESULTS
```
An exception occurred during task execution. To see the full traceback, use -vvv. The error was: KeyError: 'AssociatePublicIpAddress'
fatal: [localhost]: FAILED! => {"changed": false, "failed": true, "module_stderr": "Traceback (most recent call last):\n File \"/tmp/ansible_dJ3ho0/ansible_module_ec2_lc_find.py\", line 225, in <module>\n main()\n File \"/tmp/ansible_dJ3ho0/ansible_module_ec2_lc_find.py\", line 217, in main\n find_launch_configs(client, module)\n File \"/tmp/ansible_dJ3ho0/ansible_module_ec2_lc_find.py\", line 187, in find_launch_configs\n 'associate_public_address': lc['AssociatePublicIpAddress'],\nKeyError: 'AssociatePublicIpAddress'\n", "module_stdout": "", "msg": "MODULE FAILURE"}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cloud/amazon/ec2_lc_find.py`
Content:
```
1 #!/usr/bin/python
2 # encoding: utf-8
3
4 # (c) 2015, Jose Armesto <[email protected]>
5 #
6 # This file is part of Ansible
7 #
8 # This module is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU General Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12 #
13 # This software is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
17 #
18 # You should have received a copy of the GNU General Public License
19 # along with this software. If not, see <http://www.gnu.org/licenses/>.
20
21 DOCUMENTATION = """
22 ---
23 module: ec2_lc_find
24 short_description: Find AWS Autoscaling Launch Configurations
25 description:
26 - Returns list of matching Launch Configurations for a given name, along with other useful information
27 - Results can be sorted and sliced
28 - It depends on boto
29 - Based on the work by Tom Bamford (https://github.com/tombamford)
30
31 version_added: "2.2"
32 author: "Jose Armesto (@fiunchinho)"
33 options:
34 region:
35 description:
36 - The AWS region to use.
37 required: true
38 aliases: ['aws_region', 'ec2_region']
39 name_regex:
40 description:
41 - A Launch Configuration to match
42 - It'll be compiled as regex
43 required: True
44 sort_order:
45 description:
46 - Order in which to sort results.
47 choices: ['ascending', 'descending']
48 default: 'ascending'
49 required: false
50 limit:
51 description:
52 - How many results to show.
53 - Corresponds to Python slice notation like list[:limit].
54 default: null
55 required: false
56 requirements:
57 - "python >= 2.6"
58 - boto3
59 """
60
61 EXAMPLES = '''
62 # Note: These examples do not set authentication details, see the AWS Guide for details.
63
64 # Search for the Launch Configurations that start with "app"
65 - ec2_lc_find:
66 name_regex: app.*
67 sort_order: descending
68 limit: 2
69 '''
70
71 RETURN = '''
72 image_id:
73 description: AMI id
74 returned: when Launch Configuration was found
75 type: string
76 sample: "ami-0d75df7e"
77 user_data:
78 description: User data used to start instance
79 returned: when Launch Configuration was found
80 type: string
81 user_data: "ZXhwb3J0IENMT1VE"
82 name:
83 description: Name of the AMI
84 returned: when Launch Configuration was found
85 type: string
86 sample: "myapp-v123"
87 arn:
88 description: Name of the AMI
89 returned: when Launch Configuration was found
90 type: string
91 sample: "arn:aws:autoscaling:eu-west-1:12345:launchConfiguration:d82f050e-e315:launchConfigurationName/yourproject"
92 instance_type:
93 description: Type of ec2 instance
94 returned: when Launch Configuration was found
95 type: string
96 sample: "t2.small"
97 created_time:
98 description: When it was created
99 returned: when Launch Configuration was found
100 type: string
101 sample: "2016-06-29T14:59:22.222000+00:00"
102 ebs_optimized:
103 description: Launch Configuration EBS optimized property
104 returned: when Launch Configuration was found
105 type: boolean
106 sample: False
107 instance_monitoring:
108 description: Launch Configuration instance monitoring property
109 returned: when Launch Configuration was found
110 type: string
111 sample: {"Enabled": false}
112 classic_link_vpc_security_groups:
113 description: Launch Configuration classic link vpc security groups property
114 returned: when Launch Configuration was found
115 type: list
116 sample: []
117 block_device_mappings:
118 description: Launch Configuration block device mappings property
119 returned: when Launch Configuration was found
120 type: list
121 sample: []
122 keyname:
123 description: Launch Configuration ssh key
124 returned: when Launch Configuration was found
125 type: string
126 sample: mykey
127 security_groups:
128 description: Launch Configuration security groups
129 returned: when Launch Configuration was found
130 type: list
131 sample: []
132 kernel_id:
133 description: Launch Configuration kernel to use
134 returned: when Launch Configuration was found
135 type: string
136 sample: ''
137 ram_disk_id:
138 description: Launch Configuration ram disk property
139 returned: when Launch Configuration was found
140 type: string
141 sample: ''
142 associate_public_address:
143 description: Assign public address or not
144 returned: when Launch Configuration was found
145 type: boolean
146 sample: True
147 ...
148 '''
149
150
151 def find_launch_configs(client, module):
152 name_regex = module.params.get('name_regex')
153 sort_order = module.params.get('sort_order')
154 limit = module.params.get('limit')
155
156 paginator = client.get_paginator('describe_launch_configurations')
157
158 response_iterator = paginator.paginate(
159 PaginationConfig={
160 'MaxItems': 1000,
161 'PageSize': 100
162 }
163 )
164
165 results = []
166
167 for response in response_iterator:
168 response['LaunchConfigurations'] = filter(lambda lc: re.compile(name_regex).match(lc['LaunchConfigurationName']),
169 response['LaunchConfigurations'])
170
171 for lc in response['LaunchConfigurations']:
172 data = {
173 'name': lc['LaunchConfigurationName'],
174 'arn': lc['LaunchConfigurationARN'],
175 'created_time': lc['CreatedTime'],
176 'user_data': lc['UserData'],
177 'instance_type': lc['InstanceType'],
178 'image_id': lc['ImageId'],
179 'ebs_optimized': lc['EbsOptimized'],
180 'instance_monitoring': lc['InstanceMonitoring'],
181 'classic_link_vpc_security_groups': lc['ClassicLinkVPCSecurityGroups'],
182 'block_device_mappings': lc['BlockDeviceMappings'],
183 'keyname': lc['KeyName'],
184 'security_groups': lc['SecurityGroups'],
185 'kernel_id': lc['KernelId'],
186 'ram_disk_id': lc['RamdiskId'],
187 'associate_public_address': lc['AssociatePublicIpAddress'],
188 }
189
190 results.append(data)
191
192 results.sort(key=lambda e: e['name'], reverse=(sort_order == 'descending'))
193
194 if limit:
195 results = results[:int(limit)]
196
197 module.exit_json(changed=False, results=results)
198
199
200 def main():
201 argument_spec = ec2_argument_spec()
202 argument_spec.update(dict(
203 region=dict(required=True, aliases=['aws_region', 'ec2_region']),
204 name_regex=dict(required=True),
205 sort_order=dict(required=False, default='ascending', choices=['ascending', 'descending']),
206 limit=dict(required=False, type='int'),
207 )
208 )
209
210 module = AnsibleModule(
211 argument_spec=argument_spec,
212 )
213
214 region, ec2_url, aws_connect_params = get_aws_connection_info(module, True)
215
216 client = boto3_conn(module=module, conn_type='client', resource='autoscaling', region=region, **aws_connect_params)
217 find_launch_configs(client, module)
218
219
220 # import module snippets
221 from ansible.module_utils.basic import *
222 from ansible.module_utils.ec2 import *
223
224 if __name__ == '__main__':
225 main()
226
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cloud/amazon/ec2_lc_find.py b/cloud/amazon/ec2_lc_find.py
--- a/cloud/amazon/ec2_lc_find.py
+++ b/cloud/amazon/ec2_lc_find.py
@@ -184,7 +184,7 @@
'security_groups': lc['SecurityGroups'],
'kernel_id': lc['KernelId'],
'ram_disk_id': lc['RamdiskId'],
- 'associate_public_address': lc['AssociatePublicIpAddress'],
+ 'associate_public_address': lc.get('AssociatePublicIpAddress', False),
}
results.append(data)
| {"golden_diff": "diff --git a/cloud/amazon/ec2_lc_find.py b/cloud/amazon/ec2_lc_find.py\n--- a/cloud/amazon/ec2_lc_find.py\n+++ b/cloud/amazon/ec2_lc_find.py\n@@ -184,7 +184,7 @@\n 'security_groups': lc['SecurityGroups'],\n 'kernel_id': lc['KernelId'],\n 'ram_disk_id': lc['RamdiskId'],\n- 'associate_public_address': lc['AssociatePublicIpAddress'],\n+ 'associate_public_address': lc.get('AssociatePublicIpAddress', False),\n }\n \n results.append(data)\n", "issue": "ec2_lc_find not returning AssociatePublicIpAddress\n##### ISSUE TYPE\r\nBug Report\r\n\r\n##### COMPONENT NAME\r\nec2_lc_find\r\n\r\n##### ANSIBLE VERSION\r\n```\r\nansible 2.2.0.0\r\n config file = /home/centos/ansiblebase/ansible.cfg\r\n configured module search path = Default w/o overrides\r\n```\r\n\r\n##### CONFIGURATION\r\nNo significant changes\r\n\r\n##### OS / ENVIRONMENT\r\nStarted with Ansible Tower 3.0.3 on CentOS 7 x86_64\r\nDid a yum update on ansible to 2.2.\r\nDid pip install boto3.\r\n\r\n##### SUMMARY\r\nRunning ec2_lc_find fails with a missing key for AssociatePublicIpAddress\r\n\r\n##### STEPS TO REPRODUCE\r\n```\r\n- ec2_lc_find:\r\n region: \"{{ region }}\"\r\n name_regex: lc_name-*\"\r\n sort_order: ascending\r\n limit: 3\r\n register: old_lc_result\r\n```\r\n\r\n<!--- You can also paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\nCorrectly returns load configurations matching regex.\r\n\r\n##### ACTUAL RESULTS\r\n```\r\nAn exception occurred during task execution. To see the full traceback, use -vvv. The error was: KeyError: 'AssociatePublicIpAddress'\r\nfatal: [localhost]: FAILED! => {\"changed\": false, \"failed\": true, \"module_stderr\": \"Traceback (most recent call last):\\n File \\\"/tmp/ansible_dJ3ho0/ansible_module_ec2_lc_find.py\\\", line 225, in <module>\\n main()\\n File \\\"/tmp/ansible_dJ3ho0/ansible_module_ec2_lc_find.py\\\", line 217, in main\\n find_launch_configs(client, module)\\n File \\\"/tmp/ansible_dJ3ho0/ansible_module_ec2_lc_find.py\\\", line 187, in find_launch_configs\\n 'associate_public_address': lc['AssociatePublicIpAddress'],\\nKeyError: 'AssociatePublicIpAddress'\\n\", \"module_stdout\": \"\", \"msg\": \"MODULE FAILURE\"}\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/python\n# encoding: utf-8\n\n# (c) 2015, Jose Armesto <[email protected]>\n#\n# This file is part of Ansible\n#\n# This module is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This software is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this software. If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = \"\"\"\n---\nmodule: ec2_lc_find\nshort_description: Find AWS Autoscaling Launch Configurations\ndescription:\n - Returns list of matching Launch Configurations for a given name, along with other useful information\n - Results can be sorted and sliced\n - It depends on boto\n - Based on the work by Tom Bamford (https://github.com/tombamford)\n\nversion_added: \"2.2\"\nauthor: \"Jose Armesto (@fiunchinho)\"\noptions:\n region:\n description:\n - The AWS region to use.\n required: true\n aliases: ['aws_region', 'ec2_region']\n name_regex:\n description:\n - A Launch Configuration to match\n - It'll be compiled as regex\n required: True\n sort_order:\n description:\n - Order in which to sort results.\n choices: ['ascending', 'descending']\n default: 'ascending'\n required: false\n limit:\n description:\n - How many results to show.\n - Corresponds to Python slice notation like list[:limit].\n default: null\n required: false\nrequirements:\n - \"python >= 2.6\"\n - boto3\n\"\"\"\n\nEXAMPLES = '''\n# Note: These examples do not set authentication details, see the AWS Guide for details.\n\n# Search for the Launch Configurations that start with \"app\"\n- ec2_lc_find:\n name_regex: app.*\n sort_order: descending\n limit: 2\n'''\n\nRETURN = '''\nimage_id:\n description: AMI id\n returned: when Launch Configuration was found\n type: string\n sample: \"ami-0d75df7e\"\nuser_data:\n description: User data used to start instance\n returned: when Launch Configuration was found\n type: string\n user_data: \"ZXhwb3J0IENMT1VE\"\nname:\n description: Name of the AMI\n returned: when Launch Configuration was found\n type: string\n sample: \"myapp-v123\"\narn:\n description: Name of the AMI\n returned: when Launch Configuration was found\n type: string\n sample: \"arn:aws:autoscaling:eu-west-1:12345:launchConfiguration:d82f050e-e315:launchConfigurationName/yourproject\"\ninstance_type:\n description: Type of ec2 instance\n returned: when Launch Configuration was found\n type: string\n sample: \"t2.small\"\ncreated_time:\n description: When it was created\n returned: when Launch Configuration was found\n type: string\n sample: \"2016-06-29T14:59:22.222000+00:00\"\nebs_optimized:\n description: Launch Configuration EBS optimized property\n returned: when Launch Configuration was found\n type: boolean\n sample: False\ninstance_monitoring:\n description: Launch Configuration instance monitoring property\n returned: when Launch Configuration was found\n type: string\n sample: {\"Enabled\": false}\nclassic_link_vpc_security_groups:\n description: Launch Configuration classic link vpc security groups property\n returned: when Launch Configuration was found\n type: list\n sample: []\nblock_device_mappings:\n description: Launch Configuration block device mappings property\n returned: when Launch Configuration was found\n type: list\n sample: []\nkeyname:\n description: Launch Configuration ssh key\n returned: when Launch Configuration was found\n type: string\n sample: mykey\nsecurity_groups:\n description: Launch Configuration security groups\n returned: when Launch Configuration was found\n type: list\n sample: []\nkernel_id:\n description: Launch Configuration kernel to use\n returned: when Launch Configuration was found\n type: string\n sample: ''\nram_disk_id:\n description: Launch Configuration ram disk property\n returned: when Launch Configuration was found\n type: string\n sample: ''\nassociate_public_address:\n description: Assign public address or not\n returned: when Launch Configuration was found\n type: boolean\n sample: True\n...\n'''\n\n\ndef find_launch_configs(client, module):\n name_regex = module.params.get('name_regex')\n sort_order = module.params.get('sort_order')\n limit = module.params.get('limit')\n\n paginator = client.get_paginator('describe_launch_configurations')\n\n response_iterator = paginator.paginate(\n PaginationConfig={\n 'MaxItems': 1000,\n 'PageSize': 100\n }\n )\n\n results = []\n\n for response in response_iterator:\n response['LaunchConfigurations'] = filter(lambda lc: re.compile(name_regex).match(lc['LaunchConfigurationName']),\n response['LaunchConfigurations'])\n\n for lc in response['LaunchConfigurations']:\n data = {\n 'name': lc['LaunchConfigurationName'],\n 'arn': lc['LaunchConfigurationARN'],\n 'created_time': lc['CreatedTime'],\n 'user_data': lc['UserData'],\n 'instance_type': lc['InstanceType'],\n 'image_id': lc['ImageId'],\n 'ebs_optimized': lc['EbsOptimized'],\n 'instance_monitoring': lc['InstanceMonitoring'],\n 'classic_link_vpc_security_groups': lc['ClassicLinkVPCSecurityGroups'],\n 'block_device_mappings': lc['BlockDeviceMappings'],\n 'keyname': lc['KeyName'],\n 'security_groups': lc['SecurityGroups'],\n 'kernel_id': lc['KernelId'],\n 'ram_disk_id': lc['RamdiskId'],\n 'associate_public_address': lc['AssociatePublicIpAddress'],\n }\n\n results.append(data)\n\n results.sort(key=lambda e: e['name'], reverse=(sort_order == 'descending'))\n\n if limit:\n results = results[:int(limit)]\n\n module.exit_json(changed=False, results=results)\n\n\ndef main():\n argument_spec = ec2_argument_spec()\n argument_spec.update(dict(\n region=dict(required=True, aliases=['aws_region', 'ec2_region']),\n name_regex=dict(required=True),\n sort_order=dict(required=False, default='ascending', choices=['ascending', 'descending']),\n limit=dict(required=False, type='int'),\n )\n )\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n )\n\n region, ec2_url, aws_connect_params = get_aws_connection_info(module, True)\n\n client = boto3_conn(module=module, conn_type='client', resource='autoscaling', region=region, **aws_connect_params)\n find_launch_configs(client, module)\n\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.ec2 import *\n\nif __name__ == '__main__':\n main()\n", "path": "cloud/amazon/ec2_lc_find.py"}], "after_files": [{"content": "#!/usr/bin/python\n# encoding: utf-8\n\n# (c) 2015, Jose Armesto <[email protected]>\n#\n# This file is part of Ansible\n#\n# This module is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This software is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this software. If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = \"\"\"\n---\nmodule: ec2_lc_find\nshort_description: Find AWS Autoscaling Launch Configurations\ndescription:\n - Returns list of matching Launch Configurations for a given name, along with other useful information\n - Results can be sorted and sliced\n - It depends on boto\n - Based on the work by Tom Bamford (https://github.com/tombamford)\n\nversion_added: \"2.2\"\nauthor: \"Jose Armesto (@fiunchinho)\"\noptions:\n region:\n description:\n - The AWS region to use.\n required: true\n aliases: ['aws_region', 'ec2_region']\n name_regex:\n description:\n - A Launch Configuration to match\n - It'll be compiled as regex\n required: True\n sort_order:\n description:\n - Order in which to sort results.\n choices: ['ascending', 'descending']\n default: 'ascending'\n required: false\n limit:\n description:\n - How many results to show.\n - Corresponds to Python slice notation like list[:limit].\n default: null\n required: false\nrequirements:\n - \"python >= 2.6\"\n - boto3\n\"\"\"\n\nEXAMPLES = '''\n# Note: These examples do not set authentication details, see the AWS Guide for details.\n\n# Search for the Launch Configurations that start with \"app\"\n- ec2_lc_find:\n name_regex: app.*\n sort_order: descending\n limit: 2\n'''\n\nRETURN = '''\nimage_id:\n description: AMI id\n returned: when Launch Configuration was found\n type: string\n sample: \"ami-0d75df7e\"\nuser_data:\n description: User data used to start instance\n returned: when Launch Configuration was found\n type: string\n user_data: \"ZXhwb3J0IENMT1VE\"\nname:\n description: Name of the AMI\n returned: when Launch Configuration was found\n type: string\n sample: \"myapp-v123\"\narn:\n description: Name of the AMI\n returned: when Launch Configuration was found\n type: string\n sample: \"arn:aws:autoscaling:eu-west-1:12345:launchConfiguration:d82f050e-e315:launchConfigurationName/yourproject\"\ninstance_type:\n description: Type of ec2 instance\n returned: when Launch Configuration was found\n type: string\n sample: \"t2.small\"\ncreated_time:\n description: When it was created\n returned: when Launch Configuration was found\n type: string\n sample: \"2016-06-29T14:59:22.222000+00:00\"\nebs_optimized:\n description: Launch Configuration EBS optimized property\n returned: when Launch Configuration was found\n type: boolean\n sample: False\ninstance_monitoring:\n description: Launch Configuration instance monitoring property\n returned: when Launch Configuration was found\n type: string\n sample: {\"Enabled\": false}\nclassic_link_vpc_security_groups:\n description: Launch Configuration classic link vpc security groups property\n returned: when Launch Configuration was found\n type: list\n sample: []\nblock_device_mappings:\n description: Launch Configuration block device mappings property\n returned: when Launch Configuration was found\n type: list\n sample: []\nkeyname:\n description: Launch Configuration ssh key\n returned: when Launch Configuration was found\n type: string\n sample: mykey\nsecurity_groups:\n description: Launch Configuration security groups\n returned: when Launch Configuration was found\n type: list\n sample: []\nkernel_id:\n description: Launch Configuration kernel to use\n returned: when Launch Configuration was found\n type: string\n sample: ''\nram_disk_id:\n description: Launch Configuration ram disk property\n returned: when Launch Configuration was found\n type: string\n sample: ''\nassociate_public_address:\n description: Assign public address or not\n returned: when Launch Configuration was found\n type: boolean\n sample: True\n...\n'''\n\n\ndef find_launch_configs(client, module):\n name_regex = module.params.get('name_regex')\n sort_order = module.params.get('sort_order')\n limit = module.params.get('limit')\n\n paginator = client.get_paginator('describe_launch_configurations')\n\n response_iterator = paginator.paginate(\n PaginationConfig={\n 'MaxItems': 1000,\n 'PageSize': 100\n }\n )\n\n results = []\n\n for response in response_iterator:\n response['LaunchConfigurations'] = filter(lambda lc: re.compile(name_regex).match(lc['LaunchConfigurationName']),\n response['LaunchConfigurations'])\n\n for lc in response['LaunchConfigurations']:\n data = {\n 'name': lc['LaunchConfigurationName'],\n 'arn': lc['LaunchConfigurationARN'],\n 'created_time': lc['CreatedTime'],\n 'user_data': lc['UserData'],\n 'instance_type': lc['InstanceType'],\n 'image_id': lc['ImageId'],\n 'ebs_optimized': lc['EbsOptimized'],\n 'instance_monitoring': lc['InstanceMonitoring'],\n 'classic_link_vpc_security_groups': lc['ClassicLinkVPCSecurityGroups'],\n 'block_device_mappings': lc['BlockDeviceMappings'],\n 'keyname': lc['KeyName'],\n 'security_groups': lc['SecurityGroups'],\n 'kernel_id': lc['KernelId'],\n 'ram_disk_id': lc['RamdiskId'],\n 'associate_public_address': lc.get('AssociatePublicIpAddress', False),\n }\n\n results.append(data)\n\n results.sort(key=lambda e: e['name'], reverse=(sort_order == 'descending'))\n\n if limit:\n results = results[:int(limit)]\n\n module.exit_json(changed=False, results=results)\n\n\ndef main():\n argument_spec = ec2_argument_spec()\n argument_spec.update(dict(\n region=dict(required=True, aliases=['aws_region', 'ec2_region']),\n name_regex=dict(required=True),\n sort_order=dict(required=False, default='ascending', choices=['ascending', 'descending']),\n limit=dict(required=False, type='int'),\n )\n )\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n )\n\n region, ec2_url, aws_connect_params = get_aws_connection_info(module, True)\n\n client = boto3_conn(module=module, conn_type='client', resource='autoscaling', region=region, **aws_connect_params)\n find_launch_configs(client, module)\n\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.ec2 import *\n\nif __name__ == '__main__':\n main()\n", "path": "cloud/amazon/ec2_lc_find.py"}]} |
gh_patches_debug_1513 | rasdani/github-patches | git_diff | zulip__zulip-5407 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pull GCI docs into main Zulip repo.
Some of our assets, like git cheatsheets, currently live in the zulip-gci repo. We should move them to the main Zulip repo and link from appropriate places.
cc @synicalsyntax @lonerz @tommyip
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py`
Content:
```
1 #!/usr/bin/env python
2 from __future__ import print_function
3
4 import logging
5 import re
6 import scrapy
7
8 from scrapy import Request
9 from scrapy.linkextractors import IGNORED_EXTENSIONS
10 from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
11 from scrapy.utils.url import url_has_any_extension
12
13 from typing import Any, Generator, List, Optional, Tuple
14
15 EXCLUDED_URLS = [
16 # Google calendar returns 404s on HEAD requests unconditionally
17 'https://calendar.google.com/calendar/[email protected]',
18 # Returns 409 errors to HEAD requests frequently
19 'https://medium.freecodecamp.com',
20 ]
21
22
23 class BaseDocumentationSpider(scrapy.Spider):
24 name = None # type: Optional[str]
25 # Exclude domain address.
26 deny_domains = [] # type: List[str]
27 start_urls = [] # type: List[str]
28 deny = [] # type: List[str]
29 file_extensions = ['.' + ext for ext in IGNORED_EXTENSIONS] # type: List[str]
30 tags = ('a', 'area', 'img')
31 attrs = ('href', 'src')
32
33 def __init__(self, *args, **kwargs):
34 # type: (*Any, **Any) -> None
35 super(BaseDocumentationSpider, self).__init__(*args, **kwargs)
36 self.has_error = False
37
38 def _set_error_state(self):
39 # type: () -> None
40 self.has_error = True
41
42 def _has_extension(self, url):
43 # type: (str) -> bool
44 return url_has_any_extension(url, self.file_extensions)
45
46 def _is_external_url(self, url):
47 # type: (str) -> bool
48 return url.startswith('http') or self._has_extension(url)
49
50 def check_existing(self, response):
51 # type: (Any) -> None
52 self.log(response)
53
54 def check_permalink(self, response):
55 # type: (Any) -> None
56 self.log(response)
57 xpath_template = "//*[@id='{permalink}' or @name='{permalink}']"
58 m = re.match(r".+\#(?P<permalink>.*)$", response.request.url) # Get anchor value.
59 if not m:
60 return
61 permalink = m.group('permalink')
62 # Check permalink existing on response page.
63 if not response.selector.xpath(xpath_template.format(permalink=permalink)):
64 self._set_error_state()
65 raise Exception(
66 "Permalink #{} is not found on page {}".format(permalink, response.request.url))
67
68 def parse(self, response):
69 # type: (Any) -> Generator[Request, None, None]
70 self.log(response)
71 for link in LxmlLinkExtractor(deny_domains=self.deny_domains, deny_extensions=['doc'],
72 tags=self.tags, attrs=self.attrs, deny=self.deny,
73 canonicalize=False).extract_links(response):
74 callback = self.parse # type: Any
75 dont_filter = False
76 method = 'GET'
77 if self._is_external_url(link.url):
78 callback = self.check_existing
79 method = 'HEAD'
80 elif '#' in link.url:
81 dont_filter = True
82 callback = self.check_permalink
83 yield Request(link.url, method=method, callback=callback, dont_filter=dont_filter,
84 errback=self.error_callback)
85
86 def retry_request_with_get(self, request):
87 # type: (Request) -> Generator[Request, None, None]
88 request.method = 'GET'
89 request.dont_filter = True
90 yield request
91
92 def exclude_error(self, url):
93 # type: (str) -> bool
94 if url in EXCLUDED_URLS:
95 return True
96 return False
97
98 def error_callback(self, failure):
99 # type: (Any) -> Optional[Generator[Any, None, None]]
100 if hasattr(failure.value, 'response') and failure.value.response:
101 response = failure.value.response
102 if self.exclude_error(response.url):
103 return None
104 if response.status == 404:
105 self._set_error_state()
106 raise Exception('Page not found: {}'.format(response))
107 if response.status == 405 and response.request.method == 'HEAD':
108 # Method 'HEAD' not allowed, repeat request with 'GET'
109 return self.retry_request_with_get(response.request)
110 self.log("Error! Please check link: {}".format(response), logging.ERROR)
111 elif isinstance(failure.type, IOError):
112 self._set_error_state()
113 else:
114 raise Exception(failure.value)
115 return None
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py b/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py
--- a/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py
+++ b/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py
@@ -17,6 +17,8 @@
'https://calendar.google.com/calendar/[email protected]',
# Returns 409 errors to HEAD requests frequently
'https://medium.freecodecamp.com',
+ # Returns 404 to HEAD requests unconditionally
+ 'https://www.git-tower.com/blog/command-line-cheat-sheet/',
]
| {"golden_diff": "diff --git a/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py b/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py\n--- a/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py\n+++ b/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py\n@@ -17,6 +17,8 @@\n 'https://calendar.google.com/calendar/[email protected]',\n # Returns 409 errors to HEAD requests frequently\n 'https://medium.freecodecamp.com',\n+ # Returns 404 to HEAD requests unconditionally\n+ 'https://www.git-tower.com/blog/command-line-cheat-sheet/',\n ]\n", "issue": "Pull GCI docs into main Zulip repo.\nSome of our assets, like git cheatsheets, currently live in the zulip-gci repo. We should move them to the main Zulip repo and link from appropriate places.\r\n\r\ncc @synicalsyntax @lonerz @tommyip \n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport logging\nimport re\nimport scrapy\n\nfrom scrapy import Request\nfrom scrapy.linkextractors import IGNORED_EXTENSIONS\nfrom scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor\nfrom scrapy.utils.url import url_has_any_extension\n\nfrom typing import Any, Generator, List, Optional, Tuple\n\nEXCLUDED_URLS = [\n # Google calendar returns 404s on HEAD requests unconditionally\n 'https://calendar.google.com/calendar/[email protected]',\n # Returns 409 errors to HEAD requests frequently\n 'https://medium.freecodecamp.com',\n]\n\n\nclass BaseDocumentationSpider(scrapy.Spider):\n name = None # type: Optional[str]\n # Exclude domain address.\n deny_domains = [] # type: List[str]\n start_urls = [] # type: List[str]\n deny = [] # type: List[str]\n file_extensions = ['.' + ext for ext in IGNORED_EXTENSIONS] # type: List[str]\n tags = ('a', 'area', 'img')\n attrs = ('href', 'src')\n\n def __init__(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n super(BaseDocumentationSpider, self).__init__(*args, **kwargs)\n self.has_error = False\n\n def _set_error_state(self):\n # type: () -> None\n self.has_error = True\n\n def _has_extension(self, url):\n # type: (str) -> bool\n return url_has_any_extension(url, self.file_extensions)\n\n def _is_external_url(self, url):\n # type: (str) -> bool\n return url.startswith('http') or self._has_extension(url)\n\n def check_existing(self, response):\n # type: (Any) -> None\n self.log(response)\n\n def check_permalink(self, response):\n # type: (Any) -> None\n self.log(response)\n xpath_template = \"//*[@id='{permalink}' or @name='{permalink}']\"\n m = re.match(r\".+\\#(?P<permalink>.*)$\", response.request.url) # Get anchor value.\n if not m:\n return\n permalink = m.group('permalink')\n # Check permalink existing on response page.\n if not response.selector.xpath(xpath_template.format(permalink=permalink)):\n self._set_error_state()\n raise Exception(\n \"Permalink #{} is not found on page {}\".format(permalink, response.request.url))\n\n def parse(self, response):\n # type: (Any) -> Generator[Request, None, None]\n self.log(response)\n for link in LxmlLinkExtractor(deny_domains=self.deny_domains, deny_extensions=['doc'],\n tags=self.tags, attrs=self.attrs, deny=self.deny,\n canonicalize=False).extract_links(response):\n callback = self.parse # type: Any\n dont_filter = False\n method = 'GET'\n if self._is_external_url(link.url):\n callback = self.check_existing\n method = 'HEAD'\n elif '#' in link.url:\n dont_filter = True\n callback = self.check_permalink\n yield Request(link.url, method=method, callback=callback, dont_filter=dont_filter,\n errback=self.error_callback)\n\n def retry_request_with_get(self, request):\n # type: (Request) -> Generator[Request, None, None]\n request.method = 'GET'\n request.dont_filter = True\n yield request\n\n def exclude_error(self, url):\n # type: (str) -> bool\n if url in EXCLUDED_URLS:\n return True\n return False\n\n def error_callback(self, failure):\n # type: (Any) -> Optional[Generator[Any, None, None]]\n if hasattr(failure.value, 'response') and failure.value.response:\n response = failure.value.response\n if self.exclude_error(response.url):\n return None\n if response.status == 404:\n self._set_error_state()\n raise Exception('Page not found: {}'.format(response))\n if response.status == 405 and response.request.method == 'HEAD':\n # Method 'HEAD' not allowed, repeat request with 'GET'\n return self.retry_request_with_get(response.request)\n self.log(\"Error! Please check link: {}\".format(response), logging.ERROR)\n elif isinstance(failure.type, IOError):\n self._set_error_state()\n else:\n raise Exception(failure.value)\n return None\n", "path": "tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py"}], "after_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport logging\nimport re\nimport scrapy\n\nfrom scrapy import Request\nfrom scrapy.linkextractors import IGNORED_EXTENSIONS\nfrom scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor\nfrom scrapy.utils.url import url_has_any_extension\n\nfrom typing import Any, Generator, List, Optional, Tuple\n\nEXCLUDED_URLS = [\n # Google calendar returns 404s on HEAD requests unconditionally\n 'https://calendar.google.com/calendar/[email protected]',\n # Returns 409 errors to HEAD requests frequently\n 'https://medium.freecodecamp.com',\n # Returns 404 to HEAD requests unconditionally\n 'https://www.git-tower.com/blog/command-line-cheat-sheet/',\n]\n\n\nclass BaseDocumentationSpider(scrapy.Spider):\n name = None # type: Optional[str]\n # Exclude domain address.\n deny_domains = [] # type: List[str]\n start_urls = [] # type: List[str]\n deny = [] # type: List[str]\n file_extensions = ['.' + ext for ext in IGNORED_EXTENSIONS] # type: List[str]\n tags = ('a', 'area', 'img')\n attrs = ('href', 'src')\n\n def __init__(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n super(BaseDocumentationSpider, self).__init__(*args, **kwargs)\n self.has_error = False\n\n def _set_error_state(self):\n # type: () -> None\n self.has_error = True\n\n def _has_extension(self, url):\n # type: (str) -> bool\n return url_has_any_extension(url, self.file_extensions)\n\n def _is_external_url(self, url):\n # type: (str) -> bool\n return url.startswith('http') or self._has_extension(url)\n\n def check_existing(self, response):\n # type: (Any) -> None\n self.log(response)\n\n def check_permalink(self, response):\n # type: (Any) -> None\n self.log(response)\n xpath_template = \"//*[@id='{permalink}' or @name='{permalink}']\"\n m = re.match(r\".+\\#(?P<permalink>.*)$\", response.request.url) # Get anchor value.\n if not m:\n return\n permalink = m.group('permalink')\n # Check permalink existing on response page.\n if not response.selector.xpath(xpath_template.format(permalink=permalink)):\n self._set_error_state()\n raise Exception(\n \"Permalink #{} is not found on page {}\".format(permalink, response.request.url))\n\n def parse(self, response):\n # type: (Any) -> Generator[Request, None, None]\n self.log(response)\n for link in LxmlLinkExtractor(deny_domains=self.deny_domains, deny_extensions=['doc'],\n tags=self.tags, attrs=self.attrs, deny=self.deny,\n canonicalize=False).extract_links(response):\n callback = self.parse # type: Any\n dont_filter = False\n method = 'GET'\n if self._is_external_url(link.url):\n callback = self.check_existing\n method = 'HEAD'\n elif '#' in link.url:\n dont_filter = True\n callback = self.check_permalink\n yield Request(link.url, method=method, callback=callback, dont_filter=dont_filter,\n errback=self.error_callback)\n\n def retry_request_with_get(self, request):\n # type: (Request) -> Generator[Request, None, None]\n request.method = 'GET'\n request.dont_filter = True\n yield request\n\n def exclude_error(self, url):\n # type: (str) -> bool\n if url in EXCLUDED_URLS:\n return True\n return False\n\n def error_callback(self, failure):\n # type: (Any) -> Optional[Generator[Any, None, None]]\n if hasattr(failure.value, 'response') and failure.value.response:\n response = failure.value.response\n if self.exclude_error(response.url):\n return None\n if response.status == 404:\n self._set_error_state()\n raise Exception('Page not found: {}'.format(response))\n if response.status == 405 and response.request.method == 'HEAD':\n # Method 'HEAD' not allowed, repeat request with 'GET'\n return self.retry_request_with_get(response.request)\n self.log(\"Error! Please check link: {}\".format(response), logging.ERROR)\n elif isinstance(failure.type, IOError):\n self._set_error_state()\n else:\n raise Exception(failure.value)\n return None\n", "path": "tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py"}]} |
gh_patches_debug_1514 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1113 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
rust hook requires `--path` attribute
Cargo has changed how packages get installed and requires an extra `--path <destination>` attribute.
Symptom:
```
[INFO] Initializing environment for https://github.com/nix-community/nixpkgs-fmt.
[INFO] Installing environment for https://github.com/nix-community/nixpkgs-fmt.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
An unexpected error has occurred: CalledProcessError: Command: ('/nix/store/fcc3x8zwq1c0667xjs7bkn6ay8j4fdpz-rust-1.38.0-nightly-2019-08-07-ad7c55e1f/bin/cargo', 'install', '--bins', '--root', '/home/zimbatm/.cache/pre-commit/repoeft6xm6t/rustenv-default')
Return code: 101
Expected return code: 0
Output: (none)
Errors:
error: Using `cargo install` to install the binaries for the package in current working directory is no longer supported, use `cargo install --path .` instead. Use `cargo build` if you want to simply build the package.
```
I guess the fix should be done where here: https://github.com/pre-commit/pre-commit/blob/9c6a1d80d6b94c86a1785a40a51389e83accac3e/pre_commit/languages/rust.py#L87
Do we want to make pre-commit compatible with multiple versions of cargo or just the latest one?
/cc @asottile @chriskuehl
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/languages/rust.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import contextlib
4 import os.path
5
6 import toml
7
8 import pre_commit.constants as C
9 from pre_commit.envcontext import envcontext
10 from pre_commit.envcontext import Var
11 from pre_commit.languages import helpers
12 from pre_commit.util import clean_path_on_failure
13 from pre_commit.util import cmd_output
14
15
16 ENVIRONMENT_DIR = 'rustenv'
17 get_default_version = helpers.basic_get_default_version
18 healthy = helpers.basic_healthy
19
20
21 def get_env_patch(target_dir):
22 return (
23 (
24 'PATH',
25 (os.path.join(target_dir, 'bin'), os.pathsep, Var('PATH')),
26 ),
27 )
28
29
30 @contextlib.contextmanager
31 def in_env(prefix):
32 target_dir = prefix.path(
33 helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),
34 )
35 with envcontext(get_env_patch(target_dir)):
36 yield
37
38
39 def _add_dependencies(cargo_toml_path, additional_dependencies):
40 with open(cargo_toml_path, 'r+') as f:
41 cargo_toml = toml.load(f)
42 cargo_toml.setdefault('dependencies', {})
43 for dep in additional_dependencies:
44 name, _, spec = dep.partition(':')
45 cargo_toml['dependencies'][name] = spec or '*'
46 f.seek(0)
47 toml.dump(cargo_toml, f)
48 f.truncate()
49
50
51 def install_environment(prefix, version, additional_dependencies):
52 helpers.assert_version_default('rust', version)
53 directory = prefix.path(
54 helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),
55 )
56
57 # There are two cases where we might want to specify more dependencies:
58 # as dependencies for the library being built, and as binary packages
59 # to be `cargo install`'d.
60 #
61 # Unlike e.g. Python, if we just `cargo install` a library, it won't be
62 # used for compilation. And if we add a crate providing a binary to the
63 # `Cargo.toml`, the binary won't be built.
64 #
65 # Because of this, we allow specifying "cli" dependencies by prefixing
66 # with 'cli:'.
67 cli_deps = {
68 dep for dep in additional_dependencies if dep.startswith('cli:')
69 }
70 lib_deps = set(additional_dependencies) - cli_deps
71
72 if len(lib_deps) > 0:
73 _add_dependencies(prefix.path('Cargo.toml'), lib_deps)
74
75 with clean_path_on_failure(directory):
76 packages_to_install = {()}
77 for cli_dep in cli_deps:
78 cli_dep = cli_dep[len('cli:'):]
79 package, _, version = cli_dep.partition(':')
80 if version != '':
81 packages_to_install.add((package, '--version', version))
82 else:
83 packages_to_install.add((package,))
84
85 for package in packages_to_install:
86 cmd_output(
87 'cargo', 'install', '--bins', '--root', directory, *package,
88 cwd=prefix.prefix_dir
89 )
90
91
92 def run_hook(hook, file_args):
93 with in_env(hook.prefix):
94 return helpers.run_xargs(hook, helpers.to_cmd(hook), file_args)
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/languages/rust.py b/pre_commit/languages/rust.py
--- a/pre_commit/languages/rust.py
+++ b/pre_commit/languages/rust.py
@@ -73,7 +73,7 @@
_add_dependencies(prefix.path('Cargo.toml'), lib_deps)
with clean_path_on_failure(directory):
- packages_to_install = {()}
+ packages_to_install = {('--path', '.')}
for cli_dep in cli_deps:
cli_dep = cli_dep[len('cli:'):]
package, _, version = cli_dep.partition(':')
| {"golden_diff": "diff --git a/pre_commit/languages/rust.py b/pre_commit/languages/rust.py\n--- a/pre_commit/languages/rust.py\n+++ b/pre_commit/languages/rust.py\n@@ -73,7 +73,7 @@\n _add_dependencies(prefix.path('Cargo.toml'), lib_deps)\n \n with clean_path_on_failure(directory):\n- packages_to_install = {()}\n+ packages_to_install = {('--path', '.')}\n for cli_dep in cli_deps:\n cli_dep = cli_dep[len('cli:'):]\n package, _, version = cli_dep.partition(':')\n", "issue": "rust hook requires `--path` attribute\nCargo has changed how packages get installed and requires an extra `--path <destination>` attribute.\r\n\r\nSymptom:\r\n```\r\n[INFO] Initializing environment for https://github.com/nix-community/nixpkgs-fmt.\r\n[INFO] Installing environment for https://github.com/nix-community/nixpkgs-fmt.\r\n[INFO] Once installed this environment will be reused.\r\n[INFO] This may take a few minutes...\r\nAn unexpected error has occurred: CalledProcessError: Command: ('/nix/store/fcc3x8zwq1c0667xjs7bkn6ay8j4fdpz-rust-1.38.0-nightly-2019-08-07-ad7c55e1f/bin/cargo', 'install', '--bins', '--root', '/home/zimbatm/.cache/pre-commit/repoeft6xm6t/rustenv-default')\r\nReturn code: 101\r\nExpected return code: 0\r\nOutput: (none)\r\nErrors: \r\n error: Using `cargo install` to install the binaries for the package in current working directory is no longer supported, use `cargo install --path .` instead. Use `cargo build` if you want to simply build the package.\r\n```\r\n\r\nI guess the fix should be done where here: https://github.com/pre-commit/pre-commit/blob/9c6a1d80d6b94c86a1785a40a51389e83accac3e/pre_commit/languages/rust.py#L87\r\n\r\nDo we want to make pre-commit compatible with multiple versions of cargo or just the latest one?\r\n\r\n/cc @asottile @chriskuehl \n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport os.path\n\nimport toml\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import Var\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\n\n\nENVIRONMENT_DIR = 'rustenv'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef get_env_patch(target_dir):\n return (\n (\n 'PATH',\n (os.path.join(target_dir, 'bin'), os.pathsep, Var('PATH')),\n ),\n )\n\n\[email protected]\ndef in_env(prefix):\n target_dir = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n with envcontext(get_env_patch(target_dir)):\n yield\n\n\ndef _add_dependencies(cargo_toml_path, additional_dependencies):\n with open(cargo_toml_path, 'r+') as f:\n cargo_toml = toml.load(f)\n cargo_toml.setdefault('dependencies', {})\n for dep in additional_dependencies:\n name, _, spec = dep.partition(':')\n cargo_toml['dependencies'][name] = spec or '*'\n f.seek(0)\n toml.dump(cargo_toml, f)\n f.truncate()\n\n\ndef install_environment(prefix, version, additional_dependencies):\n helpers.assert_version_default('rust', version)\n directory = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n\n # There are two cases where we might want to specify more dependencies:\n # as dependencies for the library being built, and as binary packages\n # to be `cargo install`'d.\n #\n # Unlike e.g. Python, if we just `cargo install` a library, it won't be\n # used for compilation. And if we add a crate providing a binary to the\n # `Cargo.toml`, the binary won't be built.\n #\n # Because of this, we allow specifying \"cli\" dependencies by prefixing\n # with 'cli:'.\n cli_deps = {\n dep for dep in additional_dependencies if dep.startswith('cli:')\n }\n lib_deps = set(additional_dependencies) - cli_deps\n\n if len(lib_deps) > 0:\n _add_dependencies(prefix.path('Cargo.toml'), lib_deps)\n\n with clean_path_on_failure(directory):\n packages_to_install = {()}\n for cli_dep in cli_deps:\n cli_dep = cli_dep[len('cli:'):]\n package, _, version = cli_dep.partition(':')\n if version != '':\n packages_to_install.add((package, '--version', version))\n else:\n packages_to_install.add((package,))\n\n for package in packages_to_install:\n cmd_output(\n 'cargo', 'install', '--bins', '--root', directory, *package,\n cwd=prefix.prefix_dir\n )\n\n\ndef run_hook(hook, file_args):\n with in_env(hook.prefix):\n return helpers.run_xargs(hook, helpers.to_cmd(hook), file_args)\n", "path": "pre_commit/languages/rust.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport os.path\n\nimport toml\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import Var\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\n\n\nENVIRONMENT_DIR = 'rustenv'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef get_env_patch(target_dir):\n return (\n (\n 'PATH',\n (os.path.join(target_dir, 'bin'), os.pathsep, Var('PATH')),\n ),\n )\n\n\[email protected]\ndef in_env(prefix):\n target_dir = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n with envcontext(get_env_patch(target_dir)):\n yield\n\n\ndef _add_dependencies(cargo_toml_path, additional_dependencies):\n with open(cargo_toml_path, 'r+') as f:\n cargo_toml = toml.load(f)\n cargo_toml.setdefault('dependencies', {})\n for dep in additional_dependencies:\n name, _, spec = dep.partition(':')\n cargo_toml['dependencies'][name] = spec or '*'\n f.seek(0)\n toml.dump(cargo_toml, f)\n f.truncate()\n\n\ndef install_environment(prefix, version, additional_dependencies):\n helpers.assert_version_default('rust', version)\n directory = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n\n # There are two cases where we might want to specify more dependencies:\n # as dependencies for the library being built, and as binary packages\n # to be `cargo install`'d.\n #\n # Unlike e.g. Python, if we just `cargo install` a library, it won't be\n # used for compilation. And if we add a crate providing a binary to the\n # `Cargo.toml`, the binary won't be built.\n #\n # Because of this, we allow specifying \"cli\" dependencies by prefixing\n # with 'cli:'.\n cli_deps = {\n dep for dep in additional_dependencies if dep.startswith('cli:')\n }\n lib_deps = set(additional_dependencies) - cli_deps\n\n if len(lib_deps) > 0:\n _add_dependencies(prefix.path('Cargo.toml'), lib_deps)\n\n with clean_path_on_failure(directory):\n packages_to_install = {('--path', '.')}\n for cli_dep in cli_deps:\n cli_dep = cli_dep[len('cli:'):]\n package, _, version = cli_dep.partition(':')\n if version != '':\n packages_to_install.add((package, '--version', version))\n else:\n packages_to_install.add((package,))\n\n for package in packages_to_install:\n cmd_output(\n 'cargo', 'install', '--bins', '--root', directory, *package,\n cwd=prefix.prefix_dir\n )\n\n\ndef run_hook(hook, file_args):\n with in_env(hook.prefix):\n return helpers.run_xargs(hook, helpers.to_cmd(hook), file_args)\n", "path": "pre_commit/languages/rust.py"}]} |
gh_patches_debug_1515 | rasdani/github-patches | git_diff | statsmodels__statsmodels-3044 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TST disable test_webuse again
see #2233
one file is being downloaded from sourceforge which has frequent connection problems
the download from our source on github worked without problems
But I want to get 0.7 out instead of being distracted by network problems
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `statsmodels/datasets/utils.py`
Content:
```
1 from statsmodels.compat.numpy import recarray_select
2 from statsmodels.compat.python import (range, StringIO, urlopen,
3 HTTPError, URLError, lrange,
4 cPickle, urljoin, BytesIO, long, PY3)
5 import sys
6 import shutil
7 from os import environ
8 from os import makedirs
9 from os.path import expanduser
10 from os.path import exists
11 from os.path import join
12
13 import numpy as np
14 from numpy import array
15 from pandas import read_csv, DataFrame, Index
16
17
18 def webuse(data, baseurl='http://www.stata-press.com/data/r11/', as_df=True):
19 """
20 Download and return an example dataset from Stata.
21
22 Parameters
23 ----------
24 data : str
25 Name of dataset to fetch.
26 baseurl : str
27 The base URL to the stata datasets.
28 as_df : bool
29 If True, returns a `pandas.DataFrame`
30
31 Returns
32 -------
33 dta : Record Array
34 A record array containing the Stata dataset.
35
36 Examples
37 --------
38 >>> dta = webuse('auto')
39
40 Notes
41 -----
42 Make sure baseurl has trailing forward slash. Doesn't do any
43 error checking in response URLs.
44 """
45 # lazy imports
46 from statsmodels.iolib import genfromdta
47
48 url = urljoin(baseurl, data+'.dta')
49 dta = urlopen(url)
50 dta = BytesIO(dta.read()) # make it truly file-like
51 if as_df: # could make this faster if we don't process dta twice?
52 return DataFrame.from_records(genfromdta(dta))
53 else:
54 return genfromdta(dta)
55
56
57 class Dataset(dict):
58 def __init__(self, **kw):
59 # define some default attributes, so pylint can find them
60 self.endog = None
61 self.exog = None
62 self.data = None
63 self.names = None
64
65 dict.__init__(self, kw)
66 self.__dict__ = self
67 # Some datasets have string variables. If you want a raw_data
68 # attribute you must create this in the dataset's load function.
69 try: # some datasets have string variables
70 self.raw_data = self.data.view((float, len(self.names)))
71 except:
72 pass
73
74 def __repr__(self):
75 return str(self.__class__)
76
77
78 def process_recarray(data, endog_idx=0, exog_idx=None, stack=True, dtype=None):
79 names = list(data.dtype.names)
80
81 if isinstance(endog_idx, (int, long)):
82 endog = array(data[names[endog_idx]], dtype=dtype)
83 endog_name = names[endog_idx]
84 endog_idx = [endog_idx]
85 else:
86 endog_name = [names[i] for i in endog_idx]
87
88 if stack:
89 endog = np.column_stack(data[field] for field in endog_name)
90 else:
91 endog = data[endog_name]
92
93 if exog_idx is None:
94 exog_name = [names[i] for i in range(len(names))
95 if i not in endog_idx]
96 else:
97 exog_name = [names[i] for i in exog_idx]
98
99 if stack:
100 exog = np.column_stack(data[field] for field in exog_name)
101 else:
102 exog = recarray_select(data, exog_name)
103
104 if dtype:
105 endog = endog.astype(dtype)
106 exog = exog.astype(dtype)
107
108 dataset = Dataset(data=data, names=names, endog=endog, exog=exog,
109 endog_name=endog_name, exog_name=exog_name)
110
111 return dataset
112
113
114 def process_recarray_pandas(data, endog_idx=0, exog_idx=None, dtype=None,
115 index_idx=None):
116
117 data = DataFrame(data, dtype=dtype)
118 names = data.columns
119
120 if isinstance(endog_idx, (int, long)):
121 endog_name = names[endog_idx]
122 endog = data[endog_name]
123 if exog_idx is None:
124 exog = data.drop([endog_name], axis=1)
125 else:
126 exog = data.filter(names[exog_idx])
127 else:
128 endog = data.ix[:, endog_idx]
129 endog_name = list(endog.columns)
130 if exog_idx is None:
131 exog = data.drop(endog_name, axis=1)
132 elif isinstance(exog_idx, (int, long)):
133 exog = data.filter([names[exog_idx]])
134 else:
135 exog = data.filter(names[exog_idx])
136
137 if index_idx is not None: # NOTE: will have to be improved for dates
138 endog.index = Index(data.ix[:, index_idx])
139 exog.index = Index(data.ix[:, index_idx])
140 data = data.set_index(names[index_idx])
141
142 exog_name = list(exog.columns)
143 dataset = Dataset(data=data, names=list(names), endog=endog, exog=exog,
144 endog_name=endog_name, exog_name=exog_name)
145 return dataset
146
147
148 def _maybe_reset_index(data):
149 """
150 All the Rdatasets have the integer row.labels from R if there is no
151 real index. Strip this for a zero-based index
152 """
153 if data.index.equals(Index(lrange(1, len(data) + 1))):
154 data = data.reset_index(drop=True)
155 return data
156
157
158 def _get_cache(cache):
159 if cache is False:
160 # do not do any caching or load from cache
161 cache = None
162 elif cache is True: # use default dir for cache
163 cache = get_data_home(None)
164 else:
165 cache = get_data_home(cache)
166 return cache
167
168
169 def _cache_it(data, cache_path):
170 if PY3:
171 # for some reason encode("zip") won't work for me in Python 3?
172 import zlib
173 # use protocol 2 so can open with python 2.x if cached in 3.x
174 open(cache_path, "wb").write(zlib.compress(cPickle.dumps(data,
175 protocol=2)))
176 else:
177 open(cache_path, "wb").write(cPickle.dumps(data).encode("zip"))
178
179
180 def _open_cache(cache_path):
181 if PY3:
182 # NOTE: don't know why but decode('zip') doesn't work on my
183 # Python 3 build
184 import zlib
185 data = zlib.decompress(open(cache_path, 'rb').read())
186 # return as bytes object encoded in utf-8 for cross-compat of cached
187 data = cPickle.loads(data).encode('utf-8')
188 else:
189 data = open(cache_path, 'rb').read().decode('zip')
190 data = cPickle.loads(data)
191 return data
192
193
194 def _urlopen_cached(url, cache):
195 """
196 Tries to load data from cache location otherwise downloads it. If it
197 downloads the data and cache is not None then it will put the downloaded
198 data in the cache path.
199 """
200 from_cache = False
201 if cache is not None:
202 cache_path = join(cache,
203 url.split("://")[-1].replace('/', ',') + ".zip")
204 try:
205 data = _open_cache(cache_path)
206 from_cache = True
207 except:
208 pass
209
210 # not using the cache or didn't find it in cache
211 if not from_cache:
212 data = urlopen(url).read()
213 if cache is not None: # then put it in the cache
214 _cache_it(data, cache_path)
215 return data, from_cache
216
217
218 def _get_data(base_url, dataname, cache, extension="csv"):
219 url = base_url + (dataname + ".%s") % extension
220 try:
221 data, from_cache = _urlopen_cached(url, cache)
222 except HTTPError as err:
223 if '404' in str(err):
224 raise ValueError("Dataset %s was not found." % dataname)
225 else:
226 raise err
227
228 data = data.decode('utf-8', 'strict')
229 return StringIO(data), from_cache
230
231
232 def _get_dataset_meta(dataname, package, cache):
233 # get the index, you'll probably want this cached because you have
234 # to download info about all the data to get info about any of the data...
235 index_url = ("https://raw.github.com/vincentarelbundock/Rdatasets/master/"
236 "datasets.csv")
237 data, _ = _urlopen_cached(index_url, cache)
238 # Python 3
239 if PY3: # pragma: no cover
240 data = data.decode('utf-8', 'strict')
241 index = read_csv(StringIO(data))
242 idx = np.logical_and(index.Item == dataname, index.Package == package)
243 dataset_meta = index.ix[idx]
244 return dataset_meta["Title"].item()
245
246
247 def get_rdataset(dataname, package="datasets", cache=False):
248 """download and return R dataset
249
250 Parameters
251 ----------
252 dataname : str
253 The name of the dataset you want to download
254 package : str
255 The package in which the dataset is found. The default is the core
256 'datasets' package.
257 cache : bool or str
258 If True, will download this data into the STATSMODELS_DATA folder.
259 The default location is a folder called statsmodels_data in the
260 user home folder. Otherwise, you can specify a path to a folder to
261 use for caching the data. If False, the data will not be cached.
262
263 Returns
264 -------
265 dataset : Dataset instance
266 A `statsmodels.data.utils.Dataset` instance. This objects has
267 attributes::
268
269 * data - A pandas DataFrame containing the data
270 * title - The dataset title
271 * package - The package from which the data came
272 * from_cache - Whether not cached data was retrieved
273 * __doc__ - The verbatim R documentation.
274
275
276 Notes
277 -----
278 If the R dataset has an integer index. This is reset to be zero-based.
279 Otherwise the index is preserved. The caching facilities are dumb. That
280 is, no download dates, e-tags, or otherwise identifying information
281 is checked to see if the data should be downloaded again or not. If the
282 dataset is in the cache, it's used.
283 """
284 # NOTE: use raw github bc html site might not be most up to date
285 data_base_url = ("https://raw.github.com/vincentarelbundock/Rdatasets/"
286 "master/csv/"+package+"/")
287 docs_base_url = ("https://raw.github.com/vincentarelbundock/Rdatasets/"
288 "master/doc/"+package+"/rst/")
289 cache = _get_cache(cache)
290 data, from_cache = _get_data(data_base_url, dataname, cache)
291 data = read_csv(data, index_col=0)
292 data = _maybe_reset_index(data)
293
294 title = _get_dataset_meta(dataname, package, cache)
295 doc, _ = _get_data(docs_base_url, dataname, cache, "rst")
296
297 return Dataset(data=data, __doc__=doc.read(), package=package, title=title,
298 from_cache=from_cache)
299
300 # The below function were taken from sklearn
301
302
303 def get_data_home(data_home=None):
304 """Return the path of the statsmodels data dir.
305
306 This folder is used by some large dataset loaders to avoid
307 downloading the data several times.
308
309 By default the data dir is set to a folder named 'statsmodels_data'
310 in the user home folder.
311
312 Alternatively, it can be set by the 'STATSMODELS_DATA' environment
313 variable or programatically by giving an explit folder path. The
314 '~' symbol is expanded to the user home folder.
315
316 If the folder does not already exist, it is automatically created.
317 """
318 if data_home is None:
319 data_home = environ.get('STATSMODELS_DATA',
320 join('~', 'statsmodels_data'))
321 data_home = expanduser(data_home)
322 if not exists(data_home):
323 makedirs(data_home)
324 return data_home
325
326
327 def clear_data_home(data_home=None):
328 """Delete all the content of the data home cache."""
329 data_home = get_data_home(data_home)
330 shutil.rmtree(data_home)
331
332 def check_internet():
333 """Check if internet is available"""
334 try:
335 urlopen("https://github.com")
336 except URLError as err:
337 return False
338 return True
339
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/statsmodels/datasets/utils.py b/statsmodels/datasets/utils.py
--- a/statsmodels/datasets/utils.py
+++ b/statsmodels/datasets/utils.py
@@ -329,10 +329,11 @@
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
-def check_internet():
+def check_internet(url=None):
"""Check if internet is available"""
+ url = "https://github.com" if url is None else url
try:
- urlopen("https://github.com")
+ urlopen(url)
except URLError as err:
return False
return True
| {"golden_diff": "diff --git a/statsmodels/datasets/utils.py b/statsmodels/datasets/utils.py\n--- a/statsmodels/datasets/utils.py\n+++ b/statsmodels/datasets/utils.py\n@@ -329,10 +329,11 @@\n data_home = get_data_home(data_home)\n shutil.rmtree(data_home)\n \n-def check_internet():\n+def check_internet(url=None):\n \"\"\"Check if internet is available\"\"\"\n+ url = \"https://github.com\" if url is None else url\n try:\n- urlopen(\"https://github.com\")\n+ urlopen(url)\n except URLError as err:\n return False\n return True\n", "issue": "TST disable test_webuse again\n see #2233\n\none file is being downloaded from sourceforge which has frequent connection problems\nthe download from our source on github worked without problems\n\nBut I want to get 0.7 out instead of being distracted by network problems\n\n", "before_files": [{"content": "from statsmodels.compat.numpy import recarray_select\nfrom statsmodels.compat.python import (range, StringIO, urlopen,\n HTTPError, URLError, lrange,\n cPickle, urljoin, BytesIO, long, PY3)\nimport sys\nimport shutil\nfrom os import environ\nfrom os import makedirs\nfrom os.path import expanduser\nfrom os.path import exists\nfrom os.path import join\n\nimport numpy as np\nfrom numpy import array\nfrom pandas import read_csv, DataFrame, Index\n\n\ndef webuse(data, baseurl='http://www.stata-press.com/data/r11/', as_df=True):\n \"\"\"\n Download and return an example dataset from Stata.\n\n Parameters\n ----------\n data : str\n Name of dataset to fetch.\n baseurl : str\n The base URL to the stata datasets.\n as_df : bool\n If True, returns a `pandas.DataFrame`\n\n Returns\n -------\n dta : Record Array\n A record array containing the Stata dataset.\n\n Examples\n --------\n >>> dta = webuse('auto')\n\n Notes\n -----\n Make sure baseurl has trailing forward slash. Doesn't do any\n error checking in response URLs.\n \"\"\"\n # lazy imports\n from statsmodels.iolib import genfromdta\n\n url = urljoin(baseurl, data+'.dta')\n dta = urlopen(url)\n dta = BytesIO(dta.read()) # make it truly file-like\n if as_df: # could make this faster if we don't process dta twice?\n return DataFrame.from_records(genfromdta(dta))\n else:\n return genfromdta(dta)\n\n\nclass Dataset(dict):\n def __init__(self, **kw):\n # define some default attributes, so pylint can find them\n self.endog = None\n self.exog = None\n self.data = None\n self.names = None\n\n dict.__init__(self, kw)\n self.__dict__ = self\n # Some datasets have string variables. If you want a raw_data\n # attribute you must create this in the dataset's load function.\n try: # some datasets have string variables\n self.raw_data = self.data.view((float, len(self.names)))\n except:\n pass\n\n def __repr__(self):\n return str(self.__class__)\n\n\ndef process_recarray(data, endog_idx=0, exog_idx=None, stack=True, dtype=None):\n names = list(data.dtype.names)\n\n if isinstance(endog_idx, (int, long)):\n endog = array(data[names[endog_idx]], dtype=dtype)\n endog_name = names[endog_idx]\n endog_idx = [endog_idx]\n else:\n endog_name = [names[i] for i in endog_idx]\n\n if stack:\n endog = np.column_stack(data[field] for field in endog_name)\n else:\n endog = data[endog_name]\n\n if exog_idx is None:\n exog_name = [names[i] for i in range(len(names))\n if i not in endog_idx]\n else:\n exog_name = [names[i] for i in exog_idx]\n\n if stack:\n exog = np.column_stack(data[field] for field in exog_name)\n else:\n exog = recarray_select(data, exog_name)\n\n if dtype:\n endog = endog.astype(dtype)\n exog = exog.astype(dtype)\n\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name=endog_name, exog_name=exog_name)\n\n return dataset\n\n\ndef process_recarray_pandas(data, endog_idx=0, exog_idx=None, dtype=None,\n index_idx=None):\n\n data = DataFrame(data, dtype=dtype)\n names = data.columns\n\n if isinstance(endog_idx, (int, long)):\n endog_name = names[endog_idx]\n endog = data[endog_name]\n if exog_idx is None:\n exog = data.drop([endog_name], axis=1)\n else:\n exog = data.filter(names[exog_idx])\n else:\n endog = data.ix[:, endog_idx]\n endog_name = list(endog.columns)\n if exog_idx is None:\n exog = data.drop(endog_name, axis=1)\n elif isinstance(exog_idx, (int, long)):\n exog = data.filter([names[exog_idx]])\n else:\n exog = data.filter(names[exog_idx])\n\n if index_idx is not None: # NOTE: will have to be improved for dates\n endog.index = Index(data.ix[:, index_idx])\n exog.index = Index(data.ix[:, index_idx])\n data = data.set_index(names[index_idx])\n\n exog_name = list(exog.columns)\n dataset = Dataset(data=data, names=list(names), endog=endog, exog=exog,\n endog_name=endog_name, exog_name=exog_name)\n return dataset\n\n\ndef _maybe_reset_index(data):\n \"\"\"\n All the Rdatasets have the integer row.labels from R if there is no\n real index. Strip this for a zero-based index\n \"\"\"\n if data.index.equals(Index(lrange(1, len(data) + 1))):\n data = data.reset_index(drop=True)\n return data\n\n\ndef _get_cache(cache):\n if cache is False:\n # do not do any caching or load from cache\n cache = None\n elif cache is True: # use default dir for cache\n cache = get_data_home(None)\n else:\n cache = get_data_home(cache)\n return cache\n\n\ndef _cache_it(data, cache_path):\n if PY3:\n # for some reason encode(\"zip\") won't work for me in Python 3?\n import zlib\n # use protocol 2 so can open with python 2.x if cached in 3.x\n open(cache_path, \"wb\").write(zlib.compress(cPickle.dumps(data,\n protocol=2)))\n else:\n open(cache_path, \"wb\").write(cPickle.dumps(data).encode(\"zip\"))\n\n\ndef _open_cache(cache_path):\n if PY3:\n # NOTE: don't know why but decode('zip') doesn't work on my\n # Python 3 build\n import zlib\n data = zlib.decompress(open(cache_path, 'rb').read())\n # return as bytes object encoded in utf-8 for cross-compat of cached\n data = cPickle.loads(data).encode('utf-8')\n else:\n data = open(cache_path, 'rb').read().decode('zip')\n data = cPickle.loads(data)\n return data\n\n\ndef _urlopen_cached(url, cache):\n \"\"\"\n Tries to load data from cache location otherwise downloads it. If it\n downloads the data and cache is not None then it will put the downloaded\n data in the cache path.\n \"\"\"\n from_cache = False\n if cache is not None:\n cache_path = join(cache,\n url.split(\"://\")[-1].replace('/', ',') + \".zip\")\n try:\n data = _open_cache(cache_path)\n from_cache = True\n except:\n pass\n\n # not using the cache or didn't find it in cache\n if not from_cache:\n data = urlopen(url).read()\n if cache is not None: # then put it in the cache\n _cache_it(data, cache_path)\n return data, from_cache\n\n\ndef _get_data(base_url, dataname, cache, extension=\"csv\"):\n url = base_url + (dataname + \".%s\") % extension\n try:\n data, from_cache = _urlopen_cached(url, cache)\n except HTTPError as err:\n if '404' in str(err):\n raise ValueError(\"Dataset %s was not found.\" % dataname)\n else:\n raise err\n\n data = data.decode('utf-8', 'strict')\n return StringIO(data), from_cache\n\n\ndef _get_dataset_meta(dataname, package, cache):\n # get the index, you'll probably want this cached because you have\n # to download info about all the data to get info about any of the data...\n index_url = (\"https://raw.github.com/vincentarelbundock/Rdatasets/master/\"\n \"datasets.csv\")\n data, _ = _urlopen_cached(index_url, cache)\n # Python 3\n if PY3: # pragma: no cover\n data = data.decode('utf-8', 'strict')\n index = read_csv(StringIO(data))\n idx = np.logical_and(index.Item == dataname, index.Package == package)\n dataset_meta = index.ix[idx]\n return dataset_meta[\"Title\"].item()\n\n\ndef get_rdataset(dataname, package=\"datasets\", cache=False):\n \"\"\"download and return R dataset\n\n Parameters\n ----------\n dataname : str\n The name of the dataset you want to download\n package : str\n The package in which the dataset is found. The default is the core\n 'datasets' package.\n cache : bool or str\n If True, will download this data into the STATSMODELS_DATA folder.\n The default location is a folder called statsmodels_data in the\n user home folder. Otherwise, you can specify a path to a folder to\n use for caching the data. If False, the data will not be cached.\n\n Returns\n -------\n dataset : Dataset instance\n A `statsmodels.data.utils.Dataset` instance. This objects has\n attributes::\n\n * data - A pandas DataFrame containing the data\n * title - The dataset title\n * package - The package from which the data came\n * from_cache - Whether not cached data was retrieved\n * __doc__ - The verbatim R documentation.\n\n\n Notes\n -----\n If the R dataset has an integer index. This is reset to be zero-based.\n Otherwise the index is preserved. The caching facilities are dumb. That\n is, no download dates, e-tags, or otherwise identifying information\n is checked to see if the data should be downloaded again or not. If the\n dataset is in the cache, it's used.\n \"\"\"\n # NOTE: use raw github bc html site might not be most up to date\n data_base_url = (\"https://raw.github.com/vincentarelbundock/Rdatasets/\"\n \"master/csv/\"+package+\"/\")\n docs_base_url = (\"https://raw.github.com/vincentarelbundock/Rdatasets/\"\n \"master/doc/\"+package+\"/rst/\")\n cache = _get_cache(cache)\n data, from_cache = _get_data(data_base_url, dataname, cache)\n data = read_csv(data, index_col=0)\n data = _maybe_reset_index(data)\n\n title = _get_dataset_meta(dataname, package, cache)\n doc, _ = _get_data(docs_base_url, dataname, cache, \"rst\")\n\n return Dataset(data=data, __doc__=doc.read(), package=package, title=title,\n from_cache=from_cache)\n\n# The below function were taken from sklearn\n\n\ndef get_data_home(data_home=None):\n \"\"\"Return the path of the statsmodels data dir.\n\n This folder is used by some large dataset loaders to avoid\n downloading the data several times.\n\n By default the data dir is set to a folder named 'statsmodels_data'\n in the user home folder.\n\n Alternatively, it can be set by the 'STATSMODELS_DATA' environment\n variable or programatically by giving an explit folder path. The\n '~' symbol is expanded to the user home folder.\n\n If the folder does not already exist, it is automatically created.\n \"\"\"\n if data_home is None:\n data_home = environ.get('STATSMODELS_DATA',\n join('~', 'statsmodels_data'))\n data_home = expanduser(data_home)\n if not exists(data_home):\n makedirs(data_home)\n return data_home\n\n\ndef clear_data_home(data_home=None):\n \"\"\"Delete all the content of the data home cache.\"\"\"\n data_home = get_data_home(data_home)\n shutil.rmtree(data_home)\n\ndef check_internet():\n \"\"\"Check if internet is available\"\"\"\n try:\n urlopen(\"https://github.com\")\n except URLError as err:\n return False\n return True\n", "path": "statsmodels/datasets/utils.py"}], "after_files": [{"content": "from statsmodels.compat.numpy import recarray_select\nfrom statsmodels.compat.python import (range, StringIO, urlopen,\n HTTPError, URLError, lrange,\n cPickle, urljoin, BytesIO, long, PY3)\nimport sys\nimport shutil\nfrom os import environ\nfrom os import makedirs\nfrom os.path import expanduser\nfrom os.path import exists\nfrom os.path import join\n\nimport numpy as np\nfrom numpy import array\nfrom pandas import read_csv, DataFrame, Index\n\n\ndef webuse(data, baseurl='http://www.stata-press.com/data/r11/', as_df=True):\n \"\"\"\n Download and return an example dataset from Stata.\n\n Parameters\n ----------\n data : str\n Name of dataset to fetch.\n baseurl : str\n The base URL to the stata datasets.\n as_df : bool\n If True, returns a `pandas.DataFrame`\n\n Returns\n -------\n dta : Record Array\n A record array containing the Stata dataset.\n\n Examples\n --------\n >>> dta = webuse('auto')\n\n Notes\n -----\n Make sure baseurl has trailing forward slash. Doesn't do any\n error checking in response URLs.\n \"\"\"\n # lazy imports\n from statsmodels.iolib import genfromdta\n\n url = urljoin(baseurl, data+'.dta')\n dta = urlopen(url)\n dta = BytesIO(dta.read()) # make it truly file-like\n if as_df: # could make this faster if we don't process dta twice?\n return DataFrame.from_records(genfromdta(dta))\n else:\n return genfromdta(dta)\n\n\nclass Dataset(dict):\n def __init__(self, **kw):\n # define some default attributes, so pylint can find them\n self.endog = None\n self.exog = None\n self.data = None\n self.names = None\n\n dict.__init__(self, kw)\n self.__dict__ = self\n # Some datasets have string variables. If you want a raw_data\n # attribute you must create this in the dataset's load function.\n try: # some datasets have string variables\n self.raw_data = self.data.view((float, len(self.names)))\n except:\n pass\n\n def __repr__(self):\n return str(self.__class__)\n\n\ndef process_recarray(data, endog_idx=0, exog_idx=None, stack=True, dtype=None):\n names = list(data.dtype.names)\n\n if isinstance(endog_idx, (int, long)):\n endog = array(data[names[endog_idx]], dtype=dtype)\n endog_name = names[endog_idx]\n endog_idx = [endog_idx]\n else:\n endog_name = [names[i] for i in endog_idx]\n\n if stack:\n endog = np.column_stack(data[field] for field in endog_name)\n else:\n endog = data[endog_name]\n\n if exog_idx is None:\n exog_name = [names[i] for i in range(len(names))\n if i not in endog_idx]\n else:\n exog_name = [names[i] for i in exog_idx]\n\n if stack:\n exog = np.column_stack(data[field] for field in exog_name)\n else:\n exog = recarray_select(data, exog_name)\n\n if dtype:\n endog = endog.astype(dtype)\n exog = exog.astype(dtype)\n\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name=endog_name, exog_name=exog_name)\n\n return dataset\n\n\ndef process_recarray_pandas(data, endog_idx=0, exog_idx=None, dtype=None,\n index_idx=None):\n\n data = DataFrame(data, dtype=dtype)\n names = data.columns\n\n if isinstance(endog_idx, (int, long)):\n endog_name = names[endog_idx]\n endog = data[endog_name]\n if exog_idx is None:\n exog = data.drop([endog_name], axis=1)\n else:\n exog = data.filter(names[exog_idx])\n else:\n endog = data.ix[:, endog_idx]\n endog_name = list(endog.columns)\n if exog_idx is None:\n exog = data.drop(endog_name, axis=1)\n elif isinstance(exog_idx, (int, long)):\n exog = data.filter([names[exog_idx]])\n else:\n exog = data.filter(names[exog_idx])\n\n if index_idx is not None: # NOTE: will have to be improved for dates\n endog.index = Index(data.ix[:, index_idx])\n exog.index = Index(data.ix[:, index_idx])\n data = data.set_index(names[index_idx])\n\n exog_name = list(exog.columns)\n dataset = Dataset(data=data, names=list(names), endog=endog, exog=exog,\n endog_name=endog_name, exog_name=exog_name)\n return dataset\n\n\ndef _maybe_reset_index(data):\n \"\"\"\n All the Rdatasets have the integer row.labels from R if there is no\n real index. Strip this for a zero-based index\n \"\"\"\n if data.index.equals(Index(lrange(1, len(data) + 1))):\n data = data.reset_index(drop=True)\n return data\n\n\ndef _get_cache(cache):\n if cache is False:\n # do not do any caching or load from cache\n cache = None\n elif cache is True: # use default dir for cache\n cache = get_data_home(None)\n else:\n cache = get_data_home(cache)\n return cache\n\n\ndef _cache_it(data, cache_path):\n if PY3:\n # for some reason encode(\"zip\") won't work for me in Python 3?\n import zlib\n # use protocol 2 so can open with python 2.x if cached in 3.x\n open(cache_path, \"wb\").write(zlib.compress(cPickle.dumps(data,\n protocol=2)))\n else:\n open(cache_path, \"wb\").write(cPickle.dumps(data).encode(\"zip\"))\n\n\ndef _open_cache(cache_path):\n if PY3:\n # NOTE: don't know why but decode('zip') doesn't work on my\n # Python 3 build\n import zlib\n data = zlib.decompress(open(cache_path, 'rb').read())\n # return as bytes object encoded in utf-8 for cross-compat of cached\n data = cPickle.loads(data).encode('utf-8')\n else:\n data = open(cache_path, 'rb').read().decode('zip')\n data = cPickle.loads(data)\n return data\n\n\ndef _urlopen_cached(url, cache):\n \"\"\"\n Tries to load data from cache location otherwise downloads it. If it\n downloads the data and cache is not None then it will put the downloaded\n data in the cache path.\n \"\"\"\n from_cache = False\n if cache is not None:\n cache_path = join(cache,\n url.split(\"://\")[-1].replace('/', ',') + \".zip\")\n try:\n data = _open_cache(cache_path)\n from_cache = True\n except:\n pass\n\n # not using the cache or didn't find it in cache\n if not from_cache:\n data = urlopen(url).read()\n if cache is not None: # then put it in the cache\n _cache_it(data, cache_path)\n return data, from_cache\n\n\ndef _get_data(base_url, dataname, cache, extension=\"csv\"):\n url = base_url + (dataname + \".%s\") % extension\n try:\n data, from_cache = _urlopen_cached(url, cache)\n except HTTPError as err:\n if '404' in str(err):\n raise ValueError(\"Dataset %s was not found.\" % dataname)\n else:\n raise err\n\n data = data.decode('utf-8', 'strict')\n return StringIO(data), from_cache\n\n\ndef _get_dataset_meta(dataname, package, cache):\n # get the index, you'll probably want this cached because you have\n # to download info about all the data to get info about any of the data...\n index_url = (\"https://raw.github.com/vincentarelbundock/Rdatasets/master/\"\n \"datasets.csv\")\n data, _ = _urlopen_cached(index_url, cache)\n # Python 3\n if PY3: # pragma: no cover\n data = data.decode('utf-8', 'strict')\n index = read_csv(StringIO(data))\n idx = np.logical_and(index.Item == dataname, index.Package == package)\n dataset_meta = index.ix[idx]\n return dataset_meta[\"Title\"].item()\n\n\ndef get_rdataset(dataname, package=\"datasets\", cache=False):\n \"\"\"download and return R dataset\n\n Parameters\n ----------\n dataname : str\n The name of the dataset you want to download\n package : str\n The package in which the dataset is found. The default is the core\n 'datasets' package.\n cache : bool or str\n If True, will download this data into the STATSMODELS_DATA folder.\n The default location is a folder called statsmodels_data in the\n user home folder. Otherwise, you can specify a path to a folder to\n use for caching the data. If False, the data will not be cached.\n\n Returns\n -------\n dataset : Dataset instance\n A `statsmodels.data.utils.Dataset` instance. This objects has\n attributes::\n\n * data - A pandas DataFrame containing the data\n * title - The dataset title\n * package - The package from which the data came\n * from_cache - Whether not cached data was retrieved\n * __doc__ - The verbatim R documentation.\n\n\n Notes\n -----\n If the R dataset has an integer index. This is reset to be zero-based.\n Otherwise the index is preserved. The caching facilities are dumb. That\n is, no download dates, e-tags, or otherwise identifying information\n is checked to see if the data should be downloaded again or not. If the\n dataset is in the cache, it's used.\n \"\"\"\n # NOTE: use raw github bc html site might not be most up to date\n data_base_url = (\"https://raw.github.com/vincentarelbundock/Rdatasets/\"\n \"master/csv/\"+package+\"/\")\n docs_base_url = (\"https://raw.github.com/vincentarelbundock/Rdatasets/\"\n \"master/doc/\"+package+\"/rst/\")\n cache = _get_cache(cache)\n data, from_cache = _get_data(data_base_url, dataname, cache)\n data = read_csv(data, index_col=0)\n data = _maybe_reset_index(data)\n\n title = _get_dataset_meta(dataname, package, cache)\n doc, _ = _get_data(docs_base_url, dataname, cache, \"rst\")\n\n return Dataset(data=data, __doc__=doc.read(), package=package, title=title,\n from_cache=from_cache)\n\n# The below function were taken from sklearn\n\n\ndef get_data_home(data_home=None):\n \"\"\"Return the path of the statsmodels data dir.\n\n This folder is used by some large dataset loaders to avoid\n downloading the data several times.\n\n By default the data dir is set to a folder named 'statsmodels_data'\n in the user home folder.\n\n Alternatively, it can be set by the 'STATSMODELS_DATA' environment\n variable or programatically by giving an explit folder path. The\n '~' symbol is expanded to the user home folder.\n\n If the folder does not already exist, it is automatically created.\n \"\"\"\n if data_home is None:\n data_home = environ.get('STATSMODELS_DATA',\n join('~', 'statsmodels_data'))\n data_home = expanduser(data_home)\n if not exists(data_home):\n makedirs(data_home)\n return data_home\n\n\ndef clear_data_home(data_home=None):\n \"\"\"Delete all the content of the data home cache.\"\"\"\n data_home = get_data_home(data_home)\n shutil.rmtree(data_home)\n\ndef check_internet(url=None):\n \"\"\"Check if internet is available\"\"\"\n url = \"https://github.com\" if url is None else url\n try:\n urlopen(url)\n except URLError as err:\n return False\n return True\n", "path": "statsmodels/datasets/utils.py"}]} |
gh_patches_debug_1516 | rasdani/github-patches | git_diff | geopandas__geopandas-94 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
to_file(): 'long' isn't a valid Fiona property type
The question http://gis.stackexchange.com/questions/89206/geopandas-error-when-writing-to-file-valueerror-long-is-not-in-list revealed a bug to me.
If you pass schema={'geometry': 'Point', 'properties': {'foo': 'long'}} into fiona.open(), the type 'long' isn't found at https://github.com/Toblerity/Fiona/blob/master/src/fiona/ogrext.pyx#L973. OGR doesn't distinguish between long and int, so converting 'long' to 'int' within Fiona may help...
## But :)
Fiona will always return 'int' in the .schema attribute and this could cause trouble for programs that pass 'long' and expect it to stick. So, let's fix up geopandas so it always uses 'int' and never 'long'.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geopandas/geodataframe.py`
Content:
```
1 try:
2 from collections import OrderedDict
3 except ImportError:
4 # Python 2.6
5 from ordereddict import OrderedDict
6 from collections import defaultdict
7 import json
8 import os
9 import sys
10
11 import numpy as np
12 from pandas import DataFrame, Series
13 from shapely.geometry import mapping, shape
14 from shapely.geometry.base import BaseGeometry
15 from six import string_types
16 from six import string_types, iteritems
17
18 from geopandas import GeoSeries
19 from geopandas.base import GeoPandasBase
20 from geopandas.plotting import plot_dataframe
21 import geopandas.io
22
23
24 DEFAULT_GEO_COLUMN_NAME = 'geometry'
25 PY3 = sys.version_info[0] == 3
26
27
28 class GeoDataFrame(GeoPandasBase, DataFrame):
29 """
30 A GeoDataFrame object is a pandas.DataFrame that has a column
31 with geometry. In addition to the standard DataFrame constructor arguments,
32 GeoDataFrame also accepts the following keyword arguments:
33
34 Keyword Arguments
35 -----------------
36 crs : str (optional)
37 Coordinate system
38 geometry : str or array (optional)
39 If str, column to use as geometry. If array, will be set as 'geometry'
40 column on GeoDataFrame.
41 """
42 _metadata = ['crs', '_geometry_column_name']
43 _geometry_column_name = DEFAULT_GEO_COLUMN_NAME
44
45 def __init__(self, *args, **kwargs):
46 crs = kwargs.pop('crs', None)
47 geometry = kwargs.pop('geometry', None)
48 super(GeoDataFrame, self).__init__(*args, **kwargs)
49 self.crs = crs
50 if geometry is not None:
51 self.set_geometry(geometry, inplace=True)
52
53 def __setattr__(self, attr, val):
54 # have to special case geometry b/c pandas tries to use as column...
55 if attr == 'geometry':
56 object.__setattr__(self, attr, val)
57 else:
58 super(GeoDataFrame, self).__setattr__(attr, val)
59
60 def _get_geometry(self):
61 if self._geometry_column_name not in self:
62 raise AttributeError("No geometry data set yet (expected in"
63 " column '%s'." % self._geometry_column_name)
64 return self[self._geometry_column_name]
65
66 def _set_geometry(self, col):
67 # TODO: Use pandas' core.common.is_list_like() here.
68 if not isinstance(col, (list, np.ndarray, Series)):
69 raise ValueError("Must use a list-like to set the geometry"
70 " property")
71
72 self.set_geometry(col, inplace=True)
73
74 geometry = property(fget=_get_geometry, fset=_set_geometry,
75 doc="Geometry data for GeoDataFrame")
76
77 def set_geometry(self, col, drop=False, inplace=False, crs=None):
78 """
79 Set the GeoDataFrame geometry using either an existing column or
80 the specified input. By default yields a new object.
81
82 The original geometry column is replaced with the input.
83
84 Parameters
85 ----------
86 keys : column label or array
87 drop : boolean, default True
88 Delete column to be used as the new geometry
89 inplace : boolean, default False
90 Modify the GeoDataFrame in place (do not create a new object)
91 crs : str/result of fion.get_crs (optional)
92 Coordinate system to use. If passed, overrides both DataFrame and
93 col's crs. Otherwise, tries to get crs from passed col values or
94 DataFrame.
95
96 Examples
97 --------
98 >>> df1 = df.set_geometry([Point(0,0), Point(1,1), Point(2,2)])
99 >>> df2 = df.set_geometry('geom1')
100
101 Returns
102 -------
103 geodataframe : GeoDataFrame
104 """
105 # Most of the code here is taken from DataFrame.set_index()
106 if inplace:
107 frame = self
108 else:
109 frame = self.copy()
110
111 if not crs:
112 crs = getattr(col, 'crs', self.crs)
113
114 to_remove = None
115 geo_column_name = DEFAULT_GEO_COLUMN_NAME
116 if isinstance(col, Series):
117 level = col.values
118 elif isinstance(col, (list, np.ndarray)):
119 level = col
120 elif hasattr(col, 'ndim') and col.ndim != 1:
121 raise ValueError("Must pass array with one dimension only.")
122 else:
123 try:
124 level = frame[col].values
125 except KeyError:
126 raise ValueError("Unknown column %s" % col)
127 except:
128 raise
129 if drop:
130 to_remove = col
131 geo_column_name = DEFAULT_GEO_COLUMN_NAME
132 else:
133 geo_column_name = col
134
135 if to_remove:
136 del frame[to_remove]
137
138 if isinstance(level, GeoSeries) and level.crs != crs:
139 # Avoids caching issues/crs sharing issues
140 level = level.copy()
141 level.crs = crs
142
143 # Check that we are using a listlike of geometries
144 if not all(isinstance(item, BaseGeometry) for item in level):
145 raise TypeError("Input geometry column must contain valid geometry objects.")
146 frame[geo_column_name] = level
147 frame._geometry_column_name = geo_column_name
148 frame.crs = crs
149
150 if not inplace:
151 return frame
152
153 @classmethod
154 def from_file(cls, filename, **kwargs):
155 """
156 Alternate constructor to create a GeoDataFrame from a file.
157
158 Example:
159 df = geopandas.GeoDataFrame.from_file('nybb.shp')
160
161 Wraps geopandas.read_file(). For additional help, see read_file()
162
163 """
164 return geopandas.io.file.read_file(filename, **kwargs)
165
166 @classmethod
167 def from_features(cls, features, crs=None):
168 """
169 Alternate constructor to create GeoDataFrame from an iterable of
170 features. Each element must be a feature dictionary or implement
171 the __geo_interface__.
172 See: https://gist.github.com/sgillies/2217756
173
174 Note: This method does not attempt to align rows. Properties that are
175 not present in all features of the source file will not be properly
176 aligned. This should be fixed.
177
178 """
179 geoms = []
180 columns = defaultdict(lambda: [])
181 for f in features:
182 if hasattr(f, "__geo_interface__"):
183 f = f.__geo_interface__
184 else:
185 f = f
186
187 geoms.append(shape(f['geometry']))
188 for key, value in f['properties'].iteritems():
189 columns[key].append(value)
190 geom = GeoSeries(geoms)
191 df = GeoDataFrame(columns)
192 df['geometry'] = geom
193 df.crs = crs
194 return df
195
196 @classmethod
197 def from_postgis(cls, sql, con, geom_col='geom', crs=None, index_col=None,
198 coerce_float=True, params=None):
199 """
200 Alternate constructor to create a GeoDataFrame from a sql query
201 containing a geometry column.
202
203 Example:
204 df = geopandas.GeoDataFrame.from_postgis(con,
205 "SELECT geom, highway FROM roads;")
206
207 Wraps geopandas.read_postgis(). For additional help, see read_postgis()
208
209 """
210 return geopandas.io.sql.read_postgis(sql, con, geom_col, crs, index_col,
211 coerce_float, params)
212
213
214 def to_json(self, na='null', **kwargs):
215 """Returns a GeoJSON representation of the GeoDataFrame.
216
217 Parameters
218 ----------
219 na : {'null', 'drop', 'keep'}, default 'null'
220 Indicates how to output missing (NaN) values in the GeoDataFrame
221 * null: ouput the missing entries as JSON null
222 * drop: remove the property from the feature. This applies to
223 each feature individually so that features may have
224 different properties
225 * keep: output the missing entries as NaN
226
227 The remaining *kwargs* are passed to json.dumps().
228 """
229 def fill_none(row):
230 """
231 Takes in a Series, converts to a dictionary with null values
232 set to None
233
234 """
235 na_keys = row.index[row.isnull()]
236 d = row.to_dict()
237 for k in na_keys:
238 d[k] = None
239 return d
240
241 # na_methods must take in a Series and return dict-like
242 na_methods = {'null': fill_none,
243 'drop': lambda row: row.dropna(),
244 'keep': lambda row: row}
245
246 if na not in na_methods:
247 raise ValueError('Unknown na method {}'.format(na))
248 f = na_methods[na]
249
250 def feature(i, row):
251 row = f(row)
252 return {
253 'id': str(i),
254 'type': 'Feature',
255 'properties':
256 dict((k, v) for k, v in iteritems(row) if k != self._geometry_column_name),
257 'geometry': mapping(row[self._geometry_column_name]) }
258
259 return json.dumps(
260 {'type': 'FeatureCollection',
261 'features': [feature(i, row) for i, row in self.iterrows()]},
262 **kwargs )
263
264 def to_file(self, filename, driver="ESRI Shapefile", **kwargs):
265 """
266 Write this GeoDataFrame to an OGR data source
267
268 A dictionary of supported OGR providers is available via:
269 >>> import fiona
270 >>> fiona.supported_drivers
271
272 Parameters
273 ----------
274 filename : string
275 File path or file handle to write to.
276 driver : string, default 'ESRI Shapefile'
277 The OGR format driver used to write the vector file.
278
279 The *kwargs* are passed to fiona.open and can be used to write
280 to multi-layer data, store data within archives (zip files), etc.
281 """
282 import fiona
283 def convert_type(in_type):
284 if in_type == object:
285 return 'str'
286 return type(np.asscalar(np.zeros(1, in_type))).__name__
287
288 def feature(i, row):
289 return {
290 'id': str(i),
291 'type': 'Feature',
292 'properties':
293 dict((k, v) for k, v in iteritems(row) if k != 'geometry'),
294 'geometry': mapping(row['geometry']) }
295
296 properties = OrderedDict([(col, convert_type(_type)) for col, _type
297 in zip(self.columns, self.dtypes) if col!='geometry'])
298 # Need to check geom_types before we write to file...
299 # Some (most?) providers expect a single geometry type:
300 # Point, LineString, or Polygon
301 geom_types = self['geometry'].geom_type.unique()
302 from os.path import commonprefix # To find longest common prefix
303 geom_type = commonprefix([g[::-1] for g in geom_types])[::-1] # Reverse
304 if geom_type == '': # No common suffix = mixed geometry types
305 raise ValueError("Geometry column cannot contains mutiple "
306 "geometry types when writing to file.")
307 schema = {'geometry': geom_type, 'properties': properties}
308 filename = os.path.abspath(os.path.expanduser(filename))
309 with fiona.open(filename, 'w', driver=driver, crs=self.crs,
310 schema=schema, **kwargs) as c:
311 for i, row in self.iterrows():
312 c.write(feature(i, row))
313
314 def to_crs(self, crs=None, epsg=None, inplace=False):
315 """Transform geometries to a new coordinate reference system
316
317 This method will transform all points in all objects. It has
318 no notion or projecting entire geometries. All segments
319 joining points are assumed to be lines in the current
320 projection, not geodesics. Objects crossing the dateline (or
321 other projection boundary) will have undesirable behavior.
322 """
323 if inplace:
324 df = self
325 else:
326 df = self.copy()
327 geom = df.geometry.to_crs(crs=crs, epsg=epsg)
328 df.geometry = geom
329 df.crs = geom.crs
330 if not inplace:
331 return df
332
333 def __getitem__(self, key):
334 """
335 If the result is a column containing only 'geometry', return a
336 GeoSeries. If it's a DataFrame with a 'geometry' column, return a
337 GeoDataFrame.
338 """
339 result = super(GeoDataFrame, self).__getitem__(key)
340 geo_col = self._geometry_column_name
341 if isinstance(key, string_types) and key == geo_col:
342 result.__class__ = GeoSeries
343 result.crs = self.crs
344 elif isinstance(result, DataFrame) and geo_col in result:
345 result.__class__ = GeoDataFrame
346 result.crs = self.crs
347 result._geometry_column_name = geo_col
348 elif isinstance(result, DataFrame) and geo_col not in result:
349 result.__class__ = DataFrame
350 result.crs = self.crs
351 return result
352
353 #
354 # Implement pandas methods
355 #
356
357 @property
358 def _constructor(self):
359 return GeoDataFrame
360
361 def __finalize__(self, other, method=None, **kwargs):
362 """ propagate metadata from other to self """
363 # NOTE: backported from pandas master (upcoming v0.13)
364 for name in self._metadata:
365 object.__setattr__(self, name, getattr(other, name, None))
366 return self
367
368 def copy(self, deep=True):
369 """
370 Make a copy of this GeoDataFrame object
371
372 Parameters
373 ----------
374 deep : boolean, default True
375 Make a deep copy, i.e. also copy data
376
377 Returns
378 -------
379 copy : GeoDataFrame
380 """
381 # FIXME: this will likely be unnecessary in pandas >= 0.13
382 data = self._data
383 if deep:
384 data = data.copy()
385 return GeoDataFrame(data).__finalize__(self)
386
387 def plot(self, *args, **kwargs):
388 return plot_dataframe(self, *args, **kwargs)
389
390 def _dataframe_set_geometry(self, col, drop=False, inplace=False, crs=None):
391 if inplace:
392 raise ValueError("Can't do inplace setting when converting from"
393 " DataFrame to GeoDataFrame")
394 gf = GeoDataFrame(self)
395 # this will copy so that BlockManager gets copied
396 return gf.set_geometry(col, drop=drop, inplace=False, crs=crs)
397
398 if PY3:
399 DataFrame.set_geometry = _dataframe_set_geometry
400 else:
401 import types
402 DataFrame.set_geometry = types.MethodType(_dataframe_set_geometry, None,
403 DataFrame)
404
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/geopandas/geodataframe.py b/geopandas/geodataframe.py
--- a/geopandas/geodataframe.py
+++ b/geopandas/geodataframe.py
@@ -283,7 +283,10 @@
def convert_type(in_type):
if in_type == object:
return 'str'
- return type(np.asscalar(np.zeros(1, in_type))).__name__
+ out_type = type(np.asscalar(np.zeros(1, in_type))).__name__
+ if out_type == 'long':
+ out_type = 'int'
+ return out_type
def feature(i, row):
return {
| {"golden_diff": "diff --git a/geopandas/geodataframe.py b/geopandas/geodataframe.py\n--- a/geopandas/geodataframe.py\n+++ b/geopandas/geodataframe.py\n@@ -283,7 +283,10 @@\n def convert_type(in_type):\n if in_type == object:\n return 'str'\n- return type(np.asscalar(np.zeros(1, in_type))).__name__\n+ out_type = type(np.asscalar(np.zeros(1, in_type))).__name__\n+ if out_type == 'long':\n+ out_type = 'int'\n+ return out_type\n \n def feature(i, row):\n return {\n", "issue": "to_file(): 'long' isn't a valid Fiona property type\nThe question http://gis.stackexchange.com/questions/89206/geopandas-error-when-writing-to-file-valueerror-long-is-not-in-list revealed a bug to me.\n\nIf you pass schema={'geometry': 'Point', 'properties': {'foo': 'long'}} into fiona.open(), the type 'long' isn't found at https://github.com/Toblerity/Fiona/blob/master/src/fiona/ogrext.pyx#L973. OGR doesn't distinguish between long and int, so converting 'long' to 'int' within Fiona may help...\n## But :)\n\nFiona will always return 'int' in the .schema attribute and this could cause trouble for programs that pass 'long' and expect it to stick. So, let's fix up geopandas so it always uses 'int' and never 'long'.\n\n", "before_files": [{"content": "try:\n from collections import OrderedDict\nexcept ImportError:\n # Python 2.6\n from ordereddict import OrderedDict\nfrom collections import defaultdict\nimport json\nimport os\nimport sys\n\nimport numpy as np\nfrom pandas import DataFrame, Series\nfrom shapely.geometry import mapping, shape\nfrom shapely.geometry.base import BaseGeometry\nfrom six import string_types\nfrom six import string_types, iteritems\n\nfrom geopandas import GeoSeries\nfrom geopandas.base import GeoPandasBase\nfrom geopandas.plotting import plot_dataframe\nimport geopandas.io\n\n\nDEFAULT_GEO_COLUMN_NAME = 'geometry'\nPY3 = sys.version_info[0] == 3\n\n\nclass GeoDataFrame(GeoPandasBase, DataFrame):\n \"\"\"\n A GeoDataFrame object is a pandas.DataFrame that has a column\n with geometry. In addition to the standard DataFrame constructor arguments,\n GeoDataFrame also accepts the following keyword arguments:\n\n Keyword Arguments\n -----------------\n crs : str (optional)\n Coordinate system\n geometry : str or array (optional)\n If str, column to use as geometry. If array, will be set as 'geometry'\n column on GeoDataFrame.\n \"\"\"\n _metadata = ['crs', '_geometry_column_name']\n _geometry_column_name = DEFAULT_GEO_COLUMN_NAME\n\n def __init__(self, *args, **kwargs):\n crs = kwargs.pop('crs', None)\n geometry = kwargs.pop('geometry', None)\n super(GeoDataFrame, self).__init__(*args, **kwargs)\n self.crs = crs\n if geometry is not None:\n self.set_geometry(geometry, inplace=True)\n\n def __setattr__(self, attr, val):\n # have to special case geometry b/c pandas tries to use as column...\n if attr == 'geometry':\n object.__setattr__(self, attr, val)\n else:\n super(GeoDataFrame, self).__setattr__(attr, val)\n\n def _get_geometry(self):\n if self._geometry_column_name not in self:\n raise AttributeError(\"No geometry data set yet (expected in\"\n \" column '%s'.\" % self._geometry_column_name)\n return self[self._geometry_column_name]\n\n def _set_geometry(self, col):\n # TODO: Use pandas' core.common.is_list_like() here.\n if not isinstance(col, (list, np.ndarray, Series)):\n raise ValueError(\"Must use a list-like to set the geometry\"\n \" property\")\n\n self.set_geometry(col, inplace=True)\n\n geometry = property(fget=_get_geometry, fset=_set_geometry,\n doc=\"Geometry data for GeoDataFrame\")\n\n def set_geometry(self, col, drop=False, inplace=False, crs=None):\n \"\"\"\n Set the GeoDataFrame geometry using either an existing column or\n the specified input. By default yields a new object.\n\n The original geometry column is replaced with the input.\n\n Parameters\n ----------\n keys : column label or array\n drop : boolean, default True\n Delete column to be used as the new geometry\n inplace : boolean, default False\n Modify the GeoDataFrame in place (do not create a new object)\n crs : str/result of fion.get_crs (optional)\n Coordinate system to use. If passed, overrides both DataFrame and\n col's crs. Otherwise, tries to get crs from passed col values or\n DataFrame.\n\n Examples\n --------\n >>> df1 = df.set_geometry([Point(0,0), Point(1,1), Point(2,2)])\n >>> df2 = df.set_geometry('geom1')\n\n Returns\n -------\n geodataframe : GeoDataFrame\n \"\"\"\n # Most of the code here is taken from DataFrame.set_index()\n if inplace:\n frame = self\n else:\n frame = self.copy()\n\n if not crs:\n crs = getattr(col, 'crs', self.crs)\n\n to_remove = None\n geo_column_name = DEFAULT_GEO_COLUMN_NAME\n if isinstance(col, Series):\n level = col.values\n elif isinstance(col, (list, np.ndarray)):\n level = col\n elif hasattr(col, 'ndim') and col.ndim != 1:\n raise ValueError(\"Must pass array with one dimension only.\")\n else:\n try:\n level = frame[col].values\n except KeyError:\n raise ValueError(\"Unknown column %s\" % col)\n except:\n raise\n if drop:\n to_remove = col\n geo_column_name = DEFAULT_GEO_COLUMN_NAME\n else:\n geo_column_name = col\n\n if to_remove:\n del frame[to_remove]\n\n if isinstance(level, GeoSeries) and level.crs != crs:\n # Avoids caching issues/crs sharing issues\n level = level.copy()\n level.crs = crs\n\n # Check that we are using a listlike of geometries\n if not all(isinstance(item, BaseGeometry) for item in level):\n raise TypeError(\"Input geometry column must contain valid geometry objects.\")\n frame[geo_column_name] = level\n frame._geometry_column_name = geo_column_name\n frame.crs = crs\n\n if not inplace:\n return frame\n\n @classmethod\n def from_file(cls, filename, **kwargs):\n \"\"\"\n Alternate constructor to create a GeoDataFrame from a file.\n \n Example:\n df = geopandas.GeoDataFrame.from_file('nybb.shp')\n\n Wraps geopandas.read_file(). For additional help, see read_file()\n\n \"\"\"\n return geopandas.io.file.read_file(filename, **kwargs)\n\n @classmethod\n def from_features(cls, features, crs=None):\n \"\"\"\n Alternate constructor to create GeoDataFrame from an iterable of\n features. Each element must be a feature dictionary or implement\n the __geo_interface__.\n See: https://gist.github.com/sgillies/2217756\n\n Note: This method does not attempt to align rows. Properties that are\n not present in all features of the source file will not be properly\n aligned. This should be fixed.\n\n \"\"\"\n geoms = []\n columns = defaultdict(lambda: [])\n for f in features:\n if hasattr(f, \"__geo_interface__\"):\n f = f.__geo_interface__\n else:\n f = f\n\n geoms.append(shape(f['geometry']))\n for key, value in f['properties'].iteritems():\n columns[key].append(value)\n geom = GeoSeries(geoms)\n df = GeoDataFrame(columns)\n df['geometry'] = geom\n df.crs = crs\n return df\n\n @classmethod\n def from_postgis(cls, sql, con, geom_col='geom', crs=None, index_col=None,\n coerce_float=True, params=None):\n \"\"\"\n Alternate constructor to create a GeoDataFrame from a sql query\n containing a geometry column.\n\n Example:\n df = geopandas.GeoDataFrame.from_postgis(con,\n \"SELECT geom, highway FROM roads;\")\n\n Wraps geopandas.read_postgis(). For additional help, see read_postgis()\n\n \"\"\"\n return geopandas.io.sql.read_postgis(sql, con, geom_col, crs, index_col, \n coerce_float, params)\n\n\n def to_json(self, na='null', **kwargs):\n \"\"\"Returns a GeoJSON representation of the GeoDataFrame.\n\n Parameters\n ----------\n na : {'null', 'drop', 'keep'}, default 'null'\n Indicates how to output missing (NaN) values in the GeoDataFrame\n * null: ouput the missing entries as JSON null\n * drop: remove the property from the feature. This applies to\n each feature individually so that features may have\n different properties\n * keep: output the missing entries as NaN\n \n The remaining *kwargs* are passed to json.dumps().\n \"\"\"\n def fill_none(row):\n \"\"\"\n Takes in a Series, converts to a dictionary with null values\n set to None\n\n \"\"\"\n na_keys = row.index[row.isnull()]\n d = row.to_dict()\n for k in na_keys:\n d[k] = None\n return d\n\n # na_methods must take in a Series and return dict-like\n na_methods = {'null': fill_none,\n 'drop': lambda row: row.dropna(),\n 'keep': lambda row: row}\n\n if na not in na_methods:\n raise ValueError('Unknown na method {}'.format(na))\n f = na_methods[na]\n\n def feature(i, row):\n row = f(row)\n return {\n 'id': str(i),\n 'type': 'Feature',\n 'properties':\n dict((k, v) for k, v in iteritems(row) if k != self._geometry_column_name),\n 'geometry': mapping(row[self._geometry_column_name]) }\n\n return json.dumps(\n {'type': 'FeatureCollection',\n 'features': [feature(i, row) for i, row in self.iterrows()]},\n **kwargs )\n \n def to_file(self, filename, driver=\"ESRI Shapefile\", **kwargs):\n \"\"\"\n Write this GeoDataFrame to an OGR data source\n \n A dictionary of supported OGR providers is available via:\n >>> import fiona\n >>> fiona.supported_drivers\n\n Parameters\n ----------\n filename : string \n File path or file handle to write to.\n driver : string, default 'ESRI Shapefile'\n The OGR format driver used to write the vector file.\n\n The *kwargs* are passed to fiona.open and can be used to write \n to multi-layer data, store data within archives (zip files), etc.\n \"\"\"\n import fiona\n def convert_type(in_type):\n if in_type == object:\n return 'str'\n return type(np.asscalar(np.zeros(1, in_type))).__name__\n \n def feature(i, row):\n return {\n 'id': str(i),\n 'type': 'Feature',\n 'properties':\n dict((k, v) for k, v in iteritems(row) if k != 'geometry'),\n 'geometry': mapping(row['geometry']) }\n \n properties = OrderedDict([(col, convert_type(_type)) for col, _type \n in zip(self.columns, self.dtypes) if col!='geometry'])\n # Need to check geom_types before we write to file... \n # Some (most?) providers expect a single geometry type: \n # Point, LineString, or Polygon\n geom_types = self['geometry'].geom_type.unique()\n from os.path import commonprefix # To find longest common prefix\n geom_type = commonprefix([g[::-1] for g in geom_types])[::-1] # Reverse\n if geom_type == '': # No common suffix = mixed geometry types\n raise ValueError(\"Geometry column cannot contains mutiple \"\n \"geometry types when writing to file.\")\n schema = {'geometry': geom_type, 'properties': properties}\n filename = os.path.abspath(os.path.expanduser(filename))\n with fiona.open(filename, 'w', driver=driver, crs=self.crs, \n schema=schema, **kwargs) as c:\n for i, row in self.iterrows():\n c.write(feature(i, row))\n\n def to_crs(self, crs=None, epsg=None, inplace=False):\n \"\"\"Transform geometries to a new coordinate reference system\n\n This method will transform all points in all objects. It has\n no notion or projecting entire geometries. All segments\n joining points are assumed to be lines in the current\n projection, not geodesics. Objects crossing the dateline (or\n other projection boundary) will have undesirable behavior.\n \"\"\"\n if inplace:\n df = self\n else:\n df = self.copy()\n geom = df.geometry.to_crs(crs=crs, epsg=epsg)\n df.geometry = geom\n df.crs = geom.crs\n if not inplace:\n return df\n\n def __getitem__(self, key):\n \"\"\"\n If the result is a column containing only 'geometry', return a\n GeoSeries. If it's a DataFrame with a 'geometry' column, return a\n GeoDataFrame.\n \"\"\"\n result = super(GeoDataFrame, self).__getitem__(key)\n geo_col = self._geometry_column_name\n if isinstance(key, string_types) and key == geo_col:\n result.__class__ = GeoSeries\n result.crs = self.crs\n elif isinstance(result, DataFrame) and geo_col in result:\n result.__class__ = GeoDataFrame\n result.crs = self.crs\n result._geometry_column_name = geo_col\n elif isinstance(result, DataFrame) and geo_col not in result:\n result.__class__ = DataFrame\n result.crs = self.crs\n return result\n\n #\n # Implement pandas methods\n #\n\n @property\n def _constructor(self):\n return GeoDataFrame\n\n def __finalize__(self, other, method=None, **kwargs):\n \"\"\" propagate metadata from other to self \"\"\"\n # NOTE: backported from pandas master (upcoming v0.13)\n for name in self._metadata:\n object.__setattr__(self, name, getattr(other, name, None))\n return self\n\n def copy(self, deep=True):\n \"\"\"\n Make a copy of this GeoDataFrame object\n\n Parameters\n ----------\n deep : boolean, default True\n Make a deep copy, i.e. also copy data\n\n Returns\n -------\n copy : GeoDataFrame\n \"\"\"\n # FIXME: this will likely be unnecessary in pandas >= 0.13\n data = self._data\n if deep:\n data = data.copy()\n return GeoDataFrame(data).__finalize__(self)\n\n def plot(self, *args, **kwargs):\n return plot_dataframe(self, *args, **kwargs)\n\ndef _dataframe_set_geometry(self, col, drop=False, inplace=False, crs=None):\n if inplace:\n raise ValueError(\"Can't do inplace setting when converting from\"\n \" DataFrame to GeoDataFrame\")\n gf = GeoDataFrame(self)\n # this will copy so that BlockManager gets copied\n return gf.set_geometry(col, drop=drop, inplace=False, crs=crs)\n\nif PY3:\n DataFrame.set_geometry = _dataframe_set_geometry\nelse:\n import types\n DataFrame.set_geometry = types.MethodType(_dataframe_set_geometry, None,\n DataFrame)\n", "path": "geopandas/geodataframe.py"}], "after_files": [{"content": "try:\n from collections import OrderedDict\nexcept ImportError:\n # Python 2.6\n from ordereddict import OrderedDict\nfrom collections import defaultdict\nimport json\nimport os\nimport sys\n\nimport numpy as np\nfrom pandas import DataFrame, Series\nfrom shapely.geometry import mapping, shape\nfrom shapely.geometry.base import BaseGeometry\nfrom six import string_types\nfrom six import string_types, iteritems\n\nfrom geopandas import GeoSeries\nfrom geopandas.base import GeoPandasBase\nfrom geopandas.plotting import plot_dataframe\nimport geopandas.io\n\n\nDEFAULT_GEO_COLUMN_NAME = 'geometry'\nPY3 = sys.version_info[0] == 3\n\n\nclass GeoDataFrame(GeoPandasBase, DataFrame):\n \"\"\"\n A GeoDataFrame object is a pandas.DataFrame that has a column\n with geometry. In addition to the standard DataFrame constructor arguments,\n GeoDataFrame also accepts the following keyword arguments:\n\n Keyword Arguments\n -----------------\n crs : str (optional)\n Coordinate system\n geometry : str or array (optional)\n If str, column to use as geometry. If array, will be set as 'geometry'\n column on GeoDataFrame.\n \"\"\"\n _metadata = ['crs', '_geometry_column_name']\n _geometry_column_name = DEFAULT_GEO_COLUMN_NAME\n\n def __init__(self, *args, **kwargs):\n crs = kwargs.pop('crs', None)\n geometry = kwargs.pop('geometry', None)\n super(GeoDataFrame, self).__init__(*args, **kwargs)\n self.crs = crs\n if geometry is not None:\n self.set_geometry(geometry, inplace=True)\n\n def __setattr__(self, attr, val):\n # have to special case geometry b/c pandas tries to use as column...\n if attr == 'geometry':\n object.__setattr__(self, attr, val)\n else:\n super(GeoDataFrame, self).__setattr__(attr, val)\n\n def _get_geometry(self):\n if self._geometry_column_name not in self:\n raise AttributeError(\"No geometry data set yet (expected in\"\n \" column '%s'.\" % self._geometry_column_name)\n return self[self._geometry_column_name]\n\n def _set_geometry(self, col):\n # TODO: Use pandas' core.common.is_list_like() here.\n if not isinstance(col, (list, np.ndarray, Series)):\n raise ValueError(\"Must use a list-like to set the geometry\"\n \" property\")\n\n self.set_geometry(col, inplace=True)\n\n geometry = property(fget=_get_geometry, fset=_set_geometry,\n doc=\"Geometry data for GeoDataFrame\")\n\n def set_geometry(self, col, drop=False, inplace=False, crs=None):\n \"\"\"\n Set the GeoDataFrame geometry using either an existing column or\n the specified input. By default yields a new object.\n\n The original geometry column is replaced with the input.\n\n Parameters\n ----------\n keys : column label or array\n drop : boolean, default True\n Delete column to be used as the new geometry\n inplace : boolean, default False\n Modify the GeoDataFrame in place (do not create a new object)\n crs : str/result of fion.get_crs (optional)\n Coordinate system to use. If passed, overrides both DataFrame and\n col's crs. Otherwise, tries to get crs from passed col values or\n DataFrame.\n\n Examples\n --------\n >>> df1 = df.set_geometry([Point(0,0), Point(1,1), Point(2,2)])\n >>> df2 = df.set_geometry('geom1')\n\n Returns\n -------\n geodataframe : GeoDataFrame\n \"\"\"\n # Most of the code here is taken from DataFrame.set_index()\n if inplace:\n frame = self\n else:\n frame = self.copy()\n\n if not crs:\n crs = getattr(col, 'crs', self.crs)\n\n to_remove = None\n geo_column_name = DEFAULT_GEO_COLUMN_NAME\n if isinstance(col, Series):\n level = col.values\n elif isinstance(col, (list, np.ndarray)):\n level = col\n elif hasattr(col, 'ndim') and col.ndim != 1:\n raise ValueError(\"Must pass array with one dimension only.\")\n else:\n try:\n level = frame[col].values\n except KeyError:\n raise ValueError(\"Unknown column %s\" % col)\n except:\n raise\n if drop:\n to_remove = col\n geo_column_name = DEFAULT_GEO_COLUMN_NAME\n else:\n geo_column_name = col\n\n if to_remove:\n del frame[to_remove]\n\n if isinstance(level, GeoSeries) and level.crs != crs:\n # Avoids caching issues/crs sharing issues\n level = level.copy()\n level.crs = crs\n\n # Check that we are using a listlike of geometries\n if not all(isinstance(item, BaseGeometry) for item in level):\n raise TypeError(\"Input geometry column must contain valid geometry objects.\")\n frame[geo_column_name] = level\n frame._geometry_column_name = geo_column_name\n frame.crs = crs\n\n if not inplace:\n return frame\n\n @classmethod\n def from_file(cls, filename, **kwargs):\n \"\"\"\n Alternate constructor to create a GeoDataFrame from a file.\n \n Example:\n df = geopandas.GeoDataFrame.from_file('nybb.shp')\n\n Wraps geopandas.read_file(). For additional help, see read_file()\n\n \"\"\"\n return geopandas.io.file.read_file(filename, **kwargs)\n\n @classmethod\n def from_features(cls, features, crs=None):\n \"\"\"\n Alternate constructor to create GeoDataFrame from an iterable of\n features. Each element must be a feature dictionary or implement\n the __geo_interface__.\n See: https://gist.github.com/sgillies/2217756\n\n Note: This method does not attempt to align rows. Properties that are\n not present in all features of the source file will not be properly\n aligned. This should be fixed.\n\n \"\"\"\n geoms = []\n columns = defaultdict(lambda: [])\n for f in features:\n if hasattr(f, \"__geo_interface__\"):\n f = f.__geo_interface__\n else:\n f = f\n\n geoms.append(shape(f['geometry']))\n for key, value in f['properties'].iteritems():\n columns[key].append(value)\n geom = GeoSeries(geoms)\n df = GeoDataFrame(columns)\n df['geometry'] = geom\n df.crs = crs\n return df\n\n @classmethod\n def from_postgis(cls, sql, con, geom_col='geom', crs=None, index_col=None,\n coerce_float=True, params=None):\n \"\"\"\n Alternate constructor to create a GeoDataFrame from a sql query\n containing a geometry column.\n\n Example:\n df = geopandas.GeoDataFrame.from_postgis(con,\n \"SELECT geom, highway FROM roads;\")\n\n Wraps geopandas.read_postgis(). For additional help, see read_postgis()\n\n \"\"\"\n return geopandas.io.sql.read_postgis(sql, con, geom_col, crs, index_col, \n coerce_float, params)\n\n\n def to_json(self, na='null', **kwargs):\n \"\"\"Returns a GeoJSON representation of the GeoDataFrame.\n\n Parameters\n ----------\n na : {'null', 'drop', 'keep'}, default 'null'\n Indicates how to output missing (NaN) values in the GeoDataFrame\n * null: ouput the missing entries as JSON null\n * drop: remove the property from the feature. This applies to\n each feature individually so that features may have\n different properties\n * keep: output the missing entries as NaN\n \n The remaining *kwargs* are passed to json.dumps().\n \"\"\"\n def fill_none(row):\n \"\"\"\n Takes in a Series, converts to a dictionary with null values\n set to None\n\n \"\"\"\n na_keys = row.index[row.isnull()]\n d = row.to_dict()\n for k in na_keys:\n d[k] = None\n return d\n\n # na_methods must take in a Series and return dict-like\n na_methods = {'null': fill_none,\n 'drop': lambda row: row.dropna(),\n 'keep': lambda row: row}\n\n if na not in na_methods:\n raise ValueError('Unknown na method {}'.format(na))\n f = na_methods[na]\n\n def feature(i, row):\n row = f(row)\n return {\n 'id': str(i),\n 'type': 'Feature',\n 'properties':\n dict((k, v) for k, v in iteritems(row) if k != self._geometry_column_name),\n 'geometry': mapping(row[self._geometry_column_name]) }\n\n return json.dumps(\n {'type': 'FeatureCollection',\n 'features': [feature(i, row) for i, row in self.iterrows()]},\n **kwargs )\n \n def to_file(self, filename, driver=\"ESRI Shapefile\", **kwargs):\n \"\"\"\n Write this GeoDataFrame to an OGR data source\n \n A dictionary of supported OGR providers is available via:\n >>> import fiona\n >>> fiona.supported_drivers\n\n Parameters\n ----------\n filename : string \n File path or file handle to write to.\n driver : string, default 'ESRI Shapefile'\n The OGR format driver used to write the vector file.\n\n The *kwargs* are passed to fiona.open and can be used to write \n to multi-layer data, store data within archives (zip files), etc.\n \"\"\"\n import fiona\n def convert_type(in_type):\n if in_type == object:\n return 'str'\n out_type = type(np.asscalar(np.zeros(1, in_type))).__name__\n if out_type == 'long':\n out_type = 'int'\n return out_type\n \n def feature(i, row):\n return {\n 'id': str(i),\n 'type': 'Feature',\n 'properties':\n dict((k, v) for k, v in iteritems(row) if k != 'geometry'),\n 'geometry': mapping(row['geometry']) }\n \n properties = OrderedDict([(col, convert_type(_type)) for col, _type \n in zip(self.columns, self.dtypes) if col!='geometry'])\n # Need to check geom_types before we write to file... \n # Some (most?) providers expect a single geometry type: \n # Point, LineString, or Polygon\n geom_types = self['geometry'].geom_type.unique()\n from os.path import commonprefix # To find longest common prefix\n geom_type = commonprefix([g[::-1] for g in geom_types])[::-1] # Reverse\n if geom_type == '': # No common suffix = mixed geometry types\n raise ValueError(\"Geometry column cannot contains mutiple \"\n \"geometry types when writing to file.\")\n schema = {'geometry': geom_type, 'properties': properties}\n filename = os.path.abspath(os.path.expanduser(filename))\n with fiona.open(filename, 'w', driver=driver, crs=self.crs, \n schema=schema, **kwargs) as c:\n for i, row in self.iterrows():\n c.write(feature(i, row))\n\n def to_crs(self, crs=None, epsg=None, inplace=False):\n \"\"\"Transform geometries to a new coordinate reference system\n\n This method will transform all points in all objects. It has\n no notion or projecting entire geometries. All segments\n joining points are assumed to be lines in the current\n projection, not geodesics. Objects crossing the dateline (or\n other projection boundary) will have undesirable behavior.\n \"\"\"\n if inplace:\n df = self\n else:\n df = self.copy()\n geom = df.geometry.to_crs(crs=crs, epsg=epsg)\n df.geometry = geom\n df.crs = geom.crs\n if not inplace:\n return df\n\n def __getitem__(self, key):\n \"\"\"\n If the result is a column containing only 'geometry', return a\n GeoSeries. If it's a DataFrame with a 'geometry' column, return a\n GeoDataFrame.\n \"\"\"\n result = super(GeoDataFrame, self).__getitem__(key)\n geo_col = self._geometry_column_name\n if isinstance(key, string_types) and key == geo_col:\n result.__class__ = GeoSeries\n result.crs = self.crs\n elif isinstance(result, DataFrame) and geo_col in result:\n result.__class__ = GeoDataFrame\n result.crs = self.crs\n result._geometry_column_name = geo_col\n elif isinstance(result, DataFrame) and geo_col not in result:\n result.__class__ = DataFrame\n result.crs = self.crs\n return result\n\n #\n # Implement pandas methods\n #\n\n @property\n def _constructor(self):\n return GeoDataFrame\n\n def __finalize__(self, other, method=None, **kwargs):\n \"\"\" propagate metadata from other to self \"\"\"\n # NOTE: backported from pandas master (upcoming v0.13)\n for name in self._metadata:\n object.__setattr__(self, name, getattr(other, name, None))\n return self\n\n def copy(self, deep=True):\n \"\"\"\n Make a copy of this GeoDataFrame object\n\n Parameters\n ----------\n deep : boolean, default True\n Make a deep copy, i.e. also copy data\n\n Returns\n -------\n copy : GeoDataFrame\n \"\"\"\n # FIXME: this will likely be unnecessary in pandas >= 0.13\n data = self._data\n if deep:\n data = data.copy()\n return GeoDataFrame(data).__finalize__(self)\n\n def plot(self, *args, **kwargs):\n return plot_dataframe(self, *args, **kwargs)\n\ndef _dataframe_set_geometry(self, col, drop=False, inplace=False, crs=None):\n if inplace:\n raise ValueError(\"Can't do inplace setting when converting from\"\n \" DataFrame to GeoDataFrame\")\n gf = GeoDataFrame(self)\n # this will copy so that BlockManager gets copied\n return gf.set_geometry(col, drop=drop, inplace=False, crs=crs)\n\nif PY3:\n DataFrame.set_geometry = _dataframe_set_geometry\nelse:\n import types\n DataFrame.set_geometry = types.MethodType(_dataframe_set_geometry, None,\n DataFrame)\n", "path": "geopandas/geodataframe.py"}]} |
gh_patches_debug_1517 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-5339 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Put retention days on Cloud Watch logs. with the TRaceback
Here is the Policy that I'm running periodically. I'm getting throttling Errors.
policies:
- name: Custodian-loggroup-retention
resource: log-group
description: |
Checks log groups weekely and sets the log retention for log groups that doesn't have log retention set.
mode:
type: periodic
schedule: "cron(0 12 ? * 2 *)"
role: CustodianLambdaExecutionRole
packages:
- boto3
- botocore
filters:
- type: value
key: retentionInDays
value: absent
actions:
- type: retention
days: 400
**Here is the traceback.**
[DEBUG] 2020-02-10T16:00:25.670Z fc012410-e0d7-49a5-a52f-c8a996ffb8e8 metric:ResourceCount Count:820 policy:custodian-loggroup-retention restype:log-group scope:policy
[DEBUG] 2020-02-10T16:00:25.670Z fc012410-e0d7-49a5-a52f-c8a996ffb8e8 metric:PolicyException Count:1 policy:custodian-loggroup-retention restype:log-group
[DEBUG] 2020-02-10T16:00:25.670Z fc012410-e0d7-49a5-a52f-c8a996ffb8e8 metric:ApiCalls Count:110 policy:custodian-loggroup-retention restype:log-group
[ERROR] 2020-02-10T16:00:25.670Z fc012410-e0d7-49a5-a52f-c8a996ffb8e8 Error while executing policy
Traceback (most recent call last):
File "/var/task/c7n/policy.py", line 320, in run
results = a.process(resources)
File "/var/task/c7n/resources/cw.py", line 201, in process
retentionInDays=days)
File "/var/task/botocore/client.py", line 276, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/var/task/botocore/client.py", line 586, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (ThrottlingException) when calling the PutRetentionPolicy operation (reached max retries: 4): Rate exceeded
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `c7n/resources/cw.py`
Content:
```
1 # Copyright 2016-2017 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from __future__ import absolute_import, division, print_function, unicode_literals
15
16 from concurrent.futures import as_completed
17 from datetime import datetime, timedelta
18
19 from c7n.actions import BaseAction
20 from c7n.exceptions import PolicyValidationError
21 from c7n.filters import Filter, MetricsFilter
22 from c7n.filters.iamaccess import CrossAccountAccessFilter
23 from c7n.query import QueryResourceManager, ChildResourceManager, TypeInfo
24 from c7n.manager import resources
25 from c7n.resolver import ValuesFrom
26 from c7n.tags import universal_augment
27 from c7n.utils import type_schema, local_session, chunks, get_retry
28
29
30 @resources.register('alarm')
31 class Alarm(QueryResourceManager):
32
33 class resource_type(TypeInfo):
34 service = 'cloudwatch'
35 arn_type = 'alarm'
36 enum_spec = ('describe_alarms', 'MetricAlarms', None)
37 id = 'AlarmArn'
38 filter_name = 'AlarmNames'
39 filter_type = 'list'
40 name = 'AlarmName'
41 date = 'AlarmConfigurationUpdatedTimestamp'
42 config_type = 'AWS::CloudWatch::Alarm'
43
44 retry = staticmethod(get_retry(('Throttled',)))
45
46
47 @Alarm.action_registry.register('delete')
48 class AlarmDelete(BaseAction):
49 """Delete a cloudwatch alarm.
50
51 :example:
52
53 .. code-block:: yaml
54
55 policies:
56 - name: cloudwatch-delete-stale-alarms
57 resource: alarm
58 filters:
59 - type: value
60 value_type: age
61 key: StateUpdatedTimestamp
62 value: 30
63 op: ge
64 - StateValue: INSUFFICIENT_DATA
65 actions:
66 - delete
67 """
68
69 schema = type_schema('delete')
70 permissions = ('cloudwatch:DeleteAlarms',)
71
72 def process(self, resources):
73 client = local_session(
74 self.manager.session_factory).client('cloudwatch')
75
76 for resource_set in chunks(resources, size=100):
77 self.manager.retry(
78 client.delete_alarms,
79 AlarmNames=[r['AlarmName'] for r in resource_set])
80
81
82 @resources.register('event-rule')
83 class EventRule(QueryResourceManager):
84
85 class resource_type(TypeInfo):
86 service = 'events'
87 arn_type = 'event-rule'
88 enum_spec = ('list_rules', 'Rules', None)
89 name = "Name"
90 id = "Name"
91 filter_name = "NamePrefix"
92 filter_type = "scalar"
93
94
95 @EventRule.filter_registry.register('metrics')
96 class EventRuleMetrics(MetricsFilter):
97
98 def get_dimensions(self, resource):
99 return [{'Name': 'RuleName', 'Value': resource['Name']}]
100
101
102 @resources.register('event-rule-target')
103 class EventRuleTarget(ChildResourceManager):
104
105 class resource_type(TypeInfo):
106 service = 'events'
107 arn = False
108 arn_type = 'event-rule-target'
109 enum_spec = ('list_targets_by_rule', 'Targets', None)
110 parent_spec = ('event-rule', 'Rule', True)
111 name = id = 'Id'
112
113
114 @EventRuleTarget.filter_registry.register('cross-account')
115 class CrossAccountFilter(CrossAccountAccessFilter):
116
117 schema = type_schema(
118 'cross-account',
119 # white list accounts
120 whitelist_from=ValuesFrom.schema,
121 whitelist={'type': 'array', 'items': {'type': 'string'}})
122
123 # dummy permission
124 permissions = ('events:ListTargetsByRule',)
125
126 def __call__(self, r):
127 account_id = r['Arn'].split(':', 5)[4]
128 return account_id not in self.accounts
129
130
131 @EventRuleTarget.action_registry.register('delete')
132 class DeleteTarget(BaseAction):
133
134 schema = type_schema('delete')
135 permissions = ('events:RemoveTargets',)
136
137 def process(self, resources):
138 client = local_session(self.manager.session_factory).client('events')
139 rule_targets = {}
140 for r in resources:
141 rule_targets.setdefault(r['c7n:parent-id'], []).append(r['Id'])
142
143 for rule_id, target_ids in rule_targets.items():
144 client.remove_targets(
145 Ids=target_ids,
146 Rule=rule_id)
147
148
149 @resources.register('log-group')
150 class LogGroup(QueryResourceManager):
151
152 class resource_type(TypeInfo):
153 service = 'logs'
154 arn_type = 'log-group'
155 enum_spec = ('describe_log_groups', 'logGroups', None)
156 name = 'logGroupName'
157 id = 'arn'
158 filter_name = 'logGroupNamePrefix'
159 filter_type = 'scalar'
160 dimension = 'LogGroupName'
161 date = 'creationTime'
162 universal_taggable = True
163
164 def augment(self, resources):
165 resources = universal_augment(self, resources)
166 for r in resources:
167 r['creationTime'] = r['creationTime'] / 1000.0
168 return resources
169
170 def get_arns(self, resources):
171 # log group arn in resource describe has ':*' suffix, not all
172 # apis can use that form, so normalize to standard arn.
173 return [r['arn'][:-2] for r in resources]
174
175
176 @LogGroup.action_registry.register('retention')
177 class Retention(BaseAction):
178 """Action to set the retention period (in days) for CloudWatch log groups
179
180 :example:
181
182 .. code-block:: yaml
183
184 policies:
185 - name: cloudwatch-set-log-group-retention
186 resource: log-group
187 actions:
188 - type: retention
189 days: 200
190 """
191
192 schema = type_schema('retention', days={'type': 'integer'})
193 permissions = ('logs:PutRetentionPolicy',)
194
195 def process(self, resources):
196 client = local_session(self.manager.session_factory).client('logs')
197 days = self.data['days']
198 for r in resources:
199 client.put_retention_policy(
200 logGroupName=r['logGroupName'],
201 retentionInDays=days)
202
203
204 @LogGroup.action_registry.register('delete')
205 class Delete(BaseAction):
206 """
207
208 :example:
209
210 .. code-block:: yaml
211
212 policies:
213 - name: cloudwatch-delete-stale-log-group
214 resource: log-group
215 filters:
216 - type: last-write
217 days: 182.5
218 actions:
219 - delete
220 """
221
222 schema = type_schema('delete')
223 permissions = ('logs:DeleteLogGroup',)
224
225 def process(self, resources):
226 client = local_session(self.manager.session_factory).client('logs')
227 for r in resources:
228 client.delete_log_group(logGroupName=r['logGroupName'])
229
230
231 @LogGroup.filter_registry.register('last-write')
232 class LastWriteDays(Filter):
233 """Filters CloudWatch log groups by last write
234
235 :example:
236
237 .. code-block:: yaml
238
239 policies:
240 - name: cloudwatch-stale-groups
241 resource: log-group
242 filters:
243 - type: last-write
244 days: 60
245 """
246
247 schema = type_schema(
248 'last-write', days={'type': 'number'})
249 permissions = ('logs:DescribeLogStreams',)
250
251 def process(self, resources, event=None):
252 client = local_session(self.manager.session_factory).client('logs')
253 self.date_threshold = datetime.utcnow() - timedelta(
254 days=self.data['days'])
255 return [r for r in resources if self.check_group(client, r)]
256
257 def check_group(self, client, group):
258 streams = client.describe_log_streams(
259 logGroupName=group['logGroupName'],
260 orderBy='LastEventTime',
261 descending=True,
262 limit=3).get('logStreams')
263 group['streams'] = streams
264 if not streams:
265 last_timestamp = group['creationTime']
266 elif streams[0]['storedBytes'] == 0:
267 last_timestamp = streams[0]['creationTime']
268 else:
269 last_timestamp = streams[0]['lastIngestionTime']
270
271 last_write = datetime.fromtimestamp(last_timestamp / 1000.0)
272 group['lastWrite'] = last_write
273 return self.date_threshold > last_write
274
275
276 @LogGroup.filter_registry.register('cross-account')
277 class LogCrossAccountFilter(CrossAccountAccessFilter):
278
279 schema = type_schema(
280 'cross-account',
281 # white list accounts
282 whitelist_from=ValuesFrom.schema,
283 whitelist={'type': 'array', 'items': {'type': 'string'}})
284
285 permissions = ('logs:DescribeSubscriptionFilters',)
286
287 def process(self, resources, event=None):
288 client = local_session(self.manager.session_factory).client('logs')
289 accounts = self.get_accounts()
290 results = []
291 with self.executor_factory(max_workers=1) as w:
292 futures = []
293 for rset in chunks(resources, 50):
294 futures.append(
295 w.submit(
296 self.process_resource_set, client, accounts, rset))
297 for f in as_completed(futures):
298 if f.exception():
299 self.log.error(
300 "Error checking log groups cross-account %s",
301 f.exception())
302 continue
303 results.extend(f.result())
304 return results
305
306 def process_resource_set(self, client, accounts, resources):
307 results = []
308 for r in resources:
309 found = False
310 filters = self.manager.retry(
311 client.describe_subscription_filters,
312 logGroupName=r['logGroupName']).get('subscriptionFilters', ())
313 for f in filters:
314 if 'destinationArn' not in f:
315 continue
316 account_id = f['destinationArn'].split(':', 5)[4]
317 if account_id not in accounts:
318 r.setdefault('c7n:CrossAccountViolations', []).append(
319 account_id)
320 found = True
321 if found:
322 results.append(r)
323 return results
324
325
326 @LogGroup.action_registry.register('set-encryption')
327 class EncryptLogGroup(BaseAction):
328 """Encrypt/Decrypt a log group
329
330 :example:
331
332 .. code-block:: yaml
333
334 policies:
335 - name: encrypt-log-group
336 resource: log-group
337 filters:
338 - kmsKeyId: absent
339 actions:
340 - type: set-encryption
341 kms-key: alias/mylogkey
342 state: True
343
344 - name: decrypt-log-group
345 resource: log-group
346 filters:
347 - kmsKeyId: kms:key:arn
348 actions:
349 - type: set-encryption
350 state: False
351 """
352 schema = type_schema(
353 'set-encryption',
354 **{'kms-key': {'type': 'string'},
355 'state': {'type': 'boolean'}})
356 permissions = (
357 'logs:AssociateKmsKey', 'logs:DisassociateKmsKey', 'kms:DescribeKey')
358
359 def validate(self):
360 if not self.data.get('state', True):
361 return self
362 key = self.data.get('kms-key', '')
363 if not key:
364 raise ValueError('Must specify either a KMS key ARN or Alias')
365 if 'alias/' not in key and ':key/' not in key:
366 raise PolicyValidationError(
367 "Invalid kms key format %s" % key)
368 return self
369
370 def resolve_key(self, key):
371 if not key:
372 return
373
374 # Qualified arn for key
375 if key.startswith('arn:') and ':key/' in key:
376 return key
377
378 # Alias
379 key = local_session(
380 self.manager.session_factory).client(
381 'kms').describe_key(
382 KeyId=key)['KeyMetadata']['Arn']
383 return key
384
385 def process(self, resources):
386 session = local_session(self.manager.session_factory)
387 client = session.client('logs')
388
389 state = self.data.get('state', True)
390 key = self.resolve_key(self.data.get('kms-key'))
391
392 for r in resources:
393 try:
394 if state:
395 client.associate_kms_key(
396 logGroupName=r['logGroupName'], kmsKeyId=key)
397 else:
398 client.disassociate_kms_key(logGroupName=r['logGroupName'])
399 except client.exceptions.ResourceNotFoundException:
400 continue
401
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/c7n/resources/cw.py b/c7n/resources/cw.py
--- a/c7n/resources/cw.py
+++ b/c7n/resources/cw.py
@@ -196,7 +196,8 @@
client = local_session(self.manager.session_factory).client('logs')
days = self.data['days']
for r in resources:
- client.put_retention_policy(
+ self.manager.retry(
+ client.put_retention_policy,
logGroupName=r['logGroupName'],
retentionInDays=days)
| {"golden_diff": "diff --git a/c7n/resources/cw.py b/c7n/resources/cw.py\n--- a/c7n/resources/cw.py\n+++ b/c7n/resources/cw.py\n@@ -196,7 +196,8 @@\n client = local_session(self.manager.session_factory).client('logs')\n days = self.data['days']\n for r in resources:\n- client.put_retention_policy(\n+ self.manager.retry(\n+ client.put_retention_policy,\n logGroupName=r['logGroupName'],\n retentionInDays=days)\n", "issue": "Put retention days on Cloud Watch logs. with the TRaceback\nHere is the Policy that I'm running periodically. I'm getting throttling Errors.\r\n\r\npolicies:\r\n - name: Custodian-loggroup-retention\r\n resource: log-group\r\n description: |\r\n Checks log groups weekely and sets the log retention for log groups that doesn't have log retention set.\r\n mode:\r\n type: periodic\r\n schedule: \"cron(0 12 ? * 2 *)\"\r\n role: CustodianLambdaExecutionRole\r\n packages:\r\n - boto3\r\n - botocore\r\n filters:\r\n - type: value\r\n key: retentionInDays\r\n value: absent\r\n actions:\r\n - type: retention\r\n days: 400\r\n\r\n**Here is the traceback.**\r\n\r\n[DEBUG] 2020-02-10T16:00:25.670Z fc012410-e0d7-49a5-a52f-c8a996ffb8e8 metric:ResourceCount Count:820 policy:custodian-loggroup-retention restype:log-group scope:policy\r\n[DEBUG] 2020-02-10T16:00:25.670Z fc012410-e0d7-49a5-a52f-c8a996ffb8e8 metric:PolicyException Count:1 policy:custodian-loggroup-retention restype:log-group\r\n[DEBUG] 2020-02-10T16:00:25.670Z fc012410-e0d7-49a5-a52f-c8a996ffb8e8 metric:ApiCalls Count:110 policy:custodian-loggroup-retention restype:log-group\r\n[ERROR] 2020-02-10T16:00:25.670Z fc012410-e0d7-49a5-a52f-c8a996ffb8e8 Error while executing policy\r\nTraceback (most recent call last):\r\nFile \"/var/task/c7n/policy.py\", line 320, in run\r\nresults = a.process(resources)\r\nFile \"/var/task/c7n/resources/cw.py\", line 201, in process\r\nretentionInDays=days)\r\nFile \"/var/task/botocore/client.py\", line 276, in _api_call\r\nreturn self._make_api_call(operation_name, kwargs)\r\nFile \"/var/task/botocore/client.py\", line 586, in _make_api_call\r\nraise error_class(parsed_response, operation_name)\r\nbotocore.exceptions.ClientError: An error occurred (ThrottlingException) when calling the PutRetentionPolicy operation (reached max retries: 4): Rate exceeded\r\n\r\n\n", "before_files": [{"content": "# Copyright 2016-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom concurrent.futures import as_completed\nfrom datetime import datetime, timedelta\n\nfrom c7n.actions import BaseAction\nfrom c7n.exceptions import PolicyValidationError\nfrom c7n.filters import Filter, MetricsFilter\nfrom c7n.filters.iamaccess import CrossAccountAccessFilter\nfrom c7n.query import QueryResourceManager, ChildResourceManager, TypeInfo\nfrom c7n.manager import resources\nfrom c7n.resolver import ValuesFrom\nfrom c7n.tags import universal_augment\nfrom c7n.utils import type_schema, local_session, chunks, get_retry\n\n\[email protected]('alarm')\nclass Alarm(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'cloudwatch'\n arn_type = 'alarm'\n enum_spec = ('describe_alarms', 'MetricAlarms', None)\n id = 'AlarmArn'\n filter_name = 'AlarmNames'\n filter_type = 'list'\n name = 'AlarmName'\n date = 'AlarmConfigurationUpdatedTimestamp'\n config_type = 'AWS::CloudWatch::Alarm'\n\n retry = staticmethod(get_retry(('Throttled',)))\n\n\[email protected]_registry.register('delete')\nclass AlarmDelete(BaseAction):\n \"\"\"Delete a cloudwatch alarm.\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudwatch-delete-stale-alarms\n resource: alarm\n filters:\n - type: value\n value_type: age\n key: StateUpdatedTimestamp\n value: 30\n op: ge\n - StateValue: INSUFFICIENT_DATA\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = ('cloudwatch:DeleteAlarms',)\n\n def process(self, resources):\n client = local_session(\n self.manager.session_factory).client('cloudwatch')\n\n for resource_set in chunks(resources, size=100):\n self.manager.retry(\n client.delete_alarms,\n AlarmNames=[r['AlarmName'] for r in resource_set])\n\n\[email protected]('event-rule')\nclass EventRule(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'events'\n arn_type = 'event-rule'\n enum_spec = ('list_rules', 'Rules', None)\n name = \"Name\"\n id = \"Name\"\n filter_name = \"NamePrefix\"\n filter_type = \"scalar\"\n\n\[email protected]_registry.register('metrics')\nclass EventRuleMetrics(MetricsFilter):\n\n def get_dimensions(self, resource):\n return [{'Name': 'RuleName', 'Value': resource['Name']}]\n\n\[email protected]('event-rule-target')\nclass EventRuleTarget(ChildResourceManager):\n\n class resource_type(TypeInfo):\n service = 'events'\n arn = False\n arn_type = 'event-rule-target'\n enum_spec = ('list_targets_by_rule', 'Targets', None)\n parent_spec = ('event-rule', 'Rule', True)\n name = id = 'Id'\n\n\[email protected]_registry.register('cross-account')\nclass CrossAccountFilter(CrossAccountAccessFilter):\n\n schema = type_schema(\n 'cross-account',\n # white list accounts\n whitelist_from=ValuesFrom.schema,\n whitelist={'type': 'array', 'items': {'type': 'string'}})\n\n # dummy permission\n permissions = ('events:ListTargetsByRule',)\n\n def __call__(self, r):\n account_id = r['Arn'].split(':', 5)[4]\n return account_id not in self.accounts\n\n\[email protected]_registry.register('delete')\nclass DeleteTarget(BaseAction):\n\n schema = type_schema('delete')\n permissions = ('events:RemoveTargets',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('events')\n rule_targets = {}\n for r in resources:\n rule_targets.setdefault(r['c7n:parent-id'], []).append(r['Id'])\n\n for rule_id, target_ids in rule_targets.items():\n client.remove_targets(\n Ids=target_ids,\n Rule=rule_id)\n\n\[email protected]('log-group')\nclass LogGroup(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'logs'\n arn_type = 'log-group'\n enum_spec = ('describe_log_groups', 'logGroups', None)\n name = 'logGroupName'\n id = 'arn'\n filter_name = 'logGroupNamePrefix'\n filter_type = 'scalar'\n dimension = 'LogGroupName'\n date = 'creationTime'\n universal_taggable = True\n\n def augment(self, resources):\n resources = universal_augment(self, resources)\n for r in resources:\n r['creationTime'] = r['creationTime'] / 1000.0\n return resources\n\n def get_arns(self, resources):\n # log group arn in resource describe has ':*' suffix, not all\n # apis can use that form, so normalize to standard arn.\n return [r['arn'][:-2] for r in resources]\n\n\[email protected]_registry.register('retention')\nclass Retention(BaseAction):\n \"\"\"Action to set the retention period (in days) for CloudWatch log groups\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudwatch-set-log-group-retention\n resource: log-group\n actions:\n - type: retention\n days: 200\n \"\"\"\n\n schema = type_schema('retention', days={'type': 'integer'})\n permissions = ('logs:PutRetentionPolicy',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('logs')\n days = self.data['days']\n for r in resources:\n client.put_retention_policy(\n logGroupName=r['logGroupName'],\n retentionInDays=days)\n\n\[email protected]_registry.register('delete')\nclass Delete(BaseAction):\n \"\"\"\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudwatch-delete-stale-log-group\n resource: log-group\n filters:\n - type: last-write\n days: 182.5\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = ('logs:DeleteLogGroup',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('logs')\n for r in resources:\n client.delete_log_group(logGroupName=r['logGroupName'])\n\n\[email protected]_registry.register('last-write')\nclass LastWriteDays(Filter):\n \"\"\"Filters CloudWatch log groups by last write\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudwatch-stale-groups\n resource: log-group\n filters:\n - type: last-write\n days: 60\n \"\"\"\n\n schema = type_schema(\n 'last-write', days={'type': 'number'})\n permissions = ('logs:DescribeLogStreams',)\n\n def process(self, resources, event=None):\n client = local_session(self.manager.session_factory).client('logs')\n self.date_threshold = datetime.utcnow() - timedelta(\n days=self.data['days'])\n return [r for r in resources if self.check_group(client, r)]\n\n def check_group(self, client, group):\n streams = client.describe_log_streams(\n logGroupName=group['logGroupName'],\n orderBy='LastEventTime',\n descending=True,\n limit=3).get('logStreams')\n group['streams'] = streams\n if not streams:\n last_timestamp = group['creationTime']\n elif streams[0]['storedBytes'] == 0:\n last_timestamp = streams[0]['creationTime']\n else:\n last_timestamp = streams[0]['lastIngestionTime']\n\n last_write = datetime.fromtimestamp(last_timestamp / 1000.0)\n group['lastWrite'] = last_write\n return self.date_threshold > last_write\n\n\[email protected]_registry.register('cross-account')\nclass LogCrossAccountFilter(CrossAccountAccessFilter):\n\n schema = type_schema(\n 'cross-account',\n # white list accounts\n whitelist_from=ValuesFrom.schema,\n whitelist={'type': 'array', 'items': {'type': 'string'}})\n\n permissions = ('logs:DescribeSubscriptionFilters',)\n\n def process(self, resources, event=None):\n client = local_session(self.manager.session_factory).client('logs')\n accounts = self.get_accounts()\n results = []\n with self.executor_factory(max_workers=1) as w:\n futures = []\n for rset in chunks(resources, 50):\n futures.append(\n w.submit(\n self.process_resource_set, client, accounts, rset))\n for f in as_completed(futures):\n if f.exception():\n self.log.error(\n \"Error checking log groups cross-account %s\",\n f.exception())\n continue\n results.extend(f.result())\n return results\n\n def process_resource_set(self, client, accounts, resources):\n results = []\n for r in resources:\n found = False\n filters = self.manager.retry(\n client.describe_subscription_filters,\n logGroupName=r['logGroupName']).get('subscriptionFilters', ())\n for f in filters:\n if 'destinationArn' not in f:\n continue\n account_id = f['destinationArn'].split(':', 5)[4]\n if account_id not in accounts:\n r.setdefault('c7n:CrossAccountViolations', []).append(\n account_id)\n found = True\n if found:\n results.append(r)\n return results\n\n\[email protected]_registry.register('set-encryption')\nclass EncryptLogGroup(BaseAction):\n \"\"\"Encrypt/Decrypt a log group\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: encrypt-log-group\n resource: log-group\n filters:\n - kmsKeyId: absent\n actions:\n - type: set-encryption\n kms-key: alias/mylogkey\n state: True\n\n - name: decrypt-log-group\n resource: log-group\n filters:\n - kmsKeyId: kms:key:arn\n actions:\n - type: set-encryption\n state: False\n \"\"\"\n schema = type_schema(\n 'set-encryption',\n **{'kms-key': {'type': 'string'},\n 'state': {'type': 'boolean'}})\n permissions = (\n 'logs:AssociateKmsKey', 'logs:DisassociateKmsKey', 'kms:DescribeKey')\n\n def validate(self):\n if not self.data.get('state', True):\n return self\n key = self.data.get('kms-key', '')\n if not key:\n raise ValueError('Must specify either a KMS key ARN or Alias')\n if 'alias/' not in key and ':key/' not in key:\n raise PolicyValidationError(\n \"Invalid kms key format %s\" % key)\n return self\n\n def resolve_key(self, key):\n if not key:\n return\n\n # Qualified arn for key\n if key.startswith('arn:') and ':key/' in key:\n return key\n\n # Alias\n key = local_session(\n self.manager.session_factory).client(\n 'kms').describe_key(\n KeyId=key)['KeyMetadata']['Arn']\n return key\n\n def process(self, resources):\n session = local_session(self.manager.session_factory)\n client = session.client('logs')\n\n state = self.data.get('state', True)\n key = self.resolve_key(self.data.get('kms-key'))\n\n for r in resources:\n try:\n if state:\n client.associate_kms_key(\n logGroupName=r['logGroupName'], kmsKeyId=key)\n else:\n client.disassociate_kms_key(logGroupName=r['logGroupName'])\n except client.exceptions.ResourceNotFoundException:\n continue\n", "path": "c7n/resources/cw.py"}], "after_files": [{"content": "# Copyright 2016-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom concurrent.futures import as_completed\nfrom datetime import datetime, timedelta\n\nfrom c7n.actions import BaseAction\nfrom c7n.exceptions import PolicyValidationError\nfrom c7n.filters import Filter, MetricsFilter\nfrom c7n.filters.iamaccess import CrossAccountAccessFilter\nfrom c7n.query import QueryResourceManager, ChildResourceManager, TypeInfo\nfrom c7n.manager import resources\nfrom c7n.resolver import ValuesFrom\nfrom c7n.tags import universal_augment\nfrom c7n.utils import type_schema, local_session, chunks, get_retry\n\n\[email protected]('alarm')\nclass Alarm(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'cloudwatch'\n arn_type = 'alarm'\n enum_spec = ('describe_alarms', 'MetricAlarms', None)\n id = 'AlarmArn'\n filter_name = 'AlarmNames'\n filter_type = 'list'\n name = 'AlarmName'\n date = 'AlarmConfigurationUpdatedTimestamp'\n config_type = 'AWS::CloudWatch::Alarm'\n\n retry = staticmethod(get_retry(('Throttled',)))\n\n\[email protected]_registry.register('delete')\nclass AlarmDelete(BaseAction):\n \"\"\"Delete a cloudwatch alarm.\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudwatch-delete-stale-alarms\n resource: alarm\n filters:\n - type: value\n value_type: age\n key: StateUpdatedTimestamp\n value: 30\n op: ge\n - StateValue: INSUFFICIENT_DATA\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = ('cloudwatch:DeleteAlarms',)\n\n def process(self, resources):\n client = local_session(\n self.manager.session_factory).client('cloudwatch')\n\n for resource_set in chunks(resources, size=100):\n self.manager.retry(\n client.delete_alarms,\n AlarmNames=[r['AlarmName'] for r in resource_set])\n\n\[email protected]('event-rule')\nclass EventRule(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'events'\n arn_type = 'event-rule'\n enum_spec = ('list_rules', 'Rules', None)\n name = \"Name\"\n id = \"Name\"\n filter_name = \"NamePrefix\"\n filter_type = \"scalar\"\n\n\[email protected]_registry.register('metrics')\nclass EventRuleMetrics(MetricsFilter):\n\n def get_dimensions(self, resource):\n return [{'Name': 'RuleName', 'Value': resource['Name']}]\n\n\[email protected]('event-rule-target')\nclass EventRuleTarget(ChildResourceManager):\n\n class resource_type(TypeInfo):\n service = 'events'\n arn = False\n arn_type = 'event-rule-target'\n enum_spec = ('list_targets_by_rule', 'Targets', None)\n parent_spec = ('event-rule', 'Rule', True)\n name = id = 'Id'\n\n\[email protected]_registry.register('cross-account')\nclass CrossAccountFilter(CrossAccountAccessFilter):\n\n schema = type_schema(\n 'cross-account',\n # white list accounts\n whitelist_from=ValuesFrom.schema,\n whitelist={'type': 'array', 'items': {'type': 'string'}})\n\n # dummy permission\n permissions = ('events:ListTargetsByRule',)\n\n def __call__(self, r):\n account_id = r['Arn'].split(':', 5)[4]\n return account_id not in self.accounts\n\n\[email protected]_registry.register('delete')\nclass DeleteTarget(BaseAction):\n\n schema = type_schema('delete')\n permissions = ('events:RemoveTargets',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('events')\n rule_targets = {}\n for r in resources:\n rule_targets.setdefault(r['c7n:parent-id'], []).append(r['Id'])\n\n for rule_id, target_ids in rule_targets.items():\n client.remove_targets(\n Ids=target_ids,\n Rule=rule_id)\n\n\[email protected]('log-group')\nclass LogGroup(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'logs'\n arn_type = 'log-group'\n enum_spec = ('describe_log_groups', 'logGroups', None)\n name = 'logGroupName'\n id = 'arn'\n filter_name = 'logGroupNamePrefix'\n filter_type = 'scalar'\n dimension = 'LogGroupName'\n date = 'creationTime'\n universal_taggable = True\n\n def augment(self, resources):\n resources = universal_augment(self, resources)\n for r in resources:\n r['creationTime'] = r['creationTime'] / 1000.0\n return resources\n\n def get_arns(self, resources):\n # log group arn in resource describe has ':*' suffix, not all\n # apis can use that form, so normalize to standard arn.\n return [r['arn'][:-2] for r in resources]\n\n\[email protected]_registry.register('retention')\nclass Retention(BaseAction):\n \"\"\"Action to set the retention period (in days) for CloudWatch log groups\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudwatch-set-log-group-retention\n resource: log-group\n actions:\n - type: retention\n days: 200\n \"\"\"\n\n schema = type_schema('retention', days={'type': 'integer'})\n permissions = ('logs:PutRetentionPolicy',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('logs')\n days = self.data['days']\n for r in resources:\n self.manager.retry(\n client.put_retention_policy,\n logGroupName=r['logGroupName'],\n retentionInDays=days)\n\n\[email protected]_registry.register('delete')\nclass Delete(BaseAction):\n \"\"\"\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudwatch-delete-stale-log-group\n resource: log-group\n filters:\n - type: last-write\n days: 182.5\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = ('logs:DeleteLogGroup',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('logs')\n for r in resources:\n client.delete_log_group(logGroupName=r['logGroupName'])\n\n\[email protected]_registry.register('last-write')\nclass LastWriteDays(Filter):\n \"\"\"Filters CloudWatch log groups by last write\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudwatch-stale-groups\n resource: log-group\n filters:\n - type: last-write\n days: 60\n \"\"\"\n\n schema = type_schema(\n 'last-write', days={'type': 'number'})\n permissions = ('logs:DescribeLogStreams',)\n\n def process(self, resources, event=None):\n client = local_session(self.manager.session_factory).client('logs')\n self.date_threshold = datetime.utcnow() - timedelta(\n days=self.data['days'])\n return [r for r in resources if self.check_group(client, r)]\n\n def check_group(self, client, group):\n streams = client.describe_log_streams(\n logGroupName=group['logGroupName'],\n orderBy='LastEventTime',\n descending=True,\n limit=3).get('logStreams')\n group['streams'] = streams\n if not streams:\n last_timestamp = group['creationTime']\n elif streams[0]['storedBytes'] == 0:\n last_timestamp = streams[0]['creationTime']\n else:\n last_timestamp = streams[0]['lastIngestionTime']\n\n last_write = datetime.fromtimestamp(last_timestamp / 1000.0)\n group['lastWrite'] = last_write\n return self.date_threshold > last_write\n\n\[email protected]_registry.register('cross-account')\nclass LogCrossAccountFilter(CrossAccountAccessFilter):\n\n schema = type_schema(\n 'cross-account',\n # white list accounts\n whitelist_from=ValuesFrom.schema,\n whitelist={'type': 'array', 'items': {'type': 'string'}})\n\n permissions = ('logs:DescribeSubscriptionFilters',)\n\n def process(self, resources, event=None):\n client = local_session(self.manager.session_factory).client('logs')\n accounts = self.get_accounts()\n results = []\n with self.executor_factory(max_workers=1) as w:\n futures = []\n for rset in chunks(resources, 50):\n futures.append(\n w.submit(\n self.process_resource_set, client, accounts, rset))\n for f in as_completed(futures):\n if f.exception():\n self.log.error(\n \"Error checking log groups cross-account %s\",\n f.exception())\n continue\n results.extend(f.result())\n return results\n\n def process_resource_set(self, client, accounts, resources):\n results = []\n for r in resources:\n found = False\n filters = self.manager.retry(\n client.describe_subscription_filters,\n logGroupName=r['logGroupName']).get('subscriptionFilters', ())\n for f in filters:\n if 'destinationArn' not in f:\n continue\n account_id = f['destinationArn'].split(':', 5)[4]\n if account_id not in accounts:\n r.setdefault('c7n:CrossAccountViolations', []).append(\n account_id)\n found = True\n if found:\n results.append(r)\n return results\n\n\[email protected]_registry.register('set-encryption')\nclass EncryptLogGroup(BaseAction):\n \"\"\"Encrypt/Decrypt a log group\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: encrypt-log-group\n resource: log-group\n filters:\n - kmsKeyId: absent\n actions:\n - type: set-encryption\n kms-key: alias/mylogkey\n state: True\n\n - name: decrypt-log-group\n resource: log-group\n filters:\n - kmsKeyId: kms:key:arn\n actions:\n - type: set-encryption\n state: False\n \"\"\"\n schema = type_schema(\n 'set-encryption',\n **{'kms-key': {'type': 'string'},\n 'state': {'type': 'boolean'}})\n permissions = (\n 'logs:AssociateKmsKey', 'logs:DisassociateKmsKey', 'kms:DescribeKey')\n\n def validate(self):\n if not self.data.get('state', True):\n return self\n key = self.data.get('kms-key', '')\n if not key:\n raise ValueError('Must specify either a KMS key ARN or Alias')\n if 'alias/' not in key and ':key/' not in key:\n raise PolicyValidationError(\n \"Invalid kms key format %s\" % key)\n return self\n\n def resolve_key(self, key):\n if not key:\n return\n\n # Qualified arn for key\n if key.startswith('arn:') and ':key/' in key:\n return key\n\n # Alias\n key = local_session(\n self.manager.session_factory).client(\n 'kms').describe_key(\n KeyId=key)['KeyMetadata']['Arn']\n return key\n\n def process(self, resources):\n session = local_session(self.manager.session_factory)\n client = session.client('logs')\n\n state = self.data.get('state', True)\n key = self.resolve_key(self.data.get('kms-key'))\n\n for r in resources:\n try:\n if state:\n client.associate_kms_key(\n logGroupName=r['logGroupName'], kmsKeyId=key)\n else:\n client.disassociate_kms_key(logGroupName=r['logGroupName'])\n except client.exceptions.ResourceNotFoundException:\n continue\n", "path": "c7n/resources/cw.py"}]} |
gh_patches_debug_1518 | rasdani/github-patches | git_diff | ray-project__ray-8491 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Time to initialize a policy grows linearly with the number of agents
<!--
General questions should be asked on the mailing list [email protected].
Questions about how to use Ray should be asked on
[StackOverflow](https://stackoverflow.com/questions/tagged/ray).
Before submitting an issue, please fill out the following form.
-->
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Linux Ubuntu 18.04
- **Ray installed from (source or binary)**: Binary
- **Ray version**: 0.7.4
- **Python version**: 3.7.4
- **Exact command to reproduce**: N/A
<!--
You can obtain the Ray version with
python -c "import ray; print(ray.__version__)"
-->
### Describe the problem
<!-- Describe the problem clearly here. -->
I noticed that in multi agent settings, the time to initialize a policy per agent increases as more agents are initialized. In the sample output I provided below, you can see that the time to initialize a single DynamicTFPolicy grows from 4.6 seconds to 15.3 seconds from the first agent to the tenth agent created. Line 291 of `rllib/policy/dynamic_tf_policy.py` is
```python
self._sess.run(tf.global_variables_initializer())
```
which I believe will run one time for each agent initialized. If I'm not mistaken, this means that every variable in the computation graph is being initialized each time that we initialize a DynamicTFPolicy. If initializing a DynamicTFPolicy adds new variables to the computation graph (as I believe it does), this would explain why the time to initialize a DynamicTFPolicy grows over time: We are initializing every variable in the computation graph, and the computation graph is growing. My question is, why does line 291 run a global variables initializer? Is there a reason for this that I can't see inside this method? How hard would it be to modify this to only initialize variables in the individual policy that we care to initialize?
I'm asking this because as detailed in #5753, I'm trying to modify rllib to allow initialization and removal of policies during training. The overhead incurred by this initialization quickly slows the training script down enough to be useless. Also, if anyone knows what the resource bottleneck is for policy initialization, that would be very helpful to know for when we're picking new hardware. Does it need a ton of cores to run in parallel, or more memory, or a bigger GPU or more GPUs or something? Thanks.
### Source code / logs
<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. -->
```
(pytorch) root@e3a955e42cae:~/bees/bees# python trainer.py settings/settings.json
2019-10-23 12:38:04,168 WARNING worker.py:1426 -- WARNING: Not updating worker name since `setproctitle` is not installed. Install this with `pip install setproctitle` (or ray[debug]) to enable monitoring of worker processes.
2019-10-23 12:38:04,169 INFO resource_spec.py:205 -- Starting Ray with 3.52 GiB memory available for workers and up to 1.78 GiB for objects. You can adjust these settings with ray.init(memory=<bytes>, object_store_memory=<bytes>).
2019-10-23 12:38:05,354 INFO trainer.py:344 -- Tip: set 'eager': true or the --eager flag to enable TensorFlow eager execution
2019-10-23 12:38:05,754 WARNING ppo.py:149 -- Using the simple minibatch optimizer. This will significantly reduce performance, consider simple_optimizer=False.
DTFP: 4.604122s
DTFP: 4.856234s
DTFP: 5.630484s
DTFP: 6.850456s
DTFP: 7.856700s
DTFP: 9.624164s
DTFP: 10.894944s
DTFP: 12.129192s
DTFP: 14.210247s
DTFP: 15.342738s
```
Line 130 in `tf_policy_template.py` (modified to print debug output above)
```python
t = time.time()
DynamicTFPolicy.__init__(
self,
obs_space,
action_space,
config,
loss_fn,
stats_fn=stats_fn,
grad_stats_fn=grad_stats_fn,
before_loss_init=before_loss_init_wrapper,
make_model=make_model,
action_sampler_fn=action_sampler_fn,
existing_model=existing_model,
existing_inputs=existing_inputs,
get_batch_divisibility_req=get_batch_divisibility_req,
obs_include_prev_action_reward=obs_include_prev_action_reward)
print("DTFP: %fs" % (time.time() - t))
```
Snippet of trainer script used.
```python
# pylint: disable=invalid-name
if __name__ == "__main__":
ray.init()
# Get ``settings`` file for now.
settings_file = sys.argv[1]
with open(settings_file, "r") as f:
settings = json.load(f)
env_config = settings["env"]
time_steps = env_config["time_steps"]
space_env = create_env(settings)
env = create_env(settings)
# Register environment
register_env("world", lambda _: env)
# Build environment instance to get ``obs_space``.
obs_space = space_env.observation_space
act_space = space_env.action_space
# You can also have multiple policies per trainer, but here we just
# show one each for PPO and DQN.
policies: Dict[str, Tuple[Any, gym.Space, gym.Space, Dict[Any, Any]]] = {
"0": (PPOTFPolicy, obs_space, act_space, {}),
"1": (PPOTFPolicy, obs_space, act_space, {}),
"2": (PPOTFPolicy, obs_space, act_space, {}),
"3": (PPOTFPolicy, obs_space, act_space, {}),
"4": (PPOTFPolicy, obs_space, act_space, {}),
"5": (PPOTFPolicy, obs_space, act_space, {}),
"6": (PPOTFPolicy, obs_space, act_space, {}),
"7": (PPOTFPolicy, obs_space, act_space, {}),
"8": (PPOTFPolicy, obs_space, act_space, {}),
"9": (PPOTFPolicy, obs_space, act_space, {}),
}
def policy_mapping_fn(agent_id: int) -> str:
""" Returns the given agent's policy identifier. """
return str(agent_id)
ppo_trainer = PPOTrainer(
env="bee_world",
config={
"multiagent": {
"policies": policies,
"policy_mapping_fn": policy_mapping_fn,
"policies_to_train": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"],
},
"simple_optimizer": True,
# Disable filters, otherwise we would need to synchronize those
# as well to the DQN agent.
"observation_filter": "NoFilter",
"num_workers": 2,
"num_gpus": 1,
"train_batch_size": 2,
"sample_batch_size": 1,
"sgd_minibatch_size": 2,
},
)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/ray/experimental/tf_utils.py`
Content:
```
1 from collections import deque, OrderedDict
2 import numpy as np
3
4 from ray.rllib.utils import try_import_tf
5
6 tf = try_import_tf()
7
8
9 def unflatten(vector, shapes):
10 i = 0
11 arrays = []
12 for shape in shapes:
13 size = np.prod(shape, dtype=np.int)
14 array = vector[i:(i + size)].reshape(shape)
15 arrays.append(array)
16 i += size
17 assert len(vector) == i, "Passed weight does not have the correct shape."
18 return arrays
19
20
21 class TensorFlowVariables:
22 """A class used to set and get weights for Tensorflow networks.
23
24 Attributes:
25 sess (tf.Session): The tensorflow session used to run assignment.
26 variables (Dict[str, tf.Variable]): Extracted variables from the loss
27 or additional variables that are passed in.
28 placeholders (Dict[str, tf.placeholders]): Placeholders for weights.
29 assignment_nodes (Dict[str, tf.Tensor]): Nodes that assign weights.
30 """
31
32 def __init__(self, output, sess=None, input_variables=None):
33 """Creates TensorFlowVariables containing extracted variables.
34
35 The variables are extracted by performing a BFS search on the
36 dependency graph with loss as the root node. After the tree is
37 traversed and those variables are collected, we append input_variables
38 to the collected variables. For each variable in the list, the
39 variable has a placeholder and assignment operation created for it.
40
41 Args:
42 output (tf.Operation, List[tf.Operation]): The tensorflow
43 operation to extract all variables from.
44 sess (tf.Session): Session used for running the get and set
45 methods.
46 input_variables (List[tf.Variables]): Variables to include in the
47 list.
48 """
49 self.sess = sess
50 if not isinstance(output, (list, tuple)):
51 output = [output]
52 queue = deque(output)
53 variable_names = []
54 explored_inputs = set(output)
55
56 # We do a BFS on the dependency graph of the input function to find
57 # the variables.
58 while len(queue) != 0:
59 tf_obj = queue.popleft()
60 if tf_obj is None:
61 continue
62 # The object put into the queue is not necessarily an operation,
63 # so we want the op attribute to get the operation underlying the
64 # object. Only operations contain the inputs that we can explore.
65 if hasattr(tf_obj, "op"):
66 tf_obj = tf_obj.op
67 for input_op in tf_obj.inputs:
68 if input_op not in explored_inputs:
69 queue.append(input_op)
70 explored_inputs.add(input_op)
71 # Tensorflow control inputs can be circular, so we keep track of
72 # explored operations.
73 for control in tf_obj.control_inputs:
74 if control not in explored_inputs:
75 queue.append(control)
76 explored_inputs.add(control)
77 if ("Variable" in tf_obj.node_def.op
78 or "VarHandle" in tf_obj.node_def.op):
79 variable_names.append(tf_obj.node_def.name)
80 self.variables = OrderedDict()
81 variable_list = [
82 v for v in tf.global_variables()
83 if v.op.node_def.name in variable_names
84 ]
85 if input_variables is not None:
86 variable_list += input_variables
87 for v in variable_list:
88 self.variables[v.op.node_def.name] = v
89
90 self.placeholders = {}
91 self.assignment_nodes = {}
92
93 # Create new placeholders to put in custom weights.
94 for k, var in self.variables.items():
95 self.placeholders[k] = tf.placeholder(
96 var.value().dtype,
97 var.get_shape().as_list(),
98 name="Placeholder_" + k)
99 self.assignment_nodes[k] = var.assign(self.placeholders[k])
100
101 def set_session(self, sess):
102 """Sets the current session used by the class.
103
104 Args:
105 sess (tf.Session): Session to set the attribute with.
106 """
107 self.sess = sess
108
109 def get_flat_size(self):
110 """Returns the total length of all of the flattened variables.
111
112 Returns:
113 The length of all flattened variables concatenated.
114 """
115 return sum(
116 np.prod(v.get_shape().as_list()) for v in self.variables.values())
117
118 def _check_sess(self):
119 """Checks if the session is set, and if not throw an error message."""
120 assert self.sess is not None, ("The session is not set. Set the "
121 "session either by passing it into the "
122 "TensorFlowVariables constructor or by "
123 "calling set_session(sess).")
124
125 def get_flat(self):
126 """Gets the weights and returns them as a flat array.
127
128 Returns:
129 1D Array containing the flattened weights.
130 """
131 self._check_sess()
132 return np.concatenate([
133 v.eval(session=self.sess).flatten()
134 for v in self.variables.values()
135 ])
136
137 def set_flat(self, new_weights):
138 """Sets the weights to new_weights, converting from a flat array.
139
140 Note:
141 You can only set all weights in the network using this function,
142 i.e., the length of the array must match get_flat_size.
143
144 Args:
145 new_weights (np.ndarray): Flat array containing weights.
146 """
147 self._check_sess()
148 shapes = [v.get_shape().as_list() for v in self.variables.values()]
149 arrays = unflatten(new_weights, shapes)
150 placeholders = [
151 self.placeholders[k] for k, v in self.variables.items()
152 ]
153 self.sess.run(
154 list(self.assignment_nodes.values()),
155 feed_dict=dict(zip(placeholders, arrays)))
156
157 def get_weights(self):
158 """Returns a dictionary containing the weights of the network.
159
160 Returns:
161 Dictionary mapping variable names to their weights.
162 """
163 self._check_sess()
164 return {
165 k: v.eval(session=self.sess)
166 for k, v in self.variables.items()
167 }
168
169 def set_weights(self, new_weights):
170 """Sets the weights to new_weights.
171
172 Note:
173 Can set subsets of variables as well, by only passing in the
174 variables you want to be set.
175
176 Args:
177 new_weights (Dict): Dictionary mapping variable names to their
178 weights.
179 """
180 self._check_sess()
181 assign_list = [
182 self.assignment_nodes[name] for name in new_weights.keys()
183 if name in self.assignment_nodes
184 ]
185 assert assign_list, ("No variables in the input matched those in the "
186 "network. Possible cause: Two networks were "
187 "defined in the same TensorFlow graph. To fix "
188 "this, place each network definition in its own "
189 "tf.Graph.")
190 self.sess.run(
191 assign_list,
192 feed_dict={
193 self.placeholders[name]: value
194 for (name, value) in new_weights.items()
195 if name in self.placeholders
196 })
197
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/ray/experimental/tf_utils.py b/python/ray/experimental/tf_utils.py
--- a/python/ray/experimental/tf_utils.py
+++ b/python/ray/experimental/tf_utils.py
@@ -161,10 +161,7 @@
Dictionary mapping variable names to their weights.
"""
self._check_sess()
- return {
- k: v.eval(session=self.sess)
- for k, v in self.variables.items()
- }
+ return self.sess.run(self.variables)
def set_weights(self, new_weights):
"""Sets the weights to new_weights.
| {"golden_diff": "diff --git a/python/ray/experimental/tf_utils.py b/python/ray/experimental/tf_utils.py\n--- a/python/ray/experimental/tf_utils.py\n+++ b/python/ray/experimental/tf_utils.py\n@@ -161,10 +161,7 @@\n Dictionary mapping variable names to their weights.\n \"\"\"\n self._check_sess()\n- return {\n- k: v.eval(session=self.sess)\n- for k, v in self.variables.items()\n- }\n+ return self.sess.run(self.variables)\n \n def set_weights(self, new_weights):\n \"\"\"Sets the weights to new_weights.\n", "issue": "Time to initialize a policy grows linearly with the number of agents\n<!--\r\nGeneral questions should be asked on the mailing list [email protected].\r\nQuestions about how to use Ray should be asked on\r\n[StackOverflow](https://stackoverflow.com/questions/tagged/ray).\r\n\r\nBefore submitting an issue, please fill out the following form.\r\n-->\r\n\r\n### System information\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Linux Ubuntu 18.04\r\n- **Ray installed from (source or binary)**: Binary\r\n- **Ray version**: 0.7.4\r\n- **Python version**: 3.7.4\r\n- **Exact command to reproduce**: N/A\r\n\r\n<!--\r\nYou can obtain the Ray version with\r\n\r\npython -c \"import ray; print(ray.__version__)\"\r\n-->\r\n\r\n### Describe the problem\r\n<!-- Describe the problem clearly here. -->\r\nI noticed that in multi agent settings, the time to initialize a policy per agent increases as more agents are initialized. In the sample output I provided below, you can see that the time to initialize a single DynamicTFPolicy grows from 4.6 seconds to 15.3 seconds from the first agent to the tenth agent created. Line 291 of `rllib/policy/dynamic_tf_policy.py` is\r\n```python\r\nself._sess.run(tf.global_variables_initializer())\r\n```\r\nwhich I believe will run one time for each agent initialized. If I'm not mistaken, this means that every variable in the computation graph is being initialized each time that we initialize a DynamicTFPolicy. If initializing a DynamicTFPolicy adds new variables to the computation graph (as I believe it does), this would explain why the time to initialize a DynamicTFPolicy grows over time: We are initializing every variable in the computation graph, and the computation graph is growing. My question is, why does line 291 run a global variables initializer? Is there a reason for this that I can't see inside this method? How hard would it be to modify this to only initialize variables in the individual policy that we care to initialize?\r\n\r\nI'm asking this because as detailed in #5753, I'm trying to modify rllib to allow initialization and removal of policies during training. The overhead incurred by this initialization quickly slows the training script down enough to be useless. Also, if anyone knows what the resource bottleneck is for policy initialization, that would be very helpful to know for when we're picking new hardware. Does it need a ton of cores to run in parallel, or more memory, or a bigger GPU or more GPUs or something? Thanks.\r\n\r\n\r\n### Source code / logs\r\n<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. -->\r\n\r\n```\r\n(pytorch) root@e3a955e42cae:~/bees/bees# python trainer.py settings/settings.json\r\n2019-10-23 12:38:04,168 WARNING worker.py:1426 -- WARNING: Not updating worker name since `setproctitle` is not installed. Install this with `pip install setproctitle` (or ray[debug]) to enable monitoring of worker processes.\r\n2019-10-23 12:38:04,169 INFO resource_spec.py:205 -- Starting Ray with 3.52 GiB memory available for workers and up to 1.78 GiB for objects. You can adjust these settings with ray.init(memory=<bytes>, object_store_memory=<bytes>).\r\n2019-10-23 12:38:05,354 INFO trainer.py:344 -- Tip: set 'eager': true or the --eager flag to enable TensorFlow eager execution\r\n2019-10-23 12:38:05,754 WARNING ppo.py:149 -- Using the simple minibatch optimizer. This will significantly reduce performance, consider simple_optimizer=False.\r\nDTFP: 4.604122s\r\nDTFP: 4.856234s\r\nDTFP: 5.630484s\r\nDTFP: 6.850456s\r\nDTFP: 7.856700s\r\nDTFP: 9.624164s\r\nDTFP: 10.894944s\r\nDTFP: 12.129192s\r\nDTFP: 14.210247s\r\nDTFP: 15.342738s\r\n```\r\n\r\nLine 130 in `tf_policy_template.py` (modified to print debug output above)\r\n```python\r\n t = time.time()\r\n\r\n DynamicTFPolicy.__init__(\r\n self,\r\n obs_space,\r\n action_space,\r\n config,\r\n loss_fn,\r\n stats_fn=stats_fn,\r\n grad_stats_fn=grad_stats_fn,\r\n before_loss_init=before_loss_init_wrapper,\r\n make_model=make_model,\r\n action_sampler_fn=action_sampler_fn,\r\n existing_model=existing_model,\r\n existing_inputs=existing_inputs,\r\n get_batch_divisibility_req=get_batch_divisibility_req,\r\n obs_include_prev_action_reward=obs_include_prev_action_reward)\r\n\r\n print(\"DTFP: %fs\" % (time.time() - t))\r\n```\r\n\r\nSnippet of trainer script used.\r\n```python\r\n# pylint: disable=invalid-name\r\nif __name__ == \"__main__\":\r\n ray.init()\r\n\r\n # Get ``settings`` file for now.\r\n settings_file = sys.argv[1]\r\n with open(settings_file, \"r\") as f:\r\n settings = json.load(f)\r\n\r\n env_config = settings[\"env\"]\r\n time_steps = env_config[\"time_steps\"]\r\n\r\n space_env = create_env(settings)\r\n env = create_env(settings)\r\n\r\n # Register environment\r\n register_env(\"world\", lambda _: env)\r\n\r\n # Build environment instance to get ``obs_space``.\r\n obs_space = space_env.observation_space\r\n act_space = space_env.action_space\r\n\r\n # You can also have multiple policies per trainer, but here we just\r\n # show one each for PPO and DQN.\r\n policies: Dict[str, Tuple[Any, gym.Space, gym.Space, Dict[Any, Any]]] = {\r\n \"0\": (PPOTFPolicy, obs_space, act_space, {}),\r\n \"1\": (PPOTFPolicy, obs_space, act_space, {}),\r\n \"2\": (PPOTFPolicy, obs_space, act_space, {}),\r\n \"3\": (PPOTFPolicy, obs_space, act_space, {}),\r\n \"4\": (PPOTFPolicy, obs_space, act_space, {}),\r\n \"5\": (PPOTFPolicy, obs_space, act_space, {}),\r\n \"6\": (PPOTFPolicy, obs_space, act_space, {}),\r\n \"7\": (PPOTFPolicy, obs_space, act_space, {}),\r\n \"8\": (PPOTFPolicy, obs_space, act_space, {}),\r\n \"9\": (PPOTFPolicy, obs_space, act_space, {}),\r\n }\r\n\r\n def policy_mapping_fn(agent_id: int) -> str:\r\n \"\"\" Returns the given agent's policy identifier. \"\"\"\r\n return str(agent_id)\r\n\r\n ppo_trainer = PPOTrainer(\r\n env=\"bee_world\",\r\n config={\r\n \"multiagent\": {\r\n \"policies\": policies,\r\n \"policy_mapping_fn\": policy_mapping_fn,\r\n \"policies_to_train\": [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"],\r\n },\r\n \"simple_optimizer\": True,\r\n # Disable filters, otherwise we would need to synchronize those\r\n # as well to the DQN agent.\r\n \"observation_filter\": \"NoFilter\",\r\n \"num_workers\": 2,\r\n \"num_gpus\": 1,\r\n \"train_batch_size\": 2,\r\n \"sample_batch_size\": 1,\r\n \"sgd_minibatch_size\": 2,\r\n },\r\n )\r\n```\r\n\r\n\r\n\n", "before_files": [{"content": "from collections import deque, OrderedDict\nimport numpy as np\n\nfrom ray.rllib.utils import try_import_tf\n\ntf = try_import_tf()\n\n\ndef unflatten(vector, shapes):\n i = 0\n arrays = []\n for shape in shapes:\n size = np.prod(shape, dtype=np.int)\n array = vector[i:(i + size)].reshape(shape)\n arrays.append(array)\n i += size\n assert len(vector) == i, \"Passed weight does not have the correct shape.\"\n return arrays\n\n\nclass TensorFlowVariables:\n \"\"\"A class used to set and get weights for Tensorflow networks.\n\n Attributes:\n sess (tf.Session): The tensorflow session used to run assignment.\n variables (Dict[str, tf.Variable]): Extracted variables from the loss\n or additional variables that are passed in.\n placeholders (Dict[str, tf.placeholders]): Placeholders for weights.\n assignment_nodes (Dict[str, tf.Tensor]): Nodes that assign weights.\n \"\"\"\n\n def __init__(self, output, sess=None, input_variables=None):\n \"\"\"Creates TensorFlowVariables containing extracted variables.\n\n The variables are extracted by performing a BFS search on the\n dependency graph with loss as the root node. After the tree is\n traversed and those variables are collected, we append input_variables\n to the collected variables. For each variable in the list, the\n variable has a placeholder and assignment operation created for it.\n\n Args:\n output (tf.Operation, List[tf.Operation]): The tensorflow\n operation to extract all variables from.\n sess (tf.Session): Session used for running the get and set\n methods.\n input_variables (List[tf.Variables]): Variables to include in the\n list.\n \"\"\"\n self.sess = sess\n if not isinstance(output, (list, tuple)):\n output = [output]\n queue = deque(output)\n variable_names = []\n explored_inputs = set(output)\n\n # We do a BFS on the dependency graph of the input function to find\n # the variables.\n while len(queue) != 0:\n tf_obj = queue.popleft()\n if tf_obj is None:\n continue\n # The object put into the queue is not necessarily an operation,\n # so we want the op attribute to get the operation underlying the\n # object. Only operations contain the inputs that we can explore.\n if hasattr(tf_obj, \"op\"):\n tf_obj = tf_obj.op\n for input_op in tf_obj.inputs:\n if input_op not in explored_inputs:\n queue.append(input_op)\n explored_inputs.add(input_op)\n # Tensorflow control inputs can be circular, so we keep track of\n # explored operations.\n for control in tf_obj.control_inputs:\n if control not in explored_inputs:\n queue.append(control)\n explored_inputs.add(control)\n if (\"Variable\" in tf_obj.node_def.op\n or \"VarHandle\" in tf_obj.node_def.op):\n variable_names.append(tf_obj.node_def.name)\n self.variables = OrderedDict()\n variable_list = [\n v for v in tf.global_variables()\n if v.op.node_def.name in variable_names\n ]\n if input_variables is not None:\n variable_list += input_variables\n for v in variable_list:\n self.variables[v.op.node_def.name] = v\n\n self.placeholders = {}\n self.assignment_nodes = {}\n\n # Create new placeholders to put in custom weights.\n for k, var in self.variables.items():\n self.placeholders[k] = tf.placeholder(\n var.value().dtype,\n var.get_shape().as_list(),\n name=\"Placeholder_\" + k)\n self.assignment_nodes[k] = var.assign(self.placeholders[k])\n\n def set_session(self, sess):\n \"\"\"Sets the current session used by the class.\n\n Args:\n sess (tf.Session): Session to set the attribute with.\n \"\"\"\n self.sess = sess\n\n def get_flat_size(self):\n \"\"\"Returns the total length of all of the flattened variables.\n\n Returns:\n The length of all flattened variables concatenated.\n \"\"\"\n return sum(\n np.prod(v.get_shape().as_list()) for v in self.variables.values())\n\n def _check_sess(self):\n \"\"\"Checks if the session is set, and if not throw an error message.\"\"\"\n assert self.sess is not None, (\"The session is not set. Set the \"\n \"session either by passing it into the \"\n \"TensorFlowVariables constructor or by \"\n \"calling set_session(sess).\")\n\n def get_flat(self):\n \"\"\"Gets the weights and returns them as a flat array.\n\n Returns:\n 1D Array containing the flattened weights.\n \"\"\"\n self._check_sess()\n return np.concatenate([\n v.eval(session=self.sess).flatten()\n for v in self.variables.values()\n ])\n\n def set_flat(self, new_weights):\n \"\"\"Sets the weights to new_weights, converting from a flat array.\n\n Note:\n You can only set all weights in the network using this function,\n i.e., the length of the array must match get_flat_size.\n\n Args:\n new_weights (np.ndarray): Flat array containing weights.\n \"\"\"\n self._check_sess()\n shapes = [v.get_shape().as_list() for v in self.variables.values()]\n arrays = unflatten(new_weights, shapes)\n placeholders = [\n self.placeholders[k] for k, v in self.variables.items()\n ]\n self.sess.run(\n list(self.assignment_nodes.values()),\n feed_dict=dict(zip(placeholders, arrays)))\n\n def get_weights(self):\n \"\"\"Returns a dictionary containing the weights of the network.\n\n Returns:\n Dictionary mapping variable names to their weights.\n \"\"\"\n self._check_sess()\n return {\n k: v.eval(session=self.sess)\n for k, v in self.variables.items()\n }\n\n def set_weights(self, new_weights):\n \"\"\"Sets the weights to new_weights.\n\n Note:\n Can set subsets of variables as well, by only passing in the\n variables you want to be set.\n\n Args:\n new_weights (Dict): Dictionary mapping variable names to their\n weights.\n \"\"\"\n self._check_sess()\n assign_list = [\n self.assignment_nodes[name] for name in new_weights.keys()\n if name in self.assignment_nodes\n ]\n assert assign_list, (\"No variables in the input matched those in the \"\n \"network. Possible cause: Two networks were \"\n \"defined in the same TensorFlow graph. To fix \"\n \"this, place each network definition in its own \"\n \"tf.Graph.\")\n self.sess.run(\n assign_list,\n feed_dict={\n self.placeholders[name]: value\n for (name, value) in new_weights.items()\n if name in self.placeholders\n })\n", "path": "python/ray/experimental/tf_utils.py"}], "after_files": [{"content": "from collections import deque, OrderedDict\nimport numpy as np\n\nfrom ray.rllib.utils import try_import_tf\n\ntf = try_import_tf()\n\n\ndef unflatten(vector, shapes):\n i = 0\n arrays = []\n for shape in shapes:\n size = np.prod(shape, dtype=np.int)\n array = vector[i:(i + size)].reshape(shape)\n arrays.append(array)\n i += size\n assert len(vector) == i, \"Passed weight does not have the correct shape.\"\n return arrays\n\n\nclass TensorFlowVariables:\n \"\"\"A class used to set and get weights for Tensorflow networks.\n\n Attributes:\n sess (tf.Session): The tensorflow session used to run assignment.\n variables (Dict[str, tf.Variable]): Extracted variables from the loss\n or additional variables that are passed in.\n placeholders (Dict[str, tf.placeholders]): Placeholders for weights.\n assignment_nodes (Dict[str, tf.Tensor]): Nodes that assign weights.\n \"\"\"\n\n def __init__(self, output, sess=None, input_variables=None):\n \"\"\"Creates TensorFlowVariables containing extracted variables.\n\n The variables are extracted by performing a BFS search on the\n dependency graph with loss as the root node. After the tree is\n traversed and those variables are collected, we append input_variables\n to the collected variables. For each variable in the list, the\n variable has a placeholder and assignment operation created for it.\n\n Args:\n output (tf.Operation, List[tf.Operation]): The tensorflow\n operation to extract all variables from.\n sess (tf.Session): Session used for running the get and set\n methods.\n input_variables (List[tf.Variables]): Variables to include in the\n list.\n \"\"\"\n self.sess = sess\n if not isinstance(output, (list, tuple)):\n output = [output]\n queue = deque(output)\n variable_names = []\n explored_inputs = set(output)\n\n # We do a BFS on the dependency graph of the input function to find\n # the variables.\n while len(queue) != 0:\n tf_obj = queue.popleft()\n if tf_obj is None:\n continue\n # The object put into the queue is not necessarily an operation,\n # so we want the op attribute to get the operation underlying the\n # object. Only operations contain the inputs that we can explore.\n if hasattr(tf_obj, \"op\"):\n tf_obj = tf_obj.op\n for input_op in tf_obj.inputs:\n if input_op not in explored_inputs:\n queue.append(input_op)\n explored_inputs.add(input_op)\n # Tensorflow control inputs can be circular, so we keep track of\n # explored operations.\n for control in tf_obj.control_inputs:\n if control not in explored_inputs:\n queue.append(control)\n explored_inputs.add(control)\n if (\"Variable\" in tf_obj.node_def.op\n or \"VarHandle\" in tf_obj.node_def.op):\n variable_names.append(tf_obj.node_def.name)\n self.variables = OrderedDict()\n variable_list = [\n v for v in tf.global_variables()\n if v.op.node_def.name in variable_names\n ]\n if input_variables is not None:\n variable_list += input_variables\n for v in variable_list:\n self.variables[v.op.node_def.name] = v\n\n self.placeholders = {}\n self.assignment_nodes = {}\n\n # Create new placeholders to put in custom weights.\n for k, var in self.variables.items():\n self.placeholders[k] = tf.placeholder(\n var.value().dtype,\n var.get_shape().as_list(),\n name=\"Placeholder_\" + k)\n self.assignment_nodes[k] = var.assign(self.placeholders[k])\n\n def set_session(self, sess):\n \"\"\"Sets the current session used by the class.\n\n Args:\n sess (tf.Session): Session to set the attribute with.\n \"\"\"\n self.sess = sess\n\n def get_flat_size(self):\n \"\"\"Returns the total length of all of the flattened variables.\n\n Returns:\n The length of all flattened variables concatenated.\n \"\"\"\n return sum(\n np.prod(v.get_shape().as_list()) for v in self.variables.values())\n\n def _check_sess(self):\n \"\"\"Checks if the session is set, and if not throw an error message.\"\"\"\n assert self.sess is not None, (\"The session is not set. Set the \"\n \"session either by passing it into the \"\n \"TensorFlowVariables constructor or by \"\n \"calling set_session(sess).\")\n\n def get_flat(self):\n \"\"\"Gets the weights and returns them as a flat array.\n\n Returns:\n 1D Array containing the flattened weights.\n \"\"\"\n self._check_sess()\n return np.concatenate([\n v.eval(session=self.sess).flatten()\n for v in self.variables.values()\n ])\n\n def set_flat(self, new_weights):\n \"\"\"Sets the weights to new_weights, converting from a flat array.\n\n Note:\n You can only set all weights in the network using this function,\n i.e., the length of the array must match get_flat_size.\n\n Args:\n new_weights (np.ndarray): Flat array containing weights.\n \"\"\"\n self._check_sess()\n shapes = [v.get_shape().as_list() for v in self.variables.values()]\n arrays = unflatten(new_weights, shapes)\n placeholders = [\n self.placeholders[k] for k, v in self.variables.items()\n ]\n self.sess.run(\n list(self.assignment_nodes.values()),\n feed_dict=dict(zip(placeholders, arrays)))\n\n def get_weights(self):\n \"\"\"Returns a dictionary containing the weights of the network.\n\n Returns:\n Dictionary mapping variable names to their weights.\n \"\"\"\n self._check_sess()\n return self.sess.run(self.variables)\n\n def set_weights(self, new_weights):\n \"\"\"Sets the weights to new_weights.\n\n Note:\n Can set subsets of variables as well, by only passing in the\n variables you want to be set.\n\n Args:\n new_weights (Dict): Dictionary mapping variable names to their\n weights.\n \"\"\"\n self._check_sess()\n assign_list = [\n self.assignment_nodes[name] for name in new_weights.keys()\n if name in self.assignment_nodes\n ]\n assert assign_list, (\"No variables in the input matched those in the \"\n \"network. Possible cause: Two networks were \"\n \"defined in the same TensorFlow graph. To fix \"\n \"this, place each network definition in its own \"\n \"tf.Graph.\")\n self.sess.run(\n assign_list,\n feed_dict={\n self.placeholders[name]: value\n for (name, value) in new_weights.items()\n if name in self.placeholders\n })\n", "path": "python/ray/experimental/tf_utils.py"}]} |
gh_patches_debug_1519 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1639 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Some translation strings missing/not applied
**Describe the bug**
Some translations are not being applied.
**To Reproduce**
change Bookwyrm language to other than English (tested with Lithuanian)
https://ziurkes.group.lt/user/athinkingmeat/books/read
https://ziurkes.group.lt/user/athinkingmeat/books/reading
https://ziurkes.group.lt/user/athinkingmeat/books/to-read
**Expected behavior**
All these links should have "read", "currently reading" and "to read" strings translated, but they are shown in English
**Screenshots**



**Instance**
https://ziurkes.group.lt/
**Additional context**
Probably is a problem with other languages as well
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/utils/isni.py`
Content:
```
1 """ISNI author checking utilities"""
2 import xml.etree.ElementTree as ET
3 import requests
4
5 from bookwyrm import activitypub, models
6
7
8 def request_isni_data(search_index, search_term, max_records=5):
9 """Request data from the ISNI API"""
10
11 search_string = f'{search_index}="{search_term}"'
12 query_params = {
13 "query": search_string,
14 "version": "1.1",
15 "operation": "searchRetrieve",
16 "recordSchema": "isni-b",
17 "maximumRecords": max_records,
18 "startRecord": "1",
19 "recordPacking": "xml",
20 "sortKeys": "RLV,pica,0,,",
21 }
22 result = requests.get("http://isni.oclc.org/sru/", params=query_params, timeout=10)
23 # the OCLC ISNI server asserts the payload is encoded
24 # in latin1, but we know better
25 result.encoding = "utf-8"
26 return result.text
27
28
29 def make_name_string(element):
30 """create a string of form 'personal_name surname'"""
31
32 # NOTE: this will often be incorrect, many naming systems
33 # list "surname" before personal name
34 forename = element.find(".//forename")
35 surname = element.find(".//surname")
36 if forename is not None:
37 return "".join([forename.text, " ", surname.text])
38 return surname.text
39
40
41 def get_other_identifier(element, code):
42 """Get other identifiers associated with an author from their ISNI record"""
43
44 identifiers = element.findall(".//otherIdentifierOfIdentity")
45 for section_head in identifiers:
46 if (
47 section_head.find(".//type") is not None
48 and section_head.find(".//type").text == code
49 and section_head.find(".//identifier") is not None
50 ):
51 return section_head.find(".//identifier").text
52
53 # if we can't find it in otherIdentifierOfIdentity,
54 # try sources
55 for source in element.findall(".//sources"):
56 code_of_source = source.find(".//codeOfSource")
57 if code_of_source is not None and code_of_source.text.lower() == code.lower():
58 return source.find(".//sourceIdentifier").text
59
60 return ""
61
62
63 def get_external_information_uri(element, match_string):
64 """Get URLs associated with an author from their ISNI record"""
65
66 sources = element.findall(".//externalInformation")
67 for source in sources:
68 information = source.find(".//information")
69 uri = source.find(".//URI")
70 if (
71 uri is not None
72 and information is not None
73 and information.text.lower() == match_string.lower()
74 ):
75 return uri.text
76 return ""
77
78
79 def find_authors_by_name(name_string, description=False):
80 """Query the ISNI database for possible author matches by name"""
81
82 payload = request_isni_data("pica.na", name_string)
83 # parse xml
84 root = ET.fromstring(payload)
85 # build list of possible authors
86 possible_authors = []
87 for element in root.iter("responseRecord"):
88 personal_name = element.find(".//forename/..")
89 if not personal_name:
90 continue
91
92 author = get_author_from_isni(element.find(".//isniUnformatted").text)
93
94 if bool(description):
95
96 titles = []
97 # prefer title records from LoC+ coop, Australia, Ireland, or Singapore
98 # in that order
99 for source in ["LCNACO", "NLA", "N6I", "NLB"]:
100 for parent in element.findall(f'.//titleOfWork/[@source="{source}"]'):
101 titles.append(parent.find(".//title"))
102 for parent in element.findall(f'.//titleOfWork[@subsource="{source}"]'):
103 titles.append(parent.find(".//title"))
104 # otherwise just grab the first title listing
105 titles.append(element.find(".//title"))
106
107 if titles is not None:
108 # some of the "titles" in ISNI are a little ...iffy
109 # '@' is used by ISNI/OCLC to index the starting point ignoring stop words
110 # (e.g. "The @Government of no one")
111 title_elements = [
112 e for e in titles if not e.text.replace("@", "").isnumeric()
113 ]
114 if len(title_elements):
115 author.bio = title_elements[0].text.replace("@", "")
116 else:
117 author.bio = None
118
119 possible_authors.append(author)
120
121 return possible_authors
122
123
124 def get_author_from_isni(isni):
125 """Find data to populate a new author record from their ISNI"""
126
127 payload = request_isni_data("pica.isn", isni)
128 # parse xml
129 root = ET.fromstring(payload)
130 # there should only be a single responseRecord
131 # but let's use the first one just in case
132 element = root.find(".//responseRecord")
133 name = make_name_string(element.find(".//forename/.."))
134 viaf = get_other_identifier(element, "viaf")
135 # use a set to dedupe aliases in ISNI
136 aliases = set()
137 aliases_element = element.findall(".//personalNameVariant")
138 for entry in aliases_element:
139 aliases.add(make_name_string(entry))
140 # aliases needs to be list not set
141 aliases = list(aliases)
142 bio = element.find(".//nameTitle")
143 bio = bio.text if bio is not None else ""
144 wikipedia = get_external_information_uri(element, "Wikipedia")
145
146 author = activitypub.Author(
147 id=element.find(".//isniURI").text,
148 name=name,
149 isni=isni,
150 viafId=viaf,
151 aliases=aliases,
152 bio=bio,
153 wikipediaLink=wikipedia,
154 )
155
156 return author
157
158
159 def build_author_from_isni(match_value):
160 """Build basic author class object from ISNI URL"""
161
162 # if it is an isni value get the data
163 if match_value.startswith("https://isni.org/isni/"):
164 isni = match_value.replace("https://isni.org/isni/", "")
165 return {"author": get_author_from_isni(isni)}
166 # otherwise it's a name string
167 return {}
168
169
170 def augment_author_metadata(author, isni):
171 """Update any missing author fields from ISNI data"""
172
173 isni_author = get_author_from_isni(isni)
174 isni_author.to_model(model=models.Author, instance=author, overwrite=False)
175
176 # we DO want to overwrite aliases because we're adding them to the
177 # existing aliases and ISNI will usually have more.
178 # We need to dedupe because ISNI records often have lots of dupe aliases
179 aliases = set(isni_author.aliases)
180 for alias in author.aliases:
181 aliases.add(alias)
182 author.aliases = list(aliases)
183 author.save()
184
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bookwyrm/utils/isni.py b/bookwyrm/utils/isni.py
--- a/bookwyrm/utils/isni.py
+++ b/bookwyrm/utils/isni.py
@@ -19,7 +19,7 @@
"recordPacking": "xml",
"sortKeys": "RLV,pica,0,,",
}
- result = requests.get("http://isni.oclc.org/sru/", params=query_params, timeout=10)
+ result = requests.get("http://isni.oclc.org/sru/", params=query_params, timeout=15)
# the OCLC ISNI server asserts the payload is encoded
# in latin1, but we know better
result.encoding = "utf-8"
| {"golden_diff": "diff --git a/bookwyrm/utils/isni.py b/bookwyrm/utils/isni.py\n--- a/bookwyrm/utils/isni.py\n+++ b/bookwyrm/utils/isni.py\n@@ -19,7 +19,7 @@\n \"recordPacking\": \"xml\",\n \"sortKeys\": \"RLV,pica,0,,\",\n }\n- result = requests.get(\"http://isni.oclc.org/sru/\", params=query_params, timeout=10)\n+ result = requests.get(\"http://isni.oclc.org/sru/\", params=query_params, timeout=15)\n # the OCLC ISNI server asserts the payload is encoded\n # in latin1, but we know better\n result.encoding = \"utf-8\"\n", "issue": "Some translation strings missing/not applied\n**Describe the bug**\r\nSome translations are not being applied.\r\n\r\n**To Reproduce**\r\n\r\nchange Bookwyrm language to other than English (tested with Lithuanian)\r\n\r\nhttps://ziurkes.group.lt/user/athinkingmeat/books/read\r\nhttps://ziurkes.group.lt/user/athinkingmeat/books/reading\r\nhttps://ziurkes.group.lt/user/athinkingmeat/books/to-read\r\n\r\n**Expected behavior**\r\nAll these links should have \"read\", \"currently reading\" and \"to read\" strings translated, but they are shown in English\r\n\r\n**Screenshots**\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n**Instance**\r\n\r\nhttps://ziurkes.group.lt/\r\n\r\n**Additional context**\r\nProbably is a problem with other languages as well\r\n\n", "before_files": [{"content": "\"\"\"ISNI author checking utilities\"\"\"\nimport xml.etree.ElementTree as ET\nimport requests\n\nfrom bookwyrm import activitypub, models\n\n\ndef request_isni_data(search_index, search_term, max_records=5):\n \"\"\"Request data from the ISNI API\"\"\"\n\n search_string = f'{search_index}=\"{search_term}\"'\n query_params = {\n \"query\": search_string,\n \"version\": \"1.1\",\n \"operation\": \"searchRetrieve\",\n \"recordSchema\": \"isni-b\",\n \"maximumRecords\": max_records,\n \"startRecord\": \"1\",\n \"recordPacking\": \"xml\",\n \"sortKeys\": \"RLV,pica,0,,\",\n }\n result = requests.get(\"http://isni.oclc.org/sru/\", params=query_params, timeout=10)\n # the OCLC ISNI server asserts the payload is encoded\n # in latin1, but we know better\n result.encoding = \"utf-8\"\n return result.text\n\n\ndef make_name_string(element):\n \"\"\"create a string of form 'personal_name surname'\"\"\"\n\n # NOTE: this will often be incorrect, many naming systems\n # list \"surname\" before personal name\n forename = element.find(\".//forename\")\n surname = element.find(\".//surname\")\n if forename is not None:\n return \"\".join([forename.text, \" \", surname.text])\n return surname.text\n\n\ndef get_other_identifier(element, code):\n \"\"\"Get other identifiers associated with an author from their ISNI record\"\"\"\n\n identifiers = element.findall(\".//otherIdentifierOfIdentity\")\n for section_head in identifiers:\n if (\n section_head.find(\".//type\") is not None\n and section_head.find(\".//type\").text == code\n and section_head.find(\".//identifier\") is not None\n ):\n return section_head.find(\".//identifier\").text\n\n # if we can't find it in otherIdentifierOfIdentity,\n # try sources\n for source in element.findall(\".//sources\"):\n code_of_source = source.find(\".//codeOfSource\")\n if code_of_source is not None and code_of_source.text.lower() == code.lower():\n return source.find(\".//sourceIdentifier\").text\n\n return \"\"\n\n\ndef get_external_information_uri(element, match_string):\n \"\"\"Get URLs associated with an author from their ISNI record\"\"\"\n\n sources = element.findall(\".//externalInformation\")\n for source in sources:\n information = source.find(\".//information\")\n uri = source.find(\".//URI\")\n if (\n uri is not None\n and information is not None\n and information.text.lower() == match_string.lower()\n ):\n return uri.text\n return \"\"\n\n\ndef find_authors_by_name(name_string, description=False):\n \"\"\"Query the ISNI database for possible author matches by name\"\"\"\n\n payload = request_isni_data(\"pica.na\", name_string)\n # parse xml\n root = ET.fromstring(payload)\n # build list of possible authors\n possible_authors = []\n for element in root.iter(\"responseRecord\"):\n personal_name = element.find(\".//forename/..\")\n if not personal_name:\n continue\n\n author = get_author_from_isni(element.find(\".//isniUnformatted\").text)\n\n if bool(description):\n\n titles = []\n # prefer title records from LoC+ coop, Australia, Ireland, or Singapore\n # in that order\n for source in [\"LCNACO\", \"NLA\", \"N6I\", \"NLB\"]:\n for parent in element.findall(f'.//titleOfWork/[@source=\"{source}\"]'):\n titles.append(parent.find(\".//title\"))\n for parent in element.findall(f'.//titleOfWork[@subsource=\"{source}\"]'):\n titles.append(parent.find(\".//title\"))\n # otherwise just grab the first title listing\n titles.append(element.find(\".//title\"))\n\n if titles is not None:\n # some of the \"titles\" in ISNI are a little ...iffy\n # '@' is used by ISNI/OCLC to index the starting point ignoring stop words\n # (e.g. \"The @Government of no one\")\n title_elements = [\n e for e in titles if not e.text.replace(\"@\", \"\").isnumeric()\n ]\n if len(title_elements):\n author.bio = title_elements[0].text.replace(\"@\", \"\")\n else:\n author.bio = None\n\n possible_authors.append(author)\n\n return possible_authors\n\n\ndef get_author_from_isni(isni):\n \"\"\"Find data to populate a new author record from their ISNI\"\"\"\n\n payload = request_isni_data(\"pica.isn\", isni)\n # parse xml\n root = ET.fromstring(payload)\n # there should only be a single responseRecord\n # but let's use the first one just in case\n element = root.find(\".//responseRecord\")\n name = make_name_string(element.find(\".//forename/..\"))\n viaf = get_other_identifier(element, \"viaf\")\n # use a set to dedupe aliases in ISNI\n aliases = set()\n aliases_element = element.findall(\".//personalNameVariant\")\n for entry in aliases_element:\n aliases.add(make_name_string(entry))\n # aliases needs to be list not set\n aliases = list(aliases)\n bio = element.find(\".//nameTitle\")\n bio = bio.text if bio is not None else \"\"\n wikipedia = get_external_information_uri(element, \"Wikipedia\")\n\n author = activitypub.Author(\n id=element.find(\".//isniURI\").text,\n name=name,\n isni=isni,\n viafId=viaf,\n aliases=aliases,\n bio=bio,\n wikipediaLink=wikipedia,\n )\n\n return author\n\n\ndef build_author_from_isni(match_value):\n \"\"\"Build basic author class object from ISNI URL\"\"\"\n\n # if it is an isni value get the data\n if match_value.startswith(\"https://isni.org/isni/\"):\n isni = match_value.replace(\"https://isni.org/isni/\", \"\")\n return {\"author\": get_author_from_isni(isni)}\n # otherwise it's a name string\n return {}\n\n\ndef augment_author_metadata(author, isni):\n \"\"\"Update any missing author fields from ISNI data\"\"\"\n\n isni_author = get_author_from_isni(isni)\n isni_author.to_model(model=models.Author, instance=author, overwrite=False)\n\n # we DO want to overwrite aliases because we're adding them to the\n # existing aliases and ISNI will usually have more.\n # We need to dedupe because ISNI records often have lots of dupe aliases\n aliases = set(isni_author.aliases)\n for alias in author.aliases:\n aliases.add(alias)\n author.aliases = list(aliases)\n author.save()\n", "path": "bookwyrm/utils/isni.py"}], "after_files": [{"content": "\"\"\"ISNI author checking utilities\"\"\"\nimport xml.etree.ElementTree as ET\nimport requests\n\nfrom bookwyrm import activitypub, models\n\n\ndef request_isni_data(search_index, search_term, max_records=5):\n \"\"\"Request data from the ISNI API\"\"\"\n\n search_string = f'{search_index}=\"{search_term}\"'\n query_params = {\n \"query\": search_string,\n \"version\": \"1.1\",\n \"operation\": \"searchRetrieve\",\n \"recordSchema\": \"isni-b\",\n \"maximumRecords\": max_records,\n \"startRecord\": \"1\",\n \"recordPacking\": \"xml\",\n \"sortKeys\": \"RLV,pica,0,,\",\n }\n result = requests.get(\"http://isni.oclc.org/sru/\", params=query_params, timeout=15)\n # the OCLC ISNI server asserts the payload is encoded\n # in latin1, but we know better\n result.encoding = \"utf-8\"\n return result.text\n\n\ndef make_name_string(element):\n \"\"\"create a string of form 'personal_name surname'\"\"\"\n\n # NOTE: this will often be incorrect, many naming systems\n # list \"surname\" before personal name\n forename = element.find(\".//forename\")\n surname = element.find(\".//surname\")\n if forename is not None:\n return \"\".join([forename.text, \" \", surname.text])\n return surname.text\n\n\ndef get_other_identifier(element, code):\n \"\"\"Get other identifiers associated with an author from their ISNI record\"\"\"\n\n identifiers = element.findall(\".//otherIdentifierOfIdentity\")\n for section_head in identifiers:\n if (\n section_head.find(\".//type\") is not None\n and section_head.find(\".//type\").text == code\n and section_head.find(\".//identifier\") is not None\n ):\n return section_head.find(\".//identifier\").text\n\n # if we can't find it in otherIdentifierOfIdentity,\n # try sources\n for source in element.findall(\".//sources\"):\n code_of_source = source.find(\".//codeOfSource\")\n if code_of_source is not None and code_of_source.text.lower() == code.lower():\n return source.find(\".//sourceIdentifier\").text\n\n return \"\"\n\n\ndef get_external_information_uri(element, match_string):\n \"\"\"Get URLs associated with an author from their ISNI record\"\"\"\n\n sources = element.findall(\".//externalInformation\")\n for source in sources:\n information = source.find(\".//information\")\n uri = source.find(\".//URI\")\n if (\n uri is not None\n and information is not None\n and information.text.lower() == match_string.lower()\n ):\n return uri.text\n return \"\"\n\n\ndef find_authors_by_name(name_string, description=False):\n \"\"\"Query the ISNI database for possible author matches by name\"\"\"\n\n payload = request_isni_data(\"pica.na\", name_string)\n # parse xml\n root = ET.fromstring(payload)\n # build list of possible authors\n possible_authors = []\n for element in root.iter(\"responseRecord\"):\n personal_name = element.find(\".//forename/..\")\n if not personal_name:\n continue\n\n author = get_author_from_isni(element.find(\".//isniUnformatted\").text)\n\n if bool(description):\n\n titles = []\n # prefer title records from LoC+ coop, Australia, Ireland, or Singapore\n # in that order\n for source in [\"LCNACO\", \"NLA\", \"N6I\", \"NLB\"]:\n for parent in element.findall(f'.//titleOfWork/[@source=\"{source}\"]'):\n titles.append(parent.find(\".//title\"))\n for parent in element.findall(f'.//titleOfWork[@subsource=\"{source}\"]'):\n titles.append(parent.find(\".//title\"))\n # otherwise just grab the first title listing\n titles.append(element.find(\".//title\"))\n\n if titles is not None:\n # some of the \"titles\" in ISNI are a little ...iffy\n # '@' is used by ISNI/OCLC to index the starting point ignoring stop words\n # (e.g. \"The @Government of no one\")\n title_elements = [\n e for e in titles if not e.text.replace(\"@\", \"\").isnumeric()\n ]\n if len(title_elements):\n author.bio = title_elements[0].text.replace(\"@\", \"\")\n else:\n author.bio = None\n\n possible_authors.append(author)\n\n return possible_authors\n\n\ndef get_author_from_isni(isni):\n \"\"\"Find data to populate a new author record from their ISNI\"\"\"\n\n payload = request_isni_data(\"pica.isn\", isni)\n # parse xml\n root = ET.fromstring(payload)\n # there should only be a single responseRecord\n # but let's use the first one just in case\n element = root.find(\".//responseRecord\")\n name = make_name_string(element.find(\".//forename/..\"))\n viaf = get_other_identifier(element, \"viaf\")\n # use a set to dedupe aliases in ISNI\n aliases = set()\n aliases_element = element.findall(\".//personalNameVariant\")\n for entry in aliases_element:\n aliases.add(make_name_string(entry))\n # aliases needs to be list not set\n aliases = list(aliases)\n bio = element.find(\".//nameTitle\")\n bio = bio.text if bio is not None else \"\"\n wikipedia = get_external_information_uri(element, \"Wikipedia\")\n\n author = activitypub.Author(\n id=element.find(\".//isniURI\").text,\n name=name,\n isni=isni,\n viafId=viaf,\n aliases=aliases,\n bio=bio,\n wikipediaLink=wikipedia,\n )\n\n return author\n\n\ndef build_author_from_isni(match_value):\n \"\"\"Build basic author class object from ISNI URL\"\"\"\n\n # if it is an isni value get the data\n if match_value.startswith(\"https://isni.org/isni/\"):\n isni = match_value.replace(\"https://isni.org/isni/\", \"\")\n return {\"author\": get_author_from_isni(isni)}\n # otherwise it's a name string\n return {}\n\n\ndef augment_author_metadata(author, isni):\n \"\"\"Update any missing author fields from ISNI data\"\"\"\n\n isni_author = get_author_from_isni(isni)\n isni_author.to_model(model=models.Author, instance=author, overwrite=False)\n\n # we DO want to overwrite aliases because we're adding them to the\n # existing aliases and ISNI will usually have more.\n # We need to dedupe because ISNI records often have lots of dupe aliases\n aliases = set(isni_author.aliases)\n for alias in author.aliases:\n aliases.add(alias)\n author.aliases = list(aliases)\n author.save()\n", "path": "bookwyrm/utils/isni.py"}]} |
gh_patches_debug_1520 | rasdani/github-patches | git_diff | edgedb__edgedb-6313 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
InternalServerError: missing FROM-clause entry for table "ins_contents~3" when access policies are applied
<!-- Please search existing issues to avoid creating duplicates. -->
The `InternalServerError: missing FROM-clause entry for table "ins_contents~3"` error sometimes occurs in deeply nested queries *only when access policies are applied*
<!--
For the EdgeDB Version: run `edgedb query 'select sys::get_version_as_str()'` from your project directory (or run `select sys::get_version_as_str();` in the EdgeDB interactive shell).
For the EdgeDB CLI Version: Run `edgedb --version` from anywhere
-->
- EdgeDB Version: `3.4+301ba34`
- EdgeDB CLI Version: `3.5.0+907ff37`
- OS Version: `Ubuntu 23.04`
Please note that the query and schema has been modified to simplify the queries and schema
## 1. Setup
Set `global current_accounts_array` to contain `bffeb170-6d5d-11ee-bcd6-ad19361485ad`
```eql
insert Account {
id := <uuid>"bffeb170-6d5d-11ee-bcd6-ad19361485ad",
address := "0x9D2B268D6e51074c466b823Fd3A3b794cA750CB0"
}
```
## 2. Observe problem
### 2.1 Enable access policies and run problematic query
Expect `InternalServerError: missing FROM-clause entry for table "ins_contents~3"`
Please note these queries are generated by the js client, but occur in the ui as well
```eql
INSERT default::Policy {
account := (
WITH
__scope_0_defaultAccount := DETACHED default::Account
SELECT __scope_0_defaultAccount {
id
}
FILTER (__scope_0_defaultAccount.address = "0x9D2B268D6e51074c466b823Fd3A3b794cA750CB0")
),
key := <default::uint16>1,
stateHistory := (
INSERT default::PolicyState {
proposal := (
INSERT default::TransactionProposal {
hash := "0x8ab322da38c5c044c903b2de5bba9d8f93fee416a7fd5047bd516ae1a9aa5eee",
account := (
WITH
__scope_1_defaultAccount := DETACHED default::Account
SELECT __scope_1_defaultAccount {
id
}
FILTER (__scope_1_defaultAccount.address = "0x9D2B268D6e51074c466b823Fd3A3b794cA750CB0")
)
}
),
}
)
}
```
### 2.3 Disable access policies and re-run problematic query
Expect query to succeed
## 3. Workaround using WITH
Moving the `INSERT default::TransactionProposal` into a with block fixes the issue.
### 3.1 Enable access policies and run query
Expect query to succeed
```eql
WITH nestedProposal := (
INSERT default::TransactionProposal {
hash := "0x8ab322da38c5c044c903b2de5bba9d8f93fee416a7fd5047bd516ae1a9aa5ee5",
account := (
WITH
__scope_1_defaultAccount := DETACHED default::Account
SELECT __scope_1_defaultAccount {
id
}
FILTER (__scope_1_defaultAccount.address = "0x9D2B268D6e51074c466b823Fd3A3b794cA750CB0")
),
nonce := <default::uint64>5n
}
)
INSERT default::Policy {
account := (
WITH
__scope_0_defaultAccount := DETACHED default::Account
SELECT __scope_0_defaultAccount {
id
}
FILTER (__scope_0_defaultAccount.address = "0x9D2B268D6e51074c466b823Fd3A3b794cA750CB0")
),
key := <default::uint16>5,
stateHistory := (
INSERT default::PolicyState {
proposal := nestedProposal,
}
)
}
```
<!-- If the issue is about a query error, please also provide your schema -->
---
## Schema
```
module default {
global current_accounts_array: array<uuid>;
global current_accounts_set := array_unpack(global current_accounts_array);
global current_accounts := <Account>(global current_accounts_set);
type Account {
required address: str { constraint exclusive; }
access policy members_select_insert_update
allow select, insert, update
using (.id in global current_accounts_set);
}
type Policy {
required account: Account;
required key: int32;
required multi stateHistory: PolicyState {
constraint exclusive;
on source delete delete target;
on target delete allow;
}
constraint exclusive on ((.account, .key));
access policy members_select_insert_update
allow select, insert, update
using (.account in global current_accounts);
access policy can_be_deleted_when_inactive
allow delete
using (not .isActive);
}
type PolicyState {
link policy := .<stateHistory[is Policy];
proposal: TransactionProposal {
on source delete delete target;
on target delete delete source;
}
}
type TransactionProposal {
required hash: str { constraint exclusive; }
required account: Account;
required nonce: int32;
constraint exclusive on ((.account, .nonce));
index on (.nonce);
access policy members_only
allow all
using (.account in global current_accounts);
}
}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `edb/pgsql/compiler/context.py`
Content:
```
1 #
2 # This source file is part of the EdgeDB open source project.
3 #
4 # Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17 #
18
19
20 """IR compiler context."""
21
22 from __future__ import annotations
23 from typing import *
24
25 import collections
26 import contextlib
27 import dataclasses
28 import enum
29 import uuid
30
31 import immutables as immu
32
33 from edb.common import compiler
34
35 from edb.pgsql import ast as pgast
36 from edb.pgsql import params as pgparams
37
38 from . import aliases
39
40 if TYPE_CHECKING:
41 from edb.ir import ast as irast
42
43
44 class ContextSwitchMode(enum.Enum):
45 TRANSPARENT = enum.auto()
46 SUBREL = enum.auto()
47 NEWREL = enum.auto()
48 SUBSTMT = enum.auto()
49 NEWSCOPE = enum.auto()
50
51
52 class ShapeFormat(enum.Enum):
53 SERIALIZED = enum.auto()
54 FLAT = enum.auto()
55
56
57 class OutputFormat(enum.Enum):
58 #: Result data output in PostgreSQL format.
59 NATIVE = enum.auto()
60 #: Result data output as a single JSON string.
61 JSON = enum.auto()
62 #: Result data output as a single PostgreSQL JSONB type value.
63 JSONB = enum.auto()
64 #: Result data output as a JSON string for each element in returned set.
65 JSON_ELEMENTS = enum.auto()
66 #: None mode: query result not returned, cardinality of result set
67 #: is returned instead.
68 NONE = enum.auto()
69 #: Like NATIVE, but objects without an explicit shape are serialized
70 #: as UUIDs.
71 NATIVE_INTERNAL = enum.auto()
72
73
74 NO_STMT = pgast.SelectStmt()
75
76
77 OverlayEntry = tuple[
78 str,
79 Union[pgast.BaseRelation, pgast.CommonTableExpr],
80 'irast.PathId',
81 ]
82
83
84 @dataclasses.dataclass(kw_only=True)
85 class RelOverlays:
86 """Container for relation overlays.
87
88 These track "overlays" that can be registered for different types,
89 in the context of DML.
90
91 Consider the query:
92 with X := (
93 insert Person {
94 name := "Sully",
95 notes := assert_distinct({
96 (insert Note {name := "1"}),
97 (select Note filter .name = "2"),
98 }),
99 }),
100 select X { name, notes: {name} };
101
102 When we go to select X, we find the source of that set without any
103 trouble (it's the result of the actual insert statement, more or
104 less; in any case, it's in a CTE that we then include).
105
106 Handling the notes are trickier, though:
107 * The links aren't in the link table yet, but only in a CTE.
108 (In similar update cases, with things like +=, they might be mixed
109 between both.)
110 * Some of the actual Note objects aren't in the table yet, just an insert
111 CTE. But some *are*, so we need to union them.
112
113 We solve these problems using overlays:
114 * Whenever we do DML (or reference WITH-bound DML),
115 we register overlays describing the changes done
116 to *all of the enclosing DML*. So here, the Note insert's overlays
117 get registered both for the Note insert and for the Person insert.
118 * When we try to compile a root set or pointer, we see if it is connected
119 to a DML statement, and if so we apply the overlays.
120
121 The overlay itself is simply a sequence of operations on relations
122 and CTEs that mix in the new data. In the obvious insert cases,
123 these consist of unioning the new data in.
124
125 This system works decently well but is also a little broken: I
126 think that both the "all of the enclosing DML" and the "see if it
127 is connected to a DML statement" have dangers; see Issue #3030.
128
129 In relctx, see range_for_material_objtype, range_for_ptrref, and
130 range_from_queryset (which those two call) for details on how
131 overlays are applied.
132 Overlays are added to with relctx.add_type_rel_overlay
133 and relctx.add_ptr_rel_overlay.
134
135
136 ===== NOTE ON MUTABILITY:
137 In typical use, the overlays are mutable: nested DML adds overlays
138 that are then consumed by code in enclosing contexts.
139
140 In some places, however, we need to temporarily customize the
141 overlay environment (during policy and trigger compilation, for
142 example).
143
144 The original version of overlays were implemented as a dict of
145 dicts of lists. Doing temporary customizations required doing
146 at least some copying. Doing a full deep copy always felt excessive
147 but doing anything short of that left me constantly terrified.
148
149 So instead we represent the overlays as a mutable object that
150 contains immutable maps. When we add overlays, we update the maps
151 and then reassign their values.
152
153 When we want to do a temporary adjustment, we can cheaply make a
154 fresh RelOverlays object and then modify that without touching the
155 original.
156 """
157
158 #: Relations used to "overlay" the main table for
159 #: the type. Mostly used with DML statements.
160 type: immu.Map[
161 Optional[irast.MutatingLikeStmt],
162 immu.Map[
163 uuid.UUID,
164 tuple[OverlayEntry, ...],
165 ],
166 ] = immu.Map()
167
168 #: Relations used to "overlay" the main table for
169 #: the pointer. Mostly used with DML statements.
170 ptr: immu.Map[
171 Optional[irast.MutatingLikeStmt],
172 immu.Map[
173 Tuple[uuid.UUID, str],
174 Tuple[
175 Tuple[
176 str,
177 Union[pgast.BaseRelation, pgast.CommonTableExpr],
178 irast.PathId,
179 ], ...
180 ],
181 ],
182 ] = immu.Map()
183
184 def copy(self) -> RelOverlays:
185 return RelOverlays(type=self.type, ptr=self.ptr)
186
187
188 class CompilerContextLevel(compiler.ContextLevel):
189 #: static compilation environment
190 env: Environment
191
192 #: mapping of named args to position
193 argmap: Dict[str, pgast.Param]
194
195 #: whether compiling in singleton expression mode
196 singleton_mode: bool
197
198 #: whether compiling a trigger
199 trigger_mode: bool
200
201 #: the top-level SQL statement
202 toplevel_stmt: pgast.Query
203
204 #: Record of DML CTEs generated for the corresponding IR DML.
205 #: CTEs generated for DML-containing FOR statements are keyed
206 #: by their iterator set.
207 dml_stmts: Dict[Union[irast.MutatingStmt, irast.Set],
208 pgast.CommonTableExpr]
209
210 #: SQL statement corresponding to the IR statement
211 #: currently being compiled.
212 stmt: pgast.SelectStmt
213
214 #: Current SQL subquery
215 rel: pgast.SelectStmt
216
217 #: SQL query hierarchy
218 rel_hierarchy: Dict[pgast.Query, pgast.Query]
219
220 #: CTEs representing decoded parameters
221 param_ctes: Dict[str, pgast.CommonTableExpr]
222
223 #: CTEs representing schema types, when rewritten based on access policy
224 type_ctes: Dict[FullRewriteKey, pgast.CommonTableExpr]
225
226 #: A set of type CTEs currently being generated
227 pending_type_ctes: Set[RewriteKey]
228
229 #: The logical parent of the current query in the
230 #: query hierarchy
231 parent_rel: Optional[pgast.Query]
232
233 #: Query to become current in the next SUBSTMT switch.
234 pending_query: Optional[pgast.SelectStmt]
235
236 #: Sets currently being materialized
237 materializing: FrozenSet[irast.Stmt]
238
239 #: Whether the expression currently being processed is
240 #: directly exposed to the output of the statement.
241 expr_exposed: Optional[bool]
242
243 #: A hack that indicates a tuple element that should be treated as
244 #: exposed. This enables us to treat 'bar' in (foo, bar).1 as exposed,
245 #: which eta-expansion and some casts rely on.
246 expr_exposed_tuple_cheat: Optional[irast.TupleElement]
247
248 #: Expression to use to force SQL expression volatility in this context
249 #: (Delayed with a lambda to avoid inserting it when not used.)
250 volatility_ref: Tuple[
251 Callable[[pgast.SelectStmt, CompilerContextLevel],
252 Optional[pgast.BaseExpr]], ...]
253
254 # Current path_id we are INSERTing, so that we can avoid creating
255 # a bogus volatility ref to it...
256 current_insert_path_id: Optional[irast.PathId]
257
258 #: Paths, for which semi-join is banned in this context.
259 disable_semi_join: FrozenSet[irast.PathId]
260
261 #: Paths, which need to be explicitly wrapped into SQL
262 #: optionality scaffolding.
263 force_optional: FrozenSet[irast.PathId]
264
265 #: Paths that can be ignored when they appear as the source of a
266 # computable. This is key to optimizing away free object sources in
267 # group by aggregates.
268 skippable_sources: FrozenSet[irast.PathId]
269
270 #: Specifies that references to a specific Set must be narrowed
271 #: by only selecting instances of type specified by the mapping value.
272 intersection_narrowing: Dict[irast.Set, irast.Set]
273
274 #: Which SQL query holds the SQL scope for the given PathId
275 path_scope: ChainMap[irast.PathId, Optional[pgast.SelectStmt]]
276
277 #: Relevant IR scope for this context.
278 scope_tree: irast.ScopeTreeNode
279
280 #: A stack of dml statements currently being compiled. Used for
281 #: figuring out what to record in type_rel_overlays.
282 dml_stmt_stack: List[irast.MutatingLikeStmt]
283
284 #: Relations used to "overlay" the main table for
285 #: the type. Mostly used with DML statements.
286 rel_overlays: RelOverlays
287
288 #: Mapping from path ids to "external" rels given by a particular relation
289 external_rels: Mapping[
290 irast.PathId,
291 Tuple[pgast.BaseRelation | pgast.CommonTableExpr, Tuple[str, ...]]
292 ]
293
294 #: The CTE and some metadata of any enclosing iterator-like
295 #: construct (which includes iterators, insert/update, and INSERT
296 #: ELSE select clauses) currently being compiled.
297 enclosing_cte_iterator: Optional[pgast.IteratorCTE]
298
299 #: Sets to force shape compilation on, because the values are
300 #: needed by DML.
301 shapes_needed_by_dml: Set[irast.Set]
302
303 def __init__(
304 self,
305 prevlevel: Optional[CompilerContextLevel],
306 mode: ContextSwitchMode,
307 *,
308 env: Optional[Environment] = None,
309 scope_tree: Optional[irast.ScopeTreeNode] = None,
310 ) -> None:
311 if prevlevel is None:
312 assert env is not None
313 assert scope_tree is not None
314
315 self.env = env
316 self.argmap = collections.OrderedDict()
317
318 self.singleton_mode = False
319
320 self.toplevel_stmt = NO_STMT
321 self.stmt = NO_STMT
322 self.rel = NO_STMT
323 self.rel_hierarchy = {}
324 self.param_ctes = {}
325 self.type_ctes = {}
326 self.pending_type_ctes = set()
327 self.dml_stmts = {}
328 self.parent_rel = None
329 self.pending_query = None
330 self.materializing = frozenset()
331
332 self.expr_exposed = None
333 self.expr_exposed_tuple_cheat = None
334 self.volatility_ref = ()
335 self.current_insert_path_id = None
336
337 self.disable_semi_join = frozenset()
338 self.force_optional = frozenset()
339 self.skippable_sources = frozenset()
340 self.intersection_narrowing = {}
341
342 self.path_scope = collections.ChainMap()
343 self.scope_tree = scope_tree
344 self.dml_stmt_stack = []
345 self.rel_overlays = RelOverlays()
346
347 self.external_rels = {}
348 self.enclosing_cte_iterator = None
349 self.shapes_needed_by_dml = set()
350
351 self.trigger_mode = False
352
353 else:
354 self.env = prevlevel.env
355 self.argmap = prevlevel.argmap
356
357 self.singleton_mode = prevlevel.singleton_mode
358
359 self.toplevel_stmt = prevlevel.toplevel_stmt
360 self.stmt = prevlevel.stmt
361 self.rel = prevlevel.rel
362 self.rel_hierarchy = prevlevel.rel_hierarchy
363 self.param_ctes = prevlevel.param_ctes
364 self.type_ctes = prevlevel.type_ctes
365 self.pending_type_ctes = prevlevel.pending_type_ctes
366 self.dml_stmts = prevlevel.dml_stmts
367 self.parent_rel = prevlevel.parent_rel
368 self.pending_query = prevlevel.pending_query
369 self.materializing = prevlevel.materializing
370
371 self.expr_exposed = prevlevel.expr_exposed
372 self.expr_exposed_tuple_cheat = prevlevel.expr_exposed_tuple_cheat
373 self.volatility_ref = prevlevel.volatility_ref
374 self.current_insert_path_id = prevlevel.current_insert_path_id
375
376 self.disable_semi_join = prevlevel.disable_semi_join
377 self.force_optional = prevlevel.force_optional
378 self.skippable_sources = prevlevel.skippable_sources
379 self.intersection_narrowing = prevlevel.intersection_narrowing
380
381 self.path_scope = prevlevel.path_scope
382 self.scope_tree = prevlevel.scope_tree
383 self.dml_stmt_stack = prevlevel.dml_stmt_stack
384 self.rel_overlays = prevlevel.rel_overlays
385 self.enclosing_cte_iterator = prevlevel.enclosing_cte_iterator
386 self.shapes_needed_by_dml = prevlevel.shapes_needed_by_dml
387 self.external_rels = prevlevel.external_rels
388
389 self.trigger_mode = prevlevel.trigger_mode
390
391 if mode is ContextSwitchMode.SUBSTMT:
392 if self.pending_query is not None:
393 self.rel = self.pending_query
394 else:
395 self.rel = pgast.SelectStmt()
396 if prevlevel.parent_rel is not None:
397 parent_rel = prevlevel.parent_rel
398 else:
399 parent_rel = prevlevel.rel
400 self.rel_hierarchy[self.rel] = parent_rel
401
402 self.stmt = self.rel
403 self.pending_query = None
404 self.parent_rel = None
405
406 elif mode is ContextSwitchMode.SUBREL:
407 self.rel = pgast.SelectStmt()
408 if prevlevel.parent_rel is not None:
409 parent_rel = prevlevel.parent_rel
410 else:
411 parent_rel = prevlevel.rel
412 self.rel_hierarchy[self.rel] = parent_rel
413 self.pending_query = None
414 self.parent_rel = None
415
416 elif mode is ContextSwitchMode.NEWREL:
417 self.rel = pgast.SelectStmt()
418 self.pending_query = None
419 self.parent_rel = None
420 self.path_scope = collections.ChainMap()
421 self.rel_hierarchy = {}
422 self.scope_tree = prevlevel.scope_tree.root
423
424 self.disable_semi_join = frozenset()
425 self.force_optional = frozenset()
426 self.intersection_narrowing = {}
427 self.pending_type_ctes = set(prevlevel.pending_type_ctes)
428
429 elif mode == ContextSwitchMode.NEWSCOPE:
430 self.path_scope = prevlevel.path_scope.new_child()
431
432 def subrel(
433 self,
434 ) -> compiler.CompilerContextManager[CompilerContextLevel]:
435 return self.new(ContextSwitchMode.SUBREL)
436
437 def newrel(
438 self,
439 ) -> compiler.CompilerContextManager[CompilerContextLevel]:
440 return self.new(ContextSwitchMode.NEWREL)
441
442 def substmt(
443 self,
444 ) -> compiler.CompilerContextManager[CompilerContextLevel]:
445 return self.new(ContextSwitchMode.SUBSTMT)
446
447 def newscope(
448 self,
449 ) -> compiler.CompilerContextManager[CompilerContextLevel]:
450 return self.new(ContextSwitchMode.NEWSCOPE)
451
452 def up_hierarchy(
453 self,
454 n: int, q: Optional[pgast.Query]=None
455 ) -> Optional[pgast.Query]:
456 # mostly intended as a debugging helper
457 q = q or self.rel
458 for _ in range(n):
459 if q:
460 q = self.rel_hierarchy.get(q)
461 return q
462
463
464 class CompilerContext(compiler.CompilerContext[CompilerContextLevel]):
465 ContextLevelClass = CompilerContextLevel
466 default_mode = ContextSwitchMode.TRANSPARENT
467
468
469 RewriteKey = Tuple[uuid.UUID, bool]
470 FullRewriteKey = Tuple[
471 uuid.UUID, bool, Optional[frozenset['irast.MutatingLikeStmt']]]
472
473
474 class Environment:
475 """Static compilation environment."""
476
477 aliases: aliases.AliasGenerator
478 output_format: Optional[OutputFormat]
479 named_param_prefix: Optional[tuple[str, ...]]
480 ptrref_source_visibility: Dict[irast.BasePointerRef, bool]
481 expected_cardinality_one: bool
482 ignore_object_shapes: bool
483 explicit_top_cast: Optional[irast.TypeRef]
484 singleton_mode: bool
485 query_params: List[irast.Param]
486 type_rewrites: Dict[RewriteKey, irast.Set]
487 scope_tree_nodes: Dict[int, irast.ScopeTreeNode]
488 external_rvars: Mapping[Tuple[irast.PathId, str], pgast.PathRangeVar]
489 materialized_views: Dict[uuid.UUID, irast.Set]
490 backend_runtime_params: pgparams.BackendRuntimeParams
491
492 #: A list of CTEs that implement constraint validation at the
493 #: query level.
494 check_ctes: List[pgast.CommonTableExpr]
495
496 def __init__(
497 self,
498 *,
499 output_format: Optional[OutputFormat],
500 named_param_prefix: Optional[tuple[str, ...]],
501 expected_cardinality_one: bool,
502 ignore_object_shapes: bool,
503 singleton_mode: bool,
504 expand_inhviews: bool,
505 explicit_top_cast: Optional[irast.TypeRef],
506 query_params: List[irast.Param],
507 type_rewrites: Dict[RewriteKey, irast.Set],
508 scope_tree_nodes: Dict[int, irast.ScopeTreeNode],
509 external_rvars: Optional[
510 Mapping[Tuple[irast.PathId, str], pgast.PathRangeVar]
511 ] = None,
512 backend_runtime_params: pgparams.BackendRuntimeParams,
513 ) -> None:
514 self.aliases = aliases.AliasGenerator()
515 self.output_format = output_format
516 self.named_param_prefix = named_param_prefix
517 self.ptrref_source_visibility = {}
518 self.expected_cardinality_one = expected_cardinality_one
519 self.ignore_object_shapes = ignore_object_shapes
520 self.singleton_mode = singleton_mode
521 self.expand_inhviews = expand_inhviews
522 self.explicit_top_cast = explicit_top_cast
523 self.query_params = query_params
524 self.type_rewrites = type_rewrites
525 self.scope_tree_nodes = scope_tree_nodes
526 self.external_rvars = external_rvars or {}
527 self.materialized_views = {}
528 self.check_ctes = []
529 self.backend_runtime_params = backend_runtime_params
530
531
532 # XXX: this context hack is necessary until pathctx is converted
533 # to use context levels instead of using env directly.
534 @contextlib.contextmanager
535 def output_format(
536 ctx: CompilerContextLevel,
537 output_format: OutputFormat,
538 ) -> Generator[None, None, None]:
539 original_output_format = ctx.env.output_format
540 original_ignore_object_shapes = ctx.env.ignore_object_shapes
541 ctx.env.output_format = output_format
542 ctx.env.ignore_object_shapes = False
543 try:
544 yield
545 finally:
546 ctx.env.output_format = original_output_format
547 ctx.env.ignore_object_shapes = original_ignore_object_shapes
548
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/edb/pgsql/compiler/context.py b/edb/pgsql/compiler/context.py
--- a/edb/pgsql/compiler/context.py
+++ b/edb/pgsql/compiler/context.py
@@ -420,6 +420,7 @@
self.path_scope = collections.ChainMap()
self.rel_hierarchy = {}
self.scope_tree = prevlevel.scope_tree.root
+ self.volatility_ref = ()
self.disable_semi_join = frozenset()
self.force_optional = frozenset()
| {"golden_diff": "diff --git a/edb/pgsql/compiler/context.py b/edb/pgsql/compiler/context.py\n--- a/edb/pgsql/compiler/context.py\n+++ b/edb/pgsql/compiler/context.py\n@@ -420,6 +420,7 @@\n self.path_scope = collections.ChainMap()\n self.rel_hierarchy = {}\n self.scope_tree = prevlevel.scope_tree.root\n+ self.volatility_ref = ()\n \n self.disable_semi_join = frozenset()\n self.force_optional = frozenset()\n", "issue": "InternalServerError: missing FROM-clause entry for table \"ins_contents~3\" when access policies are applied\n<!-- Please search existing issues to avoid creating duplicates. -->\r\n\r\nThe `InternalServerError: missing FROM-clause entry for table \"ins_contents~3\"` error sometimes occurs in deeply nested queries *only when access policies are applied*\r\n\r\n<!--\r\nFor the EdgeDB Version: run `edgedb query 'select sys::get_version_as_str()'` from your project directory (or run `select sys::get_version_as_str();` in the EdgeDB interactive shell).\r\nFor the EdgeDB CLI Version: Run `edgedb --version` from anywhere\r\n-->\r\n\r\n- EdgeDB Version: `3.4+301ba34`\r\n- EdgeDB CLI Version: `3.5.0+907ff37`\r\n- OS Version: `Ubuntu 23.04`\r\n\r\nPlease note that the query and schema has been modified to simplify the queries and schema\r\n\r\n## 1. Setup\r\nSet `global current_accounts_array` to contain `bffeb170-6d5d-11ee-bcd6-ad19361485ad`\r\n\r\n```eql\r\ninsert Account {\r\n id := <uuid>\"bffeb170-6d5d-11ee-bcd6-ad19361485ad\",\r\n address := \"0x9D2B268D6e51074c466b823Fd3A3b794cA750CB0\"\r\n}\r\n```\r\n\r\n## 2. Observe problem\r\n### 2.1 Enable access policies and run problematic query\r\nExpect `InternalServerError: missing FROM-clause entry for table \"ins_contents~3\"`\r\nPlease note these queries are generated by the js client, but occur in the ui as well\r\n```eql\r\nINSERT default::Policy {\r\n account := (\r\n WITH\r\n __scope_0_defaultAccount := DETACHED default::Account\r\n SELECT __scope_0_defaultAccount {\r\n id\r\n }\r\n FILTER (__scope_0_defaultAccount.address = \"0x9D2B268D6e51074c466b823Fd3A3b794cA750CB0\")\r\n ),\r\n key := <default::uint16>1,\r\n stateHistory := (\r\n INSERT default::PolicyState {\r\n proposal := (\r\n INSERT default::TransactionProposal {\r\n hash := \"0x8ab322da38c5c044c903b2de5bba9d8f93fee416a7fd5047bd516ae1a9aa5eee\",\r\n account := (\r\n WITH\r\n __scope_1_defaultAccount := DETACHED default::Account\r\n SELECT __scope_1_defaultAccount {\r\n id\r\n }\r\n FILTER (__scope_1_defaultAccount.address = \"0x9D2B268D6e51074c466b823Fd3A3b794cA750CB0\")\r\n )\r\n }\r\n ),\r\n }\r\n )\r\n }\r\n```\r\n\r\n### 2.3 Disable access policies and re-run problematic query\r\nExpect query to succeed\r\n\r\n## 3. Workaround using WITH\r\nMoving the `INSERT default::TransactionProposal` into a with block fixes the issue.\r\n\r\n### 3.1 Enable access policies and run query\r\nExpect query to succeed\r\n```eql\r\nWITH nestedProposal := (\r\n INSERT default::TransactionProposal {\r\n hash := \"0x8ab322da38c5c044c903b2de5bba9d8f93fee416a7fd5047bd516ae1a9aa5ee5\",\r\n account := (\r\n WITH\r\n __scope_1_defaultAccount := DETACHED default::Account\r\n SELECT __scope_1_defaultAccount {\r\n id\r\n }\r\n FILTER (__scope_1_defaultAccount.address = \"0x9D2B268D6e51074c466b823Fd3A3b794cA750CB0\")\r\n ),\r\n nonce := <default::uint64>5n\r\n }\r\n)\r\nINSERT default::Policy {\r\n account := (\r\n WITH\r\n __scope_0_defaultAccount := DETACHED default::Account\r\n SELECT __scope_0_defaultAccount {\r\n id\r\n }\r\n FILTER (__scope_0_defaultAccount.address = \"0x9D2B268D6e51074c466b823Fd3A3b794cA750CB0\")\r\n ),\r\n key := <default::uint16>5,\r\n stateHistory := (\r\n INSERT default::PolicyState {\r\n proposal := nestedProposal,\r\n }\r\n )\r\n }\r\n```\r\n\r\n<!-- If the issue is about a query error, please also provide your schema -->\r\n\r\n---\r\n## Schema\r\n```\r\nmodule default {\r\n global current_accounts_array: array<uuid>;\r\n global current_accounts_set := array_unpack(global current_accounts_array);\r\n global current_accounts := <Account>(global current_accounts_set);\r\n\r\n type Account {\r\n required address: str { constraint exclusive; }\r\n\r\n access policy members_select_insert_update\r\n allow select, insert, update\r\n using (.id in global current_accounts_set);\r\n }\r\n\r\ntype Policy {\r\n required account: Account;\r\n required key: int32;\r\n\r\n required multi stateHistory: PolicyState {\r\n constraint exclusive;\r\n on source delete delete target;\r\n on target delete allow;\r\n }\r\n\r\n constraint exclusive on ((.account, .key));\r\n\r\n access policy members_select_insert_update\r\n allow select, insert, update\r\n using (.account in global current_accounts);\r\n\r\n access policy can_be_deleted_when_inactive\r\n allow delete\r\n using (not .isActive);\r\n }\r\n\r\ntype PolicyState {\r\n link policy := .<stateHistory[is Policy];\r\n proposal: TransactionProposal {\r\n on source delete delete target; \r\n on target delete delete source;\r\n }\r\n }\r\n\r\ntype TransactionProposal {\r\n required hash: str { constraint exclusive; }\r\n required account: Account;\r\n required nonce: int32;\r\n\r\n constraint exclusive on ((.account, .nonce));\r\n index on (.nonce);\r\n\r\n access policy members_only\r\n allow all\r\n using (.account in global current_accounts);\r\n }\r\n}\r\n```\n", "before_files": [{"content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\n\"\"\"IR compiler context.\"\"\"\n\nfrom __future__ import annotations\nfrom typing import *\n\nimport collections\nimport contextlib\nimport dataclasses\nimport enum\nimport uuid\n\nimport immutables as immu\n\nfrom edb.common import compiler\n\nfrom edb.pgsql import ast as pgast\nfrom edb.pgsql import params as pgparams\n\nfrom . import aliases\n\nif TYPE_CHECKING:\n from edb.ir import ast as irast\n\n\nclass ContextSwitchMode(enum.Enum):\n TRANSPARENT = enum.auto()\n SUBREL = enum.auto()\n NEWREL = enum.auto()\n SUBSTMT = enum.auto()\n NEWSCOPE = enum.auto()\n\n\nclass ShapeFormat(enum.Enum):\n SERIALIZED = enum.auto()\n FLAT = enum.auto()\n\n\nclass OutputFormat(enum.Enum):\n #: Result data output in PostgreSQL format.\n NATIVE = enum.auto()\n #: Result data output as a single JSON string.\n JSON = enum.auto()\n #: Result data output as a single PostgreSQL JSONB type value.\n JSONB = enum.auto()\n #: Result data output as a JSON string for each element in returned set.\n JSON_ELEMENTS = enum.auto()\n #: None mode: query result not returned, cardinality of result set\n #: is returned instead.\n NONE = enum.auto()\n #: Like NATIVE, but objects without an explicit shape are serialized\n #: as UUIDs.\n NATIVE_INTERNAL = enum.auto()\n\n\nNO_STMT = pgast.SelectStmt()\n\n\nOverlayEntry = tuple[\n str,\n Union[pgast.BaseRelation, pgast.CommonTableExpr],\n 'irast.PathId',\n]\n\n\[email protected](kw_only=True)\nclass RelOverlays:\n \"\"\"Container for relation overlays.\n\n These track \"overlays\" that can be registered for different types,\n in the context of DML.\n\n Consider the query:\n with X := (\n insert Person {\n name := \"Sully\",\n notes := assert_distinct({\n (insert Note {name := \"1\"}),\n (select Note filter .name = \"2\"),\n }),\n }),\n select X { name, notes: {name} };\n\n When we go to select X, we find the source of that set without any\n trouble (it's the result of the actual insert statement, more or\n less; in any case, it's in a CTE that we then include).\n\n Handling the notes are trickier, though:\n * The links aren't in the link table yet, but only in a CTE.\n (In similar update cases, with things like +=, they might be mixed\n between both.)\n * Some of the actual Note objects aren't in the table yet, just an insert\n CTE. But some *are*, so we need to union them.\n\n We solve these problems using overlays:\n * Whenever we do DML (or reference WITH-bound DML),\n we register overlays describing the changes done\n to *all of the enclosing DML*. So here, the Note insert's overlays\n get registered both for the Note insert and for the Person insert.\n * When we try to compile a root set or pointer, we see if it is connected\n to a DML statement, and if so we apply the overlays.\n\n The overlay itself is simply a sequence of operations on relations\n and CTEs that mix in the new data. In the obvious insert cases,\n these consist of unioning the new data in.\n\n This system works decently well but is also a little broken: I\n think that both the \"all of the enclosing DML\" and the \"see if it\n is connected to a DML statement\" have dangers; see Issue #3030.\n\n In relctx, see range_for_material_objtype, range_for_ptrref, and\n range_from_queryset (which those two call) for details on how\n overlays are applied.\n Overlays are added to with relctx.add_type_rel_overlay\n and relctx.add_ptr_rel_overlay.\n\n\n ===== NOTE ON MUTABILITY:\n In typical use, the overlays are mutable: nested DML adds overlays\n that are then consumed by code in enclosing contexts.\n\n In some places, however, we need to temporarily customize the\n overlay environment (during policy and trigger compilation, for\n example).\n\n The original version of overlays were implemented as a dict of\n dicts of lists. Doing temporary customizations required doing\n at least some copying. Doing a full deep copy always felt excessive\n but doing anything short of that left me constantly terrified.\n\n So instead we represent the overlays as a mutable object that\n contains immutable maps. When we add overlays, we update the maps\n and then reassign their values.\n\n When we want to do a temporary adjustment, we can cheaply make a\n fresh RelOverlays object and then modify that without touching the\n original.\n \"\"\"\n\n #: Relations used to \"overlay\" the main table for\n #: the type. Mostly used with DML statements.\n type: immu.Map[\n Optional[irast.MutatingLikeStmt],\n immu.Map[\n uuid.UUID,\n tuple[OverlayEntry, ...],\n ],\n ] = immu.Map()\n\n #: Relations used to \"overlay\" the main table for\n #: the pointer. Mostly used with DML statements.\n ptr: immu.Map[\n Optional[irast.MutatingLikeStmt],\n immu.Map[\n Tuple[uuid.UUID, str],\n Tuple[\n Tuple[\n str,\n Union[pgast.BaseRelation, pgast.CommonTableExpr],\n irast.PathId,\n ], ...\n ],\n ],\n ] = immu.Map()\n\n def copy(self) -> RelOverlays:\n return RelOverlays(type=self.type, ptr=self.ptr)\n\n\nclass CompilerContextLevel(compiler.ContextLevel):\n #: static compilation environment\n env: Environment\n\n #: mapping of named args to position\n argmap: Dict[str, pgast.Param]\n\n #: whether compiling in singleton expression mode\n singleton_mode: bool\n\n #: whether compiling a trigger\n trigger_mode: bool\n\n #: the top-level SQL statement\n toplevel_stmt: pgast.Query\n\n #: Record of DML CTEs generated for the corresponding IR DML.\n #: CTEs generated for DML-containing FOR statements are keyed\n #: by their iterator set.\n dml_stmts: Dict[Union[irast.MutatingStmt, irast.Set],\n pgast.CommonTableExpr]\n\n #: SQL statement corresponding to the IR statement\n #: currently being compiled.\n stmt: pgast.SelectStmt\n\n #: Current SQL subquery\n rel: pgast.SelectStmt\n\n #: SQL query hierarchy\n rel_hierarchy: Dict[pgast.Query, pgast.Query]\n\n #: CTEs representing decoded parameters\n param_ctes: Dict[str, pgast.CommonTableExpr]\n\n #: CTEs representing schema types, when rewritten based on access policy\n type_ctes: Dict[FullRewriteKey, pgast.CommonTableExpr]\n\n #: A set of type CTEs currently being generated\n pending_type_ctes: Set[RewriteKey]\n\n #: The logical parent of the current query in the\n #: query hierarchy\n parent_rel: Optional[pgast.Query]\n\n #: Query to become current in the next SUBSTMT switch.\n pending_query: Optional[pgast.SelectStmt]\n\n #: Sets currently being materialized\n materializing: FrozenSet[irast.Stmt]\n\n #: Whether the expression currently being processed is\n #: directly exposed to the output of the statement.\n expr_exposed: Optional[bool]\n\n #: A hack that indicates a tuple element that should be treated as\n #: exposed. This enables us to treat 'bar' in (foo, bar).1 as exposed,\n #: which eta-expansion and some casts rely on.\n expr_exposed_tuple_cheat: Optional[irast.TupleElement]\n\n #: Expression to use to force SQL expression volatility in this context\n #: (Delayed with a lambda to avoid inserting it when not used.)\n volatility_ref: Tuple[\n Callable[[pgast.SelectStmt, CompilerContextLevel],\n Optional[pgast.BaseExpr]], ...]\n\n # Current path_id we are INSERTing, so that we can avoid creating\n # a bogus volatility ref to it...\n current_insert_path_id: Optional[irast.PathId]\n\n #: Paths, for which semi-join is banned in this context.\n disable_semi_join: FrozenSet[irast.PathId]\n\n #: Paths, which need to be explicitly wrapped into SQL\n #: optionality scaffolding.\n force_optional: FrozenSet[irast.PathId]\n\n #: Paths that can be ignored when they appear as the source of a\n # computable. This is key to optimizing away free object sources in\n # group by aggregates.\n skippable_sources: FrozenSet[irast.PathId]\n\n #: Specifies that references to a specific Set must be narrowed\n #: by only selecting instances of type specified by the mapping value.\n intersection_narrowing: Dict[irast.Set, irast.Set]\n\n #: Which SQL query holds the SQL scope for the given PathId\n path_scope: ChainMap[irast.PathId, Optional[pgast.SelectStmt]]\n\n #: Relevant IR scope for this context.\n scope_tree: irast.ScopeTreeNode\n\n #: A stack of dml statements currently being compiled. Used for\n #: figuring out what to record in type_rel_overlays.\n dml_stmt_stack: List[irast.MutatingLikeStmt]\n\n #: Relations used to \"overlay\" the main table for\n #: the type. Mostly used with DML statements.\n rel_overlays: RelOverlays\n\n #: Mapping from path ids to \"external\" rels given by a particular relation\n external_rels: Mapping[\n irast.PathId,\n Tuple[pgast.BaseRelation | pgast.CommonTableExpr, Tuple[str, ...]]\n ]\n\n #: The CTE and some metadata of any enclosing iterator-like\n #: construct (which includes iterators, insert/update, and INSERT\n #: ELSE select clauses) currently being compiled.\n enclosing_cte_iterator: Optional[pgast.IteratorCTE]\n\n #: Sets to force shape compilation on, because the values are\n #: needed by DML.\n shapes_needed_by_dml: Set[irast.Set]\n\n def __init__(\n self,\n prevlevel: Optional[CompilerContextLevel],\n mode: ContextSwitchMode,\n *,\n env: Optional[Environment] = None,\n scope_tree: Optional[irast.ScopeTreeNode] = None,\n ) -> None:\n if prevlevel is None:\n assert env is not None\n assert scope_tree is not None\n\n self.env = env\n self.argmap = collections.OrderedDict()\n\n self.singleton_mode = False\n\n self.toplevel_stmt = NO_STMT\n self.stmt = NO_STMT\n self.rel = NO_STMT\n self.rel_hierarchy = {}\n self.param_ctes = {}\n self.type_ctes = {}\n self.pending_type_ctes = set()\n self.dml_stmts = {}\n self.parent_rel = None\n self.pending_query = None\n self.materializing = frozenset()\n\n self.expr_exposed = None\n self.expr_exposed_tuple_cheat = None\n self.volatility_ref = ()\n self.current_insert_path_id = None\n\n self.disable_semi_join = frozenset()\n self.force_optional = frozenset()\n self.skippable_sources = frozenset()\n self.intersection_narrowing = {}\n\n self.path_scope = collections.ChainMap()\n self.scope_tree = scope_tree\n self.dml_stmt_stack = []\n self.rel_overlays = RelOverlays()\n\n self.external_rels = {}\n self.enclosing_cte_iterator = None\n self.shapes_needed_by_dml = set()\n\n self.trigger_mode = False\n\n else:\n self.env = prevlevel.env\n self.argmap = prevlevel.argmap\n\n self.singleton_mode = prevlevel.singleton_mode\n\n self.toplevel_stmt = prevlevel.toplevel_stmt\n self.stmt = prevlevel.stmt\n self.rel = prevlevel.rel\n self.rel_hierarchy = prevlevel.rel_hierarchy\n self.param_ctes = prevlevel.param_ctes\n self.type_ctes = prevlevel.type_ctes\n self.pending_type_ctes = prevlevel.pending_type_ctes\n self.dml_stmts = prevlevel.dml_stmts\n self.parent_rel = prevlevel.parent_rel\n self.pending_query = prevlevel.pending_query\n self.materializing = prevlevel.materializing\n\n self.expr_exposed = prevlevel.expr_exposed\n self.expr_exposed_tuple_cheat = prevlevel.expr_exposed_tuple_cheat\n self.volatility_ref = prevlevel.volatility_ref\n self.current_insert_path_id = prevlevel.current_insert_path_id\n\n self.disable_semi_join = prevlevel.disable_semi_join\n self.force_optional = prevlevel.force_optional\n self.skippable_sources = prevlevel.skippable_sources\n self.intersection_narrowing = prevlevel.intersection_narrowing\n\n self.path_scope = prevlevel.path_scope\n self.scope_tree = prevlevel.scope_tree\n self.dml_stmt_stack = prevlevel.dml_stmt_stack\n self.rel_overlays = prevlevel.rel_overlays\n self.enclosing_cte_iterator = prevlevel.enclosing_cte_iterator\n self.shapes_needed_by_dml = prevlevel.shapes_needed_by_dml\n self.external_rels = prevlevel.external_rels\n\n self.trigger_mode = prevlevel.trigger_mode\n\n if mode is ContextSwitchMode.SUBSTMT:\n if self.pending_query is not None:\n self.rel = self.pending_query\n else:\n self.rel = pgast.SelectStmt()\n if prevlevel.parent_rel is not None:\n parent_rel = prevlevel.parent_rel\n else:\n parent_rel = prevlevel.rel\n self.rel_hierarchy[self.rel] = parent_rel\n\n self.stmt = self.rel\n self.pending_query = None\n self.parent_rel = None\n\n elif mode is ContextSwitchMode.SUBREL:\n self.rel = pgast.SelectStmt()\n if prevlevel.parent_rel is not None:\n parent_rel = prevlevel.parent_rel\n else:\n parent_rel = prevlevel.rel\n self.rel_hierarchy[self.rel] = parent_rel\n self.pending_query = None\n self.parent_rel = None\n\n elif mode is ContextSwitchMode.NEWREL:\n self.rel = pgast.SelectStmt()\n self.pending_query = None\n self.parent_rel = None\n self.path_scope = collections.ChainMap()\n self.rel_hierarchy = {}\n self.scope_tree = prevlevel.scope_tree.root\n\n self.disable_semi_join = frozenset()\n self.force_optional = frozenset()\n self.intersection_narrowing = {}\n self.pending_type_ctes = set(prevlevel.pending_type_ctes)\n\n elif mode == ContextSwitchMode.NEWSCOPE:\n self.path_scope = prevlevel.path_scope.new_child()\n\n def subrel(\n self,\n ) -> compiler.CompilerContextManager[CompilerContextLevel]:\n return self.new(ContextSwitchMode.SUBREL)\n\n def newrel(\n self,\n ) -> compiler.CompilerContextManager[CompilerContextLevel]:\n return self.new(ContextSwitchMode.NEWREL)\n\n def substmt(\n self,\n ) -> compiler.CompilerContextManager[CompilerContextLevel]:\n return self.new(ContextSwitchMode.SUBSTMT)\n\n def newscope(\n self,\n ) -> compiler.CompilerContextManager[CompilerContextLevel]:\n return self.new(ContextSwitchMode.NEWSCOPE)\n\n def up_hierarchy(\n self,\n n: int, q: Optional[pgast.Query]=None\n ) -> Optional[pgast.Query]:\n # mostly intended as a debugging helper\n q = q or self.rel\n for _ in range(n):\n if q:\n q = self.rel_hierarchy.get(q)\n return q\n\n\nclass CompilerContext(compiler.CompilerContext[CompilerContextLevel]):\n ContextLevelClass = CompilerContextLevel\n default_mode = ContextSwitchMode.TRANSPARENT\n\n\nRewriteKey = Tuple[uuid.UUID, bool]\nFullRewriteKey = Tuple[\n uuid.UUID, bool, Optional[frozenset['irast.MutatingLikeStmt']]]\n\n\nclass Environment:\n \"\"\"Static compilation environment.\"\"\"\n\n aliases: aliases.AliasGenerator\n output_format: Optional[OutputFormat]\n named_param_prefix: Optional[tuple[str, ...]]\n ptrref_source_visibility: Dict[irast.BasePointerRef, bool]\n expected_cardinality_one: bool\n ignore_object_shapes: bool\n explicit_top_cast: Optional[irast.TypeRef]\n singleton_mode: bool\n query_params: List[irast.Param]\n type_rewrites: Dict[RewriteKey, irast.Set]\n scope_tree_nodes: Dict[int, irast.ScopeTreeNode]\n external_rvars: Mapping[Tuple[irast.PathId, str], pgast.PathRangeVar]\n materialized_views: Dict[uuid.UUID, irast.Set]\n backend_runtime_params: pgparams.BackendRuntimeParams\n\n #: A list of CTEs that implement constraint validation at the\n #: query level.\n check_ctes: List[pgast.CommonTableExpr]\n\n def __init__(\n self,\n *,\n output_format: Optional[OutputFormat],\n named_param_prefix: Optional[tuple[str, ...]],\n expected_cardinality_one: bool,\n ignore_object_shapes: bool,\n singleton_mode: bool,\n expand_inhviews: bool,\n explicit_top_cast: Optional[irast.TypeRef],\n query_params: List[irast.Param],\n type_rewrites: Dict[RewriteKey, irast.Set],\n scope_tree_nodes: Dict[int, irast.ScopeTreeNode],\n external_rvars: Optional[\n Mapping[Tuple[irast.PathId, str], pgast.PathRangeVar]\n ] = None,\n backend_runtime_params: pgparams.BackendRuntimeParams,\n ) -> None:\n self.aliases = aliases.AliasGenerator()\n self.output_format = output_format\n self.named_param_prefix = named_param_prefix\n self.ptrref_source_visibility = {}\n self.expected_cardinality_one = expected_cardinality_one\n self.ignore_object_shapes = ignore_object_shapes\n self.singleton_mode = singleton_mode\n self.expand_inhviews = expand_inhviews\n self.explicit_top_cast = explicit_top_cast\n self.query_params = query_params\n self.type_rewrites = type_rewrites\n self.scope_tree_nodes = scope_tree_nodes\n self.external_rvars = external_rvars or {}\n self.materialized_views = {}\n self.check_ctes = []\n self.backend_runtime_params = backend_runtime_params\n\n\n# XXX: this context hack is necessary until pathctx is converted\n# to use context levels instead of using env directly.\[email protected]\ndef output_format(\n ctx: CompilerContextLevel,\n output_format: OutputFormat,\n) -> Generator[None, None, None]:\n original_output_format = ctx.env.output_format\n original_ignore_object_shapes = ctx.env.ignore_object_shapes\n ctx.env.output_format = output_format\n ctx.env.ignore_object_shapes = False\n try:\n yield\n finally:\n ctx.env.output_format = original_output_format\n ctx.env.ignore_object_shapes = original_ignore_object_shapes\n", "path": "edb/pgsql/compiler/context.py"}], "after_files": [{"content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\n\"\"\"IR compiler context.\"\"\"\n\nfrom __future__ import annotations\nfrom typing import *\n\nimport collections\nimport contextlib\nimport dataclasses\nimport enum\nimport uuid\n\nimport immutables as immu\n\nfrom edb.common import compiler\n\nfrom edb.pgsql import ast as pgast\nfrom edb.pgsql import params as pgparams\n\nfrom . import aliases\n\nif TYPE_CHECKING:\n from edb.ir import ast as irast\n\n\nclass ContextSwitchMode(enum.Enum):\n TRANSPARENT = enum.auto()\n SUBREL = enum.auto()\n NEWREL = enum.auto()\n SUBSTMT = enum.auto()\n NEWSCOPE = enum.auto()\n\n\nclass ShapeFormat(enum.Enum):\n SERIALIZED = enum.auto()\n FLAT = enum.auto()\n\n\nclass OutputFormat(enum.Enum):\n #: Result data output in PostgreSQL format.\n NATIVE = enum.auto()\n #: Result data output as a single JSON string.\n JSON = enum.auto()\n #: Result data output as a single PostgreSQL JSONB type value.\n JSONB = enum.auto()\n #: Result data output as a JSON string for each element in returned set.\n JSON_ELEMENTS = enum.auto()\n #: None mode: query result not returned, cardinality of result set\n #: is returned instead.\n NONE = enum.auto()\n #: Like NATIVE, but objects without an explicit shape are serialized\n #: as UUIDs.\n NATIVE_INTERNAL = enum.auto()\n\n\nNO_STMT = pgast.SelectStmt()\n\n\nOverlayEntry = tuple[\n str,\n Union[pgast.BaseRelation, pgast.CommonTableExpr],\n 'irast.PathId',\n]\n\n\[email protected](kw_only=True)\nclass RelOverlays:\n \"\"\"Container for relation overlays.\n\n These track \"overlays\" that can be registered for different types,\n in the context of DML.\n\n Consider the query:\n with X := (\n insert Person {\n name := \"Sully\",\n notes := assert_distinct({\n (insert Note {name := \"1\"}),\n (select Note filter .name = \"2\"),\n }),\n }),\n select X { name, notes: {name} };\n\n When we go to select X, we find the source of that set without any\n trouble (it's the result of the actual insert statement, more or\n less; in any case, it's in a CTE that we then include).\n\n Handling the notes are trickier, though:\n * The links aren't in the link table yet, but only in a CTE.\n (In similar update cases, with things like +=, they might be mixed\n between both.)\n * Some of the actual Note objects aren't in the table yet, just an insert\n CTE. But some *are*, so we need to union them.\n\n We solve these problems using overlays:\n * Whenever we do DML (or reference WITH-bound DML),\n we register overlays describing the changes done\n to *all of the enclosing DML*. So here, the Note insert's overlays\n get registered both for the Note insert and for the Person insert.\n * When we try to compile a root set or pointer, we see if it is connected\n to a DML statement, and if so we apply the overlays.\n\n The overlay itself is simply a sequence of operations on relations\n and CTEs that mix in the new data. In the obvious insert cases,\n these consist of unioning the new data in.\n\n This system works decently well but is also a little broken: I\n think that both the \"all of the enclosing DML\" and the \"see if it\n is connected to a DML statement\" have dangers; see Issue #3030.\n\n In relctx, see range_for_material_objtype, range_for_ptrref, and\n range_from_queryset (which those two call) for details on how\n overlays are applied.\n Overlays are added to with relctx.add_type_rel_overlay\n and relctx.add_ptr_rel_overlay.\n\n\n ===== NOTE ON MUTABILITY:\n In typical use, the overlays are mutable: nested DML adds overlays\n that are then consumed by code in enclosing contexts.\n\n In some places, however, we need to temporarily customize the\n overlay environment (during policy and trigger compilation, for\n example).\n\n The original version of overlays were implemented as a dict of\n dicts of lists. Doing temporary customizations required doing\n at least some copying. Doing a full deep copy always felt excessive\n but doing anything short of that left me constantly terrified.\n\n So instead we represent the overlays as a mutable object that\n contains immutable maps. When we add overlays, we update the maps\n and then reassign their values.\n\n When we want to do a temporary adjustment, we can cheaply make a\n fresh RelOverlays object and then modify that without touching the\n original.\n \"\"\"\n\n #: Relations used to \"overlay\" the main table for\n #: the type. Mostly used with DML statements.\n type: immu.Map[\n Optional[irast.MutatingLikeStmt],\n immu.Map[\n uuid.UUID,\n tuple[OverlayEntry, ...],\n ],\n ] = immu.Map()\n\n #: Relations used to \"overlay\" the main table for\n #: the pointer. Mostly used with DML statements.\n ptr: immu.Map[\n Optional[irast.MutatingLikeStmt],\n immu.Map[\n Tuple[uuid.UUID, str],\n Tuple[\n Tuple[\n str,\n Union[pgast.BaseRelation, pgast.CommonTableExpr],\n irast.PathId,\n ], ...\n ],\n ],\n ] = immu.Map()\n\n def copy(self) -> RelOverlays:\n return RelOverlays(type=self.type, ptr=self.ptr)\n\n\nclass CompilerContextLevel(compiler.ContextLevel):\n #: static compilation environment\n env: Environment\n\n #: mapping of named args to position\n argmap: Dict[str, pgast.Param]\n\n #: whether compiling in singleton expression mode\n singleton_mode: bool\n\n #: whether compiling a trigger\n trigger_mode: bool\n\n #: the top-level SQL statement\n toplevel_stmt: pgast.Query\n\n #: Record of DML CTEs generated for the corresponding IR DML.\n #: CTEs generated for DML-containing FOR statements are keyed\n #: by their iterator set.\n dml_stmts: Dict[Union[irast.MutatingStmt, irast.Set],\n pgast.CommonTableExpr]\n\n #: SQL statement corresponding to the IR statement\n #: currently being compiled.\n stmt: pgast.SelectStmt\n\n #: Current SQL subquery\n rel: pgast.SelectStmt\n\n #: SQL query hierarchy\n rel_hierarchy: Dict[pgast.Query, pgast.Query]\n\n #: CTEs representing decoded parameters\n param_ctes: Dict[str, pgast.CommonTableExpr]\n\n #: CTEs representing schema types, when rewritten based on access policy\n type_ctes: Dict[FullRewriteKey, pgast.CommonTableExpr]\n\n #: A set of type CTEs currently being generated\n pending_type_ctes: Set[RewriteKey]\n\n #: The logical parent of the current query in the\n #: query hierarchy\n parent_rel: Optional[pgast.Query]\n\n #: Query to become current in the next SUBSTMT switch.\n pending_query: Optional[pgast.SelectStmt]\n\n #: Sets currently being materialized\n materializing: FrozenSet[irast.Stmt]\n\n #: Whether the expression currently being processed is\n #: directly exposed to the output of the statement.\n expr_exposed: Optional[bool]\n\n #: A hack that indicates a tuple element that should be treated as\n #: exposed. This enables us to treat 'bar' in (foo, bar).1 as exposed,\n #: which eta-expansion and some casts rely on.\n expr_exposed_tuple_cheat: Optional[irast.TupleElement]\n\n #: Expression to use to force SQL expression volatility in this context\n #: (Delayed with a lambda to avoid inserting it when not used.)\n volatility_ref: Tuple[\n Callable[[pgast.SelectStmt, CompilerContextLevel],\n Optional[pgast.BaseExpr]], ...]\n\n # Current path_id we are INSERTing, so that we can avoid creating\n # a bogus volatility ref to it...\n current_insert_path_id: Optional[irast.PathId]\n\n #: Paths, for which semi-join is banned in this context.\n disable_semi_join: FrozenSet[irast.PathId]\n\n #: Paths, which need to be explicitly wrapped into SQL\n #: optionality scaffolding.\n force_optional: FrozenSet[irast.PathId]\n\n #: Paths that can be ignored when they appear as the source of a\n # computable. This is key to optimizing away free object sources in\n # group by aggregates.\n skippable_sources: FrozenSet[irast.PathId]\n\n #: Specifies that references to a specific Set must be narrowed\n #: by only selecting instances of type specified by the mapping value.\n intersection_narrowing: Dict[irast.Set, irast.Set]\n\n #: Which SQL query holds the SQL scope for the given PathId\n path_scope: ChainMap[irast.PathId, Optional[pgast.SelectStmt]]\n\n #: Relevant IR scope for this context.\n scope_tree: irast.ScopeTreeNode\n\n #: A stack of dml statements currently being compiled. Used for\n #: figuring out what to record in type_rel_overlays.\n dml_stmt_stack: List[irast.MutatingLikeStmt]\n\n #: Relations used to \"overlay\" the main table for\n #: the type. Mostly used with DML statements.\n rel_overlays: RelOverlays\n\n #: Mapping from path ids to \"external\" rels given by a particular relation\n external_rels: Mapping[\n irast.PathId,\n Tuple[pgast.BaseRelation | pgast.CommonTableExpr, Tuple[str, ...]]\n ]\n\n #: The CTE and some metadata of any enclosing iterator-like\n #: construct (which includes iterators, insert/update, and INSERT\n #: ELSE select clauses) currently being compiled.\n enclosing_cte_iterator: Optional[pgast.IteratorCTE]\n\n #: Sets to force shape compilation on, because the values are\n #: needed by DML.\n shapes_needed_by_dml: Set[irast.Set]\n\n def __init__(\n self,\n prevlevel: Optional[CompilerContextLevel],\n mode: ContextSwitchMode,\n *,\n env: Optional[Environment] = None,\n scope_tree: Optional[irast.ScopeTreeNode] = None,\n ) -> None:\n if prevlevel is None:\n assert env is not None\n assert scope_tree is not None\n\n self.env = env\n self.argmap = collections.OrderedDict()\n\n self.singleton_mode = False\n\n self.toplevel_stmt = NO_STMT\n self.stmt = NO_STMT\n self.rel = NO_STMT\n self.rel_hierarchy = {}\n self.param_ctes = {}\n self.type_ctes = {}\n self.pending_type_ctes = set()\n self.dml_stmts = {}\n self.parent_rel = None\n self.pending_query = None\n self.materializing = frozenset()\n\n self.expr_exposed = None\n self.expr_exposed_tuple_cheat = None\n self.volatility_ref = ()\n self.current_insert_path_id = None\n\n self.disable_semi_join = frozenset()\n self.force_optional = frozenset()\n self.skippable_sources = frozenset()\n self.intersection_narrowing = {}\n\n self.path_scope = collections.ChainMap()\n self.scope_tree = scope_tree\n self.dml_stmt_stack = []\n self.rel_overlays = RelOverlays()\n\n self.external_rels = {}\n self.enclosing_cte_iterator = None\n self.shapes_needed_by_dml = set()\n\n self.trigger_mode = False\n\n else:\n self.env = prevlevel.env\n self.argmap = prevlevel.argmap\n\n self.singleton_mode = prevlevel.singleton_mode\n\n self.toplevel_stmt = prevlevel.toplevel_stmt\n self.stmt = prevlevel.stmt\n self.rel = prevlevel.rel\n self.rel_hierarchy = prevlevel.rel_hierarchy\n self.param_ctes = prevlevel.param_ctes\n self.type_ctes = prevlevel.type_ctes\n self.pending_type_ctes = prevlevel.pending_type_ctes\n self.dml_stmts = prevlevel.dml_stmts\n self.parent_rel = prevlevel.parent_rel\n self.pending_query = prevlevel.pending_query\n self.materializing = prevlevel.materializing\n\n self.expr_exposed = prevlevel.expr_exposed\n self.expr_exposed_tuple_cheat = prevlevel.expr_exposed_tuple_cheat\n self.volatility_ref = prevlevel.volatility_ref\n self.current_insert_path_id = prevlevel.current_insert_path_id\n\n self.disable_semi_join = prevlevel.disable_semi_join\n self.force_optional = prevlevel.force_optional\n self.skippable_sources = prevlevel.skippable_sources\n self.intersection_narrowing = prevlevel.intersection_narrowing\n\n self.path_scope = prevlevel.path_scope\n self.scope_tree = prevlevel.scope_tree\n self.dml_stmt_stack = prevlevel.dml_stmt_stack\n self.rel_overlays = prevlevel.rel_overlays\n self.enclosing_cte_iterator = prevlevel.enclosing_cte_iterator\n self.shapes_needed_by_dml = prevlevel.shapes_needed_by_dml\n self.external_rels = prevlevel.external_rels\n\n self.trigger_mode = prevlevel.trigger_mode\n\n if mode is ContextSwitchMode.SUBSTMT:\n if self.pending_query is not None:\n self.rel = self.pending_query\n else:\n self.rel = pgast.SelectStmt()\n if prevlevel.parent_rel is not None:\n parent_rel = prevlevel.parent_rel\n else:\n parent_rel = prevlevel.rel\n self.rel_hierarchy[self.rel] = parent_rel\n\n self.stmt = self.rel\n self.pending_query = None\n self.parent_rel = None\n\n elif mode is ContextSwitchMode.SUBREL:\n self.rel = pgast.SelectStmt()\n if prevlevel.parent_rel is not None:\n parent_rel = prevlevel.parent_rel\n else:\n parent_rel = prevlevel.rel\n self.rel_hierarchy[self.rel] = parent_rel\n self.pending_query = None\n self.parent_rel = None\n\n elif mode is ContextSwitchMode.NEWREL:\n self.rel = pgast.SelectStmt()\n self.pending_query = None\n self.parent_rel = None\n self.path_scope = collections.ChainMap()\n self.rel_hierarchy = {}\n self.scope_tree = prevlevel.scope_tree.root\n self.volatility_ref = ()\n\n self.disable_semi_join = frozenset()\n self.force_optional = frozenset()\n self.intersection_narrowing = {}\n self.pending_type_ctes = set(prevlevel.pending_type_ctes)\n\n elif mode == ContextSwitchMode.NEWSCOPE:\n self.path_scope = prevlevel.path_scope.new_child()\n\n def subrel(\n self,\n ) -> compiler.CompilerContextManager[CompilerContextLevel]:\n return self.new(ContextSwitchMode.SUBREL)\n\n def newrel(\n self,\n ) -> compiler.CompilerContextManager[CompilerContextLevel]:\n return self.new(ContextSwitchMode.NEWREL)\n\n def substmt(\n self,\n ) -> compiler.CompilerContextManager[CompilerContextLevel]:\n return self.new(ContextSwitchMode.SUBSTMT)\n\n def newscope(\n self,\n ) -> compiler.CompilerContextManager[CompilerContextLevel]:\n return self.new(ContextSwitchMode.NEWSCOPE)\n\n def up_hierarchy(\n self,\n n: int, q: Optional[pgast.Query]=None\n ) -> Optional[pgast.Query]:\n # mostly intended as a debugging helper\n q = q or self.rel\n for _ in range(n):\n if q:\n q = self.rel_hierarchy.get(q)\n return q\n\n\nclass CompilerContext(compiler.CompilerContext[CompilerContextLevel]):\n ContextLevelClass = CompilerContextLevel\n default_mode = ContextSwitchMode.TRANSPARENT\n\n\nRewriteKey = Tuple[uuid.UUID, bool]\nFullRewriteKey = Tuple[\n uuid.UUID, bool, Optional[frozenset['irast.MutatingLikeStmt']]]\n\n\nclass Environment:\n \"\"\"Static compilation environment.\"\"\"\n\n aliases: aliases.AliasGenerator\n output_format: Optional[OutputFormat]\n named_param_prefix: Optional[tuple[str, ...]]\n ptrref_source_visibility: Dict[irast.BasePointerRef, bool]\n expected_cardinality_one: bool\n ignore_object_shapes: bool\n explicit_top_cast: Optional[irast.TypeRef]\n singleton_mode: bool\n query_params: List[irast.Param]\n type_rewrites: Dict[RewriteKey, irast.Set]\n scope_tree_nodes: Dict[int, irast.ScopeTreeNode]\n external_rvars: Mapping[Tuple[irast.PathId, str], pgast.PathRangeVar]\n materialized_views: Dict[uuid.UUID, irast.Set]\n backend_runtime_params: pgparams.BackendRuntimeParams\n\n #: A list of CTEs that implement constraint validation at the\n #: query level.\n check_ctes: List[pgast.CommonTableExpr]\n\n def __init__(\n self,\n *,\n output_format: Optional[OutputFormat],\n named_param_prefix: Optional[tuple[str, ...]],\n expected_cardinality_one: bool,\n ignore_object_shapes: bool,\n singleton_mode: bool,\n expand_inhviews: bool,\n explicit_top_cast: Optional[irast.TypeRef],\n query_params: List[irast.Param],\n type_rewrites: Dict[RewriteKey, irast.Set],\n scope_tree_nodes: Dict[int, irast.ScopeTreeNode],\n external_rvars: Optional[\n Mapping[Tuple[irast.PathId, str], pgast.PathRangeVar]\n ] = None,\n backend_runtime_params: pgparams.BackendRuntimeParams,\n ) -> None:\n self.aliases = aliases.AliasGenerator()\n self.output_format = output_format\n self.named_param_prefix = named_param_prefix\n self.ptrref_source_visibility = {}\n self.expected_cardinality_one = expected_cardinality_one\n self.ignore_object_shapes = ignore_object_shapes\n self.singleton_mode = singleton_mode\n self.expand_inhviews = expand_inhviews\n self.explicit_top_cast = explicit_top_cast\n self.query_params = query_params\n self.type_rewrites = type_rewrites\n self.scope_tree_nodes = scope_tree_nodes\n self.external_rvars = external_rvars or {}\n self.materialized_views = {}\n self.check_ctes = []\n self.backend_runtime_params = backend_runtime_params\n\n\n# XXX: this context hack is necessary until pathctx is converted\n# to use context levels instead of using env directly.\[email protected]\ndef output_format(\n ctx: CompilerContextLevel,\n output_format: OutputFormat,\n) -> Generator[None, None, None]:\n original_output_format = ctx.env.output_format\n original_ignore_object_shapes = ctx.env.ignore_object_shapes\n ctx.env.output_format = output_format\n ctx.env.ignore_object_shapes = False\n try:\n yield\n finally:\n ctx.env.output_format = original_output_format\n ctx.env.ignore_object_shapes = original_ignore_object_shapes\n", "path": "edb/pgsql/compiler/context.py"}]} |
gh_patches_debug_1521 | rasdani/github-patches | git_diff | ivy-llc__ivy-23796 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
mod
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/tensorflow/raw_ops.py`
Content:
```
1 # global
2 import ivy
3 import ivy.functional.frontends.tensorflow as tf_frontend
4 from ivy.functional.frontends.tensorflow import check_tensorflow_casting
5 from ivy.functional.frontends.tensorflow.func_wrapper import (
6 to_ivy_arrays_and_back,
7 map_raw_ops_alias,
8 to_ivy_dtype,
9 )
10
11 from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
12 from ivy.utils.exceptions import IvyNotImplementedException
13
14
15 Acos = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.acos))
16 Acosh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.acosh))
17 AddN = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add_n))
18 AddV2 = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add))
19 ArgMax = to_ivy_arrays_and_back(
20 with_unsupported_dtypes(
21 {"2.13.0 and below": ("complex",)},
22 "tensorflow",
23 )(
24 map_raw_ops_alias(
25 tf_frontend.math.argmax, kwargs_to_update={"dimension": "axis"}
26 )
27 )
28 )
29 ArgMin = to_ivy_arrays_and_back(
30 with_unsupported_dtypes(
31 {"2.13.0 and below": ("complex",)},
32 "tensorflow",
33 )(
34 map_raw_ops_alias(
35 tf_frontend.math.argmin, kwargs_to_update={"dimension": "axis"}
36 )
37 )
38 )
39 Asin = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.asin))
40 Atan = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.atan))
41 Atan2 = to_ivy_arrays_and_back(
42 with_unsupported_dtypes(
43 {"2.13.0 and below": "float16"},
44 "tensorflow",
45 )(map_raw_ops_alias(tf_frontend.math.atan2))
46 )
47 ConcatV2 = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.concat))
48 Cos = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cos))
49 Cosh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cosh))
50 Cumprod = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cumprod))
51 Cumsum = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cumsum))
52 Digamma = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.digamma))
53 Div = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.divide))
54 Einsum = to_ivy_arrays_and_back(
55 with_supported_dtypes(
56 {
57 "2.13.0 and below": (
58 "bfloat16",
59 "complex128 ",
60 "complex64",
61 "float64",
62 "float32",
63 "float16",
64 "int64",
65 "int32",
66 ),
67 },
68 "tensorflow",
69 )(map_raw_ops_alias(tf_frontend.general_functions.einsum))
70 )
71 Identity = to_ivy_arrays_and_back(
72 map_raw_ops_alias(tf_frontend.general_functions.identity)
73 )
74 IdentityN = to_ivy_arrays_and_back(
75 map_raw_ops_alias(tf_frontend.general_functions.identity_n)
76 )
77 Igamma = to_ivy_arrays_and_back(
78 with_supported_dtypes(
79 {
80 "2.13.0 and below": (
81 "float64",
82 "float32",
83 "half",
84 ),
85 },
86 "tensorflow",
87 )(map_raw_ops_alias(tf_frontend.math.igamma))
88 )
89 LeakyRelu = to_ivy_arrays_and_back(
90 with_supported_dtypes(
91 {
92 "2.13.0 and below": ("bfloat16", "float16", "float32", "float64"),
93 },
94 "tensorflow",
95 )(
96 map_raw_ops_alias(
97 tf_frontend.nn.leaky_relu,
98 )
99 )
100 )
101 LessEqual = to_ivy_arrays_and_back(
102 with_unsupported_dtypes(
103 {
104 "2.13.0 and below": ("complex",),
105 },
106 "tensorflow",
107 )(map_raw_ops_alias(tf_frontend.math.less_equal))
108 )
109 Log1p = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.log1p))
110 LogSoftmax = to_ivy_arrays_and_back(
111 with_supported_dtypes(
112 {
113 "2.13.0 and below": (
114 "bfloat16",
115 "float32",
116 "float64",
117 ),
118 },
119 "tensorflow",
120 )(map_raw_ops_alias(tf_frontend.math.log_softmax))
121 )
122 LogicalOr = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.logical_or))
123 MatrixDeterminant = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.linalg.det))
124 Max = to_ivy_arrays_and_back(
125 with_unsupported_dtypes(
126 {
127 "2.13.0 and below": ("complex",),
128 },
129 "tensorflow",
130 )(
131 map_raw_ops_alias(
132 tf_frontend.math.reduce_max,
133 kwargs_to_update={
134 "input": "input_tensor",
135 "keep_dims": "keepdims",
136 },
137 )
138 )
139 )
140 Maximum = to_ivy_arrays_and_back(
141 with_unsupported_dtypes(
142 {
143 "2.13.0 and below": ("complex",),
144 },
145 "tensorflow",
146 )(map_raw_ops_alias(tf_frontend.math.maximum))
147 )
148 Mean = to_ivy_arrays_and_back(
149 map_raw_ops_alias(
150 tf_frontend.math.reduce_mean,
151 kwargs_to_update={
152 "input": "input_tensor",
153 "keep_dims": "keepdims",
154 },
155 )
156 )
157 Min = to_ivy_arrays_and_back(
158 with_unsupported_dtypes(
159 {
160 "2.13.0 and below": ("complex",),
161 },
162 "tensorflow",
163 )(
164 map_raw_ops_alias(
165 tf_frontend.math.reduce_min,
166 kwargs_to_update={
167 "input": "input_tensor",
168 "keep_dims": "keepdims",
169 },
170 )
171 )
172 )
173 Mul = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.multiply))
174 Neg = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.negative))
175 Pow = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.pow))
176 RealDiv = to_ivy_arrays_and_back(
177 with_supported_dtypes(
178 {
179 "2.13.0 and below": (
180 "complex",
181 "bfloat16",
182 "float16",
183 "float64",
184 "float32",
185 ),
186 },
187 "tensorflow",
188 )(map_raw_ops_alias(tf_frontend.general_functions.realdiv))
189 )
190 Reciprocal = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.reciprocal))
191 Relu = to_ivy_arrays_and_back(
192 with_unsupported_dtypes(
193 {
194 "2.13.0 and below": ("complex", "float16"),
195 },
196 "tensorflow",
197 )(map_raw_ops_alias(tf_frontend.nn.relu))
198 )
199 Relu6 = to_ivy_arrays_and_back(
200 with_unsupported_dtypes(
201 {
202 "2.13.0 and below": ("complex", "float16"),
203 },
204 "tensorflow",
205 )(
206 map_raw_ops_alias(
207 tf_frontend.nn.relu6,
208 )
209 )
210 )
211 Reshape = to_ivy_arrays_and_back(
212 map_raw_ops_alias(tf_frontend.general_functions.reshape)
213 )
214 Roll = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.roll))
215 ShapeN = to_ivy_arrays_and_back(
216 map_raw_ops_alias(tf_frontend.general_functions.shape_n)
217 )
218 Sigmoid = to_ivy_arrays_and_back(
219 map_raw_ops_alias(tf_frontend.keras.activations.sigmoid)
220 )
221 Sin = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.sin))
222 Size = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.general_functions.size))
223 Softmax = to_ivy_arrays_and_back(
224 with_unsupported_dtypes(
225 {
226 "2.13.0 and below": ("float16",),
227 },
228 "tensorflow",
229 )(map_raw_ops_alias(tf_frontend.nn.softmax))
230 )
231 Split = to_ivy_arrays_and_back(
232 map_raw_ops_alias(
233 tf_frontend.split, kwargs_to_update={"num_split": "num_or_size_splits"}
234 )
235 )
236 SquaredDifference = to_ivy_arrays_and_back(
237 with_supported_dtypes(
238 {
239 "2.13.0 and below": (
240 "complex",
241 "bfloat16",
242 "float16",
243 "float64",
244 "float32",
245 "int32",
246 "int64",
247 ),
248 },
249 "tensorflow",
250 )(map_raw_ops_alias(tf_frontend.math.squared_difference))
251 )
252 Squeeze = to_ivy_arrays_and_back(
253 map_raw_ops_alias(tf_frontend.general_functions.squeeze)
254 )
255 Tan = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.tan))
256 Tanh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.tanh))
257 Tile = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.general_functions.tile))
258 Xlogy = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.xlogy))
259 Zeta = to_ivy_arrays_and_back(
260 with_supported_dtypes(
261 {
262 "2.13.0 and below": ("float32", "float64"),
263 },
264 "tensorflow",
265 )(map_raw_ops_alias(tf_frontend.math.zeta))
266 )
267
268
269 # --- Helpers --- #
270 # --------------- #
271
272
273 def _tf_to_ivy_ivy_arguments_for_conv(
274 padding, ex_pading, strides, dilations, data_format
275 ):
276 if data_format.find("C") == 1:
277 strides = strides[2:]
278 dilations = dilations[2:]
279 data_format = "channel_first"
280 pad_index = [4, 8]
281 else:
282 strides = strides[1:-1]
283 dilations = dilations[1:-1]
284 data_format = "channel_last"
285 pad_index = [2, 6]
286 if padding == "EXPLICIT":
287 padding = [
288 (ex_pading[i], ex_pading[i + 1])
289 for i in range(pad_index[0], pad_index[1], 2)
290 ]
291 return padding, strides, dilations, data_format
292
293
294 # --- Main --- #
295 # ------------ #
296
297
298 @to_ivy_arrays_and_back
299 def AccumulateNV2(inputs, shape, name="AccumulateNV2"):
300 # TODO
301 raise IvyNotImplementedException
302
303
304 @to_ivy_arrays_and_back
305 def Angle(
306 *,
307 input,
308 Tout=ivy.float32,
309 name="Angle",
310 ):
311 Tout = ivy.as_ivy_dtype(Tout) if Tout is not None else ivy.float32
312 return ivy.astype(ivy.angle(input), Tout)
313
314
315 @with_unsupported_dtypes(
316 {
317 "2.13.0 and below": (
318 "float16",
319 "bool",
320 "bfloat16",
321 )
322 },
323 "tensorflow",
324 )
325 @to_ivy_arrays_and_back
326 def ApproximateEqual(
327 *,
328 x,
329 y,
330 tolerance=1e-05,
331 name="ApproximateEqual",
332 ):
333 x, y = check_tensorflow_casting(x, y)
334 return ivy.abs(x - y) < tolerance
335
336
337 @to_ivy_arrays_and_back
338 def Atanh(*, x, name="Atanh"):
339 return ivy.atanh(x)
340
341
342 @to_ivy_arrays_and_back
343 def BandedTriangularSolve(
344 matrix,
345 rhs,
346 lower=True,
347 adjoint=False,
348 name="BandedTriangularSolve",
349 ):
350 # TODO
351 raise IvyNotImplementedException
352
353
354 @to_ivy_arrays_and_back
355 def BatchMatMul(x, y, adj_x=False, adj_y=False, name="BatchMatMul"):
356 # TODO
357 raise IvyNotImplementedException
358
359
360 @to_ivy_arrays_and_back
361 def BatchMatMulV2(x, y, adj_x=False, adj_y=False, name="BatchMatMulV2"):
362 # TODO
363 raise IvyNotImplementedException
364
365
366 @to_ivy_arrays_and_back
367 def BatchMatMulV3(x, y, Tout=ivy.Dtype, adj_x=False, adj_y=False, name="BatchMatMulV3"):
368 # TODO
369 raise IvyNotImplementedException
370
371
372 @to_ivy_arrays_and_back
373 def BitwiseAnd(*, x, y, name="BitwiseAnd"):
374 x, y = check_tensorflow_casting(x, y)
375 return ivy.bitwise_and(x, y)
376
377
378 @to_ivy_arrays_and_back
379 def BitwiseOr(*, x, y, name="BitwiseOr"):
380 x, y = check_tensorflow_casting(x, y)
381 return ivy.bitwise_or(x, y)
382
383
384 @to_ivy_arrays_and_back
385 def BitwiseXor(*, x, y, name="BitwiseXor"):
386 x, y = check_tensorflow_casting(x, y)
387 return ivy.bitwise_xor(x, y)
388
389
390 @to_ivy_arrays_and_back
391 def BroadcastTo(*, input, shape, name="BroadcastTo"):
392 return ivy.broadcast_to(input, shape=shape)
393
394
395 @to_ivy_arrays_and_back
396 def Ceil(*, x, name=None):
397 return ivy.ceil(x)
398
399
400 @to_ivy_arrays_and_back
401 def Cholesky(*, input, name="Cholesky"):
402 return ivy.astype(ivy.cholesky(input), input.dtype)
403
404
405 @to_ivy_arrays_and_back
406 def Complex(real, imag, Tout=ivy.complex64, name="Complex"):
407 # TODO
408 raise IvyNotImplementedException
409
410
411 @to_ivy_arrays_and_back
412 def Concat(*, concat_dim, values, name="Concat"):
413 return ivy.concat(values, axis=concat_dim)
414
415
416 @to_ivy_arrays_and_back
417 def Conv2D(
418 *,
419 input,
420 filter,
421 strides,
422 padding,
423 use_cudnn_on_gpu,
424 explicit_paddings,
425 data_format="NHWC",
426 dilations=[1, 1, 1, 1],
427 name="Conv2D",
428 ):
429 padding, strides, dilations, data_format = _tf_to_ivy_ivy_arguments_for_conv(
430 padding, explicit_paddings, strides, dilations, data_format
431 )
432 return ivy.conv_general_dilated(
433 input,
434 filter,
435 strides,
436 padding,
437 data_format=data_format,
438 dilations=dilations,
439 dims=2,
440 )
441
442
443 @to_ivy_arrays_and_back
444 def Conv3D(
445 *,
446 input,
447 filter,
448 strides,
449 padding,
450 data_format="NDHWC",
451 dilations=[1, 1, 1, 1, 1],
452 name="Conv3D",
453 ):
454 # ivy.backends.tensorflow expects strides and dilations to be
455 # a single integer value or a list of 3 values whereas the raw op
456 # expects a list of 5 values
457 if data_format == "NDHWC":
458 strides = strides[1:-1]
459 dilations = dilations[1:-1]
460 elif data_format == "NCDHW":
461 strides = strides[2:]
462 dilations = dilations[2:]
463
464 return tf_frontend.nn.conv3d(
465 input,
466 filter,
467 strides,
468 padding,
469 data_format=data_format,
470 dilations=dilations,
471 name=name,
472 )
473
474
475 @to_ivy_arrays_and_back
476 def Cross(*, a, b, name="Cross"):
477 a, b = check_tensorflow_casting(a, b)
478 return ivy.cross(a, b)
479
480
481 @to_ivy_arrays_and_back
482 def CumulativeLogsumexp(
483 x, axis, exclusive=False, reverse=False, name="CumulativeLogsumexp"
484 ):
485 # TODO
486 raise IvyNotImplementedException
487
488
489 @to_ivy_arrays_and_back
490 def DebugGradientIdentity(input, name="DebugGradientIdentity"):
491 # TODO
492 raise IvyNotImplementedException
493
494
495 @to_ivy_arrays_and_back
496 def Diag(*, diagonal, name="Diag"):
497 return ivy.astype(ivy.diag(diagonal), diagonal.dtype)
498
499
500 @with_supported_dtypes(
501 {"2.13.0 and below": ("bfloat16", "float16", "float32", "float64")},
502 "tensorflow",
503 )
504 @to_ivy_arrays_and_back
505 def Elu(features, name=None):
506 zeros = ivy.zeros_like(features, dtype=ivy.dtype(features))
507 ones = ivy.ones_like(features, dtype=ivy.dtype(features))
508 ret_val = ivy.where(
509 # if x > 0 => x; else e^x - 1
510 features > zeros,
511 features,
512 ivy.subtract(ivy.exp(features), ones),
513 )
514 return ret_val
515
516
517 @to_ivy_arrays_and_back
518 def Equal(*, x, y, incompatible_shape_error=True, name="Equal"):
519 x, y = check_tensorflow_casting(x, y)
520 if incompatible_shape_error:
521 return ivy.equal(x, y)
522
523 try:
524 return ivy.equal(x, y)
525 except (ivy.utils.exceptions.IvyError, ivy.utils.exceptions.IvyBackendException):
526 return ivy.array(False)
527
528
529 @to_ivy_arrays_and_back
530 def EuclideanNorm(*, input, axis, keep_dims=False, name="EuclideanNorm"):
531 return ivy.astype(
532 ivy.vector_norm(input, axis=axis, keepdims=keep_dims), input.dtype
533 )
534
535
536 @to_ivy_arrays_and_back
537 def Exp(*, x, name="Exp"):
538 return ivy.exp(x)
539
540
541 @to_ivy_arrays_and_back
542 def Expm1(*, x, name="Expm1"):
543 return ivy.expm1(x)
544
545
546 @to_ivy_arrays_and_back
547 def FFT(*, input, name="FFT"):
548 return ivy.astype(ivy.fft(input, -1), input.dtype)
549
550
551 @to_ivy_arrays_and_back
552 def FFT2D(*, input, name="FFT2D"):
553 return ivy.astype(ivy.fft2(input, dim=(-2, -1)), input.dtype)
554
555
556 @to_ivy_arrays_and_back
557 def Fill(*, dims, value, name="Full"):
558 return ivy.full(dims, value)
559
560
561 @to_ivy_arrays_and_back
562 def Floor(*, x, name="Floor"):
563 return ivy.floor(x)
564
565
566 @to_ivy_arrays_and_back
567 def FloorDiv(*, x, y, name="FloorDiv"):
568 x, y = check_tensorflow_casting(x, y)
569 return ivy.floor_divide(x, y)
570
571
572 @to_ivy_arrays_and_back
573 def FloorMod(*, x, y, name="FloorMod"):
574 x, y = check_tensorflow_casting(x, y)
575 return ivy.remainder(x, y)
576
577
578 @to_ivy_arrays_and_back
579 def Gather(*, params, indices, validate_indices=None, name="Gather"):
580 return ivy.gather(params, indices, axis=0, batch_dims=0)
581
582
583 @to_ivy_arrays_and_back
584 def Greater(*, x, y, name="Greater"):
585 x, y = check_tensorflow_casting(x, y)
586 return ivy.greater(x, y)
587
588
589 @to_ivy_arrays_and_back
590 def GreaterEqual(*, x, y, name="GreaterEqual"):
591 x, y = check_tensorflow_casting(x, y)
592 return ivy.greater_equal(x, y)
593
594
595 @to_ivy_arrays_and_back
596 def Imag(
597 *,
598 input,
599 Tout=ivy.float32,
600 name="Imag",
601 ):
602 Tout = ivy.as_ivy_dtype(Tout) if Tout is not None else ivy.float32
603 return ivy.astype(ivy.imag(input), Tout)
604
605
606 @to_ivy_arrays_and_back
607 def Inv(*, x, name="Inv"):
608 return ivy.astype(ivy.reciprocal(x), x.dtype)
609
610
611 @to_ivy_arrays_and_back
612 def InvGrad(*, y, dy, name="InvGrad"):
613 return ivy.multiply(ivy.negative(dy), ivy.multiply(y, y))
614
615
616 @to_ivy_arrays_and_back
617 def Invert(*, x, name="Invert"):
618 return ivy.bitwise_invert(x)
619
620
621 @to_ivy_arrays_and_back
622 def LeftShift(*, x, y, name="LeftShift"):
623 return ivy.bitwise_left_shift(x, y)
624
625
626 @to_ivy_arrays_and_back
627 def Less(*, x, y, name="Less"):
628 x, y = check_tensorflow_casting(x, y)
629 return ivy.less(x, y)
630
631
632 @to_ivy_arrays_and_back
633 def LinSpace(*, start, stop, num, name=None):
634 return ivy.linspace(start, stop, num)
635
636
637 @to_ivy_arrays_and_back
638 def Log(*, x, name="Log"):
639 return ivy.log(x)
640
641
642 @to_ivy_arrays_and_back
643 def LogicalNot(*, x, name="LogicalNot"):
644 return ivy.logical_not(x)
645
646
647 @to_ivy_arrays_and_back
648 def MatMul(*, a, b, transpose_a=False, transpose_b=False, name="MatMul"):
649 a, b = check_tensorflow_casting(a, b)
650 return ivy.matmul(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
651
652
653 @to_ivy_arrays_and_back
654 def MatrixInverse(*, input, adjoint=False, name="MatrixInverse"):
655 return ivy.inv(input, adjoint=adjoint)
656
657
658 @to_ivy_arrays_and_back
659 def Minimum(*, x, y, name="Minimum"):
660 return ivy.minimum(x, y)
661
662
663 @to_ivy_arrays_and_back
664 def NotEqual(*, x, y, incompatible_shape_error=True, name="NotEqual"):
665 x, y = check_tensorflow_casting(x, y)
666 if incompatible_shape_error:
667 return ivy.not_equal(x, y)
668
669 try:
670 return ivy.not_equal(x, y)
671 except (ivy.utils.exceptions.IvyError, ivy.utils.exceptions.IvyBackendException):
672 return ivy.array(True)
673
674
675 @to_ivy_arrays_and_back
676 def NthElement(*, input, n, reverse=False, name="NthElement"):
677 return ivy.astype(ivy.sort(input, descending=reverse)[..., n], input.dtype)
678
679
680 @to_ivy_arrays_and_back
681 def OnesLike(*, x, name="OnesLike"):
682 return ivy.ones_like(x)
683
684
685 @to_ivy_arrays_and_back
686 def Pack(*, values, axis=0, name="Pack"):
687 return ivy.stack(values, axis=axis)
688
689
690 @to_ivy_arrays_and_back
691 def Pad(*, input, paddings, name="Pad"):
692 return ivy.constant_pad(input, paddings.to_list())
693
694
695 @to_ivy_arrays_and_back
696 def PadV2(*, input, paddings, constant_values, name="PadV2"):
697 return ivy.constant_pad(input, paddings.to_list(), value=constant_values)
698
699
700 @to_ivy_arrays_and_back
701 def Prod(*, input, axis, keep_dims=False, name="Prod"):
702 return ivy.astype(ivy.prod(input, axis=axis, keepdims=keep_dims), input.dtype)
703
704
705 @to_ivy_arrays_and_back
706 def Real(input, Tout=ivy.float32, name="Real"):
707 # TODO
708 raise IvyNotImplementedException
709
710
711 @to_ivy_arrays_and_back
712 def Reverse(*, tensor, dims, name="Reverse"):
713 ret = tensor
714 for dim in enumerate(dims):
715 if dim[1]:
716 ret = ivy.flip(ret, axis=dim[0])
717 return ret
718
719
720 @to_ivy_arrays_and_back
721 def RightShift(*, x, y, name="RightShift"):
722 return ivy.bitwise_right_shift(x, y)
723
724
725 @to_ivy_arrays_and_back
726 def Round(*, x, name="Round"):
727 return ivy.round(x)
728
729
730 @to_ivy_arrays_and_back
731 def Rsqrt(*, x, name="Rsqrt"):
732 return ivy.sqrt(ivy.reciprocal(x))
733
734
735 @to_ivy_arrays_and_back
736 def Shape(*, input, output_type=ivy.int32, name="Shape"):
737 output_type = to_ivy_dtype(output_type)
738 return ivy.astype(ivy.shape(input, as_array=True), output_type, copy=False)
739
740
741 @with_unsupported_dtypes(
742 {"2.13.0 and below": ("unsigned",)},
743 "tensorflow",
744 )
745 @to_ivy_arrays_and_back
746 def Sign(*, x, name="Sign"):
747 return ivy.sign(x, np_variant=False)
748
749
750 @to_ivy_arrays_and_back
751 def Sinh(*, x, name="Sinh"):
752 return ivy.sinh(x)
753
754
755 @to_ivy_arrays_and_back
756 def Softplus(*, features, name="Softplus"):
757 return ivy.softplus(features)
758
759
760 # Softsign
761 @to_ivy_arrays_and_back
762 def Softsign(*, features, name="Softsign"):
763 return ivy.softsign(features)
764
765
766 @to_ivy_arrays_and_back
767 def SplitV(*, value, size_splits, axis, num_split, name="SplitV"):
768 return ivy.split(value, num_or_size_splits=size_splits, axis=axis)
769
770
771 @to_ivy_arrays_and_back
772 def Sqrt(*, x, name="Sqrt"):
773 return ivy.sqrt(x)
774
775
776 @to_ivy_arrays_and_back
777 def Square(*, x, name="Square"):
778 return ivy.square(x)
779
780
781 @to_ivy_arrays_and_back
782 def Sum(*, input, axis, keep_dims=False, name="Sum"):
783 return ivy.astype(ivy.sum(input, axis=axis, keepdims=keep_dims), input.dtype)
784
785
786 @with_supported_dtypes(
787 {"2.13.0 and below": ("float64", "float128", "halfcomplex64", "complex128")},
788 "tensorflow",
789 )
790 @to_ivy_arrays_and_back
791 def Svd(*, input, full_matrices=False, compute_uv=True, name=None):
792 return ivy.svd(input, compute_uv=compute_uv, full_matrices=full_matrices)
793
794
795 @to_ivy_arrays_and_back
796 def TanhGrad(*, y, dy, name="TanhGrad"):
797 return ivy.multiply(dy, ivy.subtract(1, ivy.multiply(y, y)))
798
799
800 @to_ivy_arrays_and_back
801 def Transpose(*, x, perm, name="Transpose"):
802 ret = ivy.permute_dims(x, axes=perm)
803 return ret
804
805
806 @to_ivy_arrays_and_back
807 def TruncateDiv(*, x, y, name="TruncateDiv"):
808 return ivy.astype(ivy.trunc_divide(x, y), x.dtype)
809
810
811 @with_unsupported_dtypes({"2.13.0 and below": ("float16", "bfloat16")}, "tensorflow")
812 @to_ivy_arrays_and_back
813 def Unpack(*, value, num, axis=0, name="Unpack"):
814 return ivy.unstack(value, axis=axis)[:num]
815
816
817 @to_ivy_arrays_and_back
818 def Xdivy(*, x, y, name="Xdivy"):
819 if (x == 0).all():
820 return 0.0
821 return ivy.divide(x, y)
822
823
824 @with_unsupported_dtypes({"2.13.0 and below": ("bfloat16",)}, "tensorflow")
825 @to_ivy_arrays_and_back
826 def Xlog1py(*, x, y, name="Xlog1py"):
827 if (x == 0).all():
828 return 0.0
829 return ivy.multiply(x, ivy.log1p(y))
830
831
832 @to_ivy_arrays_and_back
833 def ZerosLike(*, x, name="ZerosLike"):
834 return ivy.zeros_like(x)
835
836
837 Add = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add))
838 Slice = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.slice))
839 Sub = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.subtract))
840
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/tensorflow/raw_ops.py b/ivy/functional/frontends/tensorflow/raw_ops.py
--- a/ivy/functional/frontends/tensorflow/raw_ops.py
+++ b/ivy/functional/frontends/tensorflow/raw_ops.py
@@ -170,6 +170,7 @@
)
)
)
+Mod = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.mod))
Mul = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.multiply))
Neg = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.negative))
Pow = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.pow))
| {"golden_diff": "diff --git a/ivy/functional/frontends/tensorflow/raw_ops.py b/ivy/functional/frontends/tensorflow/raw_ops.py\n--- a/ivy/functional/frontends/tensorflow/raw_ops.py\n+++ b/ivy/functional/frontends/tensorflow/raw_ops.py\n@@ -170,6 +170,7 @@\n )\n )\n )\n+Mod = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.mod))\n Mul = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.multiply))\n Neg = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.negative))\n Pow = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.pow))\n", "issue": "mod\n\n", "before_files": [{"content": "# global\nimport ivy\nimport ivy.functional.frontends.tensorflow as tf_frontend\nfrom ivy.functional.frontends.tensorflow import check_tensorflow_casting\nfrom ivy.functional.frontends.tensorflow.func_wrapper import (\n to_ivy_arrays_and_back,\n map_raw_ops_alias,\n to_ivy_dtype,\n)\n\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.utils.exceptions import IvyNotImplementedException\n\n\nAcos = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.acos))\nAcosh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.acosh))\nAddN = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add_n))\nAddV2 = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add))\nArgMax = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\"2.13.0 and below\": (\"complex\",)},\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.math.argmax, kwargs_to_update={\"dimension\": \"axis\"}\n )\n )\n)\nArgMin = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\"2.13.0 and below\": (\"complex\",)},\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.math.argmin, kwargs_to_update={\"dimension\": \"axis\"}\n )\n )\n)\nAsin = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.asin))\nAtan = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.atan))\nAtan2 = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\"2.13.0 and below\": \"float16\"},\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.atan2))\n)\nConcatV2 = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.concat))\nCos = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cos))\nCosh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cosh))\nCumprod = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cumprod))\nCumsum = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cumsum))\nDigamma = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.digamma))\nDiv = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.divide))\nEinsum = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\n \"bfloat16\",\n \"complex128 \",\n \"complex64\",\n \"float64\",\n \"float32\",\n \"float16\",\n \"int64\",\n \"int32\",\n ),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.general_functions.einsum))\n)\nIdentity = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.identity)\n)\nIdentityN = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.identity_n)\n)\nIgamma = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\n \"float64\",\n \"float32\",\n \"half\",\n ),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.igamma))\n)\nLeakyRelu = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\"bfloat16\", \"float16\", \"float32\", \"float64\"),\n },\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.nn.leaky_relu,\n )\n )\n)\nLessEqual = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\",),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.less_equal))\n)\nLog1p = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.log1p))\nLogSoftmax = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\n \"bfloat16\",\n \"float32\",\n \"float64\",\n ),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.log_softmax))\n)\nLogicalOr = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.logical_or))\nMatrixDeterminant = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.linalg.det))\nMax = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\",),\n },\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.math.reduce_max,\n kwargs_to_update={\n \"input\": \"input_tensor\",\n \"keep_dims\": \"keepdims\",\n },\n )\n )\n)\nMaximum = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\",),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.maximum))\n)\nMean = to_ivy_arrays_and_back(\n map_raw_ops_alias(\n tf_frontend.math.reduce_mean,\n kwargs_to_update={\n \"input\": \"input_tensor\",\n \"keep_dims\": \"keepdims\",\n },\n )\n)\nMin = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\",),\n },\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.math.reduce_min,\n kwargs_to_update={\n \"input\": \"input_tensor\",\n \"keep_dims\": \"keepdims\",\n },\n )\n )\n)\nMul = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.multiply))\nNeg = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.negative))\nPow = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.pow))\nRealDiv = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\n \"complex\",\n \"bfloat16\",\n \"float16\",\n \"float64\",\n \"float32\",\n ),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.general_functions.realdiv))\n)\nReciprocal = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.reciprocal))\nRelu = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\", \"float16\"),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.nn.relu))\n)\nRelu6 = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\", \"float16\"),\n },\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.nn.relu6,\n )\n )\n)\nReshape = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.reshape)\n)\nRoll = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.roll))\nShapeN = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.shape_n)\n)\nSigmoid = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.keras.activations.sigmoid)\n)\nSin = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.sin))\nSize = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.general_functions.size))\nSoftmax = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"float16\",),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.nn.softmax))\n)\nSplit = to_ivy_arrays_and_back(\n map_raw_ops_alias(\n tf_frontend.split, kwargs_to_update={\"num_split\": \"num_or_size_splits\"}\n )\n)\nSquaredDifference = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\n \"complex\",\n \"bfloat16\",\n \"float16\",\n \"float64\",\n \"float32\",\n \"int32\",\n \"int64\",\n ),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.squared_difference))\n)\nSqueeze = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.squeeze)\n)\nTan = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.tan))\nTanh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.tanh))\nTile = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.general_functions.tile))\nXlogy = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.xlogy))\nZeta = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\"float32\", \"float64\"),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.zeta))\n)\n\n\n# --- Helpers --- #\n# --------------- #\n\n\ndef _tf_to_ivy_ivy_arguments_for_conv(\n padding, ex_pading, strides, dilations, data_format\n):\n if data_format.find(\"C\") == 1:\n strides = strides[2:]\n dilations = dilations[2:]\n data_format = \"channel_first\"\n pad_index = [4, 8]\n else:\n strides = strides[1:-1]\n dilations = dilations[1:-1]\n data_format = \"channel_last\"\n pad_index = [2, 6]\n if padding == \"EXPLICIT\":\n padding = [\n (ex_pading[i], ex_pading[i + 1])\n for i in range(pad_index[0], pad_index[1], 2)\n ]\n return padding, strides, dilations, data_format\n\n\n# --- Main --- #\n# ------------ #\n\n\n@to_ivy_arrays_and_back\ndef AccumulateNV2(inputs, shape, name=\"AccumulateNV2\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef Angle(\n *,\n input,\n Tout=ivy.float32,\n name=\"Angle\",\n):\n Tout = ivy.as_ivy_dtype(Tout) if Tout is not None else ivy.float32\n return ivy.astype(ivy.angle(input), Tout)\n\n\n@with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\n \"float16\",\n \"bool\",\n \"bfloat16\",\n )\n },\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef ApproximateEqual(\n *,\n x,\n y,\n tolerance=1e-05,\n name=\"ApproximateEqual\",\n):\n x, y = check_tensorflow_casting(x, y)\n return ivy.abs(x - y) < tolerance\n\n\n@to_ivy_arrays_and_back\ndef Atanh(*, x, name=\"Atanh\"):\n return ivy.atanh(x)\n\n\n@to_ivy_arrays_and_back\ndef BandedTriangularSolve(\n matrix,\n rhs,\n lower=True,\n adjoint=False,\n name=\"BandedTriangularSolve\",\n):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef BatchMatMul(x, y, adj_x=False, adj_y=False, name=\"BatchMatMul\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef BatchMatMulV2(x, y, adj_x=False, adj_y=False, name=\"BatchMatMulV2\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef BatchMatMulV3(x, y, Tout=ivy.Dtype, adj_x=False, adj_y=False, name=\"BatchMatMulV3\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef BitwiseAnd(*, x, y, name=\"BitwiseAnd\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.bitwise_and(x, y)\n\n\n@to_ivy_arrays_and_back\ndef BitwiseOr(*, x, y, name=\"BitwiseOr\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.bitwise_or(x, y)\n\n\n@to_ivy_arrays_and_back\ndef BitwiseXor(*, x, y, name=\"BitwiseXor\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.bitwise_xor(x, y)\n\n\n@to_ivy_arrays_and_back\ndef BroadcastTo(*, input, shape, name=\"BroadcastTo\"):\n return ivy.broadcast_to(input, shape=shape)\n\n\n@to_ivy_arrays_and_back\ndef Ceil(*, x, name=None):\n return ivy.ceil(x)\n\n\n@to_ivy_arrays_and_back\ndef Cholesky(*, input, name=\"Cholesky\"):\n return ivy.astype(ivy.cholesky(input), input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef Complex(real, imag, Tout=ivy.complex64, name=\"Complex\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef Concat(*, concat_dim, values, name=\"Concat\"):\n return ivy.concat(values, axis=concat_dim)\n\n\n@to_ivy_arrays_and_back\ndef Conv2D(\n *,\n input,\n filter,\n strides,\n padding,\n use_cudnn_on_gpu,\n explicit_paddings,\n data_format=\"NHWC\",\n dilations=[1, 1, 1, 1],\n name=\"Conv2D\",\n):\n padding, strides, dilations, data_format = _tf_to_ivy_ivy_arguments_for_conv(\n padding, explicit_paddings, strides, dilations, data_format\n )\n return ivy.conv_general_dilated(\n input,\n filter,\n strides,\n padding,\n data_format=data_format,\n dilations=dilations,\n dims=2,\n )\n\n\n@to_ivy_arrays_and_back\ndef Conv3D(\n *,\n input,\n filter,\n strides,\n padding,\n data_format=\"NDHWC\",\n dilations=[1, 1, 1, 1, 1],\n name=\"Conv3D\",\n):\n # ivy.backends.tensorflow expects strides and dilations to be\n # a single integer value or a list of 3 values whereas the raw op\n # expects a list of 5 values\n if data_format == \"NDHWC\":\n strides = strides[1:-1]\n dilations = dilations[1:-1]\n elif data_format == \"NCDHW\":\n strides = strides[2:]\n dilations = dilations[2:]\n\n return tf_frontend.nn.conv3d(\n input,\n filter,\n strides,\n padding,\n data_format=data_format,\n dilations=dilations,\n name=name,\n )\n\n\n@to_ivy_arrays_and_back\ndef Cross(*, a, b, name=\"Cross\"):\n a, b = check_tensorflow_casting(a, b)\n return ivy.cross(a, b)\n\n\n@to_ivy_arrays_and_back\ndef CumulativeLogsumexp(\n x, axis, exclusive=False, reverse=False, name=\"CumulativeLogsumexp\"\n):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef DebugGradientIdentity(input, name=\"DebugGradientIdentity\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef Diag(*, diagonal, name=\"Diag\"):\n return ivy.astype(ivy.diag(diagonal), diagonal.dtype)\n\n\n@with_supported_dtypes(\n {\"2.13.0 and below\": (\"bfloat16\", \"float16\", \"float32\", \"float64\")},\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef Elu(features, name=None):\n zeros = ivy.zeros_like(features, dtype=ivy.dtype(features))\n ones = ivy.ones_like(features, dtype=ivy.dtype(features))\n ret_val = ivy.where(\n # if x > 0 => x; else e^x - 1\n features > zeros,\n features,\n ivy.subtract(ivy.exp(features), ones),\n )\n return ret_val\n\n\n@to_ivy_arrays_and_back\ndef Equal(*, x, y, incompatible_shape_error=True, name=\"Equal\"):\n x, y = check_tensorflow_casting(x, y)\n if incompatible_shape_error:\n return ivy.equal(x, y)\n\n try:\n return ivy.equal(x, y)\n except (ivy.utils.exceptions.IvyError, ivy.utils.exceptions.IvyBackendException):\n return ivy.array(False)\n\n\n@to_ivy_arrays_and_back\ndef EuclideanNorm(*, input, axis, keep_dims=False, name=\"EuclideanNorm\"):\n return ivy.astype(\n ivy.vector_norm(input, axis=axis, keepdims=keep_dims), input.dtype\n )\n\n\n@to_ivy_arrays_and_back\ndef Exp(*, x, name=\"Exp\"):\n return ivy.exp(x)\n\n\n@to_ivy_arrays_and_back\ndef Expm1(*, x, name=\"Expm1\"):\n return ivy.expm1(x)\n\n\n@to_ivy_arrays_and_back\ndef FFT(*, input, name=\"FFT\"):\n return ivy.astype(ivy.fft(input, -1), input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef FFT2D(*, input, name=\"FFT2D\"):\n return ivy.astype(ivy.fft2(input, dim=(-2, -1)), input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef Fill(*, dims, value, name=\"Full\"):\n return ivy.full(dims, value)\n\n\n@to_ivy_arrays_and_back\ndef Floor(*, x, name=\"Floor\"):\n return ivy.floor(x)\n\n\n@to_ivy_arrays_and_back\ndef FloorDiv(*, x, y, name=\"FloorDiv\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.floor_divide(x, y)\n\n\n@to_ivy_arrays_and_back\ndef FloorMod(*, x, y, name=\"FloorMod\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.remainder(x, y)\n\n\n@to_ivy_arrays_and_back\ndef Gather(*, params, indices, validate_indices=None, name=\"Gather\"):\n return ivy.gather(params, indices, axis=0, batch_dims=0)\n\n\n@to_ivy_arrays_and_back\ndef Greater(*, x, y, name=\"Greater\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.greater(x, y)\n\n\n@to_ivy_arrays_and_back\ndef GreaterEqual(*, x, y, name=\"GreaterEqual\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.greater_equal(x, y)\n\n\n@to_ivy_arrays_and_back\ndef Imag(\n *,\n input,\n Tout=ivy.float32,\n name=\"Imag\",\n):\n Tout = ivy.as_ivy_dtype(Tout) if Tout is not None else ivy.float32\n return ivy.astype(ivy.imag(input), Tout)\n\n\n@to_ivy_arrays_and_back\ndef Inv(*, x, name=\"Inv\"):\n return ivy.astype(ivy.reciprocal(x), x.dtype)\n\n\n@to_ivy_arrays_and_back\ndef InvGrad(*, y, dy, name=\"InvGrad\"):\n return ivy.multiply(ivy.negative(dy), ivy.multiply(y, y))\n\n\n@to_ivy_arrays_and_back\ndef Invert(*, x, name=\"Invert\"):\n return ivy.bitwise_invert(x)\n\n\n@to_ivy_arrays_and_back\ndef LeftShift(*, x, y, name=\"LeftShift\"):\n return ivy.bitwise_left_shift(x, y)\n\n\n@to_ivy_arrays_and_back\ndef Less(*, x, y, name=\"Less\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.less(x, y)\n\n\n@to_ivy_arrays_and_back\ndef LinSpace(*, start, stop, num, name=None):\n return ivy.linspace(start, stop, num)\n\n\n@to_ivy_arrays_and_back\ndef Log(*, x, name=\"Log\"):\n return ivy.log(x)\n\n\n@to_ivy_arrays_and_back\ndef LogicalNot(*, x, name=\"LogicalNot\"):\n return ivy.logical_not(x)\n\n\n@to_ivy_arrays_and_back\ndef MatMul(*, a, b, transpose_a=False, transpose_b=False, name=\"MatMul\"):\n a, b = check_tensorflow_casting(a, b)\n return ivy.matmul(a, b, transpose_a=transpose_a, transpose_b=transpose_b)\n\n\n@to_ivy_arrays_and_back\ndef MatrixInverse(*, input, adjoint=False, name=\"MatrixInverse\"):\n return ivy.inv(input, adjoint=adjoint)\n\n\n@to_ivy_arrays_and_back\ndef Minimum(*, x, y, name=\"Minimum\"):\n return ivy.minimum(x, y)\n\n\n@to_ivy_arrays_and_back\ndef NotEqual(*, x, y, incompatible_shape_error=True, name=\"NotEqual\"):\n x, y = check_tensorflow_casting(x, y)\n if incompatible_shape_error:\n return ivy.not_equal(x, y)\n\n try:\n return ivy.not_equal(x, y)\n except (ivy.utils.exceptions.IvyError, ivy.utils.exceptions.IvyBackendException):\n return ivy.array(True)\n\n\n@to_ivy_arrays_and_back\ndef NthElement(*, input, n, reverse=False, name=\"NthElement\"):\n return ivy.astype(ivy.sort(input, descending=reverse)[..., n], input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef OnesLike(*, x, name=\"OnesLike\"):\n return ivy.ones_like(x)\n\n\n@to_ivy_arrays_and_back\ndef Pack(*, values, axis=0, name=\"Pack\"):\n return ivy.stack(values, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef Pad(*, input, paddings, name=\"Pad\"):\n return ivy.constant_pad(input, paddings.to_list())\n\n\n@to_ivy_arrays_and_back\ndef PadV2(*, input, paddings, constant_values, name=\"PadV2\"):\n return ivy.constant_pad(input, paddings.to_list(), value=constant_values)\n\n\n@to_ivy_arrays_and_back\ndef Prod(*, input, axis, keep_dims=False, name=\"Prod\"):\n return ivy.astype(ivy.prod(input, axis=axis, keepdims=keep_dims), input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef Real(input, Tout=ivy.float32, name=\"Real\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef Reverse(*, tensor, dims, name=\"Reverse\"):\n ret = tensor\n for dim in enumerate(dims):\n if dim[1]:\n ret = ivy.flip(ret, axis=dim[0])\n return ret\n\n\n@to_ivy_arrays_and_back\ndef RightShift(*, x, y, name=\"RightShift\"):\n return ivy.bitwise_right_shift(x, y)\n\n\n@to_ivy_arrays_and_back\ndef Round(*, x, name=\"Round\"):\n return ivy.round(x)\n\n\n@to_ivy_arrays_and_back\ndef Rsqrt(*, x, name=\"Rsqrt\"):\n return ivy.sqrt(ivy.reciprocal(x))\n\n\n@to_ivy_arrays_and_back\ndef Shape(*, input, output_type=ivy.int32, name=\"Shape\"):\n output_type = to_ivy_dtype(output_type)\n return ivy.astype(ivy.shape(input, as_array=True), output_type, copy=False)\n\n\n@with_unsupported_dtypes(\n {\"2.13.0 and below\": (\"unsigned\",)},\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef Sign(*, x, name=\"Sign\"):\n return ivy.sign(x, np_variant=False)\n\n\n@to_ivy_arrays_and_back\ndef Sinh(*, x, name=\"Sinh\"):\n return ivy.sinh(x)\n\n\n@to_ivy_arrays_and_back\ndef Softplus(*, features, name=\"Softplus\"):\n return ivy.softplus(features)\n\n\n# Softsign\n@to_ivy_arrays_and_back\ndef Softsign(*, features, name=\"Softsign\"):\n return ivy.softsign(features)\n\n\n@to_ivy_arrays_and_back\ndef SplitV(*, value, size_splits, axis, num_split, name=\"SplitV\"):\n return ivy.split(value, num_or_size_splits=size_splits, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef Sqrt(*, x, name=\"Sqrt\"):\n return ivy.sqrt(x)\n\n\n@to_ivy_arrays_and_back\ndef Square(*, x, name=\"Square\"):\n return ivy.square(x)\n\n\n@to_ivy_arrays_and_back\ndef Sum(*, input, axis, keep_dims=False, name=\"Sum\"):\n return ivy.astype(ivy.sum(input, axis=axis, keepdims=keep_dims), input.dtype)\n\n\n@with_supported_dtypes(\n {\"2.13.0 and below\": (\"float64\", \"float128\", \"halfcomplex64\", \"complex128\")},\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef Svd(*, input, full_matrices=False, compute_uv=True, name=None):\n return ivy.svd(input, compute_uv=compute_uv, full_matrices=full_matrices)\n\n\n@to_ivy_arrays_and_back\ndef TanhGrad(*, y, dy, name=\"TanhGrad\"):\n return ivy.multiply(dy, ivy.subtract(1, ivy.multiply(y, y)))\n\n\n@to_ivy_arrays_and_back\ndef Transpose(*, x, perm, name=\"Transpose\"):\n ret = ivy.permute_dims(x, axes=perm)\n return ret\n\n\n@to_ivy_arrays_and_back\ndef TruncateDiv(*, x, y, name=\"TruncateDiv\"):\n return ivy.astype(ivy.trunc_divide(x, y), x.dtype)\n\n\n@with_unsupported_dtypes({\"2.13.0 and below\": (\"float16\", \"bfloat16\")}, \"tensorflow\")\n@to_ivy_arrays_and_back\ndef Unpack(*, value, num, axis=0, name=\"Unpack\"):\n return ivy.unstack(value, axis=axis)[:num]\n\n\n@to_ivy_arrays_and_back\ndef Xdivy(*, x, y, name=\"Xdivy\"):\n if (x == 0).all():\n return 0.0\n return ivy.divide(x, y)\n\n\n@with_unsupported_dtypes({\"2.13.0 and below\": (\"bfloat16\",)}, \"tensorflow\")\n@to_ivy_arrays_and_back\ndef Xlog1py(*, x, y, name=\"Xlog1py\"):\n if (x == 0).all():\n return 0.0\n return ivy.multiply(x, ivy.log1p(y))\n\n\n@to_ivy_arrays_and_back\ndef ZerosLike(*, x, name=\"ZerosLike\"):\n return ivy.zeros_like(x)\n\n\nAdd = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add))\nSlice = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.slice))\nSub = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.subtract))\n", "path": "ivy/functional/frontends/tensorflow/raw_ops.py"}], "after_files": [{"content": "# global\nimport ivy\nimport ivy.functional.frontends.tensorflow as tf_frontend\nfrom ivy.functional.frontends.tensorflow import check_tensorflow_casting\nfrom ivy.functional.frontends.tensorflow.func_wrapper import (\n to_ivy_arrays_and_back,\n map_raw_ops_alias,\n to_ivy_dtype,\n)\n\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.utils.exceptions import IvyNotImplementedException\n\n\nAcos = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.acos))\nAcosh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.acosh))\nAddN = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add_n))\nAddV2 = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add))\nArgMax = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\"2.13.0 and below\": (\"complex\",)},\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.math.argmax, kwargs_to_update={\"dimension\": \"axis\"}\n )\n )\n)\nArgMin = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\"2.13.0 and below\": (\"complex\",)},\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.math.argmin, kwargs_to_update={\"dimension\": \"axis\"}\n )\n )\n)\nAsin = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.asin))\nAtan = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.atan))\nAtan2 = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\"2.13.0 and below\": \"float16\"},\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.atan2))\n)\nConcatV2 = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.concat))\nCos = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cos))\nCosh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cosh))\nCumprod = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cumprod))\nCumsum = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cumsum))\nDigamma = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.digamma))\nDiv = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.divide))\nEinsum = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\n \"bfloat16\",\n \"complex128 \",\n \"complex64\",\n \"float64\",\n \"float32\",\n \"float16\",\n \"int64\",\n \"int32\",\n ),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.general_functions.einsum))\n)\nIdentity = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.identity)\n)\nIdentityN = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.identity_n)\n)\nIgamma = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\n \"float64\",\n \"float32\",\n \"half\",\n ),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.igamma))\n)\nLeakyRelu = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\"bfloat16\", \"float16\", \"float32\", \"float64\"),\n },\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.nn.leaky_relu,\n )\n )\n)\nLessEqual = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\",),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.less_equal))\n)\nLog1p = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.log1p))\nLogSoftmax = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\n \"bfloat16\",\n \"float32\",\n \"float64\",\n ),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.log_softmax))\n)\nLogicalOr = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.logical_or))\nMatrixDeterminant = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.linalg.det))\nMax = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\",),\n },\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.math.reduce_max,\n kwargs_to_update={\n \"input\": \"input_tensor\",\n \"keep_dims\": \"keepdims\",\n },\n )\n )\n)\nMaximum = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\",),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.maximum))\n)\nMean = to_ivy_arrays_and_back(\n map_raw_ops_alias(\n tf_frontend.math.reduce_mean,\n kwargs_to_update={\n \"input\": \"input_tensor\",\n \"keep_dims\": \"keepdims\",\n },\n )\n)\nMin = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\",),\n },\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.math.reduce_min,\n kwargs_to_update={\n \"input\": \"input_tensor\",\n \"keep_dims\": \"keepdims\",\n },\n )\n )\n)\nMod = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.mod))\nMul = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.multiply))\nNeg = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.negative))\nPow = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.pow))\nRealDiv = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\n \"complex\",\n \"bfloat16\",\n \"float16\",\n \"float64\",\n \"float32\",\n ),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.general_functions.realdiv))\n)\nReciprocal = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.reciprocal))\nRelu = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\", \"float16\"),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.nn.relu))\n)\nRelu6 = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\", \"float16\"),\n },\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.nn.relu6,\n )\n )\n)\nReshape = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.reshape)\n)\nRoll = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.roll))\nShapeN = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.shape_n)\n)\nSigmoid = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.keras.activations.sigmoid)\n)\nSin = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.sin))\nSize = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.general_functions.size))\nSoftmax = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"float16\",),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.nn.softmax))\n)\nSplit = to_ivy_arrays_and_back(\n map_raw_ops_alias(\n tf_frontend.split, kwargs_to_update={\"num_split\": \"num_or_size_splits\"}\n )\n)\nSquaredDifference = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\n \"complex\",\n \"bfloat16\",\n \"float16\",\n \"float64\",\n \"float32\",\n \"int32\",\n \"int64\",\n ),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.squared_difference))\n)\nSqueeze = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.squeeze)\n)\nTan = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.tan))\nTanh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.tanh))\nTile = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.general_functions.tile))\nXlogy = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.xlogy))\nZeta = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\"float32\", \"float64\"),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.zeta))\n)\n\n\n# --- Helpers --- #\n# --------------- #\n\n\ndef _tf_to_ivy_ivy_arguments_for_conv(\n padding, ex_pading, strides, dilations, data_format\n):\n if data_format.find(\"C\") == 1:\n strides = strides[2:]\n dilations = dilations[2:]\n data_format = \"channel_first\"\n pad_index = [4, 8]\n else:\n strides = strides[1:-1]\n dilations = dilations[1:-1]\n data_format = \"channel_last\"\n pad_index = [2, 6]\n if padding == \"EXPLICIT\":\n padding = [\n (ex_pading[i], ex_pading[i + 1])\n for i in range(pad_index[0], pad_index[1], 2)\n ]\n return padding, strides, dilations, data_format\n\n\n# --- Main --- #\n# ------------ #\n\n\n@to_ivy_arrays_and_back\ndef AccumulateNV2(inputs, shape, name=\"AccumulateNV2\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef Angle(\n *,\n input,\n Tout=ivy.float32,\n name=\"Angle\",\n):\n Tout = ivy.as_ivy_dtype(Tout) if Tout is not None else ivy.float32\n return ivy.astype(ivy.angle(input), Tout)\n\n\n@with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\n \"float16\",\n \"bool\",\n \"bfloat16\",\n )\n },\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef ApproximateEqual(\n *,\n x,\n y,\n tolerance=1e-05,\n name=\"ApproximateEqual\",\n):\n x, y = check_tensorflow_casting(x, y)\n return ivy.abs(x - y) < tolerance\n\n\n@to_ivy_arrays_and_back\ndef Atanh(*, x, name=\"Atanh\"):\n return ivy.atanh(x)\n\n\n@to_ivy_arrays_and_back\ndef BandedTriangularSolve(\n matrix,\n rhs,\n lower=True,\n adjoint=False,\n name=\"BandedTriangularSolve\",\n):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef BatchMatMul(x, y, adj_x=False, adj_y=False, name=\"BatchMatMul\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef BatchMatMulV2(x, y, adj_x=False, adj_y=False, name=\"BatchMatMulV2\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef BatchMatMulV3(x, y, Tout=ivy.Dtype, adj_x=False, adj_y=False, name=\"BatchMatMulV3\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef BitwiseAnd(*, x, y, name=\"BitwiseAnd\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.bitwise_and(x, y)\n\n\n@to_ivy_arrays_and_back\ndef BitwiseOr(*, x, y, name=\"BitwiseOr\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.bitwise_or(x, y)\n\n\n@to_ivy_arrays_and_back\ndef BitwiseXor(*, x, y, name=\"BitwiseXor\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.bitwise_xor(x, y)\n\n\n@to_ivy_arrays_and_back\ndef BroadcastTo(*, input, shape, name=\"BroadcastTo\"):\n return ivy.broadcast_to(input, shape=shape)\n\n\n@to_ivy_arrays_and_back\ndef Ceil(*, x, name=None):\n return ivy.ceil(x)\n\n\n@to_ivy_arrays_and_back\ndef Cholesky(*, input, name=\"Cholesky\"):\n return ivy.astype(ivy.cholesky(input), input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef Complex(real, imag, Tout=ivy.complex64, name=\"Complex\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef Concat(*, concat_dim, values, name=\"Concat\"):\n return ivy.concat(values, axis=concat_dim)\n\n\n@to_ivy_arrays_and_back\ndef Conv2D(\n *,\n input,\n filter,\n strides,\n padding,\n use_cudnn_on_gpu,\n explicit_paddings,\n data_format=\"NHWC\",\n dilations=[1, 1, 1, 1],\n name=\"Conv2D\",\n):\n padding, strides, dilations, data_format = _tf_to_ivy_ivy_arguments_for_conv(\n padding, explicit_paddings, strides, dilations, data_format\n )\n return ivy.conv_general_dilated(\n input,\n filter,\n strides,\n padding,\n data_format=data_format,\n dilations=dilations,\n dims=2,\n )\n\n\n@to_ivy_arrays_and_back\ndef Conv3D(\n *,\n input,\n filter,\n strides,\n padding,\n data_format=\"NDHWC\",\n dilations=[1, 1, 1, 1, 1],\n name=\"Conv3D\",\n):\n # ivy.backends.tensorflow expects strides and dilations to be\n # a single integer value or a list of 3 values whereas the raw op\n # expects a list of 5 values\n if data_format == \"NDHWC\":\n strides = strides[1:-1]\n dilations = dilations[1:-1]\n elif data_format == \"NCDHW\":\n strides = strides[2:]\n dilations = dilations[2:]\n\n return tf_frontend.nn.conv3d(\n input,\n filter,\n strides,\n padding,\n data_format=data_format,\n dilations=dilations,\n name=name,\n )\n\n\n@to_ivy_arrays_and_back\ndef Cross(*, a, b, name=\"Cross\"):\n a, b = check_tensorflow_casting(a, b)\n return ivy.cross(a, b)\n\n\n@to_ivy_arrays_and_back\ndef CumulativeLogsumexp(\n x, axis, exclusive=False, reverse=False, name=\"CumulativeLogsumexp\"\n):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef DebugGradientIdentity(input, name=\"DebugGradientIdentity\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef Diag(*, diagonal, name=\"Diag\"):\n return ivy.astype(ivy.diag(diagonal), diagonal.dtype)\n\n\n@with_supported_dtypes(\n {\"2.13.0 and below\": (\"bfloat16\", \"float16\", \"float32\", \"float64\")},\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef Elu(features, name=None):\n zeros = ivy.zeros_like(features, dtype=ivy.dtype(features))\n ones = ivy.ones_like(features, dtype=ivy.dtype(features))\n ret_val = ivy.where(\n # if x > 0 => x; else e^x - 1\n features > zeros,\n features,\n ivy.subtract(ivy.exp(features), ones),\n )\n return ret_val\n\n\n@to_ivy_arrays_and_back\ndef Equal(*, x, y, incompatible_shape_error=True, name=\"Equal\"):\n x, y = check_tensorflow_casting(x, y)\n if incompatible_shape_error:\n return ivy.equal(x, y)\n\n try:\n return ivy.equal(x, y)\n except (ivy.utils.exceptions.IvyError, ivy.utils.exceptions.IvyBackendException):\n return ivy.array(False)\n\n\n@to_ivy_arrays_and_back\ndef EuclideanNorm(*, input, axis, keep_dims=False, name=\"EuclideanNorm\"):\n return ivy.astype(\n ivy.vector_norm(input, axis=axis, keepdims=keep_dims), input.dtype\n )\n\n\n@to_ivy_arrays_and_back\ndef Exp(*, x, name=\"Exp\"):\n return ivy.exp(x)\n\n\n@to_ivy_arrays_and_back\ndef Expm1(*, x, name=\"Expm1\"):\n return ivy.expm1(x)\n\n\n@to_ivy_arrays_and_back\ndef FFT(*, input, name=\"FFT\"):\n return ivy.astype(ivy.fft(input, -1), input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef FFT2D(*, input, name=\"FFT2D\"):\n return ivy.astype(ivy.fft2(input, dim=(-2, -1)), input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef Fill(*, dims, value, name=\"Full\"):\n return ivy.full(dims, value)\n\n\n@to_ivy_arrays_and_back\ndef Floor(*, x, name=\"Floor\"):\n return ivy.floor(x)\n\n\n@to_ivy_arrays_and_back\ndef FloorDiv(*, x, y, name=\"FloorDiv\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.floor_divide(x, y)\n\n\n@to_ivy_arrays_and_back\ndef FloorMod(*, x, y, name=\"FloorMod\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.remainder(x, y)\n\n\n@to_ivy_arrays_and_back\ndef Gather(*, params, indices, validate_indices=None, name=\"Gather\"):\n return ivy.gather(params, indices, axis=0, batch_dims=0)\n\n\n@to_ivy_arrays_and_back\ndef Greater(*, x, y, name=\"Greater\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.greater(x, y)\n\n\n@to_ivy_arrays_and_back\ndef GreaterEqual(*, x, y, name=\"GreaterEqual\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.greater_equal(x, y)\n\n\n@to_ivy_arrays_and_back\ndef Imag(\n *,\n input,\n Tout=ivy.float32,\n name=\"Imag\",\n):\n Tout = ivy.as_ivy_dtype(Tout) if Tout is not None else ivy.float32\n return ivy.astype(ivy.imag(input), Tout)\n\n\n@to_ivy_arrays_and_back\ndef Inv(*, x, name=\"Inv\"):\n return ivy.astype(ivy.reciprocal(x), x.dtype)\n\n\n@to_ivy_arrays_and_back\ndef InvGrad(*, y, dy, name=\"InvGrad\"):\n return ivy.multiply(ivy.negative(dy), ivy.multiply(y, y))\n\n\n@to_ivy_arrays_and_back\ndef Invert(*, x, name=\"Invert\"):\n return ivy.bitwise_invert(x)\n\n\n@to_ivy_arrays_and_back\ndef LeftShift(*, x, y, name=\"LeftShift\"):\n return ivy.bitwise_left_shift(x, y)\n\n\n@to_ivy_arrays_and_back\ndef Less(*, x, y, name=\"Less\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.less(x, y)\n\n\n@to_ivy_arrays_and_back\ndef LinSpace(*, start, stop, num, name=None):\n return ivy.linspace(start, stop, num)\n\n\n@to_ivy_arrays_and_back\ndef Log(*, x, name=\"Log\"):\n return ivy.log(x)\n\n\n@to_ivy_arrays_and_back\ndef LogicalNot(*, x, name=\"LogicalNot\"):\n return ivy.logical_not(x)\n\n\n@to_ivy_arrays_and_back\ndef MatMul(*, a, b, transpose_a=False, transpose_b=False, name=\"MatMul\"):\n a, b = check_tensorflow_casting(a, b)\n return ivy.matmul(a, b, transpose_a=transpose_a, transpose_b=transpose_b)\n\n\n@to_ivy_arrays_and_back\ndef MatrixInverse(*, input, adjoint=False, name=\"MatrixInverse\"):\n return ivy.inv(input, adjoint=adjoint)\n\n\n@to_ivy_arrays_and_back\ndef Minimum(*, x, y, name=\"Minimum\"):\n return ivy.minimum(x, y)\n\n\n@to_ivy_arrays_and_back\ndef NotEqual(*, x, y, incompatible_shape_error=True, name=\"NotEqual\"):\n x, y = check_tensorflow_casting(x, y)\n if incompatible_shape_error:\n return ivy.not_equal(x, y)\n\n try:\n return ivy.not_equal(x, y)\n except (ivy.utils.exceptions.IvyError, ivy.utils.exceptions.IvyBackendException):\n return ivy.array(True)\n\n\n@to_ivy_arrays_and_back\ndef NthElement(*, input, n, reverse=False, name=\"NthElement\"):\n return ivy.astype(ivy.sort(input, descending=reverse)[..., n], input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef OnesLike(*, x, name=\"OnesLike\"):\n return ivy.ones_like(x)\n\n\n@to_ivy_arrays_and_back\ndef Pack(*, values, axis=0, name=\"Pack\"):\n return ivy.stack(values, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef Pad(*, input, paddings, name=\"Pad\"):\n return ivy.constant_pad(input, paddings.to_list())\n\n\n@to_ivy_arrays_and_back\ndef PadV2(*, input, paddings, constant_values, name=\"PadV2\"):\n return ivy.constant_pad(input, paddings.to_list(), value=constant_values)\n\n\n@to_ivy_arrays_and_back\ndef Prod(*, input, axis, keep_dims=False, name=\"Prod\"):\n return ivy.astype(ivy.prod(input, axis=axis, keepdims=keep_dims), input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef Real(input, Tout=ivy.float32, name=\"Real\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef Reverse(*, tensor, dims, name=\"Reverse\"):\n ret = tensor\n for dim in enumerate(dims):\n if dim[1]:\n ret = ivy.flip(ret, axis=dim[0])\n return ret\n\n\n@to_ivy_arrays_and_back\ndef RightShift(*, x, y, name=\"RightShift\"):\n return ivy.bitwise_right_shift(x, y)\n\n\n@to_ivy_arrays_and_back\ndef Round(*, x, name=\"Round\"):\n return ivy.round(x)\n\n\n@to_ivy_arrays_and_back\ndef Rsqrt(*, x, name=\"Rsqrt\"):\n return ivy.sqrt(ivy.reciprocal(x))\n\n\n@to_ivy_arrays_and_back\ndef Shape(*, input, output_type=ivy.int32, name=\"Shape\"):\n output_type = to_ivy_dtype(output_type)\n return ivy.astype(ivy.shape(input, as_array=True), output_type, copy=False)\n\n\n@with_unsupported_dtypes(\n {\"2.13.0 and below\": (\"unsigned\",)},\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef Sign(*, x, name=\"Sign\"):\n return ivy.sign(x, np_variant=False)\n\n\n@to_ivy_arrays_and_back\ndef Sinh(*, x, name=\"Sinh\"):\n return ivy.sinh(x)\n\n\n@to_ivy_arrays_and_back\ndef Softplus(*, features, name=\"Softplus\"):\n return ivy.softplus(features)\n\n\n# Softsign\n@to_ivy_arrays_and_back\ndef Softsign(*, features, name=\"Softsign\"):\n return ivy.softsign(features)\n\n\n@to_ivy_arrays_and_back\ndef SplitV(*, value, size_splits, axis, num_split, name=\"SplitV\"):\n return ivy.split(value, num_or_size_splits=size_splits, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef Sqrt(*, x, name=\"Sqrt\"):\n return ivy.sqrt(x)\n\n\n@to_ivy_arrays_and_back\ndef Square(*, x, name=\"Square\"):\n return ivy.square(x)\n\n\n@to_ivy_arrays_and_back\ndef Sum(*, input, axis, keep_dims=False, name=\"Sum\"):\n return ivy.astype(ivy.sum(input, axis=axis, keepdims=keep_dims), input.dtype)\n\n\n@with_supported_dtypes(\n {\"2.13.0 and below\": (\"float64\", \"float128\", \"halfcomplex64\", \"complex128\")},\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef Svd(*, input, full_matrices=False, compute_uv=True, name=None):\n return ivy.svd(input, compute_uv=compute_uv, full_matrices=full_matrices)\n\n\n@to_ivy_arrays_and_back\ndef TanhGrad(*, y, dy, name=\"TanhGrad\"):\n return ivy.multiply(dy, ivy.subtract(1, ivy.multiply(y, y)))\n\n\n@to_ivy_arrays_and_back\ndef Transpose(*, x, perm, name=\"Transpose\"):\n ret = ivy.permute_dims(x, axes=perm)\n return ret\n\n\n@to_ivy_arrays_and_back\ndef TruncateDiv(*, x, y, name=\"TruncateDiv\"):\n return ivy.astype(ivy.trunc_divide(x, y), x.dtype)\n\n\n@with_unsupported_dtypes({\"2.13.0 and below\": (\"float16\", \"bfloat16\")}, \"tensorflow\")\n@to_ivy_arrays_and_back\ndef Unpack(*, value, num, axis=0, name=\"Unpack\"):\n return ivy.unstack(value, axis=axis)[:num]\n\n\n@to_ivy_arrays_and_back\ndef Xdivy(*, x, y, name=\"Xdivy\"):\n if (x == 0).all():\n return 0.0\n return ivy.divide(x, y)\n\n\n@with_unsupported_dtypes({\"2.13.0 and below\": (\"bfloat16\",)}, \"tensorflow\")\n@to_ivy_arrays_and_back\ndef Xlog1py(*, x, y, name=\"Xlog1py\"):\n if (x == 0).all():\n return 0.0\n return ivy.multiply(x, ivy.log1p(y))\n\n\n@to_ivy_arrays_and_back\ndef ZerosLike(*, x, name=\"ZerosLike\"):\n return ivy.zeros_like(x)\n\n\nAdd = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add))\nSlice = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.slice))\nSub = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.subtract))\n", "path": "ivy/functional/frontends/tensorflow/raw_ops.py"}]} |
gh_patches_debug_1522 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-contrib-172 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove unused import
As per comment https://github.com/open-telemetry/opentelemetry-python-contrib/pull/107#discussion_r516262746, there appears to be an unused import in the jinja2 instrumentation
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16
17 Usage
18 -----
19
20 The OpenTelemetry ``jinja2`` integration traces templates loading, compilation
21 and rendering.
22
23 Usage
24 -----
25
26 .. code-block:: python
27
28 from jinja2 import Environment, FileSystemLoader
29 from opentelemetry.instrumentation.jinja2 import Jinja2Instrumentor
30 from opentelemetry import trace
31 from opentelemetry.trace import TracerProvider
32
33 trace.set_tracer_provider(TracerProvider())
34
35 Jinja2Instrumentor().instrument()
36
37 env = Environment(loader=FileSystemLoader("templates"))
38 template = env.get_template("mytemplate.html")
39
40 API
41 ---
42 """
43 # pylint: disable=no-value-for-parameter
44
45 import logging
46
47 import jinja2
48 from wrapt import ObjectProxy
49 from wrapt import wrap_function_wrapper as _wrap
50
51 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
52 from opentelemetry.instrumentation.jinja2.version import __version__
53 from opentelemetry.instrumentation.utils import unwrap
54 from opentelemetry.trace import SpanKind, get_tracer
55 from opentelemetry.trace.status import Status, StatusCode
56
57 logger = logging.getLogger(__name__)
58
59 ATTRIBUTE_JINJA2_TEMPLATE_NAME = "jinja2.template_name"
60 ATTRIBUTE_JINJA2_TEMPLATE_PATH = "jinja2.template_path"
61 DEFAULT_TEMPLATE_NAME = "<memory>"
62
63
64 def _with_tracer_wrapper(func):
65 """Helper for providing tracer for wrapper functions.
66 """
67
68 def _with_tracer(tracer):
69 def wrapper(wrapped, instance, args, kwargs):
70 return func(tracer, wrapped, instance, args, kwargs)
71
72 return wrapper
73
74 return _with_tracer
75
76
77 @_with_tracer_wrapper
78 def _wrap_render(tracer, wrapped, instance, args, kwargs):
79 """Wrap `Template.render()` or `Template.generate()`
80 """
81 with tracer.start_as_current_span(
82 "jinja2.render", kind=SpanKind.INTERNAL,
83 ) as span:
84 if span.is_recording():
85 template_name = instance.name or DEFAULT_TEMPLATE_NAME
86 span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)
87 return wrapped(*args, **kwargs)
88
89
90 @_with_tracer_wrapper
91 def _wrap_compile(tracer, wrapped, _, args, kwargs):
92 with tracer.start_as_current_span(
93 "jinja2.compile", kind=SpanKind.INTERNAL,
94 ) as span:
95 if span.is_recording():
96 template_name = (
97 args[1]
98 if len(args) > 1
99 else kwargs.get("name", DEFAULT_TEMPLATE_NAME)
100 )
101 span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)
102 return wrapped(*args, **kwargs)
103
104
105 @_with_tracer_wrapper
106 def _wrap_load_template(tracer, wrapped, _, args, kwargs):
107 with tracer.start_as_current_span(
108 "jinja2.load", kind=SpanKind.INTERNAL,
109 ) as span:
110 if span.is_recording():
111 template_name = kwargs.get("name", args[0])
112 span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)
113 template = None
114 try:
115 template = wrapped(*args, **kwargs)
116 return template
117 finally:
118 if template and span.is_recording():
119 span.set_attribute(
120 ATTRIBUTE_JINJA2_TEMPLATE_PATH, template.filename
121 )
122
123
124 class Jinja2Instrumentor(BaseInstrumentor):
125 """An instrumentor for jinja2
126
127 See `BaseInstrumentor`
128 """
129
130 def _instrument(self, **kwargs):
131 tracer_provider = kwargs.get("tracer_provider")
132 tracer = get_tracer(__name__, __version__, tracer_provider)
133
134 _wrap(jinja2, "environment.Template.render", _wrap_render(tracer))
135 _wrap(jinja2, "environment.Template.generate", _wrap_render(tracer))
136 _wrap(jinja2, "environment.Environment.compile", _wrap_compile(tracer))
137 _wrap(
138 jinja2,
139 "environment.Environment._load_template",
140 _wrap_load_template(tracer),
141 )
142
143 def _uninstrument(self, **kwargs):
144 unwrap(jinja2.Template, "render")
145 unwrap(jinja2.Template, "generate")
146 unwrap(jinja2.Environment, "compile")
147 unwrap(jinja2.Environment, "_load_template")
148
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py b/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py
@@ -52,7 +52,6 @@
from opentelemetry.instrumentation.jinja2.version import __version__
from opentelemetry.instrumentation.utils import unwrap
from opentelemetry.trace import SpanKind, get_tracer
-from opentelemetry.trace.status import Status, StatusCode
logger = logging.getLogger(__name__)
| {"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py b/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py\n@@ -52,7 +52,6 @@\n from opentelemetry.instrumentation.jinja2.version import __version__\n from opentelemetry.instrumentation.utils import unwrap\n from opentelemetry.trace import SpanKind, get_tracer\n-from opentelemetry.trace.status import Status, StatusCode\n \n logger = logging.getLogger(__name__)\n", "issue": "Remove unused import\nAs per comment https://github.com/open-telemetry/opentelemetry-python-contrib/pull/107#discussion_r516262746, there appears to be an unused import in the jinja2 instrumentation\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n\nUsage\n-----\n\nThe OpenTelemetry ``jinja2`` integration traces templates loading, compilation\nand rendering.\n\nUsage\n-----\n\n.. code-block:: python\n\n from jinja2 import Environment, FileSystemLoader\n from opentelemetry.instrumentation.jinja2 import Jinja2Instrumentor\n from opentelemetry import trace\n from opentelemetry.trace import TracerProvider\n\n trace.set_tracer_provider(TracerProvider())\n\n Jinja2Instrumentor().instrument()\n\n env = Environment(loader=FileSystemLoader(\"templates\"))\n template = env.get_template(\"mytemplate.html\")\n\nAPI\n---\n\"\"\"\n# pylint: disable=no-value-for-parameter\n\nimport logging\n\nimport jinja2\nfrom wrapt import ObjectProxy\nfrom wrapt import wrap_function_wrapper as _wrap\n\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.jinja2.version import __version__\nfrom opentelemetry.instrumentation.utils import unwrap\nfrom opentelemetry.trace import SpanKind, get_tracer\nfrom opentelemetry.trace.status import Status, StatusCode\n\nlogger = logging.getLogger(__name__)\n\nATTRIBUTE_JINJA2_TEMPLATE_NAME = \"jinja2.template_name\"\nATTRIBUTE_JINJA2_TEMPLATE_PATH = \"jinja2.template_path\"\nDEFAULT_TEMPLATE_NAME = \"<memory>\"\n\n\ndef _with_tracer_wrapper(func):\n \"\"\"Helper for providing tracer for wrapper functions.\n \"\"\"\n\n def _with_tracer(tracer):\n def wrapper(wrapped, instance, args, kwargs):\n return func(tracer, wrapped, instance, args, kwargs)\n\n return wrapper\n\n return _with_tracer\n\n\n@_with_tracer_wrapper\ndef _wrap_render(tracer, wrapped, instance, args, kwargs):\n \"\"\"Wrap `Template.render()` or `Template.generate()`\n \"\"\"\n with tracer.start_as_current_span(\n \"jinja2.render\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = instance.name or DEFAULT_TEMPLATE_NAME\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)\n return wrapped(*args, **kwargs)\n\n\n@_with_tracer_wrapper\ndef _wrap_compile(tracer, wrapped, _, args, kwargs):\n with tracer.start_as_current_span(\n \"jinja2.compile\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = (\n args[1]\n if len(args) > 1\n else kwargs.get(\"name\", DEFAULT_TEMPLATE_NAME)\n )\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)\n return wrapped(*args, **kwargs)\n\n\n@_with_tracer_wrapper\ndef _wrap_load_template(tracer, wrapped, _, args, kwargs):\n with tracer.start_as_current_span(\n \"jinja2.load\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = kwargs.get(\"name\", args[0])\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)\n template = None\n try:\n template = wrapped(*args, **kwargs)\n return template\n finally:\n if template and span.is_recording():\n span.set_attribute(\n ATTRIBUTE_JINJA2_TEMPLATE_PATH, template.filename\n )\n\n\nclass Jinja2Instrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for jinja2\n\n See `BaseInstrumentor`\n \"\"\"\n\n def _instrument(self, **kwargs):\n tracer_provider = kwargs.get(\"tracer_provider\")\n tracer = get_tracer(__name__, __version__, tracer_provider)\n\n _wrap(jinja2, \"environment.Template.render\", _wrap_render(tracer))\n _wrap(jinja2, \"environment.Template.generate\", _wrap_render(tracer))\n _wrap(jinja2, \"environment.Environment.compile\", _wrap_compile(tracer))\n _wrap(\n jinja2,\n \"environment.Environment._load_template\",\n _wrap_load_template(tracer),\n )\n\n def _uninstrument(self, **kwargs):\n unwrap(jinja2.Template, \"render\")\n unwrap(jinja2.Template, \"generate\")\n unwrap(jinja2.Environment, \"compile\")\n unwrap(jinja2.Environment, \"_load_template\")\n", "path": "instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n\nUsage\n-----\n\nThe OpenTelemetry ``jinja2`` integration traces templates loading, compilation\nand rendering.\n\nUsage\n-----\n\n.. code-block:: python\n\n from jinja2 import Environment, FileSystemLoader\n from opentelemetry.instrumentation.jinja2 import Jinja2Instrumentor\n from opentelemetry import trace\n from opentelemetry.trace import TracerProvider\n\n trace.set_tracer_provider(TracerProvider())\n\n Jinja2Instrumentor().instrument()\n\n env = Environment(loader=FileSystemLoader(\"templates\"))\n template = env.get_template(\"mytemplate.html\")\n\nAPI\n---\n\"\"\"\n# pylint: disable=no-value-for-parameter\n\nimport logging\n\nimport jinja2\nfrom wrapt import ObjectProxy\nfrom wrapt import wrap_function_wrapper as _wrap\n\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.jinja2.version import __version__\nfrom opentelemetry.instrumentation.utils import unwrap\nfrom opentelemetry.trace import SpanKind, get_tracer\n\nlogger = logging.getLogger(__name__)\n\nATTRIBUTE_JINJA2_TEMPLATE_NAME = \"jinja2.template_name\"\nATTRIBUTE_JINJA2_TEMPLATE_PATH = \"jinja2.template_path\"\nDEFAULT_TEMPLATE_NAME = \"<memory>\"\n\n\ndef _with_tracer_wrapper(func):\n \"\"\"Helper for providing tracer for wrapper functions.\n \"\"\"\n\n def _with_tracer(tracer):\n def wrapper(wrapped, instance, args, kwargs):\n return func(tracer, wrapped, instance, args, kwargs)\n\n return wrapper\n\n return _with_tracer\n\n\n@_with_tracer_wrapper\ndef _wrap_render(tracer, wrapped, instance, args, kwargs):\n \"\"\"Wrap `Template.render()` or `Template.generate()`\n \"\"\"\n with tracer.start_as_current_span(\n \"jinja2.render\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = instance.name or DEFAULT_TEMPLATE_NAME\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)\n return wrapped(*args, **kwargs)\n\n\n@_with_tracer_wrapper\ndef _wrap_compile(tracer, wrapped, _, args, kwargs):\n with tracer.start_as_current_span(\n \"jinja2.compile\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = (\n args[1]\n if len(args) > 1\n else kwargs.get(\"name\", DEFAULT_TEMPLATE_NAME)\n )\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)\n return wrapped(*args, **kwargs)\n\n\n@_with_tracer_wrapper\ndef _wrap_load_template(tracer, wrapped, _, args, kwargs):\n with tracer.start_as_current_span(\n \"jinja2.load\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = kwargs.get(\"name\", args[0])\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)\n template = None\n try:\n template = wrapped(*args, **kwargs)\n return template\n finally:\n if template and span.is_recording():\n span.set_attribute(\n ATTRIBUTE_JINJA2_TEMPLATE_PATH, template.filename\n )\n\n\nclass Jinja2Instrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for jinja2\n\n See `BaseInstrumentor`\n \"\"\"\n\n def _instrument(self, **kwargs):\n tracer_provider = kwargs.get(\"tracer_provider\")\n tracer = get_tracer(__name__, __version__, tracer_provider)\n\n _wrap(jinja2, \"environment.Template.render\", _wrap_render(tracer))\n _wrap(jinja2, \"environment.Template.generate\", _wrap_render(tracer))\n _wrap(jinja2, \"environment.Environment.compile\", _wrap_compile(tracer))\n _wrap(\n jinja2,\n \"environment.Environment._load_template\",\n _wrap_load_template(tracer),\n )\n\n def _uninstrument(self, **kwargs):\n unwrap(jinja2.Template, \"render\")\n unwrap(jinja2.Template, \"generate\")\n unwrap(jinja2.Environment, \"compile\")\n unwrap(jinja2.Environment, \"_load_template\")\n", "path": "instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py"}]} |
gh_patches_debug_1523 | rasdani/github-patches | git_diff | carpentries__amy-932 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Date(?) stamp when updating record from url
When updating an event from url, the list of instructors/helpers is recorded in the notes field with a heading that looks like a date but isn't. For example an update I made today has the heading "UPDATE 2016-5-4:"
What is this heading supposed to be? Can we make it be the real date of the update (i.e., UPDATE 2016-06-16:")
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `workshops/forms.py`
Content:
```
1 import re
2
3 from django import forms
4 from django.core.validators import RegexValidator
5 from django.forms import (
6 HiddenInput, CheckboxSelectMultiple, TextInput, modelformset_factory,
7 RadioSelect,
8 )
9
10 from captcha.fields import ReCaptchaField
11 from crispy_forms.helper import FormHelper
12 from crispy_forms.layout import Layout, Div, HTML, Submit
13 from crispy_forms.bootstrap import FormActions
14 from django_countries import Countries
15 from django_countries.fields import CountryField
16 from selectable import forms as selectable
17
18 from workshops.models import (
19 Award, Event, Lesson, Person, Task, Airport, Organization,
20 EventRequest, ProfileUpdateRequest, TodoItem, Membership,
21 Sponsorship, InvoiceRequest, EventSubmission, Language,
22 TrainingRequest,
23 DCSelfOrganizedEventRequest,
24 )
25 from workshops import lookups
26
27
28 AUTOCOMPLETE_HELP_TEXT = (
29 "Autocomplete field; type characters to view available options, "
30 "then select desired item from list."
31 )
32
33
34 class BootstrapHelper(FormHelper):
35 """Layout and behavior for crispy-displayed forms."""
36 form_class = 'form-horizontal'
37 label_class = 'col-lg-2'
38 field_class = 'col-lg-8'
39 html5_required = True
40
41 def __init__(self, form=None):
42 super().__init__(form)
43
44 self.attrs['role'] = 'form'
45 self.inputs.append(Submit('submit', 'Submit'))
46
47
48 class BootstrapHelperGet(BootstrapHelper):
49 """Force form to use GET instead of default POST."""
50 form_method = 'get'
51
52
53 class BootstrapHelperWithAdd(BootstrapHelper):
54 """Change form's 'Submit' to 'Add'."""
55
56 def __init__(self, form=None):
57 super().__init__(form)
58
59 self.inputs[-1] = Submit('submit', 'Add')
60
61
62 class BootstrapHelperFilter(FormHelper):
63 """A differently shaped forms (more space-efficient) for use in sidebar as
64 filter forms."""
65 form_method = 'get'
66
67 def __init__(self, form=None):
68 super().__init__(form)
69 self.attrs['role'] = 'form'
70 self.inputs.append(Submit('', 'Submit'))
71
72
73 class BootstrapHelperWiderLabels(BootstrapHelper):
74 """SWCEventRequestForm and DCEventRequestForm have long labels, so this
75 helper is used to address that issue."""
76 label_class = 'col-lg-3'
77 field_class = 'col-lg-7'
78
79
80 class BootstrapHelperFormsetInline(BootstrapHelper):
81 """For use in inline formsets."""
82 template = 'bootstrap/table_inline_formset.html'
83
84
85 bootstrap_helper = BootstrapHelper()
86 bootstrap_helper_get = BootstrapHelperGet()
87 bootstrap_helper_with_add = BootstrapHelperWithAdd()
88 bootstrap_helper_filter = BootstrapHelperFilter()
89 bootstrap_helper_wider_labels = BootstrapHelperWiderLabels()
90 bootstrap_helper_inline_formsets = BootstrapHelperFormsetInline()
91
92
93 class WorkshopStaffForm(forms.Form):
94 '''Represent instructor matching form.'''
95
96 latitude = forms.FloatField(label='Latitude',
97 min_value=-90.0,
98 max_value=90.0,
99 required=False)
100 longitude = forms.FloatField(label='Longitude',
101 min_value=-180.0,
102 max_value=180.0,
103 required=False)
104 airport = selectable.AutoCompleteSelectField(
105 lookup_class=lookups.AirportLookup,
106 label='Airport',
107 required=False,
108 widget=selectable.AutoComboboxSelectWidget(
109 lookup_class=lookups.AirportLookup,
110 ),
111 )
112 languages = selectable.AutoCompleteSelectMultipleField(
113 lookup_class=lookups.LanguageLookup,
114 label='Languages',
115 required=False,
116 widget=selectable.AutoComboboxSelectMultipleWidget,
117 )
118
119 country = forms.MultipleChoiceField(choices=[])
120
121 lessons = forms.ModelMultipleChoiceField(queryset=Lesson.objects.all(),
122 widget=CheckboxSelectMultiple(),
123 required=False)
124
125 INSTRUCTOR_BADGE_CHOICES = (
126 ('swc-instructor', 'Software Carpentry Instructor'),
127 ('dc-instructor', 'Data Carpentry Instructor'),
128 )
129 instructor_badges = forms.MultipleChoiceField(
130 choices=INSTRUCTOR_BADGE_CHOICES,
131 widget=CheckboxSelectMultiple(),
132 required=False,
133 )
134
135 GENDER_CHOICES = ((None, '---------'), ) + Person.GENDER_CHOICES
136 gender = forms.ChoiceField(choices=GENDER_CHOICES, required=False)
137
138 was_helper = forms.BooleanField(
139 required=False, label='Was helper at least once before')
140 was_organizer = forms.BooleanField(
141 required=False, label='Was organizer at least once before')
142 is_in_progress_trainee = forms.BooleanField(
143 required=False, label='Is an in-progress instructor trainee')
144
145 def __init__(self, *args, **kwargs):
146 '''Build form layout dynamically.'''
147 super().__init__(*args, **kwargs)
148
149 # dynamically build choices for country field
150 only = Airport.objects.distinct().exclude(country='') \
151 .exclude(country=None) \
152 .values_list('country', flat=True)
153 countries = Countries()
154 countries.only = only
155
156 choices = list(countries)
157 self.fields['country'] = forms.MultipleChoiceField(choices=choices,
158 required=False)
159
160 self.helper = FormHelper(self)
161 self.helper.form_class = 'form-inline'
162 self.helper.form_method = 'get'
163 self.helper.layout = Layout(
164 Div(
165 Div(HTML('Location close to'), css_class='panel-heading'),
166 Div('airport', css_class='panel-body'),
167 Div(HTML('<b>OR</b>'), css_class='panel-footer'),
168 Div('country', css_class='panel-body'),
169 Div(HTML('<b>OR</b>'), css_class='panel-footer'),
170 Div('latitude', 'longitude', css_class='panel-body'),
171 css_class='panel panel-default ',
172 ),
173 'instructor_badges',
174 'was_helper',
175 'was_organizer',
176 'is_in_progress_trainee',
177 'languages',
178 'gender',
179 'lessons',
180 FormActions(
181 Submit('submit', 'Submit'),
182 ),
183 )
184
185 def clean(self):
186 cleaned_data = super().clean()
187 lat = bool(cleaned_data.get('latitude'))
188 lng = bool(cleaned_data.get('longitude'))
189 airport = bool(cleaned_data.get('airport'))
190 country = bool(cleaned_data.get('country'))
191 latlng = lat and lng
192
193 # if searching by coordinates, then there must be both lat & lng
194 # present
195 if lat ^ lng:
196 raise forms.ValidationError(
197 'Must specify both latitude and longitude if searching by '
198 'coordinates')
199
200 # User must search by airport, or country, or coordinates, or none
201 # of them. Sum of boolean elements must be equal 0 (if general search)
202 # or 1 (if searching by airport OR country OR lat/lng).
203 if sum([airport, country, latlng]) not in [0, 1]:
204 raise forms.ValidationError(
205 'Must specify an airport OR a country, OR use coordinates, OR '
206 'none of them.')
207 return cleaned_data
208
209
210 class PersonBulkAddForm(forms.Form):
211 '''Represent CSV upload form for bulk adding people.'''
212
213 file = forms.FileField()
214
215
216 class SearchForm(forms.Form):
217 '''Represent general searching form.'''
218
219 term = forms.CharField(label='term',
220 max_length=100)
221 in_organizations = forms.BooleanField(label='in organizations',
222 required=False,
223 initial=True)
224 in_events = forms.BooleanField(label='in events',
225 required=False,
226 initial=True)
227 in_persons = forms.BooleanField(label='in persons',
228 required=False,
229 initial=True)
230 in_airports = forms.BooleanField(label='in airports',
231 required=False,
232 initial=True)
233
234
235 class DebriefForm(forms.Form):
236 '''Represent general debrief form.'''
237 begin_date = forms.DateField(
238 label='Begin date as YYYY-MM-DD',
239 input_formats=['%Y-%m-%d', ]
240 )
241 end_date = forms.DateField(
242 label='End date as YYYY-MD-DD',
243 input_formats=['%Y-%m-%d', ]
244 )
245
246
247 class EventForm(forms.ModelForm):
248 host = selectable.AutoCompleteSelectField(
249 lookup_class=lookups.OrganizationLookup,
250 label='Host',
251 required=True,
252 help_text=Event._meta.get_field('host').help_text,
253 widget=selectable.AutoComboboxSelectWidget,
254 )
255
256 administrator = selectable.AutoCompleteSelectField(
257 lookup_class=lookups.OrganizationLookup,
258 label='Administrator',
259 required=False,
260 help_text=Event._meta.get_field('administrator').help_text,
261 widget=selectable.AutoComboboxSelectWidget,
262 )
263
264 assigned_to = selectable.AutoCompleteSelectField(
265 lookup_class=lookups.AdminLookup,
266 label='Assigned to',
267 required=False,
268 help_text=AUTOCOMPLETE_HELP_TEXT,
269 widget=selectable.AutoComboboxSelectWidget,
270 )
271
272 language = selectable.AutoCompleteSelectField(
273 lookup_class=lookups.LanguageLookup,
274 label='Language',
275 required=False,
276 help_text=AUTOCOMPLETE_HELP_TEXT,
277 widget=selectable.AutoComboboxSelectWidget,
278 )
279
280 country = CountryField().formfield(
281 required=False,
282 help_text=Event._meta.get_field('country').help_text,
283 )
284
285 admin_fee = forms.DecimalField(min_value=0, decimal_places=2,
286 required=False, widget=TextInput)
287
288 def __init__(self, *args, **kwargs):
289 super().__init__(*args, **kwargs)
290 self.fields['slug'].widget.attrs['placeholder'] = 'YYYY-MM-DD-location'
291 self.fields['start'].widget.attrs['placeholder'] = 'YYYY-MM-DD'
292 self.fields['end'].widget.attrs['placeholder'] = 'YYYY-MM-DD'
293
294 self.helper = BootstrapHelper(self)
295
296 idx_start = self.helper['country'].slice[0][0][0]
297 idx_end = self.helper['longitude'].slice[0][0][0]
298 # wrap all venue fields within <div class='panel-body'>
299 self.helper[idx_start:idx_end + 1] \
300 .wrap_together(Div, css_class='panel-body')
301 # wrap <div class='panel-body'> within <div class='panel panel-…'>
302 self.helper[idx_start].wrap_together(Div,
303 css_class='panel panel-default')
304 # add <div class='panel-heading'>Loc. details</div> inside "div.panel"
305 self.helper.layout[idx_start].insert(0, Div(HTML('Location details'),
306 css_class='panel-heading'))
307
308 id_learners_pre = self.helper['learners_pre'].slice[0][0][0]
309 id_learners_longterm = self.helper['learners_longterm'].slice[0][0][0]
310 # wrap all survey fields within <div class='panel-body'>
311 self.helper[id_learners_pre:id_learners_longterm + 1] \
312 .wrap_together(Div, css_class='panel-body')
313 # wrap <div class='panel-body'> within <div class='panel panel-…'>
314 self.helper[id_learners_pre].wrap_together(
315 Div, css_class='panel panel-default')
316 # add <div class='panel-heading'>Venue details</div> inside "div.panel"
317 self.helper.layout[id_learners_pre].insert(
318 0, Div(HTML('Survey results'), css_class='panel-heading'))
319
320 def clean_slug(self):
321 # Ensure slug is in "YYYY-MM-DD-location" format
322 data = self.cleaned_data['slug']
323 match = re.match('(\d{4}|x{4})-(\d{2}|x{2})-(\d{2}|x{2})-.+', data)
324 if not match:
325 raise forms.ValidationError('Slug must be in "YYYY-MM-DD-location"'
326 ' format, where "YYYY", "MM", "DD" can'
327 ' be unspecified (ie. "xx").')
328 return data
329
330 def clean_end(self):
331 """Ensure end >= start."""
332 start = self.cleaned_data['start']
333 end = self.cleaned_data['end']
334
335 if start and end and end < start:
336 raise forms.ValidationError('Must not be earlier than start date.')
337 return end
338
339 class Meta:
340 model = Event
341 # reorder fields, don't display 'deleted' field
342 fields = ('slug', 'completed', 'start', 'end', 'host', 'administrator',
343 'assigned_to', 'tags', 'url', 'language', 'reg_key',
344 'admin_fee', 'invoice_status', 'attendance', 'contact',
345 'notes', 'country', 'venue', 'address', 'latitude',
346 'longitude', 'learners_pre', 'learners_post',
347 'instructors_pre', 'instructors_post', 'learners_longterm')
348 # WARNING: don't change put any fields between 'country' and
349 # 'longitude' that don't relate to the venue of the event
350
351 widgets = {
352 'attendance': TextInput,
353 'latitude': TextInput,
354 'longitude': TextInput,
355 'invoice_status': RadioSelect,
356 }
357
358 class Media:
359 # thanks to this, {{ form.media }} in the template will generate
360 # a <link href=""> (for CSS files) or <script src=""> (for JS files)
361 js = (
362 'import_from_url.js', 'update_from_url.js',
363 'online_country.js',
364 )
365
366
367 class TaskForm(forms.ModelForm):
368
369 person = selectable.AutoCompleteSelectField(
370 lookup_class=lookups.PersonLookup,
371 label='Person',
372 required=True,
373 help_text=AUTOCOMPLETE_HELP_TEXT,
374 widget=selectable.AutoComboboxSelectWidget,
375 )
376
377 class Meta:
378 model = Task
379 fields = '__all__'
380 widgets = {'event': HiddenInput}
381
382
383 class TaskFullForm(TaskForm):
384
385 event = selectable.AutoCompleteSelectField(
386 lookup_class=lookups.EventLookup,
387 label='Event',
388 required=True,
389 help_text=AUTOCOMPLETE_HELP_TEXT,
390 widget=selectable.AutoComboboxSelectWidget,
391 )
392
393 class Meta:
394 model = Task
395 fields = '__all__'
396
397
398 class PersonForm(forms.ModelForm):
399
400 airport = selectable.AutoCompleteSelectField(
401 lookup_class=lookups.AirportLookup,
402 label='Airport',
403 required=False,
404 help_text=AUTOCOMPLETE_HELP_TEXT,
405 widget=selectable.AutoComboboxSelectWidget,
406 )
407 languages = selectable.AutoCompleteSelectMultipleField(
408 lookup_class=lookups.LanguageLookup,
409 label='Languages',
410 required=False,
411 help_text=AUTOCOMPLETE_HELP_TEXT,
412 widget=selectable.AutoComboboxSelectMultipleWidget,
413 )
414
415 class Meta:
416 model = Person
417 # don't display the 'password', 'user_permissions',
418 # 'groups' or 'is_superuser' fields
419 # + reorder fields
420 fields = ['username', 'personal', 'middle', 'family', 'may_contact',
421 'email', 'gender', 'airport', 'affiliation', 'github',
422 'twitter', 'url', 'occupation', 'orcid', 'notes', 'lessons',
423 'domains', 'languages']
424
425
426 class PersonCreateForm(PersonForm):
427 class Meta(PersonForm.Meta):
428 # remove 'username' field as it's being populated after form save
429 # in the `views.PersonCreate.form_valid`
430 fields = PersonForm.Meta.fields.copy()
431 fields.remove('username')
432
433
434 class PersonPermissionsForm(forms.ModelForm):
435 class Meta:
436 model = Person
437 # only display administration-related fields: groups, permissions,
438 # being a superuser or being active (== ability to log in)
439 fields = [
440 'is_active',
441 'is_superuser',
442 'user_permissions',
443 'groups',
444 ]
445
446
447 class PersonsSelectionForm(forms.Form):
448
449 person_a = selectable.AutoCompleteSelectField(
450 lookup_class=lookups.PersonLookup,
451 label='Person From',
452 required=True,
453 help_text=AUTOCOMPLETE_HELP_TEXT,
454 widget=selectable.AutoComboboxSelectWidget,
455 )
456
457 person_b = selectable.AutoCompleteSelectField(
458 lookup_class=lookups.PersonLookup,
459 label='Person To',
460 required=True,
461 help_text=AUTOCOMPLETE_HELP_TEXT,
462 widget=selectable.AutoComboboxSelectWidget,
463 )
464
465
466 class PersonsMergeForm(forms.Form):
467 TWO = (
468 ('obj_a', 'Use A'),
469 ('obj_b', 'Use B'),
470 )
471 THREE = TWO + (('combine', 'Combine'), )
472 DEFAULT = 'obj_a'
473
474 person_a = forms.ModelChoiceField(queryset=Person.objects.all(),
475 widget=forms.HiddenInput)
476
477 person_b = forms.ModelChoiceField(queryset=Person.objects.all(),
478 widget=forms.HiddenInput)
479
480 id = forms.ChoiceField(
481 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
482 )
483 username = forms.ChoiceField(
484 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
485 )
486 personal = forms.ChoiceField(
487 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
488 )
489 middle = forms.ChoiceField(
490 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
491 )
492 family = forms.ChoiceField(
493 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
494 )
495 email = forms.ChoiceField(
496 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
497 )
498 may_contact = forms.ChoiceField(
499 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
500 )
501 gender = forms.ChoiceField(
502 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
503 )
504 airport = forms.ChoiceField(
505 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
506 )
507 github = forms.ChoiceField(
508 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
509 )
510 twitter = forms.ChoiceField(
511 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
512 )
513 url = forms.ChoiceField(
514 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
515 )
516 notes = forms.ChoiceField(
517 choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,
518 )
519 affiliation = forms.ChoiceField(
520 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
521 )
522 occupation = forms.ChoiceField(
523 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
524 )
525 orcid = forms.ChoiceField(
526 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
527 )
528 award_set = forms.ChoiceField(
529 choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,
530 )
531 qualification_set = forms.ChoiceField(
532 choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,
533 label='Lessons',
534 )
535 domains = forms.ChoiceField(
536 choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,
537 )
538 languages = forms.ChoiceField(
539 choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,
540 )
541 task_set = forms.ChoiceField(
542 choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,
543 )
544 is_active = forms.ChoiceField(
545 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
546 )
547
548
549 class BadgeAwardForm(forms.ModelForm):
550
551 person = selectable.AutoCompleteSelectField(
552 lookup_class=lookups.PersonLookup,
553 label='Person',
554 required=True,
555 help_text=AUTOCOMPLETE_HELP_TEXT,
556 widget=selectable.AutoComboboxSelectWidget,
557 )
558
559 event = selectable.AutoCompleteSelectField(
560 lookup_class=lookups.EventLookup,
561 label='Event',
562 required=False,
563 help_text=AUTOCOMPLETE_HELP_TEXT,
564 widget=selectable.AutoComboboxSelectWidget,
565 )
566
567 awarded_by = selectable.AutoCompleteSelectField(
568 lookup_class=lookups.PersonLookup,
569 label='Awarded by',
570 required=False,
571 help_text=AUTOCOMPLETE_HELP_TEXT,
572 widget=selectable.AutoComboboxSelectWidget,
573 )
574
575 class Meta:
576 model = Award
577 fields = '__all__'
578 widgets = {'badge': HiddenInput}
579
580
581 class PersonAwardForm(forms.ModelForm):
582
583 event = selectable.AutoCompleteSelectField(
584 lookup_class=lookups.EventLookup,
585 label='Event',
586 required=False,
587 help_text=AUTOCOMPLETE_HELP_TEXT,
588 widget=selectable.AutoComboboxSelectWidget,
589 )
590
591 awarded_by = selectable.AutoCompleteSelectField(
592 lookup_class=lookups.PersonLookup,
593 label='Awarded by',
594 required=False,
595 help_text=AUTOCOMPLETE_HELP_TEXT,
596 widget=selectable.AutoComboboxSelectWidget,
597 )
598
599 class Meta:
600 model = Award
601 fields = '__all__'
602 widgets = {'person': HiddenInput}
603
604
605 class PersonTaskForm(forms.ModelForm):
606
607 event = selectable.AutoCompleteSelectField(
608 lookup_class=lookups.EventLookup,
609 label='Event',
610 required=True,
611 help_text=AUTOCOMPLETE_HELP_TEXT,
612 widget=selectable.AutoComboboxSelectWidget,
613 )
614
615 class Meta:
616 model = Task
617 fields = '__all__'
618 widgets = {'person': HiddenInput}
619
620
621 class OrganizationForm(forms.ModelForm):
622 domain = forms.CharField(
623 max_length=Organization._meta.get_field('domain').max_length,
624 validators=[
625 RegexValidator(
626 '[^\w\.-]+', inverse_match=True,
627 message='Please enter only the domain (such as "math.esu.edu")'
628 ' without a leading "http://" or a trailing "/".')
629 ],
630 )
631
632 class Meta:
633 model = Organization
634 fields = ['domain', 'fullname', 'country', 'notes']
635
636
637 class MembershipForm(forms.ModelForm):
638 class Meta:
639 model = Membership
640 fields = '__all__'
641 widgets = {'host': HiddenInput, }
642
643
644 class SponsorshipForm(forms.ModelForm):
645 organization = selectable.AutoCompleteSelectField(
646 lookup_class=lookups.OrganizationLookup,
647 label='Organization',
648 required=True,
649 help_text=Sponsorship._meta.get_field('organization').help_text,
650 widget=selectable.AutoComboboxSelectWidget,
651 )
652
653 contact = selectable.AutoCompleteSelectField(
654 lookup_class=lookups.PersonLookup,
655 label='Contact',
656 required=False,
657 help_text=AUTOCOMPLETE_HELP_TEXT,
658 widget=selectable.AutoComboboxSelectWidget,
659 )
660
661 class Meta:
662 model = Sponsorship
663 fields = '__all__'
664 widgets = {'event': HiddenInput, }
665
666
667 class SWCEventRequestForm(forms.ModelForm):
668 captcha = ReCaptchaField()
669 workshop_type = forms.CharField(initial='swc', widget=forms.HiddenInput())
670 understand_admin_fee = forms.BooleanField(
671 required=True,
672 initial=False,
673 label='I understand the Software Carpentry Foundation\'s '
674 'administration fee.',
675 help_text='<a href="http://software-carpentry.org/blog/2015/07/changes'
676 '-to-admin-fee.html" target="_blank">Look up administration '
677 'fees</a>.',
678 )
679 language = selectable.AutoCompleteSelectField(
680 lookup_class=lookups.LanguageLookup,
681 label='Language',
682 required=False,
683 help_text=AUTOCOMPLETE_HELP_TEXT,
684 widget=selectable.AutoComboboxSelectWidget,
685 )
686
687 class Meta:
688 model = EventRequest
689 exclude = ('active', 'created_at', 'last_updated_at', 'assigned_to',
690 'data_types', 'data_types_other',
691 'attendee_data_analysis_level', 'fee_waiver_request')
692 widgets = {
693 'approx_attendees': forms.RadioSelect(),
694 'attendee_domains': forms.CheckboxSelectMultiple(),
695 'attendee_academic_levels': forms.CheckboxSelectMultiple(),
696 'attendee_computing_levels': forms.CheckboxSelectMultiple(),
697 'travel_reimbursement': forms.RadioSelect(),
698 'admin_fee_payment': forms.RadioSelect(),
699 }
700
701
702 class DCEventRequestForm(SWCEventRequestForm):
703 workshop_type = forms.CharField(initial='dc', widget=forms.HiddenInput())
704 understand_admin_fee = forms.BooleanField(
705 required=True,
706 initial=False,
707 label='I understand the Data Carpentry\'s administration fee.',
708 help_text='There is a per-workshop fee for Data Carpentry to cover '
709 'administrative and core development costs. The per-workshop fee is '
710 'currently $2500. We work to find local instructors when possible, but'
711 ' the host institute will also need to pay for instructors travel and'
712 ' lodging if they need to travel. Therefore overall workshop costs are'
713 ' $2500 - $6000.',
714 )
715
716 class Meta(SWCEventRequestForm.Meta):
717 exclude = ('active', 'created_at', 'last_updated_at', 'assigned_to',
718 'admin_fee_payment', 'attendee_computing_levels')
719 widgets = {
720 'approx_attendees': forms.RadioSelect(),
721 'attendee_domains': forms.CheckboxSelectMultiple(),
722 'data_types': forms.RadioSelect(),
723 'attendee_academic_levels': forms.CheckboxSelectMultiple(),
724 'attendee_data_analysis_level': forms.CheckboxSelectMultiple(),
725 'travel_reimbursement': forms.RadioSelect(),
726 }
727
728
729 class EventSubmitFormNoCaptcha(forms.ModelForm):
730 class Meta:
731 model = EventSubmission
732 exclude = ('active', 'created_at', 'last_updated_at', 'assigned_to')
733
734
735 class EventSubmitForm(EventSubmitFormNoCaptcha):
736 captcha = ReCaptchaField()
737
738
739 class DCSelfOrganizedEventRequestFormNoCaptcha(forms.ModelForm):
740 # the easiest way to make these fields required without rewriting their
741 # verbose names or help texts
742 handle_registration = DCSelfOrganizedEventRequest._meta \
743 .get_field('handle_registration').formfield(required=True)
744 distribute_surveys = DCSelfOrganizedEventRequest._meta \
745 .get_field('distribute_surveys').formfield(required=True)
746 follow_code_of_conduct = DCSelfOrganizedEventRequest._meta \
747 .get_field('follow_code_of_conduct').formfield(required=True)
748
749 class Meta:
750 model = DCSelfOrganizedEventRequest
751 exclude = ('created_at', 'last_updated_at', 'assigned_to')
752 widgets = {
753 'instructor_status': forms.RadioSelect(),
754 'is_partner': forms.RadioSelect(),
755 'domains': forms.CheckboxSelectMultiple(),
756 'topics': forms.CheckboxSelectMultiple(),
757 'attendee_academic_levels': forms.CheckboxSelectMultiple(),
758 'attendee_data_analysis_level': forms.CheckboxSelectMultiple(),
759 'payment': forms.RadioSelect(),
760 }
761
762
763 class DCSelfOrganizedEventRequestForm(
764 DCSelfOrganizedEventRequestFormNoCaptcha):
765 captcha = ReCaptchaField()
766
767 class Meta(DCSelfOrganizedEventRequestFormNoCaptcha.Meta):
768 exclude = ('active', 'created_at', 'last_updated_at', 'assigned_to')
769
770
771 class ProfileUpdateRequestFormNoCaptcha(forms.ModelForm):
772 languages = selectable.AutoCompleteSelectMultipleField(
773 lookup_class=lookups.LanguageLookup,
774 label='Languages you can teach in',
775 required=False,
776 help_text=AUTOCOMPLETE_HELP_TEXT,
777 widget=selectable.AutoComboboxSelectMultipleWidget,
778 )
779
780 class Meta:
781 model = ProfileUpdateRequest
782 exclude = ('active', 'created_at', 'last_updated_at')
783 widgets = {
784 'domains': forms.CheckboxSelectMultiple(),
785 'lessons': forms.CheckboxSelectMultiple(),
786 'occupation': forms.RadioSelect(),
787 'gender': forms.RadioSelect(),
788 }
789
790 def clean_twitter(self):
791 """Remove '@'s from the beginning of the Twitter handle."""
792 twitter_handle = self.cleaned_data['twitter']
793 return re.sub('^@+', '', twitter_handle)
794
795
796 class ProfileUpdateRequestForm(ProfileUpdateRequestFormNoCaptcha):
797 captcha = ReCaptchaField()
798
799
800 class PersonLookupForm(forms.Form):
801 person = selectable.AutoCompleteSelectField(
802 lookup_class=lookups.PersonLookup,
803 label='Person',
804 required=True,
805 help_text=AUTOCOMPLETE_HELP_TEXT,
806 widget=selectable.AutoComboboxSelectWidget,
807 )
808
809
810 class AdminLookupForm(forms.Form):
811 person = selectable.AutoCompleteSelectField(
812 lookup_class=lookups.AdminLookup,
813 label='Administrator',
814 required=True,
815 help_text=AUTOCOMPLETE_HELP_TEXT,
816 widget=selectable.AutoComboboxSelectWidget,
817 )
818
819
820 class SimpleTodoForm(forms.ModelForm):
821 class Meta:
822 model = TodoItem
823 fields = ('title', 'due', 'additional', 'completed', 'event')
824 widgets = {'event': HiddenInput, }
825
826 # `extra`: number of forms populated via `initial` parameter; it's hardcoded in
827 # `views.todos_add`
828 TodoFormSet = modelformset_factory(TodoItem, form=SimpleTodoForm, extra=10)
829
830
831 class EventsSelectionForm(forms.Form):
832 event_a = selectable.AutoCompleteSelectField(
833 lookup_class=lookups.EventLookup,
834 label='Event A',
835 required=True,
836 help_text=AUTOCOMPLETE_HELP_TEXT,
837 widget=selectable.AutoComboboxSelectWidget,
838 )
839
840 event_b = selectable.AutoCompleteSelectField(
841 lookup_class=lookups.EventLookup,
842 label='Event B',
843 required=True,
844 help_text=AUTOCOMPLETE_HELP_TEXT,
845 widget=selectable.AutoComboboxSelectWidget,
846 )
847
848
849 class EventsMergeForm(forms.Form):
850 TWO = (
851 ('obj_a', 'Use A'),
852 ('obj_b', 'Use B'),
853 )
854 THREE = TWO + (('combine', 'Combine'), )
855 DEFAULT = 'obj_a'
856
857 event_a = forms.ModelChoiceField(queryset=Event.objects.all(),
858 widget=forms.HiddenInput)
859
860 event_b = forms.ModelChoiceField(queryset=Event.objects.all(),
861 widget=forms.HiddenInput)
862
863 id = forms.ChoiceField(
864 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
865 )
866 slug = forms.ChoiceField(
867 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
868 )
869 completed = forms.ChoiceField(
870 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
871 )
872 assigned_to = forms.ChoiceField(
873 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
874 )
875 start = forms.ChoiceField(
876 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
877 )
878 end = forms.ChoiceField(
879 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
880 )
881 host = forms.ChoiceField(
882 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
883 )
884 administrator = forms.ChoiceField(
885 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
886 )
887 tags = forms.ChoiceField(
888 choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,
889 )
890 url = forms.ChoiceField(
891 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
892 )
893 language = forms.ChoiceField(
894 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
895 )
896 reg_key = forms.ChoiceField(
897 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
898 )
899 admin_fee = forms.ChoiceField(
900 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
901 )
902 invoice_status = forms.ChoiceField(
903 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
904 )
905 attendance = forms.ChoiceField(
906 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
907 )
908 contact = forms.ChoiceField(
909 choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,
910 )
911 country = forms.ChoiceField(
912 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
913 )
914 venue = forms.ChoiceField(
915 choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,
916 )
917 address = forms.ChoiceField(
918 choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,
919 )
920 latitude = forms.ChoiceField(
921 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
922 )
923 longitude = forms.ChoiceField(
924 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
925 )
926 learners_pre = forms.ChoiceField(
927 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
928 )
929 learners_post = forms.ChoiceField(
930 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
931 )
932 instructors_pre = forms.ChoiceField(
933 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
934 )
935 instructors_post = forms.ChoiceField(
936 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
937 )
938 learners_longterm = forms.ChoiceField(
939 choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,
940 )
941 notes = forms.ChoiceField(
942 choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,
943 )
944 task_set = forms.ChoiceField(
945 choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,
946 )
947 todoitem_set = forms.ChoiceField(
948 choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,
949 )
950
951
952 class InvoiceRequestForm(forms.ModelForm):
953 class Meta:
954 model = InvoiceRequest
955 fields = (
956 'organization', 'reason', 'reason_other', 'date', 'event',
957 'event_location', 'item_id', 'postal_number', 'contact_name',
958 'contact_email', 'contact_phone', 'full_address', 'amount',
959 'currency', 'currency_other', 'breakdown', 'vendor_form_required',
960 'vendor_form_link', 'form_W9', 'receipts_sent',
961 'shared_receipts_link', 'notes',
962 )
963 widgets = {
964 'reason': RadioSelect,
965 'currency': RadioSelect,
966 'vendor_form_required': RadioSelect,
967 'receipts_sent': RadioSelect,
968 }
969
970
971 class InvoiceRequestUpdateForm(forms.ModelForm):
972 class Meta:
973 model = InvoiceRequest
974 fields = (
975 'status', 'sent_date', 'paid_date', 'notes'
976 )
977
978
979 class TrainingRequestForm(forms.ModelForm):
980 agreed_to_code_of_conduct = forms.BooleanField(
981 required=True,
982 initial=False,
983 label='*I agree to abide by Software and Data Carpentry\'s Code of Conduct',
984 help_text='The Code of Conduct can be found at '
985 '<a href="http://software-carpentry.org/conduct/" target="_blank">'
986 'http://software-carpentry.org/conduct/</a>'
987 'and <a href="http://datacarpentry.org/code-of-conduct/" target="_blank">'
988 'http://datacarpentry.org/code-of-conduct/</a>',
989 )
990 agreed_to_complete_training = forms.BooleanField(
991 required=True,
992 initial=False,
993 label='*I agree to complete this training within three months of the Training Course',
994 help_text='The completion steps are described at '
995 '<a href="http://swcarpentry.github.io/instructor-training/checkout/" target="_blank">'
996 'http://swcarpentry.github.io/instructor-training/checkout/</a> '
997 'and take a total of approximately 8-10 hours.',
998 )
999 agreed_to_teach_workshops = forms.BooleanField(
1000 required=True,
1001 initial=False,
1002 label='*I agree to teach a Software Carpentry or Data Carpentry '
1003 'workshop within 12 months of this Training Course',
1004 )
1005 captcha = ReCaptchaField()
1006
1007 class Meta:
1008 model = TrainingRequest
1009 fields = (
1010 'group_name',
1011 'personal',
1012 'family',
1013 'email',
1014 'github',
1015 'occupation',
1016 'occupation_other',
1017 'affiliation',
1018 'location',
1019 'country',
1020 'domains',
1021 'domains_other',
1022 'gender',
1023 'gender_other',
1024 'previous_involvement',
1025 'previous_training',
1026 'previous_training_other',
1027 'previous_training_explanation',
1028 'previous_experience',
1029 'previous_experience_other',
1030 'previous_experience_explanation',
1031 'programming_language_usage_frequency',
1032 'reason',
1033 'teaching_frequency_expectation',
1034 'teaching_frequency_expectation_other',
1035 'max_travelling_frequency',
1036 'max_travelling_frequency_other',
1037 'additional_skills',
1038 'comment',
1039 )
1040 widgets = {
1041 'occupation': forms.RadioSelect(),
1042 'domains': forms.CheckboxSelectMultiple(),
1043 'gender': forms.RadioSelect(),
1044 'previous_involvement': forms.CheckboxSelectMultiple(),
1045 'previous_training': forms.RadioSelect(),
1046 'previous_experience': forms.RadioSelect(),
1047 'programming_language_usage_frequency': forms.RadioSelect(),
1048 'teaching_frequency_expectation': forms.RadioSelect(),
1049 'max_travelling_frequency': forms.RadioSelect(),
1050 }
1051
1052
1053 class AutoUpdateProfileForm(forms.ModelForm):
1054 username = forms.CharField(disabled=True, required=False)
1055 github = forms.CharField(
1056 disabled=True, required=False,
1057 help_text='If you want to change your github username, please email '
1058 'us at <a href="mailto:[email protected]">'
1059 '[email protected]</a>.')
1060
1061 languages = selectable.AutoCompleteSelectMultipleField(
1062 lookup_class=lookups.LanguageLookup,
1063 label='Languages',
1064 required=False,
1065 widget=selectable.AutoComboboxSelectMultipleWidget,
1066 )
1067
1068 class Meta:
1069 model = Person
1070 fields = [
1071 'personal',
1072 'middle',
1073 'family',
1074 'email',
1075 'gender',
1076 'may_contact',
1077 'airport',
1078 'github',
1079 'twitter',
1080 'url',
1081 'username',
1082 'affiliation',
1083 'domains',
1084 'lessons',
1085 'languages',
1086 ]
1087 readonly_fields = (
1088 'username',
1089 'github',
1090 )
1091 widgets = {
1092 'occupation': forms.RadioSelect(),
1093 'gender': forms.RadioSelect(),
1094 'domains': forms.CheckboxSelectMultiple(),
1095 'lessons': forms.CheckboxSelectMultiple(),
1096 }
1097
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/workshops/forms.py b/workshops/forms.py
--- a/workshops/forms.py
+++ b/workshops/forms.py
@@ -359,6 +359,7 @@
# thanks to this, {{ form.media }} in the template will generate
# a <link href=""> (for CSS files) or <script src=""> (for JS files)
js = (
+ 'date_yyyymmdd.js',
'import_from_url.js', 'update_from_url.js',
'online_country.js',
)
| {"golden_diff": "diff --git a/workshops/forms.py b/workshops/forms.py\n--- a/workshops/forms.py\n+++ b/workshops/forms.py\n@@ -359,6 +359,7 @@\n # thanks to this, {{ form.media }} in the template will generate\n # a <link href=\"\"> (for CSS files) or <script src=\"\"> (for JS files)\n js = (\n+ 'date_yyyymmdd.js',\n 'import_from_url.js', 'update_from_url.js',\n 'online_country.js',\n )\n", "issue": "Date(?) stamp when updating record from url\nWhen updating an event from url, the list of instructors/helpers is recorded in the notes field with a heading that looks like a date but isn't. For example an update I made today has the heading \"UPDATE 2016-5-4:\"\n\nWhat is this heading supposed to be? Can we make it be the real date of the update (i.e., UPDATE 2016-06-16:\")\n\n", "before_files": [{"content": "import re\n\nfrom django import forms\nfrom django.core.validators import RegexValidator\nfrom django.forms import (\n HiddenInput, CheckboxSelectMultiple, TextInput, modelformset_factory,\n RadioSelect,\n)\n\nfrom captcha.fields import ReCaptchaField\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Div, HTML, Submit\nfrom crispy_forms.bootstrap import FormActions\nfrom django_countries import Countries\nfrom django_countries.fields import CountryField\nfrom selectable import forms as selectable\n\nfrom workshops.models import (\n Award, Event, Lesson, Person, Task, Airport, Organization,\n EventRequest, ProfileUpdateRequest, TodoItem, Membership,\n Sponsorship, InvoiceRequest, EventSubmission, Language,\n TrainingRequest,\n DCSelfOrganizedEventRequest,\n)\nfrom workshops import lookups\n\n\nAUTOCOMPLETE_HELP_TEXT = (\n \"Autocomplete field; type characters to view available options, \"\n \"then select desired item from list.\"\n)\n\n\nclass BootstrapHelper(FormHelper):\n \"\"\"Layout and behavior for crispy-displayed forms.\"\"\"\n form_class = 'form-horizontal'\n label_class = 'col-lg-2'\n field_class = 'col-lg-8'\n html5_required = True\n\n def __init__(self, form=None):\n super().__init__(form)\n\n self.attrs['role'] = 'form'\n self.inputs.append(Submit('submit', 'Submit'))\n\n\nclass BootstrapHelperGet(BootstrapHelper):\n \"\"\"Force form to use GET instead of default POST.\"\"\"\n form_method = 'get'\n\n\nclass BootstrapHelperWithAdd(BootstrapHelper):\n \"\"\"Change form's 'Submit' to 'Add'.\"\"\"\n\n def __init__(self, form=None):\n super().__init__(form)\n\n self.inputs[-1] = Submit('submit', 'Add')\n\n\nclass BootstrapHelperFilter(FormHelper):\n \"\"\"A differently shaped forms (more space-efficient) for use in sidebar as\n filter forms.\"\"\"\n form_method = 'get'\n\n def __init__(self, form=None):\n super().__init__(form)\n self.attrs['role'] = 'form'\n self.inputs.append(Submit('', 'Submit'))\n\n\nclass BootstrapHelperWiderLabels(BootstrapHelper):\n \"\"\"SWCEventRequestForm and DCEventRequestForm have long labels, so this\n helper is used to address that issue.\"\"\"\n label_class = 'col-lg-3'\n field_class = 'col-lg-7'\n\n\nclass BootstrapHelperFormsetInline(BootstrapHelper):\n \"\"\"For use in inline formsets.\"\"\"\n template = 'bootstrap/table_inline_formset.html'\n\n\nbootstrap_helper = BootstrapHelper()\nbootstrap_helper_get = BootstrapHelperGet()\nbootstrap_helper_with_add = BootstrapHelperWithAdd()\nbootstrap_helper_filter = BootstrapHelperFilter()\nbootstrap_helper_wider_labels = BootstrapHelperWiderLabels()\nbootstrap_helper_inline_formsets = BootstrapHelperFormsetInline()\n\n\nclass WorkshopStaffForm(forms.Form):\n '''Represent instructor matching form.'''\n\n latitude = forms.FloatField(label='Latitude',\n min_value=-90.0,\n max_value=90.0,\n required=False)\n longitude = forms.FloatField(label='Longitude',\n min_value=-180.0,\n max_value=180.0,\n required=False)\n airport = selectable.AutoCompleteSelectField(\n lookup_class=lookups.AirportLookup,\n label='Airport',\n required=False,\n widget=selectable.AutoComboboxSelectWidget(\n lookup_class=lookups.AirportLookup,\n ),\n )\n languages = selectable.AutoCompleteSelectMultipleField(\n lookup_class=lookups.LanguageLookup,\n label='Languages',\n required=False,\n widget=selectable.AutoComboboxSelectMultipleWidget,\n )\n\n country = forms.MultipleChoiceField(choices=[])\n\n lessons = forms.ModelMultipleChoiceField(queryset=Lesson.objects.all(),\n widget=CheckboxSelectMultiple(),\n required=False)\n\n INSTRUCTOR_BADGE_CHOICES = (\n ('swc-instructor', 'Software Carpentry Instructor'),\n ('dc-instructor', 'Data Carpentry Instructor'),\n )\n instructor_badges = forms.MultipleChoiceField(\n choices=INSTRUCTOR_BADGE_CHOICES,\n widget=CheckboxSelectMultiple(),\n required=False,\n )\n\n GENDER_CHOICES = ((None, '---------'), ) + Person.GENDER_CHOICES\n gender = forms.ChoiceField(choices=GENDER_CHOICES, required=False)\n\n was_helper = forms.BooleanField(\n required=False, label='Was helper at least once before')\n was_organizer = forms.BooleanField(\n required=False, label='Was organizer at least once before')\n is_in_progress_trainee = forms.BooleanField(\n required=False, label='Is an in-progress instructor trainee')\n\n def __init__(self, *args, **kwargs):\n '''Build form layout dynamically.'''\n super().__init__(*args, **kwargs)\n\n # dynamically build choices for country field\n only = Airport.objects.distinct().exclude(country='') \\\n .exclude(country=None) \\\n .values_list('country', flat=True)\n countries = Countries()\n countries.only = only\n\n choices = list(countries)\n self.fields['country'] = forms.MultipleChoiceField(choices=choices,\n required=False)\n\n self.helper = FormHelper(self)\n self.helper.form_class = 'form-inline'\n self.helper.form_method = 'get'\n self.helper.layout = Layout(\n Div(\n Div(HTML('Location close to'), css_class='panel-heading'),\n Div('airport', css_class='panel-body'),\n Div(HTML('<b>OR</b>'), css_class='panel-footer'),\n Div('country', css_class='panel-body'),\n Div(HTML('<b>OR</b>'), css_class='panel-footer'),\n Div('latitude', 'longitude', css_class='panel-body'),\n css_class='panel panel-default ',\n ),\n 'instructor_badges',\n 'was_helper',\n 'was_organizer',\n 'is_in_progress_trainee',\n 'languages',\n 'gender',\n 'lessons',\n FormActions(\n Submit('submit', 'Submit'),\n ),\n )\n\n def clean(self):\n cleaned_data = super().clean()\n lat = bool(cleaned_data.get('latitude'))\n lng = bool(cleaned_data.get('longitude'))\n airport = bool(cleaned_data.get('airport'))\n country = bool(cleaned_data.get('country'))\n latlng = lat and lng\n\n # if searching by coordinates, then there must be both lat & lng\n # present\n if lat ^ lng:\n raise forms.ValidationError(\n 'Must specify both latitude and longitude if searching by '\n 'coordinates')\n\n # User must search by airport, or country, or coordinates, or none\n # of them. Sum of boolean elements must be equal 0 (if general search)\n # or 1 (if searching by airport OR country OR lat/lng).\n if sum([airport, country, latlng]) not in [0, 1]:\n raise forms.ValidationError(\n 'Must specify an airport OR a country, OR use coordinates, OR '\n 'none of them.')\n return cleaned_data\n\n\nclass PersonBulkAddForm(forms.Form):\n '''Represent CSV upload form for bulk adding people.'''\n\n file = forms.FileField()\n\n\nclass SearchForm(forms.Form):\n '''Represent general searching form.'''\n\n term = forms.CharField(label='term',\n max_length=100)\n in_organizations = forms.BooleanField(label='in organizations',\n required=False,\n initial=True)\n in_events = forms.BooleanField(label='in events',\n required=False,\n initial=True)\n in_persons = forms.BooleanField(label='in persons',\n required=False,\n initial=True)\n in_airports = forms.BooleanField(label='in airports',\n required=False,\n initial=True)\n\n\nclass DebriefForm(forms.Form):\n '''Represent general debrief form.'''\n begin_date = forms.DateField(\n label='Begin date as YYYY-MM-DD',\n input_formats=['%Y-%m-%d', ]\n )\n end_date = forms.DateField(\n label='End date as YYYY-MD-DD',\n input_formats=['%Y-%m-%d', ]\n )\n\n\nclass EventForm(forms.ModelForm):\n host = selectable.AutoCompleteSelectField(\n lookup_class=lookups.OrganizationLookup,\n label='Host',\n required=True,\n help_text=Event._meta.get_field('host').help_text,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n administrator = selectable.AutoCompleteSelectField(\n lookup_class=lookups.OrganizationLookup,\n label='Administrator',\n required=False,\n help_text=Event._meta.get_field('administrator').help_text,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n assigned_to = selectable.AutoCompleteSelectField(\n lookup_class=lookups.AdminLookup,\n label='Assigned to',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n language = selectable.AutoCompleteSelectField(\n lookup_class=lookups.LanguageLookup,\n label='Language',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n country = CountryField().formfield(\n required=False,\n help_text=Event._meta.get_field('country').help_text,\n )\n\n admin_fee = forms.DecimalField(min_value=0, decimal_places=2,\n required=False, widget=TextInput)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['slug'].widget.attrs['placeholder'] = 'YYYY-MM-DD-location'\n self.fields['start'].widget.attrs['placeholder'] = 'YYYY-MM-DD'\n self.fields['end'].widget.attrs['placeholder'] = 'YYYY-MM-DD'\n\n self.helper = BootstrapHelper(self)\n\n idx_start = self.helper['country'].slice[0][0][0]\n idx_end = self.helper['longitude'].slice[0][0][0]\n # wrap all venue fields within <div class='panel-body'>\n self.helper[idx_start:idx_end + 1] \\\n .wrap_together(Div, css_class='panel-body')\n # wrap <div class='panel-body'> within <div class='panel panel-\u2026'>\n self.helper[idx_start].wrap_together(Div,\n css_class='panel panel-default')\n # add <div class='panel-heading'>Loc. details</div> inside \"div.panel\"\n self.helper.layout[idx_start].insert(0, Div(HTML('Location details'),\n css_class='panel-heading'))\n\n id_learners_pre = self.helper['learners_pre'].slice[0][0][0]\n id_learners_longterm = self.helper['learners_longterm'].slice[0][0][0]\n # wrap all survey fields within <div class='panel-body'>\n self.helper[id_learners_pre:id_learners_longterm + 1] \\\n .wrap_together(Div, css_class='panel-body')\n # wrap <div class='panel-body'> within <div class='panel panel-\u2026'>\n self.helper[id_learners_pre].wrap_together(\n Div, css_class='panel panel-default')\n # add <div class='panel-heading'>Venue details</div> inside \"div.panel\"\n self.helper.layout[id_learners_pre].insert(\n 0, Div(HTML('Survey results'), css_class='panel-heading'))\n\n def clean_slug(self):\n # Ensure slug is in \"YYYY-MM-DD-location\" format\n data = self.cleaned_data['slug']\n match = re.match('(\\d{4}|x{4})-(\\d{2}|x{2})-(\\d{2}|x{2})-.+', data)\n if not match:\n raise forms.ValidationError('Slug must be in \"YYYY-MM-DD-location\"'\n ' format, where \"YYYY\", \"MM\", \"DD\" can'\n ' be unspecified (ie. \"xx\").')\n return data\n\n def clean_end(self):\n \"\"\"Ensure end >= start.\"\"\"\n start = self.cleaned_data['start']\n end = self.cleaned_data['end']\n\n if start and end and end < start:\n raise forms.ValidationError('Must not be earlier than start date.')\n return end\n\n class Meta:\n model = Event\n # reorder fields, don't display 'deleted' field\n fields = ('slug', 'completed', 'start', 'end', 'host', 'administrator',\n 'assigned_to', 'tags', 'url', 'language', 'reg_key',\n 'admin_fee', 'invoice_status', 'attendance', 'contact',\n 'notes', 'country', 'venue', 'address', 'latitude',\n 'longitude', 'learners_pre', 'learners_post',\n 'instructors_pre', 'instructors_post', 'learners_longterm')\n # WARNING: don't change put any fields between 'country' and\n # 'longitude' that don't relate to the venue of the event\n\n widgets = {\n 'attendance': TextInput,\n 'latitude': TextInput,\n 'longitude': TextInput,\n 'invoice_status': RadioSelect,\n }\n\n class Media:\n # thanks to this, {{ form.media }} in the template will generate\n # a <link href=\"\"> (for CSS files) or <script src=\"\"> (for JS files)\n js = (\n 'import_from_url.js', 'update_from_url.js',\n 'online_country.js',\n )\n\n\nclass TaskForm(forms.ModelForm):\n\n person = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Task\n fields = '__all__'\n widgets = {'event': HiddenInput}\n\n\nclass TaskFullForm(TaskForm):\n\n event = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Task\n fields = '__all__'\n\n\nclass PersonForm(forms.ModelForm):\n\n airport = selectable.AutoCompleteSelectField(\n lookup_class=lookups.AirportLookup,\n label='Airport',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n languages = selectable.AutoCompleteSelectMultipleField(\n lookup_class=lookups.LanguageLookup,\n label='Languages',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectMultipleWidget,\n )\n\n class Meta:\n model = Person\n # don't display the 'password', 'user_permissions',\n # 'groups' or 'is_superuser' fields\n # + reorder fields\n fields = ['username', 'personal', 'middle', 'family', 'may_contact',\n 'email', 'gender', 'airport', 'affiliation', 'github',\n 'twitter', 'url', 'occupation', 'orcid', 'notes', 'lessons',\n 'domains', 'languages']\n\n\nclass PersonCreateForm(PersonForm):\n class Meta(PersonForm.Meta):\n # remove 'username' field as it's being populated after form save\n # in the `views.PersonCreate.form_valid`\n fields = PersonForm.Meta.fields.copy()\n fields.remove('username')\n\n\nclass PersonPermissionsForm(forms.ModelForm):\n class Meta:\n model = Person\n # only display administration-related fields: groups, permissions,\n # being a superuser or being active (== ability to log in)\n fields = [\n 'is_active',\n 'is_superuser',\n 'user_permissions',\n 'groups',\n ]\n\n\nclass PersonsSelectionForm(forms.Form):\n\n person_a = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person From',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n person_b = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person To',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n\nclass PersonsMergeForm(forms.Form):\n TWO = (\n ('obj_a', 'Use A'),\n ('obj_b', 'Use B'),\n )\n THREE = TWO + (('combine', 'Combine'), )\n DEFAULT = 'obj_a'\n\n person_a = forms.ModelChoiceField(queryset=Person.objects.all(),\n widget=forms.HiddenInput)\n\n person_b = forms.ModelChoiceField(queryset=Person.objects.all(),\n widget=forms.HiddenInput)\n\n id = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n username = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n personal = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n middle = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n family = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n email = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n may_contact = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n gender = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n airport = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n github = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n twitter = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n url = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n notes = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n affiliation = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n occupation = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n orcid = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n award_set = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n qualification_set = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n label='Lessons',\n )\n domains = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n languages = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n task_set = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n is_active = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n\n\nclass BadgeAwardForm(forms.ModelForm):\n\n person = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n event = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n awarded_by = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Awarded by',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Award\n fields = '__all__'\n widgets = {'badge': HiddenInput}\n\n\nclass PersonAwardForm(forms.ModelForm):\n\n event = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n awarded_by = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Awarded by',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Award\n fields = '__all__'\n widgets = {'person': HiddenInput}\n\n\nclass PersonTaskForm(forms.ModelForm):\n\n event = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Task\n fields = '__all__'\n widgets = {'person': HiddenInput}\n\n\nclass OrganizationForm(forms.ModelForm):\n domain = forms.CharField(\n max_length=Organization._meta.get_field('domain').max_length,\n validators=[\n RegexValidator(\n '[^\\w\\.-]+', inverse_match=True,\n message='Please enter only the domain (such as \"math.esu.edu\")'\n ' without a leading \"http://\" or a trailing \"/\".')\n ],\n )\n\n class Meta:\n model = Organization\n fields = ['domain', 'fullname', 'country', 'notes']\n\n\nclass MembershipForm(forms.ModelForm):\n class Meta:\n model = Membership\n fields = '__all__'\n widgets = {'host': HiddenInput, }\n\n\nclass SponsorshipForm(forms.ModelForm):\n organization = selectable.AutoCompleteSelectField(\n lookup_class=lookups.OrganizationLookup,\n label='Organization',\n required=True,\n help_text=Sponsorship._meta.get_field('organization').help_text,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n contact = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Contact',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Sponsorship\n fields = '__all__'\n widgets = {'event': HiddenInput, }\n\n\nclass SWCEventRequestForm(forms.ModelForm):\n captcha = ReCaptchaField()\n workshop_type = forms.CharField(initial='swc', widget=forms.HiddenInput())\n understand_admin_fee = forms.BooleanField(\n required=True,\n initial=False,\n label='I understand the Software Carpentry Foundation\\'s '\n 'administration fee.',\n help_text='<a href=\"http://software-carpentry.org/blog/2015/07/changes'\n '-to-admin-fee.html\" target=\"_blank\">Look up administration '\n 'fees</a>.',\n )\n language = selectable.AutoCompleteSelectField(\n lookup_class=lookups.LanguageLookup,\n label='Language',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = EventRequest\n exclude = ('active', 'created_at', 'last_updated_at', 'assigned_to',\n 'data_types', 'data_types_other',\n 'attendee_data_analysis_level', 'fee_waiver_request')\n widgets = {\n 'approx_attendees': forms.RadioSelect(),\n 'attendee_domains': forms.CheckboxSelectMultiple(),\n 'attendee_academic_levels': forms.CheckboxSelectMultiple(),\n 'attendee_computing_levels': forms.CheckboxSelectMultiple(),\n 'travel_reimbursement': forms.RadioSelect(),\n 'admin_fee_payment': forms.RadioSelect(),\n }\n\n\nclass DCEventRequestForm(SWCEventRequestForm):\n workshop_type = forms.CharField(initial='dc', widget=forms.HiddenInput())\n understand_admin_fee = forms.BooleanField(\n required=True,\n initial=False,\n label='I understand the Data Carpentry\\'s administration fee.',\n help_text='There is a per-workshop fee for Data Carpentry to cover '\n 'administrative and core development costs. The per-workshop fee is '\n 'currently $2500. We work to find local instructors when possible, but'\n ' the host institute will also need to pay for instructors travel and'\n ' lodging if they need to travel. Therefore overall workshop costs are'\n ' $2500 - $6000.',\n )\n\n class Meta(SWCEventRequestForm.Meta):\n exclude = ('active', 'created_at', 'last_updated_at', 'assigned_to',\n 'admin_fee_payment', 'attendee_computing_levels')\n widgets = {\n 'approx_attendees': forms.RadioSelect(),\n 'attendee_domains': forms.CheckboxSelectMultiple(),\n 'data_types': forms.RadioSelect(),\n 'attendee_academic_levels': forms.CheckboxSelectMultiple(),\n 'attendee_data_analysis_level': forms.CheckboxSelectMultiple(),\n 'travel_reimbursement': forms.RadioSelect(),\n }\n\n\nclass EventSubmitFormNoCaptcha(forms.ModelForm):\n class Meta:\n model = EventSubmission\n exclude = ('active', 'created_at', 'last_updated_at', 'assigned_to')\n\n\nclass EventSubmitForm(EventSubmitFormNoCaptcha):\n captcha = ReCaptchaField()\n\n\nclass DCSelfOrganizedEventRequestFormNoCaptcha(forms.ModelForm):\n # the easiest way to make these fields required without rewriting their\n # verbose names or help texts\n handle_registration = DCSelfOrganizedEventRequest._meta \\\n .get_field('handle_registration').formfield(required=True)\n distribute_surveys = DCSelfOrganizedEventRequest._meta \\\n .get_field('distribute_surveys').formfield(required=True)\n follow_code_of_conduct = DCSelfOrganizedEventRequest._meta \\\n .get_field('follow_code_of_conduct').formfield(required=True)\n\n class Meta:\n model = DCSelfOrganizedEventRequest\n exclude = ('created_at', 'last_updated_at', 'assigned_to')\n widgets = {\n 'instructor_status': forms.RadioSelect(),\n 'is_partner': forms.RadioSelect(),\n 'domains': forms.CheckboxSelectMultiple(),\n 'topics': forms.CheckboxSelectMultiple(),\n 'attendee_academic_levels': forms.CheckboxSelectMultiple(),\n 'attendee_data_analysis_level': forms.CheckboxSelectMultiple(),\n 'payment': forms.RadioSelect(),\n }\n\n\nclass DCSelfOrganizedEventRequestForm(\n DCSelfOrganizedEventRequestFormNoCaptcha):\n captcha = ReCaptchaField()\n\n class Meta(DCSelfOrganizedEventRequestFormNoCaptcha.Meta):\n exclude = ('active', 'created_at', 'last_updated_at', 'assigned_to')\n\n\nclass ProfileUpdateRequestFormNoCaptcha(forms.ModelForm):\n languages = selectable.AutoCompleteSelectMultipleField(\n lookup_class=lookups.LanguageLookup,\n label='Languages you can teach in',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectMultipleWidget,\n )\n\n class Meta:\n model = ProfileUpdateRequest\n exclude = ('active', 'created_at', 'last_updated_at')\n widgets = {\n 'domains': forms.CheckboxSelectMultiple(),\n 'lessons': forms.CheckboxSelectMultiple(),\n 'occupation': forms.RadioSelect(),\n 'gender': forms.RadioSelect(),\n }\n\n def clean_twitter(self):\n \"\"\"Remove '@'s from the beginning of the Twitter handle.\"\"\"\n twitter_handle = self.cleaned_data['twitter']\n return re.sub('^@+', '', twitter_handle)\n\n\nclass ProfileUpdateRequestForm(ProfileUpdateRequestFormNoCaptcha):\n captcha = ReCaptchaField()\n\n\nclass PersonLookupForm(forms.Form):\n person = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n\nclass AdminLookupForm(forms.Form):\n person = selectable.AutoCompleteSelectField(\n lookup_class=lookups.AdminLookup,\n label='Administrator',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n\nclass SimpleTodoForm(forms.ModelForm):\n class Meta:\n model = TodoItem\n fields = ('title', 'due', 'additional', 'completed', 'event')\n widgets = {'event': HiddenInput, }\n\n# `extra`: number of forms populated via `initial` parameter; it's hardcoded in\n# `views.todos_add`\nTodoFormSet = modelformset_factory(TodoItem, form=SimpleTodoForm, extra=10)\n\n\nclass EventsSelectionForm(forms.Form):\n event_a = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event A',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n event_b = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event B',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n\nclass EventsMergeForm(forms.Form):\n TWO = (\n ('obj_a', 'Use A'),\n ('obj_b', 'Use B'),\n )\n THREE = TWO + (('combine', 'Combine'), )\n DEFAULT = 'obj_a'\n\n event_a = forms.ModelChoiceField(queryset=Event.objects.all(),\n widget=forms.HiddenInput)\n\n event_b = forms.ModelChoiceField(queryset=Event.objects.all(),\n widget=forms.HiddenInput)\n\n id = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n slug = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n completed = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n assigned_to = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n start = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n end = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n host = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n administrator = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n tags = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n url = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n language = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n reg_key = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n admin_fee = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n invoice_status = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n attendance = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n contact = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n country = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n venue = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n address = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n latitude = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n longitude = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n learners_pre = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n learners_post = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n instructors_pre = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n instructors_post = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n learners_longterm = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n notes = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n task_set = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n todoitem_set = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n\n\nclass InvoiceRequestForm(forms.ModelForm):\n class Meta:\n model = InvoiceRequest\n fields = (\n 'organization', 'reason', 'reason_other', 'date', 'event',\n 'event_location', 'item_id', 'postal_number', 'contact_name',\n 'contact_email', 'contact_phone', 'full_address', 'amount',\n 'currency', 'currency_other', 'breakdown', 'vendor_form_required',\n 'vendor_form_link', 'form_W9', 'receipts_sent',\n 'shared_receipts_link', 'notes',\n )\n widgets = {\n 'reason': RadioSelect,\n 'currency': RadioSelect,\n 'vendor_form_required': RadioSelect,\n 'receipts_sent': RadioSelect,\n }\n\n\nclass InvoiceRequestUpdateForm(forms.ModelForm):\n class Meta:\n model = InvoiceRequest\n fields = (\n 'status', 'sent_date', 'paid_date', 'notes'\n )\n\n\nclass TrainingRequestForm(forms.ModelForm):\n agreed_to_code_of_conduct = forms.BooleanField(\n required=True,\n initial=False,\n label='*I agree to abide by Software and Data Carpentry\\'s Code of Conduct',\n help_text='The Code of Conduct can be found at '\n '<a href=\"http://software-carpentry.org/conduct/\" target=\"_blank\">'\n 'http://software-carpentry.org/conduct/</a>'\n 'and <a href=\"http://datacarpentry.org/code-of-conduct/\" target=\"_blank\">'\n 'http://datacarpentry.org/code-of-conduct/</a>',\n )\n agreed_to_complete_training = forms.BooleanField(\n required=True,\n initial=False,\n label='*I agree to complete this training within three months of the Training Course',\n help_text='The completion steps are described at '\n '<a href=\"http://swcarpentry.github.io/instructor-training/checkout/\" target=\"_blank\">'\n 'http://swcarpentry.github.io/instructor-training/checkout/</a> '\n 'and take a total of approximately 8-10 hours.',\n )\n agreed_to_teach_workshops = forms.BooleanField(\n required=True,\n initial=False,\n label='*I agree to teach a Software Carpentry or Data Carpentry '\n 'workshop within 12 months of this Training Course',\n )\n captcha = ReCaptchaField()\n\n class Meta:\n model = TrainingRequest\n fields = (\n 'group_name',\n 'personal',\n 'family',\n 'email',\n 'github',\n 'occupation',\n 'occupation_other',\n 'affiliation',\n 'location',\n 'country',\n 'domains',\n 'domains_other',\n 'gender',\n 'gender_other',\n 'previous_involvement',\n 'previous_training',\n 'previous_training_other',\n 'previous_training_explanation',\n 'previous_experience',\n 'previous_experience_other',\n 'previous_experience_explanation',\n 'programming_language_usage_frequency',\n 'reason',\n 'teaching_frequency_expectation',\n 'teaching_frequency_expectation_other',\n 'max_travelling_frequency',\n 'max_travelling_frequency_other',\n 'additional_skills',\n 'comment',\n )\n widgets = {\n 'occupation': forms.RadioSelect(),\n 'domains': forms.CheckboxSelectMultiple(),\n 'gender': forms.RadioSelect(),\n 'previous_involvement': forms.CheckboxSelectMultiple(),\n 'previous_training': forms.RadioSelect(),\n 'previous_experience': forms.RadioSelect(),\n 'programming_language_usage_frequency': forms.RadioSelect(),\n 'teaching_frequency_expectation': forms.RadioSelect(),\n 'max_travelling_frequency': forms.RadioSelect(),\n }\n\n\nclass AutoUpdateProfileForm(forms.ModelForm):\n username = forms.CharField(disabled=True, required=False)\n github = forms.CharField(\n disabled=True, required=False,\n help_text='If you want to change your github username, please email '\n 'us at <a href=\"mailto:[email protected]\">'\n '[email protected]</a>.')\n\n languages = selectable.AutoCompleteSelectMultipleField(\n lookup_class=lookups.LanguageLookup,\n label='Languages',\n required=False,\n widget=selectable.AutoComboboxSelectMultipleWidget,\n )\n\n class Meta:\n model = Person\n fields = [\n 'personal',\n 'middle',\n 'family',\n 'email',\n 'gender',\n 'may_contact',\n 'airport',\n 'github',\n 'twitter',\n 'url',\n 'username',\n 'affiliation',\n 'domains',\n 'lessons',\n 'languages',\n ]\n readonly_fields = (\n 'username',\n 'github',\n )\n widgets = {\n 'occupation': forms.RadioSelect(),\n 'gender': forms.RadioSelect(),\n 'domains': forms.CheckboxSelectMultiple(),\n 'lessons': forms.CheckboxSelectMultiple(),\n }\n", "path": "workshops/forms.py"}], "after_files": [{"content": "import re\n\nfrom django import forms\nfrom django.core.validators import RegexValidator\nfrom django.forms import (\n HiddenInput, CheckboxSelectMultiple, TextInput, modelformset_factory,\n RadioSelect,\n)\n\nfrom captcha.fields import ReCaptchaField\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Div, HTML, Submit\nfrom crispy_forms.bootstrap import FormActions\nfrom django_countries import Countries\nfrom django_countries.fields import CountryField\nfrom selectable import forms as selectable\n\nfrom workshops.models import (\n Award, Event, Lesson, Person, Task, Airport, Organization,\n EventRequest, ProfileUpdateRequest, TodoItem, Membership,\n Sponsorship, InvoiceRequest, EventSubmission, Language,\n TrainingRequest,\n DCSelfOrganizedEventRequest,\n)\nfrom workshops import lookups\n\n\nAUTOCOMPLETE_HELP_TEXT = (\n \"Autocomplete field; type characters to view available options, \"\n \"then select desired item from list.\"\n)\n\n\nclass BootstrapHelper(FormHelper):\n \"\"\"Layout and behavior for crispy-displayed forms.\"\"\"\n form_class = 'form-horizontal'\n label_class = 'col-lg-2'\n field_class = 'col-lg-8'\n html5_required = True\n\n def __init__(self, form=None):\n super().__init__(form)\n\n self.attrs['role'] = 'form'\n self.inputs.append(Submit('submit', 'Submit'))\n\n\nclass BootstrapHelperGet(BootstrapHelper):\n \"\"\"Force form to use GET instead of default POST.\"\"\"\n form_method = 'get'\n\n\nclass BootstrapHelperWithAdd(BootstrapHelper):\n \"\"\"Change form's 'Submit' to 'Add'.\"\"\"\n\n def __init__(self, form=None):\n super().__init__(form)\n\n self.inputs[-1] = Submit('submit', 'Add')\n\n\nclass BootstrapHelperFilter(FormHelper):\n \"\"\"A differently shaped forms (more space-efficient) for use in sidebar as\n filter forms.\"\"\"\n form_method = 'get'\n\n def __init__(self, form=None):\n super().__init__(form)\n self.attrs['role'] = 'form'\n self.inputs.append(Submit('', 'Submit'))\n\n\nclass BootstrapHelperWiderLabels(BootstrapHelper):\n \"\"\"SWCEventRequestForm and DCEventRequestForm have long labels, so this\n helper is used to address that issue.\"\"\"\n label_class = 'col-lg-3'\n field_class = 'col-lg-7'\n\n\nclass BootstrapHelperFormsetInline(BootstrapHelper):\n \"\"\"For use in inline formsets.\"\"\"\n template = 'bootstrap/table_inline_formset.html'\n\n\nbootstrap_helper = BootstrapHelper()\nbootstrap_helper_get = BootstrapHelperGet()\nbootstrap_helper_with_add = BootstrapHelperWithAdd()\nbootstrap_helper_filter = BootstrapHelperFilter()\nbootstrap_helper_wider_labels = BootstrapHelperWiderLabels()\nbootstrap_helper_inline_formsets = BootstrapHelperFormsetInline()\n\n\nclass WorkshopStaffForm(forms.Form):\n '''Represent instructor matching form.'''\n\n latitude = forms.FloatField(label='Latitude',\n min_value=-90.0,\n max_value=90.0,\n required=False)\n longitude = forms.FloatField(label='Longitude',\n min_value=-180.0,\n max_value=180.0,\n required=False)\n airport = selectable.AutoCompleteSelectField(\n lookup_class=lookups.AirportLookup,\n label='Airport',\n required=False,\n widget=selectable.AutoComboboxSelectWidget(\n lookup_class=lookups.AirportLookup,\n ),\n )\n languages = selectable.AutoCompleteSelectMultipleField(\n lookup_class=lookups.LanguageLookup,\n label='Languages',\n required=False,\n widget=selectable.AutoComboboxSelectMultipleWidget,\n )\n\n country = forms.MultipleChoiceField(choices=[])\n\n lessons = forms.ModelMultipleChoiceField(queryset=Lesson.objects.all(),\n widget=CheckboxSelectMultiple(),\n required=False)\n\n INSTRUCTOR_BADGE_CHOICES = (\n ('swc-instructor', 'Software Carpentry Instructor'),\n ('dc-instructor', 'Data Carpentry Instructor'),\n )\n instructor_badges = forms.MultipleChoiceField(\n choices=INSTRUCTOR_BADGE_CHOICES,\n widget=CheckboxSelectMultiple(),\n required=False,\n )\n\n GENDER_CHOICES = ((None, '---------'), ) + Person.GENDER_CHOICES\n gender = forms.ChoiceField(choices=GENDER_CHOICES, required=False)\n\n was_helper = forms.BooleanField(\n required=False, label='Was helper at least once before')\n was_organizer = forms.BooleanField(\n required=False, label='Was organizer at least once before')\n is_in_progress_trainee = forms.BooleanField(\n required=False, label='Is an in-progress instructor trainee')\n\n def __init__(self, *args, **kwargs):\n '''Build form layout dynamically.'''\n super().__init__(*args, **kwargs)\n\n # dynamically build choices for country field\n only = Airport.objects.distinct().exclude(country='') \\\n .exclude(country=None) \\\n .values_list('country', flat=True)\n countries = Countries()\n countries.only = only\n\n choices = list(countries)\n self.fields['country'] = forms.MultipleChoiceField(choices=choices,\n required=False)\n\n self.helper = FormHelper(self)\n self.helper.form_class = 'form-inline'\n self.helper.form_method = 'get'\n self.helper.layout = Layout(\n Div(\n Div(HTML('Location close to'), css_class='panel-heading'),\n Div('airport', css_class='panel-body'),\n Div(HTML('<b>OR</b>'), css_class='panel-footer'),\n Div('country', css_class='panel-body'),\n Div(HTML('<b>OR</b>'), css_class='panel-footer'),\n Div('latitude', 'longitude', css_class='panel-body'),\n css_class='panel panel-default ',\n ),\n 'instructor_badges',\n 'was_helper',\n 'was_organizer',\n 'is_in_progress_trainee',\n 'languages',\n 'gender',\n 'lessons',\n FormActions(\n Submit('submit', 'Submit'),\n ),\n )\n\n def clean(self):\n cleaned_data = super().clean()\n lat = bool(cleaned_data.get('latitude'))\n lng = bool(cleaned_data.get('longitude'))\n airport = bool(cleaned_data.get('airport'))\n country = bool(cleaned_data.get('country'))\n latlng = lat and lng\n\n # if searching by coordinates, then there must be both lat & lng\n # present\n if lat ^ lng:\n raise forms.ValidationError(\n 'Must specify both latitude and longitude if searching by '\n 'coordinates')\n\n # User must search by airport, or country, or coordinates, or none\n # of them. Sum of boolean elements must be equal 0 (if general search)\n # or 1 (if searching by airport OR country OR lat/lng).\n if sum([airport, country, latlng]) not in [0, 1]:\n raise forms.ValidationError(\n 'Must specify an airport OR a country, OR use coordinates, OR '\n 'none of them.')\n return cleaned_data\n\n\nclass PersonBulkAddForm(forms.Form):\n '''Represent CSV upload form for bulk adding people.'''\n\n file = forms.FileField()\n\n\nclass SearchForm(forms.Form):\n '''Represent general searching form.'''\n\n term = forms.CharField(label='term',\n max_length=100)\n in_organizations = forms.BooleanField(label='in organizations',\n required=False,\n initial=True)\n in_events = forms.BooleanField(label='in events',\n required=False,\n initial=True)\n in_persons = forms.BooleanField(label='in persons',\n required=False,\n initial=True)\n in_airports = forms.BooleanField(label='in airports',\n required=False,\n initial=True)\n\n\nclass DebriefForm(forms.Form):\n '''Represent general debrief form.'''\n begin_date = forms.DateField(\n label='Begin date as YYYY-MM-DD',\n input_formats=['%Y-%m-%d', ]\n )\n end_date = forms.DateField(\n label='End date as YYYY-MD-DD',\n input_formats=['%Y-%m-%d', ]\n )\n\n\nclass EventForm(forms.ModelForm):\n host = selectable.AutoCompleteSelectField(\n lookup_class=lookups.OrganizationLookup,\n label='Host',\n required=True,\n help_text=Event._meta.get_field('host').help_text,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n administrator = selectable.AutoCompleteSelectField(\n lookup_class=lookups.OrganizationLookup,\n label='Administrator',\n required=False,\n help_text=Event._meta.get_field('administrator').help_text,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n assigned_to = selectable.AutoCompleteSelectField(\n lookup_class=lookups.AdminLookup,\n label='Assigned to',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n language = selectable.AutoCompleteSelectField(\n lookup_class=lookups.LanguageLookup,\n label='Language',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n country = CountryField().formfield(\n required=False,\n help_text=Event._meta.get_field('country').help_text,\n )\n\n admin_fee = forms.DecimalField(min_value=0, decimal_places=2,\n required=False, widget=TextInput)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['slug'].widget.attrs['placeholder'] = 'YYYY-MM-DD-location'\n self.fields['start'].widget.attrs['placeholder'] = 'YYYY-MM-DD'\n self.fields['end'].widget.attrs['placeholder'] = 'YYYY-MM-DD'\n\n self.helper = BootstrapHelper(self)\n\n idx_start = self.helper['country'].slice[0][0][0]\n idx_end = self.helper['longitude'].slice[0][0][0]\n # wrap all venue fields within <div class='panel-body'>\n self.helper[idx_start:idx_end + 1] \\\n .wrap_together(Div, css_class='panel-body')\n # wrap <div class='panel-body'> within <div class='panel panel-\u2026'>\n self.helper[idx_start].wrap_together(Div,\n css_class='panel panel-default')\n # add <div class='panel-heading'>Loc. details</div> inside \"div.panel\"\n self.helper.layout[idx_start].insert(0, Div(HTML('Location details'),\n css_class='panel-heading'))\n\n id_learners_pre = self.helper['learners_pre'].slice[0][0][0]\n id_learners_longterm = self.helper['learners_longterm'].slice[0][0][0]\n # wrap all survey fields within <div class='panel-body'>\n self.helper[id_learners_pre:id_learners_longterm + 1] \\\n .wrap_together(Div, css_class='panel-body')\n # wrap <div class='panel-body'> within <div class='panel panel-\u2026'>\n self.helper[id_learners_pre].wrap_together(\n Div, css_class='panel panel-default')\n # add <div class='panel-heading'>Venue details</div> inside \"div.panel\"\n self.helper.layout[id_learners_pre].insert(\n 0, Div(HTML('Survey results'), css_class='panel-heading'))\n\n def clean_slug(self):\n # Ensure slug is in \"YYYY-MM-DD-location\" format\n data = self.cleaned_data['slug']\n match = re.match('(\\d{4}|x{4})-(\\d{2}|x{2})-(\\d{2}|x{2})-.+', data)\n if not match:\n raise forms.ValidationError('Slug must be in \"YYYY-MM-DD-location\"'\n ' format, where \"YYYY\", \"MM\", \"DD\" can'\n ' be unspecified (ie. \"xx\").')\n return data\n\n def clean_end(self):\n \"\"\"Ensure end >= start.\"\"\"\n start = self.cleaned_data['start']\n end = self.cleaned_data['end']\n\n if start and end and end < start:\n raise forms.ValidationError('Must not be earlier than start date.')\n return end\n\n class Meta:\n model = Event\n # reorder fields, don't display 'deleted' field\n fields = ('slug', 'completed', 'start', 'end', 'host', 'administrator',\n 'assigned_to', 'tags', 'url', 'language', 'reg_key',\n 'admin_fee', 'invoice_status', 'attendance', 'contact',\n 'notes', 'country', 'venue', 'address', 'latitude',\n 'longitude', 'learners_pre', 'learners_post',\n 'instructors_pre', 'instructors_post', 'learners_longterm')\n # WARNING: don't change put any fields between 'country' and\n # 'longitude' that don't relate to the venue of the event\n\n widgets = {\n 'attendance': TextInput,\n 'latitude': TextInput,\n 'longitude': TextInput,\n 'invoice_status': RadioSelect,\n }\n\n class Media:\n # thanks to this, {{ form.media }} in the template will generate\n # a <link href=\"\"> (for CSS files) or <script src=\"\"> (for JS files)\n js = (\n 'date_yyyymmdd.js',\n 'import_from_url.js', 'update_from_url.js',\n 'online_country.js',\n )\n\n\nclass TaskForm(forms.ModelForm):\n\n person = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Task\n fields = '__all__'\n widgets = {'event': HiddenInput}\n\n\nclass TaskFullForm(TaskForm):\n\n event = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Task\n fields = '__all__'\n\n\nclass PersonForm(forms.ModelForm):\n\n airport = selectable.AutoCompleteSelectField(\n lookup_class=lookups.AirportLookup,\n label='Airport',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n languages = selectable.AutoCompleteSelectMultipleField(\n lookup_class=lookups.LanguageLookup,\n label='Languages',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectMultipleWidget,\n )\n\n class Meta:\n model = Person\n # don't display the 'password', 'user_permissions',\n # 'groups' or 'is_superuser' fields\n # + reorder fields\n fields = ['username', 'personal', 'middle', 'family', 'may_contact',\n 'email', 'gender', 'airport', 'affiliation', 'github',\n 'twitter', 'url', 'occupation', 'orcid', 'notes', 'lessons',\n 'domains', 'languages']\n\n\nclass PersonCreateForm(PersonForm):\n class Meta(PersonForm.Meta):\n # remove 'username' field as it's being populated after form save\n # in the `views.PersonCreate.form_valid`\n fields = PersonForm.Meta.fields.copy()\n fields.remove('username')\n\n\nclass PersonPermissionsForm(forms.ModelForm):\n class Meta:\n model = Person\n # only display administration-related fields: groups, permissions,\n # being a superuser or being active (== ability to log in)\n fields = [\n 'is_active',\n 'is_superuser',\n 'user_permissions',\n 'groups',\n ]\n\n\nclass PersonsSelectionForm(forms.Form):\n\n person_a = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person From',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n person_b = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person To',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n\nclass PersonsMergeForm(forms.Form):\n TWO = (\n ('obj_a', 'Use A'),\n ('obj_b', 'Use B'),\n )\n THREE = TWO + (('combine', 'Combine'), )\n DEFAULT = 'obj_a'\n\n person_a = forms.ModelChoiceField(queryset=Person.objects.all(),\n widget=forms.HiddenInput)\n\n person_b = forms.ModelChoiceField(queryset=Person.objects.all(),\n widget=forms.HiddenInput)\n\n id = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n username = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n personal = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n middle = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n family = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n email = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n may_contact = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n gender = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n airport = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n github = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n twitter = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n url = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n notes = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n affiliation = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n occupation = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n orcid = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n award_set = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n qualification_set = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n label='Lessons',\n )\n domains = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n languages = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n task_set = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n is_active = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n\n\nclass BadgeAwardForm(forms.ModelForm):\n\n person = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n event = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n awarded_by = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Awarded by',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Award\n fields = '__all__'\n widgets = {'badge': HiddenInput}\n\n\nclass PersonAwardForm(forms.ModelForm):\n\n event = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n awarded_by = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Awarded by',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Award\n fields = '__all__'\n widgets = {'person': HiddenInput}\n\n\nclass PersonTaskForm(forms.ModelForm):\n\n event = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Task\n fields = '__all__'\n widgets = {'person': HiddenInput}\n\n\nclass OrganizationForm(forms.ModelForm):\n domain = forms.CharField(\n max_length=Organization._meta.get_field('domain').max_length,\n validators=[\n RegexValidator(\n '[^\\w\\.-]+', inverse_match=True,\n message='Please enter only the domain (such as \"math.esu.edu\")'\n ' without a leading \"http://\" or a trailing \"/\".')\n ],\n )\n\n class Meta:\n model = Organization\n fields = ['domain', 'fullname', 'country', 'notes']\n\n\nclass MembershipForm(forms.ModelForm):\n class Meta:\n model = Membership\n fields = '__all__'\n widgets = {'host': HiddenInput, }\n\n\nclass SponsorshipForm(forms.ModelForm):\n organization = selectable.AutoCompleteSelectField(\n lookup_class=lookups.OrganizationLookup,\n label='Organization',\n required=True,\n help_text=Sponsorship._meta.get_field('organization').help_text,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n contact = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Contact',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Sponsorship\n fields = '__all__'\n widgets = {'event': HiddenInput, }\n\n\nclass SWCEventRequestForm(forms.ModelForm):\n captcha = ReCaptchaField()\n workshop_type = forms.CharField(initial='swc', widget=forms.HiddenInput())\n understand_admin_fee = forms.BooleanField(\n required=True,\n initial=False,\n label='I understand the Software Carpentry Foundation\\'s '\n 'administration fee.',\n help_text='<a href=\"http://software-carpentry.org/blog/2015/07/changes'\n '-to-admin-fee.html\" target=\"_blank\">Look up administration '\n 'fees</a>.',\n )\n language = selectable.AutoCompleteSelectField(\n lookup_class=lookups.LanguageLookup,\n label='Language',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = EventRequest\n exclude = ('active', 'created_at', 'last_updated_at', 'assigned_to',\n 'data_types', 'data_types_other',\n 'attendee_data_analysis_level', 'fee_waiver_request')\n widgets = {\n 'approx_attendees': forms.RadioSelect(),\n 'attendee_domains': forms.CheckboxSelectMultiple(),\n 'attendee_academic_levels': forms.CheckboxSelectMultiple(),\n 'attendee_computing_levels': forms.CheckboxSelectMultiple(),\n 'travel_reimbursement': forms.RadioSelect(),\n 'admin_fee_payment': forms.RadioSelect(),\n }\n\n\nclass DCEventRequestForm(SWCEventRequestForm):\n workshop_type = forms.CharField(initial='dc', widget=forms.HiddenInput())\n understand_admin_fee = forms.BooleanField(\n required=True,\n initial=False,\n label='I understand the Data Carpentry\\'s administration fee.',\n help_text='There is a per-workshop fee for Data Carpentry to cover '\n 'administrative and core development costs. The per-workshop fee is '\n 'currently $2500. We work to find local instructors when possible, but'\n ' the host institute will also need to pay for instructors travel and'\n ' lodging if they need to travel. Therefore overall workshop costs are'\n ' $2500 - $6000.',\n )\n\n class Meta(SWCEventRequestForm.Meta):\n exclude = ('active', 'created_at', 'last_updated_at', 'assigned_to',\n 'admin_fee_payment', 'attendee_computing_levels')\n widgets = {\n 'approx_attendees': forms.RadioSelect(),\n 'attendee_domains': forms.CheckboxSelectMultiple(),\n 'data_types': forms.RadioSelect(),\n 'attendee_academic_levels': forms.CheckboxSelectMultiple(),\n 'attendee_data_analysis_level': forms.CheckboxSelectMultiple(),\n 'travel_reimbursement': forms.RadioSelect(),\n }\n\n\nclass EventSubmitFormNoCaptcha(forms.ModelForm):\n class Meta:\n model = EventSubmission\n exclude = ('active', 'created_at', 'last_updated_at', 'assigned_to')\n\n\nclass EventSubmitForm(EventSubmitFormNoCaptcha):\n captcha = ReCaptchaField()\n\n\nclass DCSelfOrganizedEventRequestFormNoCaptcha(forms.ModelForm):\n # the easiest way to make these fields required without rewriting their\n # verbose names or help texts\n handle_registration = DCSelfOrganizedEventRequest._meta \\\n .get_field('handle_registration').formfield(required=True)\n distribute_surveys = DCSelfOrganizedEventRequest._meta \\\n .get_field('distribute_surveys').formfield(required=True)\n follow_code_of_conduct = DCSelfOrganizedEventRequest._meta \\\n .get_field('follow_code_of_conduct').formfield(required=True)\n\n class Meta:\n model = DCSelfOrganizedEventRequest\n exclude = ('created_at', 'last_updated_at', 'assigned_to')\n widgets = {\n 'instructor_status': forms.RadioSelect(),\n 'is_partner': forms.RadioSelect(),\n 'domains': forms.CheckboxSelectMultiple(),\n 'topics': forms.CheckboxSelectMultiple(),\n 'attendee_academic_levels': forms.CheckboxSelectMultiple(),\n 'attendee_data_analysis_level': forms.CheckboxSelectMultiple(),\n 'payment': forms.RadioSelect(),\n }\n\n\nclass DCSelfOrganizedEventRequestForm(\n DCSelfOrganizedEventRequestFormNoCaptcha):\n captcha = ReCaptchaField()\n\n class Meta(DCSelfOrganizedEventRequestFormNoCaptcha.Meta):\n exclude = ('active', 'created_at', 'last_updated_at', 'assigned_to')\n\n\nclass ProfileUpdateRequestFormNoCaptcha(forms.ModelForm):\n languages = selectable.AutoCompleteSelectMultipleField(\n lookup_class=lookups.LanguageLookup,\n label='Languages you can teach in',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectMultipleWidget,\n )\n\n class Meta:\n model = ProfileUpdateRequest\n exclude = ('active', 'created_at', 'last_updated_at')\n widgets = {\n 'domains': forms.CheckboxSelectMultiple(),\n 'lessons': forms.CheckboxSelectMultiple(),\n 'occupation': forms.RadioSelect(),\n 'gender': forms.RadioSelect(),\n }\n\n def clean_twitter(self):\n \"\"\"Remove '@'s from the beginning of the Twitter handle.\"\"\"\n twitter_handle = self.cleaned_data['twitter']\n return re.sub('^@+', '', twitter_handle)\n\n\nclass ProfileUpdateRequestForm(ProfileUpdateRequestFormNoCaptcha):\n captcha = ReCaptchaField()\n\n\nclass PersonLookupForm(forms.Form):\n person = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n\nclass AdminLookupForm(forms.Form):\n person = selectable.AutoCompleteSelectField(\n lookup_class=lookups.AdminLookup,\n label='Administrator',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n\nclass SimpleTodoForm(forms.ModelForm):\n class Meta:\n model = TodoItem\n fields = ('title', 'due', 'additional', 'completed', 'event')\n widgets = {'event': HiddenInput, }\n\n# `extra`: number of forms populated via `initial` parameter; it's hardcoded in\n# `views.todos_add`\nTodoFormSet = modelformset_factory(TodoItem, form=SimpleTodoForm, extra=10)\n\n\nclass EventsSelectionForm(forms.Form):\n event_a = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event A',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n event_b = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event B',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n\nclass EventsMergeForm(forms.Form):\n TWO = (\n ('obj_a', 'Use A'),\n ('obj_b', 'Use B'),\n )\n THREE = TWO + (('combine', 'Combine'), )\n DEFAULT = 'obj_a'\n\n event_a = forms.ModelChoiceField(queryset=Event.objects.all(),\n widget=forms.HiddenInput)\n\n event_b = forms.ModelChoiceField(queryset=Event.objects.all(),\n widget=forms.HiddenInput)\n\n id = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n slug = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n completed = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n assigned_to = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n start = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n end = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n host = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n administrator = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n tags = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n url = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n language = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n reg_key = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n admin_fee = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n invoice_status = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n attendance = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n contact = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n country = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n venue = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n address = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n latitude = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n longitude = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n learners_pre = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n learners_post = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n instructors_pre = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n instructors_post = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n learners_longterm = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n notes = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n task_set = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n todoitem_set = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n\n\nclass InvoiceRequestForm(forms.ModelForm):\n class Meta:\n model = InvoiceRequest\n fields = (\n 'organization', 'reason', 'reason_other', 'date', 'event',\n 'event_location', 'item_id', 'postal_number', 'contact_name',\n 'contact_email', 'contact_phone', 'full_address', 'amount',\n 'currency', 'currency_other', 'breakdown', 'vendor_form_required',\n 'vendor_form_link', 'form_W9', 'receipts_sent',\n 'shared_receipts_link', 'notes',\n )\n widgets = {\n 'reason': RadioSelect,\n 'currency': RadioSelect,\n 'vendor_form_required': RadioSelect,\n 'receipts_sent': RadioSelect,\n }\n\n\nclass InvoiceRequestUpdateForm(forms.ModelForm):\n class Meta:\n model = InvoiceRequest\n fields = (\n 'status', 'sent_date', 'paid_date', 'notes'\n )\n\n\nclass TrainingRequestForm(forms.ModelForm):\n agreed_to_code_of_conduct = forms.BooleanField(\n required=True,\n initial=False,\n label='*I agree to abide by Software and Data Carpentry\\'s Code of Conduct',\n help_text='The Code of Conduct can be found at '\n '<a href=\"http://software-carpentry.org/conduct/\" target=\"_blank\">'\n 'http://software-carpentry.org/conduct/</a>'\n 'and <a href=\"http://datacarpentry.org/code-of-conduct/\" target=\"_blank\">'\n 'http://datacarpentry.org/code-of-conduct/</a>',\n )\n agreed_to_complete_training = forms.BooleanField(\n required=True,\n initial=False,\n label='*I agree to complete this training within three months of the Training Course',\n help_text='The completion steps are described at '\n '<a href=\"http://swcarpentry.github.io/instructor-training/checkout/\" target=\"_blank\">'\n 'http://swcarpentry.github.io/instructor-training/checkout/</a> '\n 'and take a total of approximately 8-10 hours.',\n )\n agreed_to_teach_workshops = forms.BooleanField(\n required=True,\n initial=False,\n label='*I agree to teach a Software Carpentry or Data Carpentry '\n 'workshop within 12 months of this Training Course',\n )\n captcha = ReCaptchaField()\n\n class Meta:\n model = TrainingRequest\n fields = (\n 'group_name',\n 'personal',\n 'family',\n 'email',\n 'github',\n 'occupation',\n 'occupation_other',\n 'affiliation',\n 'location',\n 'country',\n 'domains',\n 'domains_other',\n 'gender',\n 'gender_other',\n 'previous_involvement',\n 'previous_training',\n 'previous_training_other',\n 'previous_training_explanation',\n 'previous_experience',\n 'previous_experience_other',\n 'previous_experience_explanation',\n 'programming_language_usage_frequency',\n 'reason',\n 'teaching_frequency_expectation',\n 'teaching_frequency_expectation_other',\n 'max_travelling_frequency',\n 'max_travelling_frequency_other',\n 'additional_skills',\n 'comment',\n )\n widgets = {\n 'occupation': forms.RadioSelect(),\n 'domains': forms.CheckboxSelectMultiple(),\n 'gender': forms.RadioSelect(),\n 'previous_involvement': forms.CheckboxSelectMultiple(),\n 'previous_training': forms.RadioSelect(),\n 'previous_experience': forms.RadioSelect(),\n 'programming_language_usage_frequency': forms.RadioSelect(),\n 'teaching_frequency_expectation': forms.RadioSelect(),\n 'max_travelling_frequency': forms.RadioSelect(),\n }\n\n\nclass AutoUpdateProfileForm(forms.ModelForm):\n username = forms.CharField(disabled=True, required=False)\n github = forms.CharField(\n disabled=True, required=False,\n help_text='If you want to change your github username, please email '\n 'us at <a href=\"mailto:[email protected]\">'\n '[email protected]</a>.')\n\n languages = selectable.AutoCompleteSelectMultipleField(\n lookup_class=lookups.LanguageLookup,\n label='Languages',\n required=False,\n widget=selectable.AutoComboboxSelectMultipleWidget,\n )\n\n class Meta:\n model = Person\n fields = [\n 'personal',\n 'middle',\n 'family',\n 'email',\n 'gender',\n 'may_contact',\n 'airport',\n 'github',\n 'twitter',\n 'url',\n 'username',\n 'affiliation',\n 'domains',\n 'lessons',\n 'languages',\n ]\n readonly_fields = (\n 'username',\n 'github',\n )\n widgets = {\n 'occupation': forms.RadioSelect(),\n 'gender': forms.RadioSelect(),\n 'domains': forms.CheckboxSelectMultiple(),\n 'lessons': forms.CheckboxSelectMultiple(),\n }\n", "path": "workshops/forms.py"}]} |
gh_patches_debug_1524 | rasdani/github-patches | git_diff | pex-tool__pex-577 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 1.4.8
User facing issues on the docket:
+ non-zip_safe pex extraction has a race on renaming #557
+ Execute pex archive at runtime with -m parameter #547
+ OSX's python 2.7.10 interpreter reports a bad local platform, bdists fail to resolve #523
+ `pex.resolver.resolve` does the wrong thing when given an `interpreter` and no `platform` #511
+ [Errno 13] Permission denied when executing with bdist_pex #570
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = '1.4.7'
5
6 # Versions 34.0.0 through 35.0.2 (last pre-36.0.0) de-vendored dependencies which causes problems
7 # for pex code so we exclude that range.
8 SETUPTOOLS_REQUIREMENT = 'setuptools>=20.3,<41,!=34.*,!=35.*'
9
10 WHEEL_REQUIREMENT = 'wheel>=0.26.0,<0.32'
11
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,7 +1,7 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = '1.4.7'
+__version__ = '1.4.8'
# Versions 34.0.0 through 35.0.2 (last pre-36.0.0) de-vendored dependencies which causes problems
# for pex code so we exclude that range.
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,7 +1,7 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '1.4.7'\n+__version__ = '1.4.8'\n \n # Versions 34.0.0 through 35.0.2 (last pre-36.0.0) de-vendored dependencies which causes problems\n # for pex code so we exclude that range.\n", "issue": "Release 1.4.8\nUser facing issues on the docket:\r\n+ non-zip_safe pex extraction has a race on renaming #557\r\n+ Execute pex archive at runtime with -m parameter #547\r\n+ OSX's python 2.7.10 interpreter reports a bad local platform, bdists fail to resolve #523 \r\n+ `pex.resolver.resolve` does the wrong thing when given an `interpreter` and no `platform` #511\r\n+ [Errno 13] Permission denied when executing with bdist_pex #570\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '1.4.7'\n\n# Versions 34.0.0 through 35.0.2 (last pre-36.0.0) de-vendored dependencies which causes problems\n# for pex code so we exclude that range.\nSETUPTOOLS_REQUIREMENT = 'setuptools>=20.3,<41,!=34.*,!=35.*'\n\nWHEEL_REQUIREMENT = 'wheel>=0.26.0,<0.32'\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '1.4.8'\n\n# Versions 34.0.0 through 35.0.2 (last pre-36.0.0) de-vendored dependencies which causes problems\n# for pex code so we exclude that range.\nSETUPTOOLS_REQUIREMENT = 'setuptools>=20.3,<41,!=34.*,!=35.*'\n\nWHEEL_REQUIREMENT = 'wheel>=0.26.0,<0.32'\n", "path": "pex/version.py"}]} |
gh_patches_debug_1525 | rasdani/github-patches | git_diff | sopel-irc__sopel-1527 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Broken interrupt handling when connection is inactive
For the record, I'm testing on macOS today because that's what I have available. However, the same behavior occurs with identical or similar tracebacks on my Ubuntu machine at home, so I don't think it's related to the fact that I'm using Homebrew Python or anything. It's probably related to Sopel's sometimes-wonky exception handling.
Here's what I did:
1. Run `sopel`
2. Press Ctrl-C after "Connecting to <server>..."
3. Run `sopel` again
4. Press Ctrl-C while Sopel is waiting to reconnect after an expected SSL failure
Interrupting the connection phase resulted in an `AttributeError`, probably as expected (since quitting tries to send something to the socket, and the socket object doesn't exist before connecting finishes):
```
Connecting to irc.network.net:6667...
^CGot quit signal, shutting down.
Traceback (most recent call last):
File "/Users/dgw/github/sopel/sopel/__init__.py", line 91, in run
p.run(config.core.host, int(config.core.port))
File "/Users/dgw/github/sopel/sopel/irc.py", line 167, in run
self.initiate_connect(host, port)
File "/Users/dgw/github/sopel/sopel/irc.py", line 177, in initiate_connect
source_address=source_address))
File "/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socket.py", line 707, in create_connection
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
File "/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socket.py", line 748, in getaddrinfo
for res in _socket.getaddrinfo(host, port, family, type, proto, flags):
File "/Users/dgw/github/sopel/sopel/__init__.py", line 73, in signal_handler
p.quit('Closing')
File "/Users/dgw/github/sopel/sopel/irc.py", line 199, in quit
self.write(['QUIT'], message)
File "/Users/dgw/github/sopel/sopel/bot.py", line 166, in write
irc.Bot.write(self, args, text=text)
File "/Users/dgw/github/sopel/sopel/irc.py", line 161, in write
self.send(temp.encode('utf-8'))
File "/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/asyncore.py", line 360, in send
result = self.socket.send(data)
AttributeError: 'NoneType' object has no attribute 'send'
```
Interrupting the reconnect delay yields a completely different traceback, and additionally requires pressing Ctrl-C twice:
```
Warning: Disconnected. Reconnecting in 20 seconds...
^CGot quit signal, shutting down.
Traceback (most recent call last):
File "./sopel.py", line 7, in <module>
sys.exit(run_script.main())
File "/Users/dgw/github/sopel/sopel/run_script.py", line 351, in main
ret = run(config_module, pid_file_path)
File "/Users/dgw/github/sopel/sopel/__init__.py", line 120, in run
time.sleep(delay)
File "/Users/dgw/github/sopel/sopel/__init__.py", line 73, in signal_handler
p.quit('Closing')
File "/Users/dgw/github/sopel/sopel/irc.py", line 199, in quit
self.write(['QUIT'], message)
File "/Users/dgw/github/sopel/sopel/bot.py", line 166, in write
irc.Bot.write(self, args, text=text)
File "/Users/dgw/github/sopel/sopel/irc.py", line 161, in write
self.send(temp.encode('utf-8'))
File "/Users/dgw/github/sopel/sopel/irc.py", line 327, in _ssl_send
result = self.socket.send(data)
OSError: [Errno 9] Bad file descriptor
^CGot quit signal, shutting down.
Exception ignored in: <module 'threading' from '/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py'>
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py", line 1273, in _shutdown
t.join()
File "/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py", line 1032, in join
self._wait_for_tstate_lock()
File "/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py", line 1048, in _wait_for_tstate_lock
elif lock.acquire(block, timeout):
File "/Users/dgw/github/sopel/sopel/__init__.py", line 73, in signal_handler
p.quit('Closing')
File "/Users/dgw/github/sopel/sopel/irc.py", line 199, in quit
self.write(['QUIT'], message)
File "/Users/dgw/github/sopel/sopel/bot.py", line 166, in write
irc.Bot.write(self, args, text=text)
File "/Users/dgw/github/sopel/sopel/irc.py", line 161, in write
self.send(temp.encode('utf-8'))
File "/Users/dgw/github/sopel/sopel/irc.py", line 327, in _ssl_send
result = self.socket.send(data)
OSError: [Errno 9] Bad file descriptor
```
I've run into this second issue relatively often while testing things in the last few months, since I often intentionally interrupt Sopel during states other than "Connected to network and running normally".
The exception itself isn't a big deal (though it would be nice not to spit it out). I consider needing to press Ctrl-C twice to be the main bug here. That simply shouldn't be required.
Interrupting Sopel after the "Loading modules..." line (but before it starts to connect) yields a clean exit, though. That's kind of weird, and I haven't had time to look into why.
Since I know we have a few people digging around in Sopel's internals and refactoring things now (and doing damn good work, too!), I'm hoping one of them will look into this at some point. 😹
This isn't a high-severity bug, but I do want to get it fixed if possible, ideally in the next year or two. That need to press Ctrl-C twice carries over into, for example, `sopel --quit`. Sopel should _never_ need to be told to quit twice, unless something is catastrophically wrong—and waiting to reconnect is a totally normal situation that shouldn't break things the way it does now.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sopel/cli/run.py`
Content:
```
1 #!/usr/bin/env python2.7
2 # coding=utf-8
3 """
4 Sopel - An IRC Bot
5 Copyright 2008, Sean B. Palmer, inamidst.com
6 Copyright © 2012-2014, Elad Alfassa <[email protected]>
7 Licensed under the Eiffel Forum License 2.
8
9 https://sopel.chat
10 """
11 from __future__ import unicode_literals, absolute_import, print_function, division
12
13 import argparse
14 import os
15 import platform
16 import signal
17 import sys
18 import time
19 import traceback
20
21 from sopel import bot, logger, tools, __version__
22 from sopel.config import (
23 Config,
24 _create_config,
25 ConfigurationError,
26 ConfigurationNotFound,
27 DEFAULT_HOMEDIR,
28 _wizard
29 )
30 from . import utils
31
32 if sys.version_info < (2, 7):
33 tools.stderr('Error: Requires Python 2.7 or later. Try python2.7 sopel')
34 sys.exit(1)
35 if sys.version_info.major == 2:
36 tools.stderr('Warning: Python 2.x is near end of life. Sopel support at that point is TBD.')
37 if sys.version_info.major == 3 and sys.version_info.minor < 3:
38 tools.stderr('Error: When running on Python 3, Python 3.3 is required.')
39 sys.exit(1)
40
41 ERR_CODE = 1
42 """Error code: program exited with an error"""
43 ERR_CODE_NO_RESTART = 2
44 """Error code: program exited with an error and should not be restarted
45
46 This error code is used to prevent systemd from restarting the bot when it
47 encounters such an error case.
48 """
49
50
51 def run(config, pid_file, daemon=False):
52 delay = 20
53 # Inject ca_certs from config to web for SSL validation of web requests
54 if not config.core.ca_certs:
55 tools.stderr(
56 'Could not open CA certificates file. SSL will not work properly!')
57
58 def signal_handler(sig, frame):
59 if sig == signal.SIGUSR1 or sig == signal.SIGTERM or sig == signal.SIGINT:
60 tools.stderr('Got quit signal, shutting down.')
61 p.quit('Closing')
62 elif sig == signal.SIGUSR2 or sig == signal.SIGILL:
63 tools.stderr('Got restart signal.')
64 p.restart('Restarting')
65
66 while True:
67 try:
68 p = bot.Sopel(config, daemon=daemon)
69 if hasattr(signal, 'SIGUSR1'):
70 signal.signal(signal.SIGUSR1, signal_handler)
71 if hasattr(signal, 'SIGTERM'):
72 signal.signal(signal.SIGTERM, signal_handler)
73 if hasattr(signal, 'SIGINT'):
74 signal.signal(signal.SIGINT, signal_handler)
75 if hasattr(signal, 'SIGUSR2'):
76 signal.signal(signal.SIGUSR2, signal_handler)
77 if hasattr(signal, 'SIGILL'):
78 signal.signal(signal.SIGILL, signal_handler)
79 logger.setup_logging(p)
80 p.run(config.core.host, int(config.core.port))
81 except KeyboardInterrupt:
82 break
83 except Exception: # TODO: Be specific
84 trace = traceback.format_exc()
85 try:
86 tools.stderr(trace)
87 except Exception: # TODO: Be specific
88 pass
89 logfile = open(os.path.join(config.core.logdir, 'exceptions.log'), 'a')
90 logfile.write('Critical exception in core')
91 logfile.write(trace)
92 logfile.write('----------------------------------------\n\n')
93 logfile.close()
94 # TODO: This should be handled by command_start
95 # All we should need here is a return value, but replacing the
96 # os._exit() call below (at the end) broke ^C.
97 # This one is much harder to test, so until that one's sorted it
98 # isn't worth the risk of trying to remove this one.
99 os.unlink(pid_file)
100 os._exit(1)
101
102 if not isinstance(delay, int):
103 break
104 if p.wantsrestart:
105 return -1
106 if p.hasquit:
107 break
108 tools.stderr(
109 'Warning: Disconnected. Reconnecting in %s seconds...' % delay)
110 time.sleep(delay)
111 # TODO: This should be handled by command_start
112 # All we should need here is a return value, but making this
113 # a return makes Sopel hang on ^C after it says "Closed!"
114 os.unlink(pid_file)
115 os._exit(0)
116
117
118 def add_legacy_options(parser):
119 parser.add_argument("-d", '--fork', action="store_true",
120 dest="daemonize", help="Daemonize Sopel")
121 parser.add_argument("-q", '--quit', action="store_true", dest="quit",
122 help=(
123 "Gracefully quit Sopel "
124 "(deprecated, and will be removed in Sopel 8; "
125 "use `sopel stop` instead)"))
126 parser.add_argument("-k", '--kill', action="store_true", dest="kill",
127 help=(
128 "Kill Sopel "
129 "(deprecated, and will be removed in Sopel 8; "
130 "use `sopel stop --kill` instead)"))
131 parser.add_argument("-r", '--restart', action="store_true", dest="restart",
132 help=(
133 "Restart Sopel "
134 "(deprecated, and will be removed in Sopel 8; "
135 "use `sopel restart` instead)"))
136 parser.add_argument("-l", '--list', action="store_true",
137 dest="list_configs",
138 help="List all config files found")
139 parser.add_argument('--quiet', action="store_true", dest="quiet",
140 help="Suppress all output")
141 parser.add_argument('-w', '--configure-all', action='store_true',
142 dest='wizard',
143 help=(
144 "Run the configuration wizard "
145 "(deprecated, and will be removed in Sopel 8; "
146 "use `sopel configure` instead)"))
147 parser.add_argument('--configure-modules', action='store_true',
148 dest='mod_wizard',
149 help=(
150 "Run the configuration wizard, but only for the "
151 "module configuration options "
152 "(deprecated, and will be removed in Sopel 8; "
153 "use `sopel configure --modules` instead)"))
154 parser.add_argument('-v', action="store_true",
155 dest='version_legacy',
156 help=(
157 "Show version number and exit "
158 "(deprecated, and will be removed in Sopel 8; "
159 "use -V/--version instead)"))
160 parser.add_argument('-V', '--version', action='store_true',
161 dest='version',
162 help='Show version number and exit')
163
164
165 def build_parser():
166 """Build an ``argparse.ArgumentParser`` for the bot"""
167 parser = argparse.ArgumentParser(description='Sopel IRC Bot',
168 usage='%(prog)s [options]')
169 add_legacy_options(parser)
170 utils.add_common_arguments(parser)
171
172 subparsers = parser.add_subparsers(
173 title='sub-commands',
174 description='List of Sopel\'s sub-commands',
175 dest='action',
176 metavar='{start,configure,stop,restart}')
177
178 # manage `legacy` sub-command
179 parser_legacy = subparsers.add_parser('legacy')
180 add_legacy_options(parser_legacy)
181 utils.add_common_arguments(parser_legacy)
182
183 # manage `start` sub-command
184 parser_start = subparsers.add_parser(
185 'start',
186 description='Start a Sopel instance',
187 help='Start a Sopel instance')
188 parser_start.add_argument(
189 '-d', '--fork',
190 dest='daemonize',
191 action='store_true',
192 default=False,
193 help='Run Sopel as a daemon (fork)')
194 parser_start.add_argument(
195 '--quiet',
196 action="store_true",
197 dest="quiet",
198 help="Suppress all output")
199 utils.add_common_arguments(parser_start)
200
201 # manage `configure` sub-command
202 parser_configure = subparsers.add_parser(
203 'configure', help='Sopel\'s Wizard tool')
204 parser_configure.add_argument(
205 '--modules',
206 action='store_true',
207 default=False,
208 dest='modules')
209 utils.add_common_arguments(parser_configure)
210
211 # manage `stop` sub-command
212 parser_stop = subparsers.add_parser(
213 'stop',
214 description='Stop a running Sopel instance',
215 help='Stop a running Sopel instance')
216 parser_stop.add_argument(
217 '-k', '--kill',
218 action='store_true',
219 default=False,
220 help='Kill Sopel without a graceful quit')
221 parser_stop.add_argument(
222 '--quiet',
223 action="store_true",
224 dest="quiet",
225 help="Suppress all output")
226 utils.add_common_arguments(parser_stop)
227
228 # manage `restart` sub-command
229 parser_restart = subparsers.add_parser(
230 'restart',
231 description='Restart a running Sopel instance',
232 help='Restart a running Sopel instance')
233 parser_restart.add_argument(
234 '--quiet',
235 action="store_true",
236 dest="quiet",
237 help="Suppress all output")
238 utils.add_common_arguments(parser_restart)
239
240 return parser
241
242
243 def check_not_root():
244 """Check if root is running the bot.
245
246 It raises a ``RuntimeError`` if the user has root privileges on Linux or
247 if it is the ``Administrator`` account on Windows.
248 """
249 opersystem = platform.system()
250 if opersystem in ["Linux", "Darwin"]:
251 # Linux/Mac
252 if os.getuid() == 0 or os.geteuid() == 0:
253 raise RuntimeError('Error: Do not run Sopel with root privileges.')
254 elif opersystem in ["Windows"]:
255 # Windows
256 if os.environ.get("USERNAME") == "Administrator":
257 raise RuntimeError('Error: Do not run Sopel as Administrator.')
258 else:
259 tools.stderr(
260 "Warning: %s is an uncommon operating system platform. "
261 "Sopel should still work, but please contact Sopel's developers "
262 "if you experience issues."
263 % opersystem)
264
265
266 def print_version():
267 """Print Python version and Sopel version on stdout."""
268 py_ver = '%s.%s.%s' % (sys.version_info.major,
269 sys.version_info.minor,
270 sys.version_info.micro)
271 print('Sopel %s (running on Python %s)' % (__version__, py_ver))
272 print('https://sopel.chat/')
273
274
275 def print_config():
276 """Print list of available configurations from default homedir."""
277 configs = utils.enumerate_configs(DEFAULT_HOMEDIR)
278 print('Config files in %s:' % DEFAULT_HOMEDIR)
279 config = None
280 for config in configs:
281 print('\t%s' % config)
282 if not config:
283 print('\tNone found')
284
285 print('-------------------------')
286
287
288 def get_configuration(options):
289 """Get or create a configuration object from ``options``.
290
291 :param options: argument parser's options
292 :type options: ``argparse.Namespace``
293 :return: a configuration object
294 :rtype: :class:`sopel.config.Config`
295
296 This may raise a :exc:`sopel.config.ConfigurationError` if the
297 configuration file is invalid.
298
299 .. seealso::
300
301 The configuration file is loaded by
302 :func:`~sopel.cli.run.utils.load_settings` or created using the
303 configuration wizard.
304
305 """
306 try:
307 bot_config = utils.load_settings(options)
308 except ConfigurationNotFound as error:
309 print(
310 "Welcome to Sopel!\n"
311 "I can't seem to find the configuration file, "
312 "so let's generate it!\n")
313
314 config_path = error.filename
315 if not config_path.endswith('.cfg'):
316 config_path = config_path + '.cfg'
317
318 config_path = _create_config(config_path)
319 # try to reload it now that it's created
320 bot_config = Config(config_path)
321
322 bot_config._is_daemonized = options.daemonize
323 return bot_config
324
325
326 def get_pid_filename(options, pid_dir):
327 """Get the pid file name in ``pid_dir`` from the given ``options``.
328
329 :param options: command line options
330 :param str pid_dir: path to the pid directory
331 :return: absolute filename of the pid file
332
333 By default, it's ``sopel.pid``, but if a configuration filename is given
334 in the ``options``, its basename is used to generate the filename, as:
335 ``sopel-{basename}.pid`` instead.
336 """
337 name = 'sopel.pid'
338 if options.config:
339 basename = os.path.basename(options.config)
340 if basename.endswith('.cfg'):
341 basename = basename[:-4]
342 name = 'sopel-%s.pid' % basename
343
344 return os.path.abspath(os.path.join(pid_dir, name))
345
346
347 def get_running_pid(filename):
348 """Retrieve the PID number from the given ``filename``.
349
350 :param str filename: path to file to read the PID from
351 :return: the PID number of a Sopel instance if running, ``None`` otherwise
352 :rtype: integer
353
354 This function tries to retrieve a PID number from the given ``filename``,
355 as an integer, and returns ``None`` if the file is not found or if the
356 content is not an integer.
357 """
358 if not os.path.isfile(filename):
359 return
360
361 with open(filename, 'r') as pid_file:
362 try:
363 return int(pid_file.read())
364 except ValueError:
365 pass
366
367
368 def command_start(opts):
369 """Start a Sopel instance"""
370 # Step One: Get the configuration file and prepare to run
371 try:
372 config_module = get_configuration(opts)
373 except ConfigurationError as e:
374 tools.stderr(e)
375 return ERR_CODE_NO_RESTART
376
377 if config_module.core.not_configured:
378 tools.stderr('Bot is not configured, can\'t start')
379 return ERR_CODE_NO_RESTART
380
381 # Step Two: Manage logfile, stdout and stderr
382 utils.redirect_outputs(config_module, opts.quiet)
383
384 # Step Three: Handle process-lifecycle options and manage the PID file
385 pid_dir = config_module.core.pid_dir
386 pid_file_path = get_pid_filename(opts, pid_dir)
387 pid = get_running_pid(pid_file_path)
388
389 if pid is not None and tools.check_pid(pid):
390 tools.stderr('There\'s already a Sopel instance running '
391 'with this config file.')
392 tools.stderr('Try using either the `sopel stop` '
393 'or the `sopel restart` command.')
394 return ERR_CODE
395
396 if opts.daemonize:
397 child_pid = os.fork()
398 if child_pid is not 0:
399 return
400
401 with open(pid_file_path, 'w') as pid_file:
402 pid_file.write(str(os.getpid()))
403
404 # Step Four: Run Sopel
405 ret = run(config_module, pid_file_path)
406
407 # Step Five: Shutdown Clean-Up
408 os.unlink(pid_file_path)
409
410 if ret == -1:
411 # Restart
412 os.execv(sys.executable, ['python'] + sys.argv)
413 else:
414 # Quit
415 return ret
416
417
418 def command_configure(opts):
419 """Sopel Configuration Wizard"""
420 if getattr(opts, 'modules', False):
421 _wizard('mod', opts.config)
422 else:
423 _wizard('all', opts.config)
424
425
426 def command_stop(opts):
427 """Stop a running Sopel instance"""
428 # Get Configuration
429 try:
430 settings = utils.load_settings(opts)
431 except ConfigurationNotFound as error:
432 tools.stderr('Configuration "%s" not found' % error.filename)
433 return ERR_CODE
434
435 if settings.core.not_configured:
436 tools.stderr('Sopel is not configured, can\'t stop')
437 return ERR_CODE
438
439 # Redirect Outputs
440 utils.redirect_outputs(settings, opts.quiet)
441
442 # Get Sopel's PID
443 filename = get_pid_filename(opts, settings.core.pid_dir)
444 pid = get_running_pid(filename)
445
446 if pid is None or not tools.check_pid(pid):
447 tools.stderr('Sopel is not running!')
448 return ERR_CODE
449
450 # Stop Sopel
451 if opts.kill:
452 tools.stderr('Killing the Sopel')
453 os.kill(pid, signal.SIGKILL)
454 return
455
456 tools.stderr('Signaling Sopel to stop gracefully')
457 if hasattr(signal, 'SIGUSR1'):
458 os.kill(pid, signal.SIGUSR1)
459 else:
460 # Windows will not generate SIGTERM itself
461 # https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal
462 os.kill(pid, signal.SIGTERM)
463
464
465 def command_restart(opts):
466 """Restart a running Sopel instance"""
467 # Get Configuration
468 try:
469 settings = utils.load_settings(opts)
470 except ConfigurationNotFound as error:
471 tools.stderr('Configuration "%s" not found' % error.filename)
472 return ERR_CODE
473
474 if settings.core.not_configured:
475 tools.stderr('Sopel is not configured, can\'t stop')
476 return ERR_CODE
477
478 # Redirect Outputs
479 utils.redirect_outputs(settings, opts.quiet)
480
481 # Get Sopel's PID
482 filename = get_pid_filename(opts, settings.core.pid_dir)
483 pid = get_running_pid(filename)
484
485 if pid is None or not tools.check_pid(pid):
486 tools.stderr('Sopel is not running!')
487 return ERR_CODE
488
489 tools.stderr('Asking Sopel to restart')
490 if hasattr(signal, 'SIGUSR2'):
491 os.kill(pid, signal.SIGUSR2)
492 else:
493 # Windows will not generate SIGILL itself
494 # https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal
495 os.kill(pid, signal.SIGILL)
496
497
498 def command_legacy(opts):
499 """Legacy Sopel run script
500
501 The ``legacy`` command manages the old-style ``sopel`` command line tool.
502 Most of its features are replaced by the following commands:
503
504 * ``sopel start`` replaces the default behavior (run the bot)
505 * ``sopel stop`` replaces the ``--quit/--kill`` options
506 * ``sopel restart`` replaces the ``--restart`` option
507 * ``sopel configure`` replaces the
508 ``-w/--configure-all/--configure-modules`` options
509
510 The ``-v`` option for "version" is deprecated, ``-V/--version`` should be
511 used instead.
512
513 .. seealso::
514
515 The github issue `#1471`__ tracks various changes requested for future
516 versions of Sopel, some of them related to this legacy command.
517
518 .. __: https://github.com/sopel-irc/sopel/issues/1471
519
520 """
521 # Step One: Handle "No config needed" options
522 if opts.version:
523 print_version()
524 return
525 elif opts.version_legacy:
526 tools.stderr(
527 'WARNING: option -v is deprecated; '
528 'use `sopel -V/--version` instead')
529 print_version()
530 return
531
532 if opts.wizard:
533 tools.stderr(
534 'WARNING: option -w/--configure-all is deprecated; '
535 'use `sopel configure` instead')
536 _wizard('all', opts.config)
537 return
538
539 if opts.mod_wizard:
540 tools.stderr(
541 'WARNING: option --configure-modules is deprecated; '
542 'use `sopel configure --modules` instead')
543 _wizard('mod', opts.config)
544 return
545
546 if opts.list_configs:
547 print_config()
548 return
549
550 # Step Two: Get the configuration file and prepare to run
551 try:
552 config_module = get_configuration(opts)
553 except ConfigurationError as e:
554 tools.stderr(e)
555 return ERR_CODE_NO_RESTART
556
557 if config_module.core.not_configured:
558 tools.stderr('Bot is not configured, can\'t start')
559 return ERR_CODE_NO_RESTART
560
561 # Step Three: Manage logfile, stdout and stderr
562 utils.redirect_outputs(config_module, opts.quiet)
563
564 # Step Four: Handle process-lifecycle options and manage the PID file
565 pid_dir = config_module.core.pid_dir
566 pid_file_path = get_pid_filename(opts, pid_dir)
567 old_pid = get_running_pid(pid_file_path)
568
569 if old_pid is not None and tools.check_pid(old_pid):
570 if not opts.quit and not opts.kill and not opts.restart:
571 tools.stderr(
572 'There\'s already a Sopel instance running with this config file')
573 tools.stderr(
574 'Try using either the `sopel stop` command or the `sopel restart` command')
575 return ERR_CODE
576 elif opts.kill:
577 tools.stderr(
578 'WARNING: option -k/--kill is deprecated; '
579 'use `sopel stop --kill` instead')
580 tools.stderr('Killing the Sopel')
581 os.kill(old_pid, signal.SIGKILL)
582 return
583 elif opts.quit:
584 tools.stderr(
585 'WARNING: options -q/--quit is deprecated; '
586 'use `sopel stop` instead')
587 tools.stderr('Signaling Sopel to stop gracefully')
588 if hasattr(signal, 'SIGUSR1'):
589 os.kill(old_pid, signal.SIGUSR1)
590 else:
591 # Windows will not generate SIGTERM itself
592 # https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal
593 os.kill(old_pid, signal.SIGTERM)
594 return
595 elif opts.restart:
596 tools.stderr(
597 'WARNING: options --restart is deprecated; '
598 'use `sopel restart` instead')
599 tools.stderr('Asking Sopel to restart')
600 if hasattr(signal, 'SIGUSR2'):
601 os.kill(old_pid, signal.SIGUSR2)
602 else:
603 # Windows will not generate SIGILL itself
604 # https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal
605 os.kill(old_pid, signal.SIGILL)
606 return
607 elif opts.kill or opts.quit or opts.restart:
608 tools.stderr('Sopel is not running!')
609 return ERR_CODE
610
611 if opts.daemonize:
612 child_pid = os.fork()
613 if child_pid is not 0:
614 return
615 with open(pid_file_path, 'w') as pid_file:
616 pid_file.write(str(os.getpid()))
617
618 # Step Five: Initialize and run Sopel
619 ret = run(config_module, pid_file_path)
620 os.unlink(pid_file_path)
621 if ret == -1:
622 os.execv(sys.executable, ['python'] + sys.argv)
623 else:
624 return ret
625
626
627 def main(argv=None):
628 """Sopel run script entry point"""
629 try:
630 # Step One: Parse The Command Line
631 parser = build_parser()
632
633 # make sure to have an action first (`legacy` by default)
634 # TODO: `start` should be the default in Sopel 8
635 argv = argv or sys.argv[1:]
636 if not argv:
637 argv = ['legacy']
638 elif argv[0].startswith('-') and argv[0] not in ['-h', '--help']:
639 argv = ['legacy'] + argv
640
641 opts = parser.parse_args(argv)
642
643 # Step Two: "Do not run as root" checks
644 try:
645 check_not_root()
646 except RuntimeError as err:
647 tools.stderr('%s' % err)
648 return ERR_CODE
649
650 # Step Three: Handle command
651 action = getattr(opts, 'action', 'legacy')
652 command = {
653 'legacy': command_legacy,
654 'start': command_start,
655 'configure': command_configure,
656 'stop': command_stop,
657 'restart': command_restart,
658 }.get(action)
659 return command(opts)
660 except KeyboardInterrupt:
661 print("\n\nInterrupted")
662 return ERR_CODE
663
664
665 if __name__ == '__main__':
666 sys.exit(main())
667
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sopel/cli/run.py b/sopel/cli/run.py
--- a/sopel/cli/run.py
+++ b/sopel/cli/run.py
@@ -67,7 +67,11 @@
tools.stderr('Got restart signal.')
p.restart('Restarting')
+ # Define empty variable `p` for bot
+ p = None
while True:
+ if p and p.hasquit: # Check if `hasquit` was set for bot during disconnected phase
+ break
try:
p = bot.Sopel(config, daemon=daemon)
if hasattr(signal, 'SIGUSR1'):
| {"golden_diff": "diff --git a/sopel/cli/run.py b/sopel/cli/run.py\n--- a/sopel/cli/run.py\n+++ b/sopel/cli/run.py\n@@ -67,7 +67,11 @@\n tools.stderr('Got restart signal.')\n p.restart('Restarting')\n \n+ # Define empty variable `p` for bot\n+ p = None\n while True:\n+ if p and p.hasquit: # Check if `hasquit` was set for bot during disconnected phase\n+ break\n try:\n p = bot.Sopel(config, daemon=daemon)\n if hasattr(signal, 'SIGUSR1'):\n", "issue": "Broken interrupt handling when connection is inactive\nFor the record, I'm testing on macOS today because that's what I have available. However, the same behavior occurs with identical or similar tracebacks on my Ubuntu machine at home, so I don't think it's related to the fact that I'm using Homebrew Python or anything. It's probably related to Sopel's sometimes-wonky exception handling.\r\n\r\nHere's what I did:\r\n\r\n1. Run `sopel`\r\n2. Press Ctrl-C after \"Connecting to <server>...\"\r\n3. Run `sopel` again\r\n4. Press Ctrl-C while Sopel is waiting to reconnect after an expected SSL failure\r\n\r\nInterrupting the connection phase resulted in an `AttributeError`, probably as expected (since quitting tries to send something to the socket, and the socket object doesn't exist before connecting finishes):\r\n\r\n```\r\nConnecting to irc.network.net:6667...\r\n^CGot quit signal, shutting down.\r\nTraceback (most recent call last):\r\n File \"/Users/dgw/github/sopel/sopel/__init__.py\", line 91, in run\r\n p.run(config.core.host, int(config.core.port))\r\n File \"/Users/dgw/github/sopel/sopel/irc.py\", line 167, in run\r\n self.initiate_connect(host, port)\r\n File \"/Users/dgw/github/sopel/sopel/irc.py\", line 177, in initiate_connect\r\n source_address=source_address))\r\n File \"/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socket.py\", line 707, in create_connection\r\n for res in getaddrinfo(host, port, 0, SOCK_STREAM):\r\n File \"/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socket.py\", line 748, in getaddrinfo\r\n for res in _socket.getaddrinfo(host, port, family, type, proto, flags):\r\n File \"/Users/dgw/github/sopel/sopel/__init__.py\", line 73, in signal_handler\r\n p.quit('Closing')\r\n File \"/Users/dgw/github/sopel/sopel/irc.py\", line 199, in quit\r\n self.write(['QUIT'], message)\r\n File \"/Users/dgw/github/sopel/sopel/bot.py\", line 166, in write\r\n irc.Bot.write(self, args, text=text)\r\n File \"/Users/dgw/github/sopel/sopel/irc.py\", line 161, in write\r\n self.send(temp.encode('utf-8'))\r\n File \"/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/asyncore.py\", line 360, in send\r\n result = self.socket.send(data)\r\nAttributeError: 'NoneType' object has no attribute 'send'\r\n```\r\n\r\nInterrupting the reconnect delay yields a completely different traceback, and additionally requires pressing Ctrl-C twice:\r\n\r\n```\r\nWarning: Disconnected. Reconnecting in 20 seconds...\r\n^CGot quit signal, shutting down.\r\nTraceback (most recent call last):\r\n File \"./sopel.py\", line 7, in <module>\r\n sys.exit(run_script.main())\r\n File \"/Users/dgw/github/sopel/sopel/run_script.py\", line 351, in main\r\n ret = run(config_module, pid_file_path)\r\n File \"/Users/dgw/github/sopel/sopel/__init__.py\", line 120, in run\r\n time.sleep(delay)\r\n File \"/Users/dgw/github/sopel/sopel/__init__.py\", line 73, in signal_handler\r\n p.quit('Closing')\r\n File \"/Users/dgw/github/sopel/sopel/irc.py\", line 199, in quit\r\n self.write(['QUIT'], message)\r\n File \"/Users/dgw/github/sopel/sopel/bot.py\", line 166, in write\r\n irc.Bot.write(self, args, text=text)\r\n File \"/Users/dgw/github/sopel/sopel/irc.py\", line 161, in write\r\n self.send(temp.encode('utf-8'))\r\n File \"/Users/dgw/github/sopel/sopel/irc.py\", line 327, in _ssl_send\r\n result = self.socket.send(data)\r\nOSError: [Errno 9] Bad file descriptor\r\n^CGot quit signal, shutting down.\r\nException ignored in: <module 'threading' from '/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py'>\r\nTraceback (most recent call last):\r\n File \"/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py\", line 1273, in _shutdown\r\n t.join()\r\n File \"/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py\", line 1032, in join\r\n self._wait_for_tstate_lock()\r\n File \"/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py\", line 1048, in _wait_for_tstate_lock\r\n elif lock.acquire(block, timeout):\r\n File \"/Users/dgw/github/sopel/sopel/__init__.py\", line 73, in signal_handler\r\n p.quit('Closing')\r\n File \"/Users/dgw/github/sopel/sopel/irc.py\", line 199, in quit\r\n self.write(['QUIT'], message)\r\n File \"/Users/dgw/github/sopel/sopel/bot.py\", line 166, in write\r\n irc.Bot.write(self, args, text=text)\r\n File \"/Users/dgw/github/sopel/sopel/irc.py\", line 161, in write\r\n self.send(temp.encode('utf-8'))\r\n File \"/Users/dgw/github/sopel/sopel/irc.py\", line 327, in _ssl_send\r\n result = self.socket.send(data)\r\nOSError: [Errno 9] Bad file descriptor\r\n```\r\n\r\nI've run into this second issue relatively often while testing things in the last few months, since I often intentionally interrupt Sopel during states other than \"Connected to network and running normally\".\r\n\r\nThe exception itself isn't a big deal (though it would be nice not to spit it out). I consider needing to press Ctrl-C twice to be the main bug here. That simply shouldn't be required.\r\n\r\nInterrupting Sopel after the \"Loading modules...\" line (but before it starts to connect) yields a clean exit, though. That's kind of weird, and I haven't had time to look into why.\r\n\r\nSince I know we have a few people digging around in Sopel's internals and refactoring things now (and doing damn good work, too!), I'm hoping one of them will look into this at some point. \ud83d\ude39\r\n\r\nThis isn't a high-severity bug, but I do want to get it fixed if possible, ideally in the next year or two. That need to press Ctrl-C twice carries over into, for example, `sopel --quit`. Sopel should _never_ need to be told to quit twice, unless something is catastrophically wrong\u2014and waiting to reconnect is a totally normal situation that shouldn't break things the way it does now.\n", "before_files": [{"content": "#!/usr/bin/env python2.7\n# coding=utf-8\n\"\"\"\nSopel - An IRC Bot\nCopyright 2008, Sean B. Palmer, inamidst.com\nCopyright \u00a9 2012-2014, Elad Alfassa <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport argparse\nimport os\nimport platform\nimport signal\nimport sys\nimport time\nimport traceback\n\nfrom sopel import bot, logger, tools, __version__\nfrom sopel.config import (\n Config,\n _create_config,\n ConfigurationError,\n ConfigurationNotFound,\n DEFAULT_HOMEDIR,\n _wizard\n)\nfrom . import utils\n\nif sys.version_info < (2, 7):\n tools.stderr('Error: Requires Python 2.7 or later. Try python2.7 sopel')\n sys.exit(1)\nif sys.version_info.major == 2:\n tools.stderr('Warning: Python 2.x is near end of life. Sopel support at that point is TBD.')\nif sys.version_info.major == 3 and sys.version_info.minor < 3:\n tools.stderr('Error: When running on Python 3, Python 3.3 is required.')\n sys.exit(1)\n\nERR_CODE = 1\n\"\"\"Error code: program exited with an error\"\"\"\nERR_CODE_NO_RESTART = 2\n\"\"\"Error code: program exited with an error and should not be restarted\n\nThis error code is used to prevent systemd from restarting the bot when it\nencounters such an error case.\n\"\"\"\n\n\ndef run(config, pid_file, daemon=False):\n delay = 20\n # Inject ca_certs from config to web for SSL validation of web requests\n if not config.core.ca_certs:\n tools.stderr(\n 'Could not open CA certificates file. SSL will not work properly!')\n\n def signal_handler(sig, frame):\n if sig == signal.SIGUSR1 or sig == signal.SIGTERM or sig == signal.SIGINT:\n tools.stderr('Got quit signal, shutting down.')\n p.quit('Closing')\n elif sig == signal.SIGUSR2 or sig == signal.SIGILL:\n tools.stderr('Got restart signal.')\n p.restart('Restarting')\n\n while True:\n try:\n p = bot.Sopel(config, daemon=daemon)\n if hasattr(signal, 'SIGUSR1'):\n signal.signal(signal.SIGUSR1, signal_handler)\n if hasattr(signal, 'SIGTERM'):\n signal.signal(signal.SIGTERM, signal_handler)\n if hasattr(signal, 'SIGINT'):\n signal.signal(signal.SIGINT, signal_handler)\n if hasattr(signal, 'SIGUSR2'):\n signal.signal(signal.SIGUSR2, signal_handler)\n if hasattr(signal, 'SIGILL'):\n signal.signal(signal.SIGILL, signal_handler)\n logger.setup_logging(p)\n p.run(config.core.host, int(config.core.port))\n except KeyboardInterrupt:\n break\n except Exception: # TODO: Be specific\n trace = traceback.format_exc()\n try:\n tools.stderr(trace)\n except Exception: # TODO: Be specific\n pass\n logfile = open(os.path.join(config.core.logdir, 'exceptions.log'), 'a')\n logfile.write('Critical exception in core')\n logfile.write(trace)\n logfile.write('----------------------------------------\\n\\n')\n logfile.close()\n # TODO: This should be handled by command_start\n # All we should need here is a return value, but replacing the\n # os._exit() call below (at the end) broke ^C.\n # This one is much harder to test, so until that one's sorted it\n # isn't worth the risk of trying to remove this one.\n os.unlink(pid_file)\n os._exit(1)\n\n if not isinstance(delay, int):\n break\n if p.wantsrestart:\n return -1\n if p.hasquit:\n break\n tools.stderr(\n 'Warning: Disconnected. Reconnecting in %s seconds...' % delay)\n time.sleep(delay)\n # TODO: This should be handled by command_start\n # All we should need here is a return value, but making this\n # a return makes Sopel hang on ^C after it says \"Closed!\"\n os.unlink(pid_file)\n os._exit(0)\n\n\ndef add_legacy_options(parser):\n parser.add_argument(\"-d\", '--fork', action=\"store_true\",\n dest=\"daemonize\", help=\"Daemonize Sopel\")\n parser.add_argument(\"-q\", '--quit', action=\"store_true\", dest=\"quit\",\n help=(\n \"Gracefully quit Sopel \"\n \"(deprecated, and will be removed in Sopel 8; \"\n \"use `sopel stop` instead)\"))\n parser.add_argument(\"-k\", '--kill', action=\"store_true\", dest=\"kill\",\n help=(\n \"Kill Sopel \"\n \"(deprecated, and will be removed in Sopel 8; \"\n \"use `sopel stop --kill` instead)\"))\n parser.add_argument(\"-r\", '--restart', action=\"store_true\", dest=\"restart\",\n help=(\n \"Restart Sopel \"\n \"(deprecated, and will be removed in Sopel 8; \"\n \"use `sopel restart` instead)\"))\n parser.add_argument(\"-l\", '--list', action=\"store_true\",\n dest=\"list_configs\",\n help=\"List all config files found\")\n parser.add_argument('--quiet', action=\"store_true\", dest=\"quiet\",\n help=\"Suppress all output\")\n parser.add_argument('-w', '--configure-all', action='store_true',\n dest='wizard',\n help=(\n \"Run the configuration wizard \"\n \"(deprecated, and will be removed in Sopel 8; \"\n \"use `sopel configure` instead)\"))\n parser.add_argument('--configure-modules', action='store_true',\n dest='mod_wizard',\n help=(\n \"Run the configuration wizard, but only for the \"\n \"module configuration options \"\n \"(deprecated, and will be removed in Sopel 8; \"\n \"use `sopel configure --modules` instead)\"))\n parser.add_argument('-v', action=\"store_true\",\n dest='version_legacy',\n help=(\n \"Show version number and exit \"\n \"(deprecated, and will be removed in Sopel 8; \"\n \"use -V/--version instead)\"))\n parser.add_argument('-V', '--version', action='store_true',\n dest='version',\n help='Show version number and exit')\n\n\ndef build_parser():\n \"\"\"Build an ``argparse.ArgumentParser`` for the bot\"\"\"\n parser = argparse.ArgumentParser(description='Sopel IRC Bot',\n usage='%(prog)s [options]')\n add_legacy_options(parser)\n utils.add_common_arguments(parser)\n\n subparsers = parser.add_subparsers(\n title='sub-commands',\n description='List of Sopel\\'s sub-commands',\n dest='action',\n metavar='{start,configure,stop,restart}')\n\n # manage `legacy` sub-command\n parser_legacy = subparsers.add_parser('legacy')\n add_legacy_options(parser_legacy)\n utils.add_common_arguments(parser_legacy)\n\n # manage `start` sub-command\n parser_start = subparsers.add_parser(\n 'start',\n description='Start a Sopel instance',\n help='Start a Sopel instance')\n parser_start.add_argument(\n '-d', '--fork',\n dest='daemonize',\n action='store_true',\n default=False,\n help='Run Sopel as a daemon (fork)')\n parser_start.add_argument(\n '--quiet',\n action=\"store_true\",\n dest=\"quiet\",\n help=\"Suppress all output\")\n utils.add_common_arguments(parser_start)\n\n # manage `configure` sub-command\n parser_configure = subparsers.add_parser(\n 'configure', help='Sopel\\'s Wizard tool')\n parser_configure.add_argument(\n '--modules',\n action='store_true',\n default=False,\n dest='modules')\n utils.add_common_arguments(parser_configure)\n\n # manage `stop` sub-command\n parser_stop = subparsers.add_parser(\n 'stop',\n description='Stop a running Sopel instance',\n help='Stop a running Sopel instance')\n parser_stop.add_argument(\n '-k', '--kill',\n action='store_true',\n default=False,\n help='Kill Sopel without a graceful quit')\n parser_stop.add_argument(\n '--quiet',\n action=\"store_true\",\n dest=\"quiet\",\n help=\"Suppress all output\")\n utils.add_common_arguments(parser_stop)\n\n # manage `restart` sub-command\n parser_restart = subparsers.add_parser(\n 'restart',\n description='Restart a running Sopel instance',\n help='Restart a running Sopel instance')\n parser_restart.add_argument(\n '--quiet',\n action=\"store_true\",\n dest=\"quiet\",\n help=\"Suppress all output\")\n utils.add_common_arguments(parser_restart)\n\n return parser\n\n\ndef check_not_root():\n \"\"\"Check if root is running the bot.\n\n It raises a ``RuntimeError`` if the user has root privileges on Linux or\n if it is the ``Administrator`` account on Windows.\n \"\"\"\n opersystem = platform.system()\n if opersystem in [\"Linux\", \"Darwin\"]:\n # Linux/Mac\n if os.getuid() == 0 or os.geteuid() == 0:\n raise RuntimeError('Error: Do not run Sopel with root privileges.')\n elif opersystem in [\"Windows\"]:\n # Windows\n if os.environ.get(\"USERNAME\") == \"Administrator\":\n raise RuntimeError('Error: Do not run Sopel as Administrator.')\n else:\n tools.stderr(\n \"Warning: %s is an uncommon operating system platform. \"\n \"Sopel should still work, but please contact Sopel's developers \"\n \"if you experience issues.\"\n % opersystem)\n\n\ndef print_version():\n \"\"\"Print Python version and Sopel version on stdout.\"\"\"\n py_ver = '%s.%s.%s' % (sys.version_info.major,\n sys.version_info.minor,\n sys.version_info.micro)\n print('Sopel %s (running on Python %s)' % (__version__, py_ver))\n print('https://sopel.chat/')\n\n\ndef print_config():\n \"\"\"Print list of available configurations from default homedir.\"\"\"\n configs = utils.enumerate_configs(DEFAULT_HOMEDIR)\n print('Config files in %s:' % DEFAULT_HOMEDIR)\n config = None\n for config in configs:\n print('\\t%s' % config)\n if not config:\n print('\\tNone found')\n\n print('-------------------------')\n\n\ndef get_configuration(options):\n \"\"\"Get or create a configuration object from ``options``.\n\n :param options: argument parser's options\n :type options: ``argparse.Namespace``\n :return: a configuration object\n :rtype: :class:`sopel.config.Config`\n\n This may raise a :exc:`sopel.config.ConfigurationError` if the\n configuration file is invalid.\n\n .. seealso::\n\n The configuration file is loaded by\n :func:`~sopel.cli.run.utils.load_settings` or created using the\n configuration wizard.\n\n \"\"\"\n try:\n bot_config = utils.load_settings(options)\n except ConfigurationNotFound as error:\n print(\n \"Welcome to Sopel!\\n\"\n \"I can't seem to find the configuration file, \"\n \"so let's generate it!\\n\")\n\n config_path = error.filename\n if not config_path.endswith('.cfg'):\n config_path = config_path + '.cfg'\n\n config_path = _create_config(config_path)\n # try to reload it now that it's created\n bot_config = Config(config_path)\n\n bot_config._is_daemonized = options.daemonize\n return bot_config\n\n\ndef get_pid_filename(options, pid_dir):\n \"\"\"Get the pid file name in ``pid_dir`` from the given ``options``.\n\n :param options: command line options\n :param str pid_dir: path to the pid directory\n :return: absolute filename of the pid file\n\n By default, it's ``sopel.pid``, but if a configuration filename is given\n in the ``options``, its basename is used to generate the filename, as:\n ``sopel-{basename}.pid`` instead.\n \"\"\"\n name = 'sopel.pid'\n if options.config:\n basename = os.path.basename(options.config)\n if basename.endswith('.cfg'):\n basename = basename[:-4]\n name = 'sopel-%s.pid' % basename\n\n return os.path.abspath(os.path.join(pid_dir, name))\n\n\ndef get_running_pid(filename):\n \"\"\"Retrieve the PID number from the given ``filename``.\n\n :param str filename: path to file to read the PID from\n :return: the PID number of a Sopel instance if running, ``None`` otherwise\n :rtype: integer\n\n This function tries to retrieve a PID number from the given ``filename``,\n as an integer, and returns ``None`` if the file is not found or if the\n content is not an integer.\n \"\"\"\n if not os.path.isfile(filename):\n return\n\n with open(filename, 'r') as pid_file:\n try:\n return int(pid_file.read())\n except ValueError:\n pass\n\n\ndef command_start(opts):\n \"\"\"Start a Sopel instance\"\"\"\n # Step One: Get the configuration file and prepare to run\n try:\n config_module = get_configuration(opts)\n except ConfigurationError as e:\n tools.stderr(e)\n return ERR_CODE_NO_RESTART\n\n if config_module.core.not_configured:\n tools.stderr('Bot is not configured, can\\'t start')\n return ERR_CODE_NO_RESTART\n\n # Step Two: Manage logfile, stdout and stderr\n utils.redirect_outputs(config_module, opts.quiet)\n\n # Step Three: Handle process-lifecycle options and manage the PID file\n pid_dir = config_module.core.pid_dir\n pid_file_path = get_pid_filename(opts, pid_dir)\n pid = get_running_pid(pid_file_path)\n\n if pid is not None and tools.check_pid(pid):\n tools.stderr('There\\'s already a Sopel instance running '\n 'with this config file.')\n tools.stderr('Try using either the `sopel stop` '\n 'or the `sopel restart` command.')\n return ERR_CODE\n\n if opts.daemonize:\n child_pid = os.fork()\n if child_pid is not 0:\n return\n\n with open(pid_file_path, 'w') as pid_file:\n pid_file.write(str(os.getpid()))\n\n # Step Four: Run Sopel\n ret = run(config_module, pid_file_path)\n\n # Step Five: Shutdown Clean-Up\n os.unlink(pid_file_path)\n\n if ret == -1:\n # Restart\n os.execv(sys.executable, ['python'] + sys.argv)\n else:\n # Quit\n return ret\n\n\ndef command_configure(opts):\n \"\"\"Sopel Configuration Wizard\"\"\"\n if getattr(opts, 'modules', False):\n _wizard('mod', opts.config)\n else:\n _wizard('all', opts.config)\n\n\ndef command_stop(opts):\n \"\"\"Stop a running Sopel instance\"\"\"\n # Get Configuration\n try:\n settings = utils.load_settings(opts)\n except ConfigurationNotFound as error:\n tools.stderr('Configuration \"%s\" not found' % error.filename)\n return ERR_CODE\n\n if settings.core.not_configured:\n tools.stderr('Sopel is not configured, can\\'t stop')\n return ERR_CODE\n\n # Redirect Outputs\n utils.redirect_outputs(settings, opts.quiet)\n\n # Get Sopel's PID\n filename = get_pid_filename(opts, settings.core.pid_dir)\n pid = get_running_pid(filename)\n\n if pid is None or not tools.check_pid(pid):\n tools.stderr('Sopel is not running!')\n return ERR_CODE\n\n # Stop Sopel\n if opts.kill:\n tools.stderr('Killing the Sopel')\n os.kill(pid, signal.SIGKILL)\n return\n\n tools.stderr('Signaling Sopel to stop gracefully')\n if hasattr(signal, 'SIGUSR1'):\n os.kill(pid, signal.SIGUSR1)\n else:\n # Windows will not generate SIGTERM itself\n # https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal\n os.kill(pid, signal.SIGTERM)\n\n\ndef command_restart(opts):\n \"\"\"Restart a running Sopel instance\"\"\"\n # Get Configuration\n try:\n settings = utils.load_settings(opts)\n except ConfigurationNotFound as error:\n tools.stderr('Configuration \"%s\" not found' % error.filename)\n return ERR_CODE\n\n if settings.core.not_configured:\n tools.stderr('Sopel is not configured, can\\'t stop')\n return ERR_CODE\n\n # Redirect Outputs\n utils.redirect_outputs(settings, opts.quiet)\n\n # Get Sopel's PID\n filename = get_pid_filename(opts, settings.core.pid_dir)\n pid = get_running_pid(filename)\n\n if pid is None or not tools.check_pid(pid):\n tools.stderr('Sopel is not running!')\n return ERR_CODE\n\n tools.stderr('Asking Sopel to restart')\n if hasattr(signal, 'SIGUSR2'):\n os.kill(pid, signal.SIGUSR2)\n else:\n # Windows will not generate SIGILL itself\n # https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal\n os.kill(pid, signal.SIGILL)\n\n\ndef command_legacy(opts):\n \"\"\"Legacy Sopel run script\n\n The ``legacy`` command manages the old-style ``sopel`` command line tool.\n Most of its features are replaced by the following commands:\n\n * ``sopel start`` replaces the default behavior (run the bot)\n * ``sopel stop`` replaces the ``--quit/--kill`` options\n * ``sopel restart`` replaces the ``--restart`` option\n * ``sopel configure`` replaces the\n ``-w/--configure-all/--configure-modules`` options\n\n The ``-v`` option for \"version\" is deprecated, ``-V/--version`` should be\n used instead.\n\n .. seealso::\n\n The github issue `#1471`__ tracks various changes requested for future\n versions of Sopel, some of them related to this legacy command.\n\n .. __: https://github.com/sopel-irc/sopel/issues/1471\n\n \"\"\"\n # Step One: Handle \"No config needed\" options\n if opts.version:\n print_version()\n return\n elif opts.version_legacy:\n tools.stderr(\n 'WARNING: option -v is deprecated; '\n 'use `sopel -V/--version` instead')\n print_version()\n return\n\n if opts.wizard:\n tools.stderr(\n 'WARNING: option -w/--configure-all is deprecated; '\n 'use `sopel configure` instead')\n _wizard('all', opts.config)\n return\n\n if opts.mod_wizard:\n tools.stderr(\n 'WARNING: option --configure-modules is deprecated; '\n 'use `sopel configure --modules` instead')\n _wizard('mod', opts.config)\n return\n\n if opts.list_configs:\n print_config()\n return\n\n # Step Two: Get the configuration file and prepare to run\n try:\n config_module = get_configuration(opts)\n except ConfigurationError as e:\n tools.stderr(e)\n return ERR_CODE_NO_RESTART\n\n if config_module.core.not_configured:\n tools.stderr('Bot is not configured, can\\'t start')\n return ERR_CODE_NO_RESTART\n\n # Step Three: Manage logfile, stdout and stderr\n utils.redirect_outputs(config_module, opts.quiet)\n\n # Step Four: Handle process-lifecycle options and manage the PID file\n pid_dir = config_module.core.pid_dir\n pid_file_path = get_pid_filename(opts, pid_dir)\n old_pid = get_running_pid(pid_file_path)\n\n if old_pid is not None and tools.check_pid(old_pid):\n if not opts.quit and not opts.kill and not opts.restart:\n tools.stderr(\n 'There\\'s already a Sopel instance running with this config file')\n tools.stderr(\n 'Try using either the `sopel stop` command or the `sopel restart` command')\n return ERR_CODE\n elif opts.kill:\n tools.stderr(\n 'WARNING: option -k/--kill is deprecated; '\n 'use `sopel stop --kill` instead')\n tools.stderr('Killing the Sopel')\n os.kill(old_pid, signal.SIGKILL)\n return\n elif opts.quit:\n tools.stderr(\n 'WARNING: options -q/--quit is deprecated; '\n 'use `sopel stop` instead')\n tools.stderr('Signaling Sopel to stop gracefully')\n if hasattr(signal, 'SIGUSR1'):\n os.kill(old_pid, signal.SIGUSR1)\n else:\n # Windows will not generate SIGTERM itself\n # https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal\n os.kill(old_pid, signal.SIGTERM)\n return\n elif opts.restart:\n tools.stderr(\n 'WARNING: options --restart is deprecated; '\n 'use `sopel restart` instead')\n tools.stderr('Asking Sopel to restart')\n if hasattr(signal, 'SIGUSR2'):\n os.kill(old_pid, signal.SIGUSR2)\n else:\n # Windows will not generate SIGILL itself\n # https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal\n os.kill(old_pid, signal.SIGILL)\n return\n elif opts.kill or opts.quit or opts.restart:\n tools.stderr('Sopel is not running!')\n return ERR_CODE\n\n if opts.daemonize:\n child_pid = os.fork()\n if child_pid is not 0:\n return\n with open(pid_file_path, 'w') as pid_file:\n pid_file.write(str(os.getpid()))\n\n # Step Five: Initialize and run Sopel\n ret = run(config_module, pid_file_path)\n os.unlink(pid_file_path)\n if ret == -1:\n os.execv(sys.executable, ['python'] + sys.argv)\n else:\n return ret\n\n\ndef main(argv=None):\n \"\"\"Sopel run script entry point\"\"\"\n try:\n # Step One: Parse The Command Line\n parser = build_parser()\n\n # make sure to have an action first (`legacy` by default)\n # TODO: `start` should be the default in Sopel 8\n argv = argv or sys.argv[1:]\n if not argv:\n argv = ['legacy']\n elif argv[0].startswith('-') and argv[0] not in ['-h', '--help']:\n argv = ['legacy'] + argv\n\n opts = parser.parse_args(argv)\n\n # Step Two: \"Do not run as root\" checks\n try:\n check_not_root()\n except RuntimeError as err:\n tools.stderr('%s' % err)\n return ERR_CODE\n\n # Step Three: Handle command\n action = getattr(opts, 'action', 'legacy')\n command = {\n 'legacy': command_legacy,\n 'start': command_start,\n 'configure': command_configure,\n 'stop': command_stop,\n 'restart': command_restart,\n }.get(action)\n return command(opts)\n except KeyboardInterrupt:\n print(\"\\n\\nInterrupted\")\n return ERR_CODE\n\n\nif __name__ == '__main__':\n sys.exit(main())\n", "path": "sopel/cli/run.py"}], "after_files": [{"content": "#!/usr/bin/env python2.7\n# coding=utf-8\n\"\"\"\nSopel - An IRC Bot\nCopyright 2008, Sean B. Palmer, inamidst.com\nCopyright \u00a9 2012-2014, Elad Alfassa <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport sys\n\nfrom sopel import tools\n\nif sys.version_info < (2, 7):\n tools.stderr('Error: Requires Python 2.7 or later. Try python2.7 sopel')\n sys.exit(1)\nif sys.version_info.major == 2:\n tools.stderr('Warning: Python 2.x is near end of life. Sopel support at that point is TBD.')\nif sys.version_info.major == 3 and sys.version_info.minor < 3:\n tools.stderr('Error: When running on Python 3, Python 3.3 is required.')\n sys.exit(1)\n\nimport argparse\nimport os\nimport platform\nimport signal\nimport time\nimport traceback\n\nfrom sopel import bot, logger, __version__\nfrom sopel.config import (\n Config,\n _create_config,\n ConfigurationError,\n ConfigurationNotFound,\n DEFAULT_HOMEDIR,\n _wizard\n)\nfrom . import utils\n\n\nERR_CODE = 1\n\"\"\"Error code: program exited with an error\"\"\"\nERR_CODE_NO_RESTART = 2\n\"\"\"Error code: program exited with an error and should not be restarted\n\nThis error code is used to prevent systemd from restarting the bot when it\nencounters such an error case.\n\"\"\"\n\n\ndef run(config, pid_file, daemon=False):\n delay = 20\n # Inject ca_certs from config to web for SSL validation of web requests\n if not config.core.ca_certs:\n tools.stderr(\n 'Could not open CA certificates file. SSL will not work properly!')\n\n def signal_handler(sig, frame):\n if sig == signal.SIGUSR1 or sig == signal.SIGTERM or sig == signal.SIGINT:\n tools.stderr('Got quit signal, shutting down.')\n p.quit('Closing')\n elif sig == signal.SIGUSR2 or sig == signal.SIGILL:\n tools.stderr('Got restart signal.')\n p.restart('Restarting')\n\n # Define empty variable `p` for bot\n p = None\n while True:\n if p and p.hasquit: # Check if `hasquit` was set for bot during disconnected phase\n break\n try:\n p = bot.Sopel(config, daemon=daemon)\n if hasattr(signal, 'SIGUSR1'):\n signal.signal(signal.SIGUSR1, signal_handler)\n if hasattr(signal, 'SIGTERM'):\n signal.signal(signal.SIGTERM, signal_handler)\n if hasattr(signal, 'SIGINT'):\n signal.signal(signal.SIGINT, signal_handler)\n if hasattr(signal, 'SIGUSR2'):\n signal.signal(signal.SIGUSR2, signal_handler)\n if hasattr(signal, 'SIGILL'):\n signal.signal(signal.SIGILL, signal_handler)\n logger.setup_logging(p)\n p.run(config.core.host, int(config.core.port))\n except KeyboardInterrupt:\n break\n except Exception: # TODO: Be specific\n trace = traceback.format_exc()\n try:\n tools.stderr(trace)\n except Exception: # TODO: Be specific\n pass\n logfile = open(os.path.join(config.core.logdir, 'exceptions.log'), 'a')\n logfile.write('Critical exception in core')\n logfile.write(trace)\n logfile.write('----------------------------------------\\n\\n')\n logfile.close()\n # TODO: This should be handled by command_start\n # All we should need here is a return value, but replacing the\n # os._exit() call below (at the end) broke ^C.\n # This one is much harder to test, so until that one's sorted it\n # isn't worth the risk of trying to remove this one.\n os.unlink(pid_file)\n os._exit(1)\n\n if not isinstance(delay, int):\n break\n if p.wantsrestart:\n return -1\n if p.hasquit:\n break\n tools.stderr(\n 'Warning: Disconnected. Reconnecting in %s seconds...' % delay)\n time.sleep(delay)\n # TODO: This should be handled by command_start\n # All we should need here is a return value, but making this\n # a return makes Sopel hang on ^C after it says \"Closed!\"\n os.unlink(pid_file)\n os._exit(0)\n\n\ndef add_legacy_options(parser):\n parser.add_argument(\"-d\", '--fork', action=\"store_true\",\n dest=\"daemonize\", help=\"Daemonize Sopel\")\n parser.add_argument(\"-q\", '--quit', action=\"store_true\", dest=\"quit\",\n help=(\n \"Gracefully quit Sopel \"\n \"(deprecated, and will be removed in Sopel 8; \"\n \"use `sopel stop` instead)\"))\n parser.add_argument(\"-k\", '--kill', action=\"store_true\", dest=\"kill\",\n help=(\n \"Kill Sopel \"\n \"(deprecated, and will be removed in Sopel 8; \"\n \"use `sopel stop --kill` instead)\"))\n parser.add_argument(\"-r\", '--restart', action=\"store_true\", dest=\"restart\",\n help=(\n \"Restart Sopel \"\n \"(deprecated, and will be removed in Sopel 8; \"\n \"use `sopel restart` instead)\"))\n parser.add_argument(\"-l\", '--list', action=\"store_true\",\n dest=\"list_configs\",\n help=\"List all config files found\")\n parser.add_argument('--quiet', action=\"store_true\", dest=\"quiet\",\n help=\"Suppress all output\")\n parser.add_argument('-w', '--configure-all', action='store_true',\n dest='wizard',\n help=(\n \"Run the configuration wizard \"\n \"(deprecated, and will be removed in Sopel 8; \"\n \"use `sopel configure` instead)\"))\n parser.add_argument('--configure-modules', action='store_true',\n dest='mod_wizard',\n help=(\n \"Run the configuration wizard, but only for the \"\n \"module configuration options \"\n \"(deprecated, and will be removed in Sopel 8; \"\n \"use `sopel configure --modules` instead)\"))\n parser.add_argument('-v', action=\"store_true\",\n dest='version_legacy',\n help=(\n \"Show version number and exit \"\n \"(deprecated, and will be removed in Sopel 8; \"\n \"use -V/--version instead)\"))\n parser.add_argument('-V', '--version', action='store_true',\n dest='version',\n help='Show version number and exit')\n\n\ndef build_parser():\n \"\"\"Build an ``argparse.ArgumentParser`` for the bot\"\"\"\n parser = argparse.ArgumentParser(description='Sopel IRC Bot',\n usage='%(prog)s [options]')\n add_legacy_options(parser)\n utils.add_common_arguments(parser)\n\n subparsers = parser.add_subparsers(\n title='sub-commands',\n description='List of Sopel\\'s sub-commands',\n dest='action',\n metavar='{start,configure,stop,restart}')\n\n # manage `legacy` sub-command\n parser_legacy = subparsers.add_parser('legacy')\n add_legacy_options(parser_legacy)\n utils.add_common_arguments(parser_legacy)\n\n # manage `start` sub-command\n parser_start = subparsers.add_parser(\n 'start',\n description='Start a Sopel instance',\n help='Start a Sopel instance')\n parser_start.add_argument(\n '-d', '--fork',\n dest='daemonize',\n action='store_true',\n default=False,\n help='Run Sopel as a daemon (fork)')\n parser_start.add_argument(\n '--quiet',\n action=\"store_true\",\n dest=\"quiet\",\n help=\"Suppress all output\")\n utils.add_common_arguments(parser_start)\n\n # manage `configure` sub-command\n parser_configure = subparsers.add_parser(\n 'configure', help='Sopel\\'s Wizard tool')\n parser_configure.add_argument(\n '--modules',\n action='store_true',\n default=False,\n dest='modules')\n utils.add_common_arguments(parser_configure)\n\n # manage `stop` sub-command\n parser_stop = subparsers.add_parser(\n 'stop',\n description='Stop a running Sopel instance',\n help='Stop a running Sopel instance')\n parser_stop.add_argument(\n '-k', '--kill',\n action='store_true',\n default=False,\n help='Kill Sopel without a graceful quit')\n parser_stop.add_argument(\n '--quiet',\n action=\"store_true\",\n dest=\"quiet\",\n help=\"Suppress all output\")\n utils.add_common_arguments(parser_stop)\n\n # manage `restart` sub-command\n parser_restart = subparsers.add_parser(\n 'restart',\n description='Restart a running Sopel instance',\n help='Restart a running Sopel instance')\n parser_restart.add_argument(\n '--quiet',\n action=\"store_true\",\n dest=\"quiet\",\n help=\"Suppress all output\")\n utils.add_common_arguments(parser_restart)\n\n return parser\n\n\ndef check_not_root():\n \"\"\"Check if root is running the bot.\n\n It raises a ``RuntimeError`` if the user has root privileges on Linux or\n if it is the ``Administrator`` account on Windows.\n \"\"\"\n opersystem = platform.system()\n if opersystem in [\"Linux\", \"Darwin\"]:\n # Linux/Mac\n if os.getuid() == 0 or os.geteuid() == 0:\n raise RuntimeError('Error: Do not run Sopel with root privileges.')\n elif opersystem in [\"Windows\"]:\n # Windows\n if os.environ.get(\"USERNAME\") == \"Administrator\":\n raise RuntimeError('Error: Do not run Sopel as Administrator.')\n else:\n tools.stderr(\n \"Warning: %s is an uncommon operating system platform. \"\n \"Sopel should still work, but please contact Sopel's developers \"\n \"if you experience issues.\"\n % opersystem)\n\n\ndef print_version():\n \"\"\"Print Python version and Sopel version on stdout.\"\"\"\n py_ver = '%s.%s.%s' % (sys.version_info.major,\n sys.version_info.minor,\n sys.version_info.micro)\n print('Sopel %s (running on Python %s)' % (__version__, py_ver))\n print('https://sopel.chat/')\n\n\ndef print_config():\n \"\"\"Print list of available configurations from default homedir.\"\"\"\n configs = utils.enumerate_configs(DEFAULT_HOMEDIR)\n print('Config files in %s:' % DEFAULT_HOMEDIR)\n config = None\n for config in configs:\n print('\\t%s' % config)\n if not config:\n print('\\tNone found')\n\n print('-------------------------')\n\n\ndef get_configuration(options):\n \"\"\"Get or create a configuration object from ``options``.\n\n :param options: argument parser's options\n :type options: ``argparse.Namespace``\n :return: a configuration object\n :rtype: :class:`sopel.config.Config`\n\n This may raise a :exc:`sopel.config.ConfigurationError` if the\n configuration file is invalid.\n\n .. seealso::\n\n The configuration file is loaded by\n :func:`~sopel.cli.run.utils.load_settings` or created using the\n configuration wizard.\n\n \"\"\"\n try:\n bot_config = utils.load_settings(options)\n except ConfigurationNotFound as error:\n print(\n \"Welcome to Sopel!\\n\"\n \"I can't seem to find the configuration file, \"\n \"so let's generate it!\\n\")\n\n config_path = error.filename\n if not config_path.endswith('.cfg'):\n config_path = config_path + '.cfg'\n\n config_path = _create_config(config_path)\n # try to reload it now that it's created\n bot_config = Config(config_path)\n\n bot_config._is_daemonized = options.daemonize\n return bot_config\n\n\ndef get_pid_filename(options, pid_dir):\n \"\"\"Get the pid file name in ``pid_dir`` from the given ``options``.\n\n :param options: command line options\n :param str pid_dir: path to the pid directory\n :return: absolute filename of the pid file\n\n By default, it's ``sopel.pid``, but if a configuration filename is given\n in the ``options``, its basename is used to generate the filename, as:\n ``sopel-{basename}.pid`` instead.\n \"\"\"\n name = 'sopel.pid'\n if options.config:\n basename = os.path.basename(options.config)\n if basename.endswith('.cfg'):\n basename = basename[:-4]\n name = 'sopel-%s.pid' % basename\n\n return os.path.abspath(os.path.join(pid_dir, name))\n\n\ndef get_running_pid(filename):\n \"\"\"Retrieve the PID number from the given ``filename``.\n\n :param str filename: path to file to read the PID from\n :return: the PID number of a Sopel instance if running, ``None`` otherwise\n :rtype: integer\n\n This function tries to retrieve a PID number from the given ``filename``,\n as an integer, and returns ``None`` if the file is not found or if the\n content is not an integer.\n \"\"\"\n if not os.path.isfile(filename):\n return\n\n with open(filename, 'r') as pid_file:\n try:\n return int(pid_file.read())\n except ValueError:\n pass\n\n\ndef command_start(opts):\n \"\"\"Start a Sopel instance\"\"\"\n # Step One: Get the configuration file and prepare to run\n try:\n config_module = get_configuration(opts)\n except ConfigurationError as e:\n tools.stderr(e)\n return ERR_CODE_NO_RESTART\n\n if config_module.core.not_configured:\n tools.stderr('Bot is not configured, can\\'t start')\n return ERR_CODE_NO_RESTART\n\n # Step Two: Manage logfile, stdout and stderr\n utils.redirect_outputs(config_module, opts.quiet)\n\n # Step Three: Handle process-lifecycle options and manage the PID file\n pid_dir = config_module.core.pid_dir\n pid_file_path = get_pid_filename(opts, pid_dir)\n pid = get_running_pid(pid_file_path)\n\n if pid is not None and tools.check_pid(pid):\n tools.stderr('There\\'s already a Sopel instance running '\n 'with this config file.')\n tools.stderr('Try using either the `sopel stop` '\n 'or the `sopel restart` command.')\n return ERR_CODE\n\n if opts.daemonize:\n child_pid = os.fork()\n if child_pid is not 0:\n return\n\n with open(pid_file_path, 'w') as pid_file:\n pid_file.write(str(os.getpid()))\n\n # Step Four: Run Sopel\n ret = run(config_module, pid_file_path)\n\n # Step Five: Shutdown Clean-Up\n os.unlink(pid_file_path)\n\n if ret == -1:\n # Restart\n os.execv(sys.executable, ['python'] + sys.argv)\n else:\n # Quit\n return ret\n\n\ndef command_configure(opts):\n \"\"\"Sopel Configuration Wizard\"\"\"\n if getattr(opts, 'modules', False):\n _wizard('mod', opts.config)\n else:\n _wizard('all', opts.config)\n\n\ndef command_stop(opts):\n \"\"\"Stop a running Sopel instance\"\"\"\n # Get Configuration\n try:\n settings = utils.load_settings(opts)\n except ConfigurationNotFound as error:\n tools.stderr('Configuration \"%s\" not found' % error.filename)\n return ERR_CODE\n\n if settings.core.not_configured:\n tools.stderr('Sopel is not configured, can\\'t stop')\n return ERR_CODE\n\n # Redirect Outputs\n utils.redirect_outputs(settings, opts.quiet)\n\n # Get Sopel's PID\n filename = get_pid_filename(opts, settings.core.pid_dir)\n pid = get_running_pid(filename)\n\n if pid is None or not tools.check_pid(pid):\n tools.stderr('Sopel is not running!')\n return ERR_CODE\n\n # Stop Sopel\n if opts.kill:\n tools.stderr('Killing the Sopel')\n os.kill(pid, signal.SIGKILL)\n return\n\n tools.stderr('Signaling Sopel to stop gracefully')\n if hasattr(signal, 'SIGUSR1'):\n os.kill(pid, signal.SIGUSR1)\n else:\n # Windows will not generate SIGTERM itself\n # https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal\n os.kill(pid, signal.SIGTERM)\n\n\ndef command_restart(opts):\n \"\"\"Restart a running Sopel instance\"\"\"\n # Get Configuration\n try:\n settings = utils.load_settings(opts)\n except ConfigurationNotFound as error:\n tools.stderr('Configuration \"%s\" not found' % error.filename)\n return ERR_CODE\n\n if settings.core.not_configured:\n tools.stderr('Sopel is not configured, can\\'t stop')\n return ERR_CODE\n\n # Redirect Outputs\n utils.redirect_outputs(settings, opts.quiet)\n\n # Get Sopel's PID\n filename = get_pid_filename(opts, settings.core.pid_dir)\n pid = get_running_pid(filename)\n\n if pid is None or not tools.check_pid(pid):\n tools.stderr('Sopel is not running!')\n return ERR_CODE\n\n tools.stderr('Asking Sopel to restart')\n if hasattr(signal, 'SIGUSR2'):\n os.kill(pid, signal.SIGUSR2)\n else:\n # Windows will not generate SIGILL itself\n # https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal\n os.kill(pid, signal.SIGILL)\n\n\ndef command_legacy(opts):\n \"\"\"Legacy Sopel run script\n\n The ``legacy`` command manages the old-style ``sopel`` command line tool.\n Most of its features are replaced by the following commands:\n\n * ``sopel start`` replaces the default behavior (run the bot)\n * ``sopel stop`` replaces the ``--quit/--kill`` options\n * ``sopel restart`` replaces the ``--restart`` option\n * ``sopel configure`` replaces the\n ``-w/--configure-all/--configure-modules`` options\n\n The ``-v`` option for \"version\" is deprecated, ``-V/--version`` should be\n used instead.\n\n .. seealso::\n\n The github issue `#1471`__ tracks various changes requested for future\n versions of Sopel, some of them related to this legacy command.\n\n .. __: https://github.com/sopel-irc/sopel/issues/1471\n\n \"\"\"\n # Step One: Handle \"No config needed\" options\n if opts.version:\n print_version()\n return\n elif opts.version_legacy:\n tools.stderr(\n 'WARNING: option -v is deprecated; '\n 'use `sopel -V/--version` instead')\n print_version()\n return\n\n if opts.wizard:\n tools.stderr(\n 'WARNING: option -w/--configure-all is deprecated; '\n 'use `sopel configure` instead')\n _wizard('all', opts.config)\n return\n\n if opts.mod_wizard:\n tools.stderr(\n 'WARNING: option --configure-modules is deprecated; '\n 'use `sopel configure --modules` instead')\n _wizard('mod', opts.config)\n return\n\n if opts.list_configs:\n print_config()\n return\n\n # Step Two: Get the configuration file and prepare to run\n try:\n config_module = get_configuration(opts)\n except ConfigurationError as e:\n tools.stderr(e)\n return ERR_CODE_NO_RESTART\n\n if config_module.core.not_configured:\n tools.stderr('Bot is not configured, can\\'t start')\n return ERR_CODE_NO_RESTART\n\n # Step Three: Manage logfile, stdout and stderr\n utils.redirect_outputs(config_module, opts.quiet)\n\n # Step Four: Handle process-lifecycle options and manage the PID file\n pid_dir = config_module.core.pid_dir\n pid_file_path = get_pid_filename(opts, pid_dir)\n old_pid = get_running_pid(pid_file_path)\n\n if old_pid is not None and tools.check_pid(old_pid):\n if not opts.quit and not opts.kill and not opts.restart:\n tools.stderr(\n 'There\\'s already a Sopel instance running with this config file')\n tools.stderr(\n 'Try using either the `sopel stop` command or the `sopel restart` command')\n return ERR_CODE\n elif opts.kill:\n tools.stderr(\n 'WARNING: option -k/--kill is deprecated; '\n 'use `sopel stop --kill` instead')\n tools.stderr('Killing the Sopel')\n os.kill(old_pid, signal.SIGKILL)\n return\n elif opts.quit:\n tools.stderr(\n 'WARNING: options -q/--quit is deprecated; '\n 'use `sopel stop` instead')\n tools.stderr('Signaling Sopel to stop gracefully')\n if hasattr(signal, 'SIGUSR1'):\n os.kill(old_pid, signal.SIGUSR1)\n else:\n # Windows will not generate SIGTERM itself\n # https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal\n os.kill(old_pid, signal.SIGTERM)\n return\n elif opts.restart:\n tools.stderr(\n 'WARNING: options --restart is deprecated; '\n 'use `sopel restart` instead')\n tools.stderr('Asking Sopel to restart')\n if hasattr(signal, 'SIGUSR2'):\n os.kill(old_pid, signal.SIGUSR2)\n else:\n # Windows will not generate SIGILL itself\n # https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal\n os.kill(old_pid, signal.SIGILL)\n return\n elif opts.kill or opts.quit or opts.restart:\n tools.stderr('Sopel is not running!')\n return ERR_CODE\n\n if opts.daemonize:\n child_pid = os.fork()\n if child_pid is not 0:\n return\n with open(pid_file_path, 'w') as pid_file:\n pid_file.write(str(os.getpid()))\n\n # Step Five: Initialize and run Sopel\n ret = run(config_module, pid_file_path)\n os.unlink(pid_file_path)\n if ret == -1:\n os.execv(sys.executable, ['python'] + sys.argv)\n else:\n return ret\n\n\ndef main(argv=None):\n \"\"\"Sopel run script entry point\"\"\"\n try:\n # Step One: Parse The Command Line\n parser = build_parser()\n\n # make sure to have an action first (`legacy` by default)\n #\u00a0TODO: `start` should be the default in Sopel 8\n argv = argv or sys.argv[1:]\n if not argv:\n argv = ['legacy']\n elif argv[0].startswith('-') and argv[0] not in ['-h', '--help']:\n argv = ['legacy'] + argv\n\n opts = parser.parse_args(argv)\n\n # Step Two: \"Do not run as root\" checks\n try:\n check_not_root()\n except RuntimeError as err:\n tools.stderr('%s' % err)\n return ERR_CODE\n\n # Step Three: Handle command\n action = getattr(opts, 'action', 'legacy')\n command = {\n 'legacy': command_legacy,\n 'start': command_start,\n 'configure': command_configure,\n 'stop': command_stop,\n 'restart': command_restart,\n }.get(action)\n return command(opts)\n except KeyboardInterrupt:\n print(\"\\n\\nInterrupted\")\n return ERR_CODE\n\n\nif __name__ == '__main__':\n sys.exit(main())\n", "path": "sopel/cli/run.py"}]} |
gh_patches_debug_1526 | rasdani/github-patches | git_diff | pallets__click-2714 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Docs wrongly links PRs and Issues to flask

Environment:
- Python version: N/A
- Click version: N/A
Docs wrongly links PRs and Issues to flask

Environment:
- Python version: N/A
- Click version: N/A
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 from pallets_sphinx_themes import get_version
2 from pallets_sphinx_themes import ProjectLink
3
4 # Project --------------------------------------------------------------
5
6 project = "Click"
7 copyright = "2014 Pallets"
8 author = "Pallets"
9 release, version = get_version("Click")
10
11 # General --------------------------------------------------------------
12
13 default_role = "code"
14 extensions = [
15 "sphinx.ext.autodoc",
16 "sphinx.ext.extlinks",
17 "sphinx.ext.intersphinx",
18 "sphinx_tabs.tabs",
19 "sphinxcontrib.log_cabinet",
20 "pallets_sphinx_themes",
21 ]
22 autodoc_member_order = "bysource"
23 autodoc_typehints = "description"
24 autodoc_preserve_defaults = True
25 extlinks = {
26 "issue": ("https://github.com/pallets/flask/issues/%s", "#%s"),
27 "pr": ("https://github.com/pallets/flask/pull/%s", "#%s"),
28 }
29 intersphinx_mapping = {
30 "python": ("https://docs.python.org/3/", None),
31 }
32
33 # HTML -----------------------------------------------------------------
34
35 html_theme = "click"
36 html_theme_options = {"index_sidebar_logo": False}
37 html_context = {
38 "project_links": [
39 ProjectLink("Donate", "https://palletsprojects.com/donate"),
40 ProjectLink("PyPI Releases", "https://pypi.org/project/click/"),
41 ProjectLink("Source Code", "https://github.com/pallets/click/"),
42 ProjectLink("Issue Tracker", "https://github.com/pallets/click/issues/"),
43 ProjectLink("Chat", "https://discord.gg/pallets"),
44 ]
45 }
46 html_sidebars = {
47 "index": ["project.html", "localtoc.html", "searchbox.html", "ethicalads.html"],
48 "**": ["localtoc.html", "relations.html", "searchbox.html", "ethicalads.html"],
49 }
50 singlehtml_sidebars = {"index": ["project.html", "localtoc.html", "ethicalads.html"]}
51 html_static_path = ["_static"]
52 html_favicon = "_static/click-icon.png"
53 html_logo = "_static/click-logo-sidebar.png"
54 html_title = f"Click Documentation ({version})"
55 html_show_sourcelink = False
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -23,8 +23,8 @@
autodoc_typehints = "description"
autodoc_preserve_defaults = True
extlinks = {
- "issue": ("https://github.com/pallets/flask/issues/%s", "#%s"),
- "pr": ("https://github.com/pallets/flask/pull/%s", "#%s"),
+ "issue": ("https://github.com/pallets/click/issues/%s", "#%s"),
+ "pr": ("https://github.com/pallets/click/pull/%s", "#%s"),
}
intersphinx_mapping = {
"python": ("https://docs.python.org/3/", None),
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -23,8 +23,8 @@\n autodoc_typehints = \"description\"\n autodoc_preserve_defaults = True\n extlinks = {\n- \"issue\": (\"https://github.com/pallets/flask/issues/%s\", \"#%s\"),\n- \"pr\": (\"https://github.com/pallets/flask/pull/%s\", \"#%s\"),\n+ \"issue\": (\"https://github.com/pallets/click/issues/%s\", \"#%s\"),\n+ \"pr\": (\"https://github.com/pallets/click/pull/%s\", \"#%s\"),\n }\n intersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n", "issue": "Docs wrongly links PRs and Issues to flask\n\r\n\r\nEnvironment:\r\n\r\n- Python version: N/A\r\n- Click version: N/A\r\n\nDocs wrongly links PRs and Issues to flask\n\r\n\r\nEnvironment:\r\n\r\n- Python version: N/A\r\n- Click version: N/A\r\n\n", "before_files": [{"content": "from pallets_sphinx_themes import get_version\nfrom pallets_sphinx_themes import ProjectLink\n\n# Project --------------------------------------------------------------\n\nproject = \"Click\"\ncopyright = \"2014 Pallets\"\nauthor = \"Pallets\"\nrelease, version = get_version(\"Click\")\n\n# General --------------------------------------------------------------\n\ndefault_role = \"code\"\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.extlinks\",\n \"sphinx.ext.intersphinx\",\n \"sphinx_tabs.tabs\",\n \"sphinxcontrib.log_cabinet\",\n \"pallets_sphinx_themes\",\n]\nautodoc_member_order = \"bysource\"\nautodoc_typehints = \"description\"\nautodoc_preserve_defaults = True\nextlinks = {\n \"issue\": (\"https://github.com/pallets/flask/issues/%s\", \"#%s\"),\n \"pr\": (\"https://github.com/pallets/flask/pull/%s\", \"#%s\"),\n}\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n}\n\n# HTML -----------------------------------------------------------------\n\nhtml_theme = \"click\"\nhtml_theme_options = {\"index_sidebar_logo\": False}\nhtml_context = {\n \"project_links\": [\n ProjectLink(\"Donate\", \"https://palletsprojects.com/donate\"),\n ProjectLink(\"PyPI Releases\", \"https://pypi.org/project/click/\"),\n ProjectLink(\"Source Code\", \"https://github.com/pallets/click/\"),\n ProjectLink(\"Issue Tracker\", \"https://github.com/pallets/click/issues/\"),\n ProjectLink(\"Chat\", \"https://discord.gg/pallets\"),\n ]\n}\nhtml_sidebars = {\n \"index\": [\"project.html\", \"localtoc.html\", \"searchbox.html\", \"ethicalads.html\"],\n \"**\": [\"localtoc.html\", \"relations.html\", \"searchbox.html\", \"ethicalads.html\"],\n}\nsinglehtml_sidebars = {\"index\": [\"project.html\", \"localtoc.html\", \"ethicalads.html\"]}\nhtml_static_path = [\"_static\"]\nhtml_favicon = \"_static/click-icon.png\"\nhtml_logo = \"_static/click-logo-sidebar.png\"\nhtml_title = f\"Click Documentation ({version})\"\nhtml_show_sourcelink = False\n", "path": "docs/conf.py"}], "after_files": [{"content": "from pallets_sphinx_themes import get_version\nfrom pallets_sphinx_themes import ProjectLink\n\n# Project --------------------------------------------------------------\n\nproject = \"Click\"\ncopyright = \"2014 Pallets\"\nauthor = \"Pallets\"\nrelease, version = get_version(\"Click\")\n\n# General --------------------------------------------------------------\n\ndefault_role = \"code\"\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.extlinks\",\n \"sphinx.ext.intersphinx\",\n \"sphinx_tabs.tabs\",\n \"sphinxcontrib.log_cabinet\",\n \"pallets_sphinx_themes\",\n]\nautodoc_member_order = \"bysource\"\nautodoc_typehints = \"description\"\nautodoc_preserve_defaults = True\nextlinks = {\n \"issue\": (\"https://github.com/pallets/click/issues/%s\", \"#%s\"),\n \"pr\": (\"https://github.com/pallets/click/pull/%s\", \"#%s\"),\n}\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n}\n\n# HTML -----------------------------------------------------------------\n\nhtml_theme = \"click\"\nhtml_theme_options = {\"index_sidebar_logo\": False}\nhtml_context = {\n \"project_links\": [\n ProjectLink(\"Donate\", \"https://palletsprojects.com/donate\"),\n ProjectLink(\"PyPI Releases\", \"https://pypi.org/project/click/\"),\n ProjectLink(\"Source Code\", \"https://github.com/pallets/click/\"),\n ProjectLink(\"Issue Tracker\", \"https://github.com/pallets/click/issues/\"),\n ProjectLink(\"Chat\", \"https://discord.gg/pallets\"),\n ]\n}\nhtml_sidebars = {\n \"index\": [\"project.html\", \"localtoc.html\", \"searchbox.html\", \"ethicalads.html\"],\n \"**\": [\"localtoc.html\", \"relations.html\", \"searchbox.html\", \"ethicalads.html\"],\n}\nsinglehtml_sidebars = {\"index\": [\"project.html\", \"localtoc.html\", \"ethicalads.html\"]}\nhtml_static_path = [\"_static\"]\nhtml_favicon = \"_static/click-icon.png\"\nhtml_logo = \"_static/click-logo-sidebar.png\"\nhtml_title = f\"Click Documentation ({version})\"\nhtml_show_sourcelink = False\n", "path": "docs/conf.py"}]} |
gh_patches_debug_1527 | rasdani/github-patches | git_diff | python__mypy-3593 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typing for @contextmanager doesn't play well with generic functions
```
from contextlib import contextmanager
from typing import TypeVar, Iterator
_T = TypeVar('_T')
@contextmanager
def yield_id(item):
# type: (_T) -> Iterator[_T]
yield item
with yield_id(1):
pass
```
... results in...
`example.py:11: error: Argument 1 to "yield_id" has incompatible type "int"; expected "_T"`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mypy/plugin.py`
Content:
```
1 """Plugin system for extending mypy."""
2
3 from abc import abstractmethod
4 from typing import Callable, List, Tuple, Optional, NamedTuple, TypeVar
5
6 from mypy.nodes import Expression, StrExpr, IntExpr, UnaryExpr, Context
7 from mypy.types import (
8 Type, Instance, CallableType, TypedDictType, UnionType, NoneTyp, FunctionLike, TypeVarType,
9 AnyType, TypeList, UnboundType
10 )
11 from mypy.messages import MessageBuilder
12 from mypy.options import Options
13
14
15 class AnalyzerPluginInterface:
16 """Interface for accessing semantic analyzer functionality in plugins."""
17
18 @abstractmethod
19 def fail(self, msg: str, ctx: Context) -> None:
20 raise NotImplementedError
21
22 @abstractmethod
23 def named_type(self, name: str, args: List[Type]) -> Instance:
24 raise NotImplementedError
25
26 @abstractmethod
27 def analyze_type(self, typ: Type) -> Type:
28 raise NotImplementedError
29
30 @abstractmethod
31 def analyze_callable_args(self, arglist: TypeList) -> Optional[Tuple[List[Type],
32 List[int],
33 List[Optional[str]]]]:
34 raise NotImplementedError
35
36
37 # A context for a hook that semantically analyzes an unbound type.
38 AnalyzeTypeContext = NamedTuple(
39 'AnalyzeTypeContext', [
40 ('type', UnboundType), # Type to analyze
41 ('context', Context),
42 ('api', AnalyzerPluginInterface)])
43
44
45 class CheckerPluginInterface:
46 """Interface for accessing type checker functionality in plugins."""
47
48 msg = None # type: MessageBuilder
49
50 @abstractmethod
51 def named_generic_type(self, name: str, args: List[Type]) -> Instance:
52 raise NotImplementedError
53
54
55 # A context for a function hook that infers the return type of a function with
56 # a special signature.
57 #
58 # A no-op callback would just return the inferred return type, but a useful
59 # callback at least sometimes can infer a more precise type.
60 FunctionContext = NamedTuple(
61 'FunctionContext', [
62 ('arg_types', List[List[Type]]), # List of actual caller types for each formal argument
63 ('default_return_type', Type), # Return type inferred from signature
64 ('args', List[List[Expression]]), # Actual expressions for each formal argument
65 ('context', Context),
66 ('api', CheckerPluginInterface)])
67
68 # A context for a method signature hook that infers a better signature for a
69 # method. Note that argument types aren't available yet. If you need them,
70 # you have to use a method hook instead.
71 MethodSigContext = NamedTuple(
72 'MethodSigContext', [
73 ('type', Type), # Base object type for method call
74 ('args', List[List[Expression]]), # Actual expressions for each formal argument
75 ('default_signature', CallableType), # Original signature of the method
76 ('context', Context),
77 ('api', CheckerPluginInterface)])
78
79 # A context for a method hook that infers the return type of a method with a
80 # special signature.
81 #
82 # This is very similar to FunctionContext (only differences are documented).
83 MethodContext = NamedTuple(
84 'MethodContext', [
85 ('type', Type), # Base object type for method call
86 ('arg_types', List[List[Type]]),
87 ('default_return_type', Type),
88 ('args', List[List[Expression]]),
89 ('context', Context),
90 ('api', CheckerPluginInterface)])
91
92 # A context for an attribute type hook that infers the type of an attribute.
93 AttributeContext = NamedTuple(
94 'AttributeContext', [
95 ('type', Type), # Type of object with attribute
96 ('default_attr_type', Type), # Original attribute type
97 ('context', Context),
98 ('api', CheckerPluginInterface)])
99
100
101 class Plugin:
102 """Base class of all type checker plugins.
103
104 This defines a no-op plugin. Subclasses can override some methods to
105 provide some actual functionality.
106
107 All get_ methods are treated as pure functions (you should assume that
108 results might be cached).
109
110 Look at the comments of various *Context objects for descriptions of
111 various hooks.
112 """
113
114 def __init__(self, options: Options) -> None:
115 self.options = options
116 self.python_version = options.python_version
117
118 def get_type_analyze_hook(self, fullname: str
119 ) -> Optional[Callable[[AnalyzeTypeContext], Type]]:
120 return None
121
122 def get_function_hook(self, fullname: str
123 ) -> Optional[Callable[[FunctionContext], Type]]:
124 return None
125
126 def get_method_signature_hook(self, fullname: str
127 ) -> Optional[Callable[[MethodSigContext], CallableType]]:
128 return None
129
130 def get_method_hook(self, fullname: str
131 ) -> Optional[Callable[[MethodContext], Type]]:
132 return None
133
134 def get_attribute_hook(self, fullname: str
135 ) -> Optional[Callable[[AttributeContext], Type]]:
136 return None
137
138 # TODO: metaclass / class decorator hook
139
140
141 T = TypeVar('T')
142
143
144 class ChainedPlugin(Plugin):
145 """A plugin that represents a sequence of chained plugins.
146
147 Each lookup method returns the hook for the first plugin that
148 reports a match.
149
150 This class should not be subclassed -- use Plugin as the base class
151 for all plugins.
152 """
153
154 # TODO: Support caching of lookup results (through a LRU cache, for example).
155
156 def __init__(self, options: Options, plugins: List[Plugin]) -> None:
157 """Initialize chained plugin.
158
159 Assume that the child plugins aren't mutated (results may be cached).
160 """
161 super().__init__(options)
162 self._plugins = plugins
163
164 def get_type_analyze_hook(self, fullname: str
165 ) -> Optional[Callable[[AnalyzeTypeContext], Type]]:
166 return self._find_hook(lambda plugin: plugin.get_type_analyze_hook(fullname))
167
168 def get_function_hook(self, fullname: str
169 ) -> Optional[Callable[[FunctionContext], Type]]:
170 return self._find_hook(lambda plugin: plugin.get_function_hook(fullname))
171
172 def get_method_signature_hook(self, fullname: str
173 ) -> Optional[Callable[[MethodSigContext], CallableType]]:
174 return self._find_hook(lambda plugin: plugin.get_method_signature_hook(fullname))
175
176 def get_method_hook(self, fullname: str
177 ) -> Optional[Callable[[MethodContext], Type]]:
178 return self._find_hook(lambda plugin: plugin.get_method_hook(fullname))
179
180 def get_attribute_hook(self, fullname: str
181 ) -> Optional[Callable[[AttributeContext], Type]]:
182 return self._find_hook(lambda plugin: plugin.get_attribute_hook(fullname))
183
184 def _find_hook(self, lookup: Callable[[Plugin], T]) -> Optional[T]:
185 for plugin in self._plugins:
186 hook = lookup(plugin)
187 if hook:
188 return hook
189 return None
190
191
192 class DefaultPlugin(Plugin):
193 """Type checker plugin that is enabled by default."""
194
195 def get_function_hook(self, fullname: str
196 ) -> Optional[Callable[[FunctionContext], Type]]:
197 if fullname == 'contextlib.contextmanager':
198 return contextmanager_callback
199 elif fullname == 'builtins.open' and self.python_version[0] == 3:
200 return open_callback
201 return None
202
203 def get_method_signature_hook(self, fullname: str
204 ) -> Optional[Callable[[MethodSigContext], CallableType]]:
205 if fullname == 'typing.Mapping.get':
206 return typed_dict_get_signature_callback
207 return None
208
209 def get_method_hook(self, fullname: str
210 ) -> Optional[Callable[[MethodContext], Type]]:
211 if fullname == 'typing.Mapping.get':
212 return typed_dict_get_callback
213 elif fullname == 'builtins.int.__pow__':
214 return int_pow_callback
215 return None
216
217
218 def open_callback(ctx: FunctionContext) -> Type:
219 """Infer a better return type for 'open'.
220
221 Infer TextIO or BinaryIO as the return value if the mode argument is not
222 given or is a literal.
223 """
224 mode = None
225 if not ctx.arg_types or len(ctx.arg_types[1]) != 1:
226 mode = 'r'
227 elif isinstance(ctx.args[1][0], StrExpr):
228 mode = ctx.args[1][0].value
229 if mode is not None:
230 assert isinstance(ctx.default_return_type, Instance)
231 if 'b' in mode:
232 return ctx.api.named_generic_type('typing.BinaryIO', [])
233 else:
234 return ctx.api.named_generic_type('typing.TextIO', [])
235 return ctx.default_return_type
236
237
238 def contextmanager_callback(ctx: FunctionContext) -> Type:
239 """Infer a better return type for 'contextlib.contextmanager'."""
240 # Be defensive, just in case.
241 if ctx.arg_types and len(ctx.arg_types[0]) == 1:
242 arg_type = ctx.arg_types[0][0]
243 if (isinstance(arg_type, CallableType)
244 and isinstance(ctx.default_return_type, CallableType)):
245 # The stub signature doesn't preserve information about arguments so
246 # add them back here.
247 return ctx.default_return_type.copy_modified(
248 arg_types=arg_type.arg_types,
249 arg_kinds=arg_type.arg_kinds,
250 arg_names=arg_type.arg_names)
251 return ctx.default_return_type
252
253
254 def typed_dict_get_signature_callback(ctx: MethodSigContext) -> CallableType:
255 """Try to infer a better signature type for TypedDict.get.
256
257 This is used to get better type context for the second argument that
258 depends on a TypedDict value type.
259 """
260 signature = ctx.default_signature
261 if (isinstance(ctx.type, TypedDictType)
262 and len(ctx.args) == 2
263 and len(ctx.args[0]) == 1
264 and isinstance(ctx.args[0][0], StrExpr)
265 and len(signature.arg_types) == 2
266 and len(signature.variables) == 1):
267 key = ctx.args[0][0].value
268 value_type = ctx.type.items.get(key)
269 if value_type:
270 # Tweak the signature to include the value type as context. It's
271 # only needed for type inference since there's a union with a type
272 # variable that accepts everything.
273 tv = TypeVarType(signature.variables[0])
274 return signature.copy_modified(
275 arg_types=[signature.arg_types[0],
276 UnionType.make_simplified_union([value_type, tv])])
277 return signature
278
279
280 def typed_dict_get_callback(ctx: MethodContext) -> Type:
281 """Infer a precise return type for TypedDict.get with literal first argument."""
282 if (isinstance(ctx.type, TypedDictType)
283 and len(ctx.arg_types) >= 1
284 and len(ctx.arg_types[0]) == 1):
285 if isinstance(ctx.args[0][0], StrExpr):
286 key = ctx.args[0][0].value
287 value_type = ctx.type.items.get(key)
288 if value_type:
289 if len(ctx.arg_types) == 1:
290 return UnionType.make_simplified_union([value_type, NoneTyp()])
291 elif len(ctx.arg_types) == 2 and len(ctx.arg_types[1]) == 1:
292 return UnionType.make_simplified_union([value_type, ctx.arg_types[1][0]])
293 else:
294 ctx.api.msg.typeddict_item_name_not_found(ctx.type, key, ctx.context)
295 return AnyType()
296 return ctx.default_return_type
297
298
299 def int_pow_callback(ctx: MethodContext) -> Type:
300 """Infer a more precise return type for int.__pow__."""
301 if (len(ctx.arg_types) == 1
302 and len(ctx.arg_types[0]) == 1):
303 arg = ctx.args[0][0]
304 if isinstance(arg, IntExpr):
305 exponent = arg.value
306 elif isinstance(arg, UnaryExpr) and arg.op == '-' and isinstance(arg.expr, IntExpr):
307 exponent = -arg.expr.value
308 else:
309 # Right operand not an int literal or a negated literal -- give up.
310 return ctx.default_return_type
311 if exponent >= 0:
312 return ctx.api.named_generic_type('builtins.int', [])
313 else:
314 return ctx.api.named_generic_type('builtins.float', [])
315 return ctx.default_return_type
316
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mypy/plugin.py b/mypy/plugin.py
--- a/mypy/plugin.py
+++ b/mypy/plugin.py
@@ -247,7 +247,9 @@
return ctx.default_return_type.copy_modified(
arg_types=arg_type.arg_types,
arg_kinds=arg_type.arg_kinds,
- arg_names=arg_type.arg_names)
+ arg_names=arg_type.arg_names,
+ variables=arg_type.variables,
+ is_ellipsis_args=arg_type.is_ellipsis_args)
return ctx.default_return_type
| {"golden_diff": "diff --git a/mypy/plugin.py b/mypy/plugin.py\n--- a/mypy/plugin.py\n+++ b/mypy/plugin.py\n@@ -247,7 +247,9 @@\n return ctx.default_return_type.copy_modified(\n arg_types=arg_type.arg_types,\n arg_kinds=arg_type.arg_kinds,\n- arg_names=arg_type.arg_names)\n+ arg_names=arg_type.arg_names,\n+ variables=arg_type.variables,\n+ is_ellipsis_args=arg_type.is_ellipsis_args)\n return ctx.default_return_type\n", "issue": "Typing for @contextmanager doesn't play well with generic functions\n```\r\nfrom contextlib import contextmanager\r\nfrom typing import TypeVar, Iterator\r\n\r\n_T = TypeVar('_T')\r\n\r\n@contextmanager\r\ndef yield_id(item):\r\n # type: (_T) -> Iterator[_T]\r\n yield item\r\n\r\nwith yield_id(1):\r\n pass\r\n```\r\n\r\n... results in...\r\n\r\n`example.py:11: error: Argument 1 to \"yield_id\" has incompatible type \"int\"; expected \"_T\"`\r\n\n", "before_files": [{"content": "\"\"\"Plugin system for extending mypy.\"\"\"\n\nfrom abc import abstractmethod\nfrom typing import Callable, List, Tuple, Optional, NamedTuple, TypeVar\n\nfrom mypy.nodes import Expression, StrExpr, IntExpr, UnaryExpr, Context\nfrom mypy.types import (\n Type, Instance, CallableType, TypedDictType, UnionType, NoneTyp, FunctionLike, TypeVarType,\n AnyType, TypeList, UnboundType\n)\nfrom mypy.messages import MessageBuilder\nfrom mypy.options import Options\n\n\nclass AnalyzerPluginInterface:\n \"\"\"Interface for accessing semantic analyzer functionality in plugins.\"\"\"\n\n @abstractmethod\n def fail(self, msg: str, ctx: Context) -> None:\n raise NotImplementedError\n\n @abstractmethod\n def named_type(self, name: str, args: List[Type]) -> Instance:\n raise NotImplementedError\n\n @abstractmethod\n def analyze_type(self, typ: Type) -> Type:\n raise NotImplementedError\n\n @abstractmethod\n def analyze_callable_args(self, arglist: TypeList) -> Optional[Tuple[List[Type],\n List[int],\n List[Optional[str]]]]:\n raise NotImplementedError\n\n\n# A context for a hook that semantically analyzes an unbound type.\nAnalyzeTypeContext = NamedTuple(\n 'AnalyzeTypeContext', [\n ('type', UnboundType), # Type to analyze\n ('context', Context),\n ('api', AnalyzerPluginInterface)])\n\n\nclass CheckerPluginInterface:\n \"\"\"Interface for accessing type checker functionality in plugins.\"\"\"\n\n msg = None # type: MessageBuilder\n\n @abstractmethod\n def named_generic_type(self, name: str, args: List[Type]) -> Instance:\n raise NotImplementedError\n\n\n# A context for a function hook that infers the return type of a function with\n# a special signature.\n#\n# A no-op callback would just return the inferred return type, but a useful\n# callback at least sometimes can infer a more precise type.\nFunctionContext = NamedTuple(\n 'FunctionContext', [\n ('arg_types', List[List[Type]]), # List of actual caller types for each formal argument\n ('default_return_type', Type), # Return type inferred from signature\n ('args', List[List[Expression]]), # Actual expressions for each formal argument\n ('context', Context),\n ('api', CheckerPluginInterface)])\n\n# A context for a method signature hook that infers a better signature for a\n# method. Note that argument types aren't available yet. If you need them,\n# you have to use a method hook instead.\nMethodSigContext = NamedTuple(\n 'MethodSigContext', [\n ('type', Type), # Base object type for method call\n ('args', List[List[Expression]]), # Actual expressions for each formal argument\n ('default_signature', CallableType), # Original signature of the method\n ('context', Context),\n ('api', CheckerPluginInterface)])\n\n# A context for a method hook that infers the return type of a method with a\n# special signature.\n#\n# This is very similar to FunctionContext (only differences are documented).\nMethodContext = NamedTuple(\n 'MethodContext', [\n ('type', Type), # Base object type for method call\n ('arg_types', List[List[Type]]),\n ('default_return_type', Type),\n ('args', List[List[Expression]]),\n ('context', Context),\n ('api', CheckerPluginInterface)])\n\n# A context for an attribute type hook that infers the type of an attribute.\nAttributeContext = NamedTuple(\n 'AttributeContext', [\n ('type', Type), # Type of object with attribute\n ('default_attr_type', Type), # Original attribute type\n ('context', Context),\n ('api', CheckerPluginInterface)])\n\n\nclass Plugin:\n \"\"\"Base class of all type checker plugins.\n\n This defines a no-op plugin. Subclasses can override some methods to\n provide some actual functionality.\n\n All get_ methods are treated as pure functions (you should assume that\n results might be cached).\n\n Look at the comments of various *Context objects for descriptions of\n various hooks.\n \"\"\"\n\n def __init__(self, options: Options) -> None:\n self.options = options\n self.python_version = options.python_version\n\n def get_type_analyze_hook(self, fullname: str\n ) -> Optional[Callable[[AnalyzeTypeContext], Type]]:\n return None\n\n def get_function_hook(self, fullname: str\n ) -> Optional[Callable[[FunctionContext], Type]]:\n return None\n\n def get_method_signature_hook(self, fullname: str\n ) -> Optional[Callable[[MethodSigContext], CallableType]]:\n return None\n\n def get_method_hook(self, fullname: str\n ) -> Optional[Callable[[MethodContext], Type]]:\n return None\n\n def get_attribute_hook(self, fullname: str\n ) -> Optional[Callable[[AttributeContext], Type]]:\n return None\n\n # TODO: metaclass / class decorator hook\n\n\nT = TypeVar('T')\n\n\nclass ChainedPlugin(Plugin):\n \"\"\"A plugin that represents a sequence of chained plugins.\n\n Each lookup method returns the hook for the first plugin that\n reports a match.\n\n This class should not be subclassed -- use Plugin as the base class\n for all plugins.\n \"\"\"\n\n # TODO: Support caching of lookup results (through a LRU cache, for example).\n\n def __init__(self, options: Options, plugins: List[Plugin]) -> None:\n \"\"\"Initialize chained plugin.\n\n Assume that the child plugins aren't mutated (results may be cached).\n \"\"\"\n super().__init__(options)\n self._plugins = plugins\n\n def get_type_analyze_hook(self, fullname: str\n ) -> Optional[Callable[[AnalyzeTypeContext], Type]]:\n return self._find_hook(lambda plugin: plugin.get_type_analyze_hook(fullname))\n\n def get_function_hook(self, fullname: str\n ) -> Optional[Callable[[FunctionContext], Type]]:\n return self._find_hook(lambda plugin: plugin.get_function_hook(fullname))\n\n def get_method_signature_hook(self, fullname: str\n ) -> Optional[Callable[[MethodSigContext], CallableType]]:\n return self._find_hook(lambda plugin: plugin.get_method_signature_hook(fullname))\n\n def get_method_hook(self, fullname: str\n ) -> Optional[Callable[[MethodContext], Type]]:\n return self._find_hook(lambda plugin: plugin.get_method_hook(fullname))\n\n def get_attribute_hook(self, fullname: str\n ) -> Optional[Callable[[AttributeContext], Type]]:\n return self._find_hook(lambda plugin: plugin.get_attribute_hook(fullname))\n\n def _find_hook(self, lookup: Callable[[Plugin], T]) -> Optional[T]:\n for plugin in self._plugins:\n hook = lookup(plugin)\n if hook:\n return hook\n return None\n\n\nclass DefaultPlugin(Plugin):\n \"\"\"Type checker plugin that is enabled by default.\"\"\"\n\n def get_function_hook(self, fullname: str\n ) -> Optional[Callable[[FunctionContext], Type]]:\n if fullname == 'contextlib.contextmanager':\n return contextmanager_callback\n elif fullname == 'builtins.open' and self.python_version[0] == 3:\n return open_callback\n return None\n\n def get_method_signature_hook(self, fullname: str\n ) -> Optional[Callable[[MethodSigContext], CallableType]]:\n if fullname == 'typing.Mapping.get':\n return typed_dict_get_signature_callback\n return None\n\n def get_method_hook(self, fullname: str\n ) -> Optional[Callable[[MethodContext], Type]]:\n if fullname == 'typing.Mapping.get':\n return typed_dict_get_callback\n elif fullname == 'builtins.int.__pow__':\n return int_pow_callback\n return None\n\n\ndef open_callback(ctx: FunctionContext) -> Type:\n \"\"\"Infer a better return type for 'open'.\n\n Infer TextIO or BinaryIO as the return value if the mode argument is not\n given or is a literal.\n \"\"\"\n mode = None\n if not ctx.arg_types or len(ctx.arg_types[1]) != 1:\n mode = 'r'\n elif isinstance(ctx.args[1][0], StrExpr):\n mode = ctx.args[1][0].value\n if mode is not None:\n assert isinstance(ctx.default_return_type, Instance)\n if 'b' in mode:\n return ctx.api.named_generic_type('typing.BinaryIO', [])\n else:\n return ctx.api.named_generic_type('typing.TextIO', [])\n return ctx.default_return_type\n\n\ndef contextmanager_callback(ctx: FunctionContext) -> Type:\n \"\"\"Infer a better return type for 'contextlib.contextmanager'.\"\"\"\n # Be defensive, just in case.\n if ctx.arg_types and len(ctx.arg_types[0]) == 1:\n arg_type = ctx.arg_types[0][0]\n if (isinstance(arg_type, CallableType)\n and isinstance(ctx.default_return_type, CallableType)):\n # The stub signature doesn't preserve information about arguments so\n # add them back here.\n return ctx.default_return_type.copy_modified(\n arg_types=arg_type.arg_types,\n arg_kinds=arg_type.arg_kinds,\n arg_names=arg_type.arg_names)\n return ctx.default_return_type\n\n\ndef typed_dict_get_signature_callback(ctx: MethodSigContext) -> CallableType:\n \"\"\"Try to infer a better signature type for TypedDict.get.\n\n This is used to get better type context for the second argument that\n depends on a TypedDict value type.\n \"\"\"\n signature = ctx.default_signature\n if (isinstance(ctx.type, TypedDictType)\n and len(ctx.args) == 2\n and len(ctx.args[0]) == 1\n and isinstance(ctx.args[0][0], StrExpr)\n and len(signature.arg_types) == 2\n and len(signature.variables) == 1):\n key = ctx.args[0][0].value\n value_type = ctx.type.items.get(key)\n if value_type:\n # Tweak the signature to include the value type as context. It's\n # only needed for type inference since there's a union with a type\n # variable that accepts everything.\n tv = TypeVarType(signature.variables[0])\n return signature.copy_modified(\n arg_types=[signature.arg_types[0],\n UnionType.make_simplified_union([value_type, tv])])\n return signature\n\n\ndef typed_dict_get_callback(ctx: MethodContext) -> Type:\n \"\"\"Infer a precise return type for TypedDict.get with literal first argument.\"\"\"\n if (isinstance(ctx.type, TypedDictType)\n and len(ctx.arg_types) >= 1\n and len(ctx.arg_types[0]) == 1):\n if isinstance(ctx.args[0][0], StrExpr):\n key = ctx.args[0][0].value\n value_type = ctx.type.items.get(key)\n if value_type:\n if len(ctx.arg_types) == 1:\n return UnionType.make_simplified_union([value_type, NoneTyp()])\n elif len(ctx.arg_types) == 2 and len(ctx.arg_types[1]) == 1:\n return UnionType.make_simplified_union([value_type, ctx.arg_types[1][0]])\n else:\n ctx.api.msg.typeddict_item_name_not_found(ctx.type, key, ctx.context)\n return AnyType()\n return ctx.default_return_type\n\n\ndef int_pow_callback(ctx: MethodContext) -> Type:\n \"\"\"Infer a more precise return type for int.__pow__.\"\"\"\n if (len(ctx.arg_types) == 1\n and len(ctx.arg_types[0]) == 1):\n arg = ctx.args[0][0]\n if isinstance(arg, IntExpr):\n exponent = arg.value\n elif isinstance(arg, UnaryExpr) and arg.op == '-' and isinstance(arg.expr, IntExpr):\n exponent = -arg.expr.value\n else:\n # Right operand not an int literal or a negated literal -- give up.\n return ctx.default_return_type\n if exponent >= 0:\n return ctx.api.named_generic_type('builtins.int', [])\n else:\n return ctx.api.named_generic_type('builtins.float', [])\n return ctx.default_return_type\n", "path": "mypy/plugin.py"}], "after_files": [{"content": "\"\"\"Plugin system for extending mypy.\"\"\"\n\nfrom abc import abstractmethod\nfrom typing import Callable, List, Tuple, Optional, NamedTuple, TypeVar\n\nfrom mypy.nodes import Expression, StrExpr, IntExpr, UnaryExpr, Context\nfrom mypy.types import (\n Type, Instance, CallableType, TypedDictType, UnionType, NoneTyp, FunctionLike, TypeVarType,\n AnyType, TypeList, UnboundType\n)\nfrom mypy.messages import MessageBuilder\nfrom mypy.options import Options\n\n\nclass AnalyzerPluginInterface:\n \"\"\"Interface for accessing semantic analyzer functionality in plugins.\"\"\"\n\n @abstractmethod\n def fail(self, msg: str, ctx: Context) -> None:\n raise NotImplementedError\n\n @abstractmethod\n def named_type(self, name: str, args: List[Type]) -> Instance:\n raise NotImplementedError\n\n @abstractmethod\n def analyze_type(self, typ: Type) -> Type:\n raise NotImplementedError\n\n @abstractmethod\n def analyze_callable_args(self, arglist: TypeList) -> Optional[Tuple[List[Type],\n List[int],\n List[Optional[str]]]]:\n raise NotImplementedError\n\n\n# A context for a hook that semantically analyzes an unbound type.\nAnalyzeTypeContext = NamedTuple(\n 'AnalyzeTypeContext', [\n ('type', UnboundType), # Type to analyze\n ('context', Context),\n ('api', AnalyzerPluginInterface)])\n\n\nclass CheckerPluginInterface:\n \"\"\"Interface for accessing type checker functionality in plugins.\"\"\"\n\n msg = None # type: MessageBuilder\n\n @abstractmethod\n def named_generic_type(self, name: str, args: List[Type]) -> Instance:\n raise NotImplementedError\n\n\n# A context for a function hook that infers the return type of a function with\n# a special signature.\n#\n# A no-op callback would just return the inferred return type, but a useful\n# callback at least sometimes can infer a more precise type.\nFunctionContext = NamedTuple(\n 'FunctionContext', [\n ('arg_types', List[List[Type]]), # List of actual caller types for each formal argument\n ('default_return_type', Type), # Return type inferred from signature\n ('args', List[List[Expression]]), # Actual expressions for each formal argument\n ('context', Context),\n ('api', CheckerPluginInterface)])\n\n# A context for a method signature hook that infers a better signature for a\n# method. Note that argument types aren't available yet. If you need them,\n# you have to use a method hook instead.\nMethodSigContext = NamedTuple(\n 'MethodSigContext', [\n ('type', Type), # Base object type for method call\n ('args', List[List[Expression]]), # Actual expressions for each formal argument\n ('default_signature', CallableType), # Original signature of the method\n ('context', Context),\n ('api', CheckerPluginInterface)])\n\n# A context for a method hook that infers the return type of a method with a\n# special signature.\n#\n# This is very similar to FunctionContext (only differences are documented).\nMethodContext = NamedTuple(\n 'MethodContext', [\n ('type', Type), # Base object type for method call\n ('arg_types', List[List[Type]]),\n ('default_return_type', Type),\n ('args', List[List[Expression]]),\n ('context', Context),\n ('api', CheckerPluginInterface)])\n\n# A context for an attribute type hook that infers the type of an attribute.\nAttributeContext = NamedTuple(\n 'AttributeContext', [\n ('type', Type), # Type of object with attribute\n ('default_attr_type', Type), # Original attribute type\n ('context', Context),\n ('api', CheckerPluginInterface)])\n\n\nclass Plugin:\n \"\"\"Base class of all type checker plugins.\n\n This defines a no-op plugin. Subclasses can override some methods to\n provide some actual functionality.\n\n All get_ methods are treated as pure functions (you should assume that\n results might be cached).\n\n Look at the comments of various *Context objects for descriptions of\n various hooks.\n \"\"\"\n\n def __init__(self, options: Options) -> None:\n self.options = options\n self.python_version = options.python_version\n\n def get_type_analyze_hook(self, fullname: str\n ) -> Optional[Callable[[AnalyzeTypeContext], Type]]:\n return None\n\n def get_function_hook(self, fullname: str\n ) -> Optional[Callable[[FunctionContext], Type]]:\n return None\n\n def get_method_signature_hook(self, fullname: str\n ) -> Optional[Callable[[MethodSigContext], CallableType]]:\n return None\n\n def get_method_hook(self, fullname: str\n ) -> Optional[Callable[[MethodContext], Type]]:\n return None\n\n def get_attribute_hook(self, fullname: str\n ) -> Optional[Callable[[AttributeContext], Type]]:\n return None\n\n # TODO: metaclass / class decorator hook\n\n\nT = TypeVar('T')\n\n\nclass ChainedPlugin(Plugin):\n \"\"\"A plugin that represents a sequence of chained plugins.\n\n Each lookup method returns the hook for the first plugin that\n reports a match.\n\n This class should not be subclassed -- use Plugin as the base class\n for all plugins.\n \"\"\"\n\n # TODO: Support caching of lookup results (through a LRU cache, for example).\n\n def __init__(self, options: Options, plugins: List[Plugin]) -> None:\n \"\"\"Initialize chained plugin.\n\n Assume that the child plugins aren't mutated (results may be cached).\n \"\"\"\n super().__init__(options)\n self._plugins = plugins\n\n def get_type_analyze_hook(self, fullname: str\n ) -> Optional[Callable[[AnalyzeTypeContext], Type]]:\n return self._find_hook(lambda plugin: plugin.get_type_analyze_hook(fullname))\n\n def get_function_hook(self, fullname: str\n ) -> Optional[Callable[[FunctionContext], Type]]:\n return self._find_hook(lambda plugin: plugin.get_function_hook(fullname))\n\n def get_method_signature_hook(self, fullname: str\n ) -> Optional[Callable[[MethodSigContext], CallableType]]:\n return self._find_hook(lambda plugin: plugin.get_method_signature_hook(fullname))\n\n def get_method_hook(self, fullname: str\n ) -> Optional[Callable[[MethodContext], Type]]:\n return self._find_hook(lambda plugin: plugin.get_method_hook(fullname))\n\n def get_attribute_hook(self, fullname: str\n ) -> Optional[Callable[[AttributeContext], Type]]:\n return self._find_hook(lambda plugin: plugin.get_attribute_hook(fullname))\n\n def _find_hook(self, lookup: Callable[[Plugin], T]) -> Optional[T]:\n for plugin in self._plugins:\n hook = lookup(plugin)\n if hook:\n return hook\n return None\n\n\nclass DefaultPlugin(Plugin):\n \"\"\"Type checker plugin that is enabled by default.\"\"\"\n\n def get_function_hook(self, fullname: str\n ) -> Optional[Callable[[FunctionContext], Type]]:\n if fullname == 'contextlib.contextmanager':\n return contextmanager_callback\n elif fullname == 'builtins.open' and self.python_version[0] == 3:\n return open_callback\n return None\n\n def get_method_signature_hook(self, fullname: str\n ) -> Optional[Callable[[MethodSigContext], CallableType]]:\n if fullname == 'typing.Mapping.get':\n return typed_dict_get_signature_callback\n return None\n\n def get_method_hook(self, fullname: str\n ) -> Optional[Callable[[MethodContext], Type]]:\n if fullname == 'typing.Mapping.get':\n return typed_dict_get_callback\n elif fullname == 'builtins.int.__pow__':\n return int_pow_callback\n return None\n\n\ndef open_callback(ctx: FunctionContext) -> Type:\n \"\"\"Infer a better return type for 'open'.\n\n Infer TextIO or BinaryIO as the return value if the mode argument is not\n given or is a literal.\n \"\"\"\n mode = None\n if not ctx.arg_types or len(ctx.arg_types[1]) != 1:\n mode = 'r'\n elif isinstance(ctx.args[1][0], StrExpr):\n mode = ctx.args[1][0].value\n if mode is not None:\n assert isinstance(ctx.default_return_type, Instance)\n if 'b' in mode:\n return ctx.api.named_generic_type('typing.BinaryIO', [])\n else:\n return ctx.api.named_generic_type('typing.TextIO', [])\n return ctx.default_return_type\n\n\ndef contextmanager_callback(ctx: FunctionContext) -> Type:\n \"\"\"Infer a better return type for 'contextlib.contextmanager'.\"\"\"\n # Be defensive, just in case.\n if ctx.arg_types and len(ctx.arg_types[0]) == 1:\n arg_type = ctx.arg_types[0][0]\n if (isinstance(arg_type, CallableType)\n and isinstance(ctx.default_return_type, CallableType)):\n # The stub signature doesn't preserve information about arguments so\n # add them back here.\n return ctx.default_return_type.copy_modified(\n arg_types=arg_type.arg_types,\n arg_kinds=arg_type.arg_kinds,\n arg_names=arg_type.arg_names,\n variables=arg_type.variables,\n is_ellipsis_args=arg_type.is_ellipsis_args)\n return ctx.default_return_type\n\n\ndef typed_dict_get_signature_callback(ctx: MethodSigContext) -> CallableType:\n \"\"\"Try to infer a better signature type for TypedDict.get.\n\n This is used to get better type context for the second argument that\n depends on a TypedDict value type.\n \"\"\"\n signature = ctx.default_signature\n if (isinstance(ctx.type, TypedDictType)\n and len(ctx.args) == 2\n and len(ctx.args[0]) == 1\n and isinstance(ctx.args[0][0], StrExpr)\n and len(signature.arg_types) == 2\n and len(signature.variables) == 1):\n key = ctx.args[0][0].value\n value_type = ctx.type.items.get(key)\n if value_type:\n # Tweak the signature to include the value type as context. It's\n # only needed for type inference since there's a union with a type\n # variable that accepts everything.\n tv = TypeVarType(signature.variables[0])\n return signature.copy_modified(\n arg_types=[signature.arg_types[0],\n UnionType.make_simplified_union([value_type, tv])])\n return signature\n\n\ndef typed_dict_get_callback(ctx: MethodContext) -> Type:\n \"\"\"Infer a precise return type for TypedDict.get with literal first argument.\"\"\"\n if (isinstance(ctx.type, TypedDictType)\n and len(ctx.arg_types) >= 1\n and len(ctx.arg_types[0]) == 1):\n if isinstance(ctx.args[0][0], StrExpr):\n key = ctx.args[0][0].value\n value_type = ctx.type.items.get(key)\n if value_type:\n if len(ctx.arg_types) == 1:\n return UnionType.make_simplified_union([value_type, NoneTyp()])\n elif len(ctx.arg_types) == 2 and len(ctx.arg_types[1]) == 1:\n return UnionType.make_simplified_union([value_type, ctx.arg_types[1][0]])\n else:\n ctx.api.msg.typeddict_item_name_not_found(ctx.type, key, ctx.context)\n return AnyType()\n return ctx.default_return_type\n\n\ndef int_pow_callback(ctx: MethodContext) -> Type:\n \"\"\"Infer a more precise return type for int.__pow__.\"\"\"\n if (len(ctx.arg_types) == 1\n and len(ctx.arg_types[0]) == 1):\n arg = ctx.args[0][0]\n if isinstance(arg, IntExpr):\n exponent = arg.value\n elif isinstance(arg, UnaryExpr) and arg.op == '-' and isinstance(arg.expr, IntExpr):\n exponent = -arg.expr.value\n else:\n # Right operand not an int literal or a negated literal -- give up.\n return ctx.default_return_type\n if exponent >= 0:\n return ctx.api.named_generic_type('builtins.int', [])\n else:\n return ctx.api.named_generic_type('builtins.float', [])\n return ctx.default_return_type\n", "path": "mypy/plugin.py"}]} |
gh_patches_debug_1528 | rasdani/github-patches | git_diff | koxudaxi__datamodel-code-generator-1669 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Impossible to get the json schema of a json schema object
**Describe the bug**
```python
from datamodel_code_generator.parser.jsonschema import JsonSchemaObject
if __name__ == "__main__":
print(JsonSchemaObject.model_json_schema())
```
Raises
```
pydantic.errors.PydanticInvalidForJsonSchema: Cannot generate a JsonSchema for core_schema.PlainValidatorFunctionSchema ({'type': 'no-info', 'function': <bound method UnionIntFloat.validate of <class 'datamodel_code_generator.types.UnionIntFloat'>>})
```
**To Reproduce**
See code above
**Expected behavior**
The json schema of a json schema object.
**Version:**
- OS: Linux 6.2.0
- Python version: 3.11.4
- datamodel-code-generator version: 0.22.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `datamodel_code_generator/types.py`
Content:
```
1 import re
2 from abc import ABC, abstractmethod
3 from enum import Enum, auto
4 from functools import lru_cache
5 from itertools import chain
6 from typing import (
7 TYPE_CHECKING,
8 Any,
9 Callable,
10 ClassVar,
11 Dict,
12 FrozenSet,
13 Iterable,
14 Iterator,
15 List,
16 Optional,
17 Pattern,
18 Sequence,
19 Set,
20 Tuple,
21 Type,
22 TypeVar,
23 Union,
24 )
25
26 import pydantic
27 from packaging import version
28 from pydantic import (
29 StrictBool,
30 StrictInt,
31 StrictStr,
32 create_model,
33 )
34
35 from datamodel_code_generator.format import PythonVersion
36 from datamodel_code_generator.imports import (
37 IMPORT_ABC_MAPPING,
38 IMPORT_ABC_SEQUENCE,
39 IMPORT_ABC_SET,
40 IMPORT_DICT,
41 IMPORT_FROZEN_SET,
42 IMPORT_LIST,
43 IMPORT_LITERAL,
44 IMPORT_LITERAL_BACKPORT,
45 IMPORT_MAPPING,
46 IMPORT_OPTIONAL,
47 IMPORT_SEQUENCE,
48 IMPORT_SET,
49 IMPORT_UNION,
50 Import,
51 )
52 from datamodel_code_generator.reference import Reference, _BaseModel
53 from datamodel_code_generator.util import (
54 PYDANTIC_V2,
55 ConfigDict,
56 Protocol,
57 runtime_checkable,
58 )
59
60 if PYDANTIC_V2:
61 from pydantic import GetCoreSchemaHandler
62 from pydantic_core import core_schema
63
64 T = TypeVar('T')
65
66 OPTIONAL = 'Optional'
67 OPTIONAL_PREFIX = f'{OPTIONAL}['
68
69 UNION = 'Union'
70 UNION_PREFIX = f'{UNION}['
71 UNION_DELIMITER = ', '
72 UNION_PATTERN: Pattern[str] = re.compile(r'\s*,\s*')
73 UNION_OPERATOR_DELIMITER = ' | '
74 UNION_OPERATOR_PATTERN: Pattern[str] = re.compile(r'\s*\|\s*')
75 NONE = 'None'
76 ANY = 'Any'
77 LITERAL = 'Literal'
78 SEQUENCE = 'Sequence'
79 FROZEN_SET = 'FrozenSet'
80 MAPPING = 'Mapping'
81 DICT = 'Dict'
82 SET = 'Set'
83 LIST = 'List'
84 STANDARD_DICT = 'dict'
85 STANDARD_LIST = 'list'
86 STANDARD_SET = 'set'
87 STR = 'str'
88
89 NOT_REQUIRED = 'NotRequired'
90 NOT_REQUIRED_PREFIX = f'{NOT_REQUIRED}['
91
92
93 class StrictTypes(Enum):
94 str = 'str'
95 bytes = 'bytes'
96 int = 'int'
97 float = 'float'
98 bool = 'bool'
99
100
101 class UnionIntFloat:
102 def __init__(self, value: Union[int, float]) -> None:
103 self.value: Union[int, float] = value
104
105 def __int__(self) -> int:
106 return int(self.value)
107
108 def __float__(self) -> float:
109 return float(self.value)
110
111 def __str__(self) -> str:
112 return str(self.value)
113
114 @classmethod
115 def __get_validators__(cls) -> Iterator[Callable[[Any], Any]]:
116 yield cls.validate
117
118 @classmethod
119 def __get_pydantic_core_schema__(
120 cls, _source_type: Any, _handler: 'GetCoreSchemaHandler'
121 ) -> 'core_schema.CoreSchema':
122 from_int_schema = core_schema.chain_schema(
123 [
124 core_schema.union_schema(
125 [core_schema.int_schema(), core_schema.float_schema()]
126 ),
127 core_schema.no_info_plain_validator_function(cls.validate),
128 ]
129 )
130
131 return core_schema.json_or_python_schema(
132 json_schema=core_schema.no_info_plain_validator_function(cls.validate),
133 python_schema=core_schema.union_schema(
134 [
135 # check if it's an instance first before doing any further work
136 core_schema.is_instance_schema(UnionIntFloat),
137 from_int_schema,
138 ]
139 ),
140 serialization=core_schema.plain_serializer_function_ser_schema(
141 lambda instance: instance.value
142 ),
143 )
144
145 @classmethod
146 def validate(cls, v: Any) -> 'UnionIntFloat':
147 if isinstance(v, UnionIntFloat):
148 return v
149 elif not isinstance(v, (int, float)): # pragma: no cover
150 try:
151 int(v)
152 return cls(v)
153 except (TypeError, ValueError):
154 pass
155 try:
156 float(v)
157 return cls(v)
158 except (TypeError, ValueError):
159 pass
160
161 raise TypeError(f'{v} is not int or float')
162 return cls(v)
163
164
165 def chain_as_tuple(*iterables: Iterable[T]) -> Tuple[T, ...]:
166 return tuple(chain(*iterables))
167
168
169 @lru_cache()
170 def _remove_none_from_type(
171 type_: str, split_pattern: Pattern[str], delimiter: str
172 ) -> List[str]:
173 types: List[str] = []
174 split_type: str = ''
175 inner_count: int = 0
176 for part in re.split(split_pattern, type_):
177 if part == NONE:
178 continue
179 inner_count += part.count('[') - part.count(']')
180 if split_type:
181 split_type += delimiter
182 if inner_count == 0:
183 if split_type:
184 types.append(f'{split_type}{part}')
185 else:
186 types.append(part)
187 split_type = ''
188 continue
189 else:
190 split_type += part
191 return types
192
193
194 def _remove_none_from_union(type_: str, use_union_operator: bool) -> str:
195 if use_union_operator:
196 if not re.match(r'^\w+ | ', type_):
197 return type_
198 return UNION_OPERATOR_DELIMITER.join(
199 _remove_none_from_type(
200 type_, UNION_OPERATOR_PATTERN, UNION_OPERATOR_DELIMITER
201 )
202 )
203
204 if not type_.startswith(UNION_PREFIX):
205 return type_
206 inner_types = _remove_none_from_type(
207 type_[len(UNION_PREFIX) :][:-1], UNION_PATTERN, UNION_DELIMITER
208 )
209
210 if len(inner_types) == 1:
211 return inner_types[0]
212 return f'{UNION_PREFIX}{UNION_DELIMITER.join(inner_types)}]'
213
214
215 @lru_cache()
216 def get_optional_type(type_: str, use_union_operator: bool) -> str:
217 type_ = _remove_none_from_union(type_, use_union_operator)
218
219 if not type_ or type_ == NONE:
220 return NONE
221 if use_union_operator:
222 return f'{type_} | {NONE}'
223 return f'{OPTIONAL_PREFIX}{type_}]'
224
225
226 @runtime_checkable
227 class Modular(Protocol):
228 @property
229 def module_name(self) -> str:
230 raise NotImplementedError
231
232
233 @runtime_checkable
234 class Nullable(Protocol):
235 @property
236 def nullable(self) -> bool:
237 raise NotImplementedError
238
239
240 class DataType(_BaseModel):
241 if PYDANTIC_V2:
242 # TODO[pydantic]: The following keys were removed: `copy_on_model_validation`.
243 # Check https://docs.pydantic.dev/dev-v2/migration/#changes-to-config for more information.
244 model_config = ConfigDict(
245 extra='forbid',
246 revalidate_instances='never',
247 )
248 else:
249 if not TYPE_CHECKING:
250
251 @classmethod
252 def model_rebuild(cls) -> None:
253 cls.update_forward_refs()
254
255 class Config:
256 extra = 'forbid'
257 copy_on_model_validation = (
258 False
259 if version.parse(pydantic.VERSION) < version.parse('1.9.2')
260 else 'none'
261 )
262
263 type: Optional[str] = None
264 reference: Optional[Reference] = None
265 data_types: List['DataType'] = []
266 is_func: bool = False
267 kwargs: Optional[Dict[str, Any]] = None
268 import_: Optional[Import] = None
269 python_version: PythonVersion = PythonVersion.PY_37
270 is_optional: bool = False
271 is_dict: bool = False
272 is_list: bool = False
273 is_set: bool = False
274 is_custom_type: bool = False
275 literals: List[Union[StrictBool, StrictInt, StrictStr]] = []
276 use_standard_collections: bool = False
277 use_generic_container: bool = False
278 use_union_operator: bool = False
279 alias: Optional[str] = None
280 parent: Optional[Any] = None
281 children: List[Any] = []
282 strict: bool = False
283 dict_key: Optional['DataType'] = None
284
285 _exclude_fields: ClassVar[Set[str]] = {'parent', 'children'}
286 _pass_fields: ClassVar[Set[str]] = {'parent', 'children', 'data_types', 'reference'}
287
288 @classmethod
289 def from_import(
290 cls: Type['DataTypeT'],
291 import_: Import,
292 *,
293 is_optional: bool = False,
294 is_dict: bool = False,
295 is_list: bool = False,
296 is_set: bool = False,
297 is_custom_type: bool = False,
298 strict: bool = False,
299 kwargs: Optional[Dict[str, Any]] = None,
300 ) -> 'DataTypeT':
301 return cls(
302 type=import_.import_,
303 import_=import_,
304 is_optional=is_optional,
305 is_dict=is_dict,
306 is_list=is_list,
307 is_set=is_set,
308 is_func=True if kwargs else False,
309 is_custom_type=is_custom_type,
310 strict=strict,
311 kwargs=kwargs,
312 )
313
314 @property
315 def unresolved_types(self) -> FrozenSet[str]:
316 return frozenset(
317 {
318 t.reference.path
319 for data_types in self.data_types
320 for t in data_types.all_data_types
321 if t.reference
322 }
323 | ({self.reference.path} if self.reference else set())
324 )
325
326 def replace_reference(self, reference: Optional[Reference]) -> None:
327 if not self.reference: # pragma: no cover
328 raise Exception(
329 f"`{self.__class__.__name__}.replace_reference()` can't be called"
330 f' when `reference` field is empty.'
331 )
332 self_id = id(self)
333 self.reference.children = [
334 c for c in self.reference.children if id(c) != self_id
335 ]
336 self.reference = reference
337 if reference:
338 reference.children.append(self)
339
340 def remove_reference(self) -> None:
341 self.replace_reference(None)
342
343 @property
344 def module_name(self) -> Optional[str]:
345 if self.reference and isinstance(self.reference.source, Modular):
346 return self.reference.source.module_name
347 return None # pragma: no cover
348
349 @property
350 def full_name(self) -> str:
351 module_name = self.module_name
352 if module_name:
353 return f'{module_name}.{self.reference.short_name}' # type: ignore
354 return self.reference.short_name # type: ignore
355
356 @property
357 def all_data_types(self) -> Iterator['DataType']:
358 for data_type in self.data_types:
359 yield from data_type.all_data_types
360 yield self
361
362 @property
363 def all_imports(self) -> Iterator[Import]:
364 for data_type in self.data_types:
365 yield from data_type.all_imports
366 yield from self.imports
367
368 @property
369 def imports(self) -> Iterator[Import]:
370 if self.import_:
371 yield self.import_
372 imports: Tuple[Tuple[bool, Import], ...] = (
373 (self.is_optional and not self.use_union_operator, IMPORT_OPTIONAL),
374 (len(self.data_types) > 1 and not self.use_union_operator, IMPORT_UNION),
375 )
376 if any(self.literals):
377 import_literal = (
378 IMPORT_LITERAL
379 if self.python_version.has_literal_type
380 else IMPORT_LITERAL_BACKPORT
381 )
382 imports = (
383 *imports,
384 (any(self.literals), import_literal),
385 )
386
387 if self.use_generic_container:
388 if self.use_standard_collections:
389 imports = (
390 *imports,
391 (self.is_list, IMPORT_ABC_SEQUENCE),
392 (self.is_set, IMPORT_ABC_SET),
393 (self.is_dict, IMPORT_ABC_MAPPING),
394 )
395 else:
396 imports = (
397 *imports,
398 (self.is_list, IMPORT_SEQUENCE),
399 (self.is_set, IMPORT_FROZEN_SET),
400 (self.is_dict, IMPORT_MAPPING),
401 )
402 elif not self.use_standard_collections:
403 imports = (
404 *imports,
405 (self.is_list, IMPORT_LIST),
406 (self.is_set, IMPORT_SET),
407 (self.is_dict, IMPORT_DICT),
408 )
409 for field, import_ in imports:
410 if field and import_ != self.import_:
411 yield import_
412
413 if self.dict_key:
414 yield from self.dict_key.imports
415
416 def __init__(self, **values: Any) -> None:
417 if not TYPE_CHECKING:
418 super().__init__(**values)
419
420 for type_ in self.data_types:
421 if type_.type == ANY and type_.is_optional:
422 if any(t for t in self.data_types if t.type != ANY): # pragma: no cover
423 self.is_optional = True
424 self.data_types = [
425 t
426 for t in self.data_types
427 if not (t.type == ANY and t.is_optional)
428 ]
429 break # pragma: no cover
430
431 for data_type in self.data_types:
432 if data_type.reference or data_type.data_types:
433 data_type.parent = self
434
435 if self.reference:
436 self.reference.children.append(self)
437
438 @property
439 def type_hint(self) -> str:
440 type_: Optional[str] = self.alias or self.type
441 if not type_:
442 if self.is_union:
443 data_types: List[str] = []
444 for data_type in self.data_types:
445 data_type_type = data_type.type_hint
446 if data_type_type in data_types: # pragma: no cover
447 continue
448 data_types.append(data_type_type)
449 if NONE in data_types:
450 data_types = [d for d in data_types if d != NONE]
451 self.is_optional = True
452 if len(data_types) == 1:
453 type_ = data_types[0]
454 else:
455 if self.use_union_operator:
456 type_ = UNION_OPERATOR_DELIMITER.join(data_types)
457 else:
458 type_ = f'{UNION_PREFIX}{UNION_DELIMITER.join(data_types)}]'
459 elif len(self.data_types) == 1:
460 type_ = self.data_types[0].type_hint
461 elif self.literals:
462 type_ = f"{LITERAL}[{', '.join(repr(literal) for literal in self.literals)}]"
463 else:
464 if self.reference:
465 type_ = self.reference.short_name
466 else:
467 # TODO support strict Any
468 # type_ = 'Any'
469 type_ = ''
470 if self.reference:
471 source = self.reference.source
472 if isinstance(source, Nullable) and source.nullable:
473 self.is_optional = True
474 if self.reference and self.python_version == PythonVersion.PY_36:
475 type_ = f"'{type_}'"
476 if self.is_list:
477 if self.use_generic_container:
478 list_ = SEQUENCE
479 elif self.use_standard_collections:
480 list_ = STANDARD_LIST
481 else:
482 list_ = LIST
483 type_ = f'{list_}[{type_}]' if type_ else list_
484 elif self.is_set:
485 if self.use_generic_container:
486 set_ = FROZEN_SET
487 elif self.use_standard_collections:
488 set_ = STANDARD_SET
489 else:
490 set_ = SET
491 type_ = f'{set_}[{type_}]' if type_ else set_
492 elif self.is_dict:
493 if self.use_generic_container:
494 dict_ = MAPPING
495 elif self.use_standard_collections:
496 dict_ = STANDARD_DICT
497 else:
498 dict_ = DICT
499 if self.dict_key or type_:
500 key = self.dict_key.type_hint if self.dict_key else STR
501 type_ = f'{dict_}[{key}, {type_ or ANY}]'
502 else: # pragma: no cover
503 type_ = dict_
504 if self.is_optional and type_ != ANY:
505 return get_optional_type(type_, self.use_union_operator)
506 elif self.is_func:
507 if self.kwargs:
508 kwargs: str = ', '.join(f'{k}={v}' for k, v in self.kwargs.items())
509 return f'{type_}({kwargs})'
510 return f'{type_}()'
511 return type_
512
513 @property
514 def is_union(self) -> bool:
515 return len(self.data_types) > 1
516
517
518 DataType.model_rebuild()
519
520 DataTypeT = TypeVar('DataTypeT', bound=DataType)
521
522
523 class EmptyDataType(DataType):
524 pass
525
526
527 class Types(Enum):
528 integer = auto()
529 int32 = auto()
530 int64 = auto()
531 number = auto()
532 float = auto()
533 double = auto()
534 decimal = auto()
535 time = auto()
536 string = auto()
537 byte = auto()
538 binary = auto()
539 date = auto()
540 date_time = auto()
541 password = auto()
542 email = auto()
543 uuid = auto()
544 uuid1 = auto()
545 uuid2 = auto()
546 uuid3 = auto()
547 uuid4 = auto()
548 uuid5 = auto()
549 uri = auto()
550 hostname = auto()
551 ipv4 = auto()
552 ipv4_network = auto()
553 ipv6 = auto()
554 ipv6_network = auto()
555 boolean = auto()
556 object = auto()
557 null = auto()
558 array = auto()
559 any = auto()
560
561
562 class DataTypeManager(ABC):
563 def __init__(
564 self,
565 python_version: PythonVersion = PythonVersion.PY_37,
566 use_standard_collections: bool = False,
567 use_generic_container_types: bool = False,
568 strict_types: Optional[Sequence[StrictTypes]] = None,
569 use_non_positive_negative_number_constrained_types: bool = False,
570 use_union_operator: bool = False,
571 ) -> None:
572 self.python_version = python_version
573 self.use_standard_collections: bool = use_standard_collections
574 self.use_generic_container_types: bool = use_generic_container_types
575 self.strict_types: Sequence[StrictTypes] = strict_types or ()
576 self.use_non_positive_negative_number_constrained_types: bool = (
577 use_non_positive_negative_number_constrained_types
578 )
579 self.use_union_operator: bool = use_union_operator
580
581 if (
582 use_generic_container_types and python_version == PythonVersion.PY_36
583 ): # pragma: no cover
584 raise Exception(
585 'use_generic_container_types can not be used with target_python_version 3.6.\n'
586 ' The version will be not supported in a future version'
587 )
588
589 if TYPE_CHECKING:
590 self.data_type: Type[DataType]
591 else:
592 self.data_type: Type[DataType] = create_model(
593 'ContextDataType',
594 python_version=(PythonVersion, python_version),
595 use_standard_collections=(bool, use_standard_collections),
596 use_generic_container=(bool, use_generic_container_types),
597 use_union_operator=(bool, use_union_operator),
598 __base__=DataType,
599 )
600
601 @abstractmethod
602 def get_data_type(self, types: Types, **kwargs: Any) -> DataType:
603 raise NotImplementedError
604
605 def get_data_type_from_full_path(
606 self, full_path: str, is_custom_type: bool
607 ) -> DataType:
608 return self.data_type.from_import(
609 Import.from_full_path(full_path), is_custom_type=is_custom_type
610 )
611
612 def get_data_type_from_value(self, value: Any) -> DataType:
613 type_: Optional[Types] = None
614 if isinstance(value, str):
615 type_ = Types.string
616 elif isinstance(value, bool):
617 type_ = Types.boolean
618 elif isinstance(value, int):
619 type_ = Types.integer
620 elif isinstance(value, float):
621 type_ = Types.float
622 elif isinstance(value, dict):
623 return self.data_type.from_import(IMPORT_DICT)
624 elif isinstance(value, list):
625 return self.data_type.from_import(IMPORT_LIST)
626 else:
627 type_ = Types.any
628 return self.get_data_type(type_)
629
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/datamodel_code_generator/types.py b/datamodel_code_generator/types.py
--- a/datamodel_code_generator/types.py
+++ b/datamodel_code_generator/types.py
@@ -129,7 +129,7 @@
)
return core_schema.json_or_python_schema(
- json_schema=core_schema.no_info_plain_validator_function(cls.validate),
+ json_schema=from_int_schema,
python_schema=core_schema.union_schema(
[
# check if it's an instance first before doing any further work
| {"golden_diff": "diff --git a/datamodel_code_generator/types.py b/datamodel_code_generator/types.py\n--- a/datamodel_code_generator/types.py\n+++ b/datamodel_code_generator/types.py\n@@ -129,7 +129,7 @@\n )\n \n return core_schema.json_or_python_schema(\n- json_schema=core_schema.no_info_plain_validator_function(cls.validate),\n+ json_schema=from_int_schema,\n python_schema=core_schema.union_schema(\n [\n # check if it's an instance first before doing any further work\n", "issue": "Impossible to get the json schema of a json schema object\n**Describe the bug**\r\n\r\n```python\r\nfrom datamodel_code_generator.parser.jsonschema import JsonSchemaObject\r\n\r\nif __name__ == \"__main__\":\r\n print(JsonSchemaObject.model_json_schema())\r\n```\r\n\r\nRaises\r\n\r\n```\r\npydantic.errors.PydanticInvalidForJsonSchema: Cannot generate a JsonSchema for core_schema.PlainValidatorFunctionSchema ({'type': 'no-info', 'function': <bound method UnionIntFloat.validate of <class 'datamodel_code_generator.types.UnionIntFloat'>>})\r\n```\r\n\r\n**To Reproduce**\r\n\r\nSee code above\r\n\r\n**Expected behavior**\r\n\r\nThe json schema of a json schema object.\r\n\r\n**Version:**\r\n - OS: Linux 6.2.0\r\n - Python version: 3.11.4\r\n - datamodel-code-generator version: 0.22.1\r\n\n", "before_files": [{"content": "import re\nfrom abc import ABC, abstractmethod\nfrom enum import Enum, auto\nfrom functools import lru_cache\nfrom itertools import chain\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n ClassVar,\n Dict,\n FrozenSet,\n Iterable,\n Iterator,\n List,\n Optional,\n Pattern,\n Sequence,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n)\n\nimport pydantic\nfrom packaging import version\nfrom pydantic import (\n StrictBool,\n StrictInt,\n StrictStr,\n create_model,\n)\n\nfrom datamodel_code_generator.format import PythonVersion\nfrom datamodel_code_generator.imports import (\n IMPORT_ABC_MAPPING,\n IMPORT_ABC_SEQUENCE,\n IMPORT_ABC_SET,\n IMPORT_DICT,\n IMPORT_FROZEN_SET,\n IMPORT_LIST,\n IMPORT_LITERAL,\n IMPORT_LITERAL_BACKPORT,\n IMPORT_MAPPING,\n IMPORT_OPTIONAL,\n IMPORT_SEQUENCE,\n IMPORT_SET,\n IMPORT_UNION,\n Import,\n)\nfrom datamodel_code_generator.reference import Reference, _BaseModel\nfrom datamodel_code_generator.util import (\n PYDANTIC_V2,\n ConfigDict,\n Protocol,\n runtime_checkable,\n)\n\nif PYDANTIC_V2:\n from pydantic import GetCoreSchemaHandler\n from pydantic_core import core_schema\n\nT = TypeVar('T')\n\nOPTIONAL = 'Optional'\nOPTIONAL_PREFIX = f'{OPTIONAL}['\n\nUNION = 'Union'\nUNION_PREFIX = f'{UNION}['\nUNION_DELIMITER = ', '\nUNION_PATTERN: Pattern[str] = re.compile(r'\\s*,\\s*')\nUNION_OPERATOR_DELIMITER = ' | '\nUNION_OPERATOR_PATTERN: Pattern[str] = re.compile(r'\\s*\\|\\s*')\nNONE = 'None'\nANY = 'Any'\nLITERAL = 'Literal'\nSEQUENCE = 'Sequence'\nFROZEN_SET = 'FrozenSet'\nMAPPING = 'Mapping'\nDICT = 'Dict'\nSET = 'Set'\nLIST = 'List'\nSTANDARD_DICT = 'dict'\nSTANDARD_LIST = 'list'\nSTANDARD_SET = 'set'\nSTR = 'str'\n\nNOT_REQUIRED = 'NotRequired'\nNOT_REQUIRED_PREFIX = f'{NOT_REQUIRED}['\n\n\nclass StrictTypes(Enum):\n str = 'str'\n bytes = 'bytes'\n int = 'int'\n float = 'float'\n bool = 'bool'\n\n\nclass UnionIntFloat:\n def __init__(self, value: Union[int, float]) -> None:\n self.value: Union[int, float] = value\n\n def __int__(self) -> int:\n return int(self.value)\n\n def __float__(self) -> float:\n return float(self.value)\n\n def __str__(self) -> str:\n return str(self.value)\n\n @classmethod\n def __get_validators__(cls) -> Iterator[Callable[[Any], Any]]:\n yield cls.validate\n\n @classmethod\n def __get_pydantic_core_schema__(\n cls, _source_type: Any, _handler: 'GetCoreSchemaHandler'\n ) -> 'core_schema.CoreSchema':\n from_int_schema = core_schema.chain_schema(\n [\n core_schema.union_schema(\n [core_schema.int_schema(), core_schema.float_schema()]\n ),\n core_schema.no_info_plain_validator_function(cls.validate),\n ]\n )\n\n return core_schema.json_or_python_schema(\n json_schema=core_schema.no_info_plain_validator_function(cls.validate),\n python_schema=core_schema.union_schema(\n [\n # check if it's an instance first before doing any further work\n core_schema.is_instance_schema(UnionIntFloat),\n from_int_schema,\n ]\n ),\n serialization=core_schema.plain_serializer_function_ser_schema(\n lambda instance: instance.value\n ),\n )\n\n @classmethod\n def validate(cls, v: Any) -> 'UnionIntFloat':\n if isinstance(v, UnionIntFloat):\n return v\n elif not isinstance(v, (int, float)): # pragma: no cover\n try:\n int(v)\n return cls(v)\n except (TypeError, ValueError):\n pass\n try:\n float(v)\n return cls(v)\n except (TypeError, ValueError):\n pass\n\n raise TypeError(f'{v} is not int or float')\n return cls(v)\n\n\ndef chain_as_tuple(*iterables: Iterable[T]) -> Tuple[T, ...]:\n return tuple(chain(*iterables))\n\n\n@lru_cache()\ndef _remove_none_from_type(\n type_: str, split_pattern: Pattern[str], delimiter: str\n) -> List[str]:\n types: List[str] = []\n split_type: str = ''\n inner_count: int = 0\n for part in re.split(split_pattern, type_):\n if part == NONE:\n continue\n inner_count += part.count('[') - part.count(']')\n if split_type:\n split_type += delimiter\n if inner_count == 0:\n if split_type:\n types.append(f'{split_type}{part}')\n else:\n types.append(part)\n split_type = ''\n continue\n else:\n split_type += part\n return types\n\n\ndef _remove_none_from_union(type_: str, use_union_operator: bool) -> str:\n if use_union_operator:\n if not re.match(r'^\\w+ | ', type_):\n return type_\n return UNION_OPERATOR_DELIMITER.join(\n _remove_none_from_type(\n type_, UNION_OPERATOR_PATTERN, UNION_OPERATOR_DELIMITER\n )\n )\n\n if not type_.startswith(UNION_PREFIX):\n return type_\n inner_types = _remove_none_from_type(\n type_[len(UNION_PREFIX) :][:-1], UNION_PATTERN, UNION_DELIMITER\n )\n\n if len(inner_types) == 1:\n return inner_types[0]\n return f'{UNION_PREFIX}{UNION_DELIMITER.join(inner_types)}]'\n\n\n@lru_cache()\ndef get_optional_type(type_: str, use_union_operator: bool) -> str:\n type_ = _remove_none_from_union(type_, use_union_operator)\n\n if not type_ or type_ == NONE:\n return NONE\n if use_union_operator:\n return f'{type_} | {NONE}'\n return f'{OPTIONAL_PREFIX}{type_}]'\n\n\n@runtime_checkable\nclass Modular(Protocol):\n @property\n def module_name(self) -> str:\n raise NotImplementedError\n\n\n@runtime_checkable\nclass Nullable(Protocol):\n @property\n def nullable(self) -> bool:\n raise NotImplementedError\n\n\nclass DataType(_BaseModel):\n if PYDANTIC_V2:\n # TODO[pydantic]: The following keys were removed: `copy_on_model_validation`.\n # Check https://docs.pydantic.dev/dev-v2/migration/#changes-to-config for more information.\n model_config = ConfigDict(\n extra='forbid',\n revalidate_instances='never',\n )\n else:\n if not TYPE_CHECKING:\n\n @classmethod\n def model_rebuild(cls) -> None:\n cls.update_forward_refs()\n\n class Config:\n extra = 'forbid'\n copy_on_model_validation = (\n False\n if version.parse(pydantic.VERSION) < version.parse('1.9.2')\n else 'none'\n )\n\n type: Optional[str] = None\n reference: Optional[Reference] = None\n data_types: List['DataType'] = []\n is_func: bool = False\n kwargs: Optional[Dict[str, Any]] = None\n import_: Optional[Import] = None\n python_version: PythonVersion = PythonVersion.PY_37\n is_optional: bool = False\n is_dict: bool = False\n is_list: bool = False\n is_set: bool = False\n is_custom_type: bool = False\n literals: List[Union[StrictBool, StrictInt, StrictStr]] = []\n use_standard_collections: bool = False\n use_generic_container: bool = False\n use_union_operator: bool = False\n alias: Optional[str] = None\n parent: Optional[Any] = None\n children: List[Any] = []\n strict: bool = False\n dict_key: Optional['DataType'] = None\n\n _exclude_fields: ClassVar[Set[str]] = {'parent', 'children'}\n _pass_fields: ClassVar[Set[str]] = {'parent', 'children', 'data_types', 'reference'}\n\n @classmethod\n def from_import(\n cls: Type['DataTypeT'],\n import_: Import,\n *,\n is_optional: bool = False,\n is_dict: bool = False,\n is_list: bool = False,\n is_set: bool = False,\n is_custom_type: bool = False,\n strict: bool = False,\n kwargs: Optional[Dict[str, Any]] = None,\n ) -> 'DataTypeT':\n return cls(\n type=import_.import_,\n import_=import_,\n is_optional=is_optional,\n is_dict=is_dict,\n is_list=is_list,\n is_set=is_set,\n is_func=True if kwargs else False,\n is_custom_type=is_custom_type,\n strict=strict,\n kwargs=kwargs,\n )\n\n @property\n def unresolved_types(self) -> FrozenSet[str]:\n return frozenset(\n {\n t.reference.path\n for data_types in self.data_types\n for t in data_types.all_data_types\n if t.reference\n }\n | ({self.reference.path} if self.reference else set())\n )\n\n def replace_reference(self, reference: Optional[Reference]) -> None:\n if not self.reference: # pragma: no cover\n raise Exception(\n f\"`{self.__class__.__name__}.replace_reference()` can't be called\"\n f' when `reference` field is empty.'\n )\n self_id = id(self)\n self.reference.children = [\n c for c in self.reference.children if id(c) != self_id\n ]\n self.reference = reference\n if reference:\n reference.children.append(self)\n\n def remove_reference(self) -> None:\n self.replace_reference(None)\n\n @property\n def module_name(self) -> Optional[str]:\n if self.reference and isinstance(self.reference.source, Modular):\n return self.reference.source.module_name\n return None # pragma: no cover\n\n @property\n def full_name(self) -> str:\n module_name = self.module_name\n if module_name:\n return f'{module_name}.{self.reference.short_name}' # type: ignore\n return self.reference.short_name # type: ignore\n\n @property\n def all_data_types(self) -> Iterator['DataType']:\n for data_type in self.data_types:\n yield from data_type.all_data_types\n yield self\n\n @property\n def all_imports(self) -> Iterator[Import]:\n for data_type in self.data_types:\n yield from data_type.all_imports\n yield from self.imports\n\n @property\n def imports(self) -> Iterator[Import]:\n if self.import_:\n yield self.import_\n imports: Tuple[Tuple[bool, Import], ...] = (\n (self.is_optional and not self.use_union_operator, IMPORT_OPTIONAL),\n (len(self.data_types) > 1 and not self.use_union_operator, IMPORT_UNION),\n )\n if any(self.literals):\n import_literal = (\n IMPORT_LITERAL\n if self.python_version.has_literal_type\n else IMPORT_LITERAL_BACKPORT\n )\n imports = (\n *imports,\n (any(self.literals), import_literal),\n )\n\n if self.use_generic_container:\n if self.use_standard_collections:\n imports = (\n *imports,\n (self.is_list, IMPORT_ABC_SEQUENCE),\n (self.is_set, IMPORT_ABC_SET),\n (self.is_dict, IMPORT_ABC_MAPPING),\n )\n else:\n imports = (\n *imports,\n (self.is_list, IMPORT_SEQUENCE),\n (self.is_set, IMPORT_FROZEN_SET),\n (self.is_dict, IMPORT_MAPPING),\n )\n elif not self.use_standard_collections:\n imports = (\n *imports,\n (self.is_list, IMPORT_LIST),\n (self.is_set, IMPORT_SET),\n (self.is_dict, IMPORT_DICT),\n )\n for field, import_ in imports:\n if field and import_ != self.import_:\n yield import_\n\n if self.dict_key:\n yield from self.dict_key.imports\n\n def __init__(self, **values: Any) -> None:\n if not TYPE_CHECKING:\n super().__init__(**values)\n\n for type_ in self.data_types:\n if type_.type == ANY and type_.is_optional:\n if any(t for t in self.data_types if t.type != ANY): # pragma: no cover\n self.is_optional = True\n self.data_types = [\n t\n for t in self.data_types\n if not (t.type == ANY and t.is_optional)\n ]\n break # pragma: no cover\n\n for data_type in self.data_types:\n if data_type.reference or data_type.data_types:\n data_type.parent = self\n\n if self.reference:\n self.reference.children.append(self)\n\n @property\n def type_hint(self) -> str:\n type_: Optional[str] = self.alias or self.type\n if not type_:\n if self.is_union:\n data_types: List[str] = []\n for data_type in self.data_types:\n data_type_type = data_type.type_hint\n if data_type_type in data_types: # pragma: no cover\n continue\n data_types.append(data_type_type)\n if NONE in data_types:\n data_types = [d for d in data_types if d != NONE]\n self.is_optional = True\n if len(data_types) == 1:\n type_ = data_types[0]\n else:\n if self.use_union_operator:\n type_ = UNION_OPERATOR_DELIMITER.join(data_types)\n else:\n type_ = f'{UNION_PREFIX}{UNION_DELIMITER.join(data_types)}]'\n elif len(self.data_types) == 1:\n type_ = self.data_types[0].type_hint\n elif self.literals:\n type_ = f\"{LITERAL}[{', '.join(repr(literal) for literal in self.literals)}]\"\n else:\n if self.reference:\n type_ = self.reference.short_name\n else:\n # TODO support strict Any\n # type_ = 'Any'\n type_ = ''\n if self.reference:\n source = self.reference.source\n if isinstance(source, Nullable) and source.nullable:\n self.is_optional = True\n if self.reference and self.python_version == PythonVersion.PY_36:\n type_ = f\"'{type_}'\"\n if self.is_list:\n if self.use_generic_container:\n list_ = SEQUENCE\n elif self.use_standard_collections:\n list_ = STANDARD_LIST\n else:\n list_ = LIST\n type_ = f'{list_}[{type_}]' if type_ else list_\n elif self.is_set:\n if self.use_generic_container:\n set_ = FROZEN_SET\n elif self.use_standard_collections:\n set_ = STANDARD_SET\n else:\n set_ = SET\n type_ = f'{set_}[{type_}]' if type_ else set_\n elif self.is_dict:\n if self.use_generic_container:\n dict_ = MAPPING\n elif self.use_standard_collections:\n dict_ = STANDARD_DICT\n else:\n dict_ = DICT\n if self.dict_key or type_:\n key = self.dict_key.type_hint if self.dict_key else STR\n type_ = f'{dict_}[{key}, {type_ or ANY}]'\n else: # pragma: no cover\n type_ = dict_\n if self.is_optional and type_ != ANY:\n return get_optional_type(type_, self.use_union_operator)\n elif self.is_func:\n if self.kwargs:\n kwargs: str = ', '.join(f'{k}={v}' for k, v in self.kwargs.items())\n return f'{type_}({kwargs})'\n return f'{type_}()'\n return type_\n\n @property\n def is_union(self) -> bool:\n return len(self.data_types) > 1\n\n\nDataType.model_rebuild()\n\nDataTypeT = TypeVar('DataTypeT', bound=DataType)\n\n\nclass EmptyDataType(DataType):\n pass\n\n\nclass Types(Enum):\n integer = auto()\n int32 = auto()\n int64 = auto()\n number = auto()\n float = auto()\n double = auto()\n decimal = auto()\n time = auto()\n string = auto()\n byte = auto()\n binary = auto()\n date = auto()\n date_time = auto()\n password = auto()\n email = auto()\n uuid = auto()\n uuid1 = auto()\n uuid2 = auto()\n uuid3 = auto()\n uuid4 = auto()\n uuid5 = auto()\n uri = auto()\n hostname = auto()\n ipv4 = auto()\n ipv4_network = auto()\n ipv6 = auto()\n ipv6_network = auto()\n boolean = auto()\n object = auto()\n null = auto()\n array = auto()\n any = auto()\n\n\nclass DataTypeManager(ABC):\n def __init__(\n self,\n python_version: PythonVersion = PythonVersion.PY_37,\n use_standard_collections: bool = False,\n use_generic_container_types: bool = False,\n strict_types: Optional[Sequence[StrictTypes]] = None,\n use_non_positive_negative_number_constrained_types: bool = False,\n use_union_operator: bool = False,\n ) -> None:\n self.python_version = python_version\n self.use_standard_collections: bool = use_standard_collections\n self.use_generic_container_types: bool = use_generic_container_types\n self.strict_types: Sequence[StrictTypes] = strict_types or ()\n self.use_non_positive_negative_number_constrained_types: bool = (\n use_non_positive_negative_number_constrained_types\n )\n self.use_union_operator: bool = use_union_operator\n\n if (\n use_generic_container_types and python_version == PythonVersion.PY_36\n ): # pragma: no cover\n raise Exception(\n 'use_generic_container_types can not be used with target_python_version 3.6.\\n'\n ' The version will be not supported in a future version'\n )\n\n if TYPE_CHECKING:\n self.data_type: Type[DataType]\n else:\n self.data_type: Type[DataType] = create_model(\n 'ContextDataType',\n python_version=(PythonVersion, python_version),\n use_standard_collections=(bool, use_standard_collections),\n use_generic_container=(bool, use_generic_container_types),\n use_union_operator=(bool, use_union_operator),\n __base__=DataType,\n )\n\n @abstractmethod\n def get_data_type(self, types: Types, **kwargs: Any) -> DataType:\n raise NotImplementedError\n\n def get_data_type_from_full_path(\n self, full_path: str, is_custom_type: bool\n ) -> DataType:\n return self.data_type.from_import(\n Import.from_full_path(full_path), is_custom_type=is_custom_type\n )\n\n def get_data_type_from_value(self, value: Any) -> DataType:\n type_: Optional[Types] = None\n if isinstance(value, str):\n type_ = Types.string\n elif isinstance(value, bool):\n type_ = Types.boolean\n elif isinstance(value, int):\n type_ = Types.integer\n elif isinstance(value, float):\n type_ = Types.float\n elif isinstance(value, dict):\n return self.data_type.from_import(IMPORT_DICT)\n elif isinstance(value, list):\n return self.data_type.from_import(IMPORT_LIST)\n else:\n type_ = Types.any\n return self.get_data_type(type_)\n", "path": "datamodel_code_generator/types.py"}], "after_files": [{"content": "import re\nfrom abc import ABC, abstractmethod\nfrom enum import Enum, auto\nfrom functools import lru_cache\nfrom itertools import chain\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n ClassVar,\n Dict,\n FrozenSet,\n Iterable,\n Iterator,\n List,\n Optional,\n Pattern,\n Sequence,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n)\n\nimport pydantic\nfrom packaging import version\nfrom pydantic import (\n StrictBool,\n StrictInt,\n StrictStr,\n create_model,\n)\n\nfrom datamodel_code_generator.format import PythonVersion\nfrom datamodel_code_generator.imports import (\n IMPORT_ABC_MAPPING,\n IMPORT_ABC_SEQUENCE,\n IMPORT_ABC_SET,\n IMPORT_DICT,\n IMPORT_FROZEN_SET,\n IMPORT_LIST,\n IMPORT_LITERAL,\n IMPORT_LITERAL_BACKPORT,\n IMPORT_MAPPING,\n IMPORT_OPTIONAL,\n IMPORT_SEQUENCE,\n IMPORT_SET,\n IMPORT_UNION,\n Import,\n)\nfrom datamodel_code_generator.reference import Reference, _BaseModel\nfrom datamodel_code_generator.util import (\n PYDANTIC_V2,\n ConfigDict,\n Protocol,\n runtime_checkable,\n)\n\nif PYDANTIC_V2:\n from pydantic import GetCoreSchemaHandler\n from pydantic_core import core_schema\n\nT = TypeVar('T')\n\nOPTIONAL = 'Optional'\nOPTIONAL_PREFIX = f'{OPTIONAL}['\n\nUNION = 'Union'\nUNION_PREFIX = f'{UNION}['\nUNION_DELIMITER = ', '\nUNION_PATTERN: Pattern[str] = re.compile(r'\\s*,\\s*')\nUNION_OPERATOR_DELIMITER = ' | '\nUNION_OPERATOR_PATTERN: Pattern[str] = re.compile(r'\\s*\\|\\s*')\nNONE = 'None'\nANY = 'Any'\nLITERAL = 'Literal'\nSEQUENCE = 'Sequence'\nFROZEN_SET = 'FrozenSet'\nMAPPING = 'Mapping'\nDICT = 'Dict'\nSET = 'Set'\nLIST = 'List'\nSTANDARD_DICT = 'dict'\nSTANDARD_LIST = 'list'\nSTANDARD_SET = 'set'\nSTR = 'str'\n\nNOT_REQUIRED = 'NotRequired'\nNOT_REQUIRED_PREFIX = f'{NOT_REQUIRED}['\n\n\nclass StrictTypes(Enum):\n str = 'str'\n bytes = 'bytes'\n int = 'int'\n float = 'float'\n bool = 'bool'\n\n\nclass UnionIntFloat:\n def __init__(self, value: Union[int, float]) -> None:\n self.value: Union[int, float] = value\n\n def __int__(self) -> int:\n return int(self.value)\n\n def __float__(self) -> float:\n return float(self.value)\n\n def __str__(self) -> str:\n return str(self.value)\n\n @classmethod\n def __get_validators__(cls) -> Iterator[Callable[[Any], Any]]:\n yield cls.validate\n\n @classmethod\n def __get_pydantic_core_schema__(\n cls, _source_type: Any, _handler: 'GetCoreSchemaHandler'\n ) -> 'core_schema.CoreSchema':\n from_int_schema = core_schema.chain_schema(\n [\n core_schema.union_schema(\n [core_schema.int_schema(), core_schema.float_schema()]\n ),\n core_schema.no_info_plain_validator_function(cls.validate),\n ]\n )\n\n return core_schema.json_or_python_schema(\n json_schema=from_int_schema,\n python_schema=core_schema.union_schema(\n [\n # check if it's an instance first before doing any further work\n core_schema.is_instance_schema(UnionIntFloat),\n from_int_schema,\n ]\n ),\n serialization=core_schema.plain_serializer_function_ser_schema(\n lambda instance: instance.value\n ),\n )\n\n @classmethod\n def validate(cls, v: Any) -> 'UnionIntFloat':\n if isinstance(v, UnionIntFloat):\n return v\n elif not isinstance(v, (int, float)): # pragma: no cover\n try:\n int(v)\n return cls(v)\n except (TypeError, ValueError):\n pass\n try:\n float(v)\n return cls(v)\n except (TypeError, ValueError):\n pass\n\n raise TypeError(f'{v} is not int or float')\n return cls(v)\n\n\ndef chain_as_tuple(*iterables: Iterable[T]) -> Tuple[T, ...]:\n return tuple(chain(*iterables))\n\n\n@lru_cache()\ndef _remove_none_from_type(\n type_: str, split_pattern: Pattern[str], delimiter: str\n) -> List[str]:\n types: List[str] = []\n split_type: str = ''\n inner_count: int = 0\n for part in re.split(split_pattern, type_):\n if part == NONE:\n continue\n inner_count += part.count('[') - part.count(']')\n if split_type:\n split_type += delimiter\n if inner_count == 0:\n if split_type:\n types.append(f'{split_type}{part}')\n else:\n types.append(part)\n split_type = ''\n continue\n else:\n split_type += part\n return types\n\n\ndef _remove_none_from_union(type_: str, use_union_operator: bool) -> str:\n if use_union_operator:\n if not re.match(r'^\\w+ | ', type_):\n return type_\n return UNION_OPERATOR_DELIMITER.join(\n _remove_none_from_type(\n type_, UNION_OPERATOR_PATTERN, UNION_OPERATOR_DELIMITER\n )\n )\n\n if not type_.startswith(UNION_PREFIX):\n return type_\n inner_types = _remove_none_from_type(\n type_[len(UNION_PREFIX) :][:-1], UNION_PATTERN, UNION_DELIMITER\n )\n\n if len(inner_types) == 1:\n return inner_types[0]\n return f'{UNION_PREFIX}{UNION_DELIMITER.join(inner_types)}]'\n\n\n@lru_cache()\ndef get_optional_type(type_: str, use_union_operator: bool) -> str:\n type_ = _remove_none_from_union(type_, use_union_operator)\n\n if not type_ or type_ == NONE:\n return NONE\n if use_union_operator:\n return f'{type_} | {NONE}'\n return f'{OPTIONAL_PREFIX}{type_}]'\n\n\n@runtime_checkable\nclass Modular(Protocol):\n @property\n def module_name(self) -> str:\n raise NotImplementedError\n\n\n@runtime_checkable\nclass Nullable(Protocol):\n @property\n def nullable(self) -> bool:\n raise NotImplementedError\n\n\nclass DataType(_BaseModel):\n if PYDANTIC_V2:\n # TODO[pydantic]: The following keys were removed: `copy_on_model_validation`.\n # Check https://docs.pydantic.dev/dev-v2/migration/#changes-to-config for more information.\n model_config = ConfigDict(\n extra='forbid',\n revalidate_instances='never',\n )\n else:\n if not TYPE_CHECKING:\n\n @classmethod\n def model_rebuild(cls) -> None:\n cls.update_forward_refs()\n\n class Config:\n extra = 'forbid'\n copy_on_model_validation = (\n False\n if version.parse(pydantic.VERSION) < version.parse('1.9.2')\n else 'none'\n )\n\n type: Optional[str] = None\n reference: Optional[Reference] = None\n data_types: List['DataType'] = []\n is_func: bool = False\n kwargs: Optional[Dict[str, Any]] = None\n import_: Optional[Import] = None\n python_version: PythonVersion = PythonVersion.PY_37\n is_optional: bool = False\n is_dict: bool = False\n is_list: bool = False\n is_set: bool = False\n is_custom_type: bool = False\n literals: List[Union[StrictBool, StrictInt, StrictStr]] = []\n use_standard_collections: bool = False\n use_generic_container: bool = False\n use_union_operator: bool = False\n alias: Optional[str] = None\n parent: Optional[Any] = None\n children: List[Any] = []\n strict: bool = False\n dict_key: Optional['DataType'] = None\n\n _exclude_fields: ClassVar[Set[str]] = {'parent', 'children'}\n _pass_fields: ClassVar[Set[str]] = {'parent', 'children', 'data_types', 'reference'}\n\n @classmethod\n def from_import(\n cls: Type['DataTypeT'],\n import_: Import,\n *,\n is_optional: bool = False,\n is_dict: bool = False,\n is_list: bool = False,\n is_set: bool = False,\n is_custom_type: bool = False,\n strict: bool = False,\n kwargs: Optional[Dict[str, Any]] = None,\n ) -> 'DataTypeT':\n return cls(\n type=import_.import_,\n import_=import_,\n is_optional=is_optional,\n is_dict=is_dict,\n is_list=is_list,\n is_set=is_set,\n is_func=True if kwargs else False,\n is_custom_type=is_custom_type,\n strict=strict,\n kwargs=kwargs,\n )\n\n @property\n def unresolved_types(self) -> FrozenSet[str]:\n return frozenset(\n {\n t.reference.path\n for data_types in self.data_types\n for t in data_types.all_data_types\n if t.reference\n }\n | ({self.reference.path} if self.reference else set())\n )\n\n def replace_reference(self, reference: Optional[Reference]) -> None:\n if not self.reference: # pragma: no cover\n raise Exception(\n f\"`{self.__class__.__name__}.replace_reference()` can't be called\"\n f' when `reference` field is empty.'\n )\n self_id = id(self)\n self.reference.children = [\n c for c in self.reference.children if id(c) != self_id\n ]\n self.reference = reference\n if reference:\n reference.children.append(self)\n\n def remove_reference(self) -> None:\n self.replace_reference(None)\n\n @property\n def module_name(self) -> Optional[str]:\n if self.reference and isinstance(self.reference.source, Modular):\n return self.reference.source.module_name\n return None # pragma: no cover\n\n @property\n def full_name(self) -> str:\n module_name = self.module_name\n if module_name:\n return f'{module_name}.{self.reference.short_name}' # type: ignore\n return self.reference.short_name # type: ignore\n\n @property\n def all_data_types(self) -> Iterator['DataType']:\n for data_type in self.data_types:\n yield from data_type.all_data_types\n yield self\n\n @property\n def all_imports(self) -> Iterator[Import]:\n for data_type in self.data_types:\n yield from data_type.all_imports\n yield from self.imports\n\n @property\n def imports(self) -> Iterator[Import]:\n if self.import_:\n yield self.import_\n imports: Tuple[Tuple[bool, Import], ...] = (\n (self.is_optional and not self.use_union_operator, IMPORT_OPTIONAL),\n (len(self.data_types) > 1 and not self.use_union_operator, IMPORT_UNION),\n )\n if any(self.literals):\n import_literal = (\n IMPORT_LITERAL\n if self.python_version.has_literal_type\n else IMPORT_LITERAL_BACKPORT\n )\n imports = (\n *imports,\n (any(self.literals), import_literal),\n )\n\n if self.use_generic_container:\n if self.use_standard_collections:\n imports = (\n *imports,\n (self.is_list, IMPORT_ABC_SEQUENCE),\n (self.is_set, IMPORT_ABC_SET),\n (self.is_dict, IMPORT_ABC_MAPPING),\n )\n else:\n imports = (\n *imports,\n (self.is_list, IMPORT_SEQUENCE),\n (self.is_set, IMPORT_FROZEN_SET),\n (self.is_dict, IMPORT_MAPPING),\n )\n elif not self.use_standard_collections:\n imports = (\n *imports,\n (self.is_list, IMPORT_LIST),\n (self.is_set, IMPORT_SET),\n (self.is_dict, IMPORT_DICT),\n )\n for field, import_ in imports:\n if field and import_ != self.import_:\n yield import_\n\n if self.dict_key:\n yield from self.dict_key.imports\n\n def __init__(self, **values: Any) -> None:\n if not TYPE_CHECKING:\n super().__init__(**values)\n\n for type_ in self.data_types:\n if type_.type == ANY and type_.is_optional:\n if any(t for t in self.data_types if t.type != ANY): # pragma: no cover\n self.is_optional = True\n self.data_types = [\n t\n for t in self.data_types\n if not (t.type == ANY and t.is_optional)\n ]\n break # pragma: no cover\n\n for data_type in self.data_types:\n if data_type.reference or data_type.data_types:\n data_type.parent = self\n\n if self.reference:\n self.reference.children.append(self)\n\n @property\n def type_hint(self) -> str:\n type_: Optional[str] = self.alias or self.type\n if not type_:\n if self.is_union:\n data_types: List[str] = []\n for data_type in self.data_types:\n data_type_type = data_type.type_hint\n if data_type_type in data_types: # pragma: no cover\n continue\n data_types.append(data_type_type)\n if NONE in data_types:\n data_types = [d for d in data_types if d != NONE]\n self.is_optional = True\n if len(data_types) == 1:\n type_ = data_types[0]\n else:\n if self.use_union_operator:\n type_ = UNION_OPERATOR_DELIMITER.join(data_types)\n else:\n type_ = f'{UNION_PREFIX}{UNION_DELIMITER.join(data_types)}]'\n elif len(self.data_types) == 1:\n type_ = self.data_types[0].type_hint\n elif self.literals:\n type_ = f\"{LITERAL}[{', '.join(repr(literal) for literal in self.literals)}]\"\n else:\n if self.reference:\n type_ = self.reference.short_name\n else:\n # TODO support strict Any\n # type_ = 'Any'\n type_ = ''\n if self.reference:\n source = self.reference.source\n if isinstance(source, Nullable) and source.nullable:\n self.is_optional = True\n if self.reference and self.python_version == PythonVersion.PY_36:\n type_ = f\"'{type_}'\"\n if self.is_list:\n if self.use_generic_container:\n list_ = SEQUENCE\n elif self.use_standard_collections:\n list_ = STANDARD_LIST\n else:\n list_ = LIST\n type_ = f'{list_}[{type_}]' if type_ else list_\n elif self.is_set:\n if self.use_generic_container:\n set_ = FROZEN_SET\n elif self.use_standard_collections:\n set_ = STANDARD_SET\n else:\n set_ = SET\n type_ = f'{set_}[{type_}]' if type_ else set_\n elif self.is_dict:\n if self.use_generic_container:\n dict_ = MAPPING\n elif self.use_standard_collections:\n dict_ = STANDARD_DICT\n else:\n dict_ = DICT\n if self.dict_key or type_:\n key = self.dict_key.type_hint if self.dict_key else STR\n type_ = f'{dict_}[{key}, {type_ or ANY}]'\n else: # pragma: no cover\n type_ = dict_\n if self.is_optional and type_ != ANY:\n return get_optional_type(type_, self.use_union_operator)\n elif self.is_func:\n if self.kwargs:\n kwargs: str = ', '.join(f'{k}={v}' for k, v in self.kwargs.items())\n return f'{type_}({kwargs})'\n return f'{type_}()'\n return type_\n\n @property\n def is_union(self) -> bool:\n return len(self.data_types) > 1\n\n\nDataType.model_rebuild()\n\nDataTypeT = TypeVar('DataTypeT', bound=DataType)\n\n\nclass EmptyDataType(DataType):\n pass\n\n\nclass Types(Enum):\n integer = auto()\n int32 = auto()\n int64 = auto()\n number = auto()\n float = auto()\n double = auto()\n decimal = auto()\n time = auto()\n string = auto()\n byte = auto()\n binary = auto()\n date = auto()\n date_time = auto()\n password = auto()\n email = auto()\n uuid = auto()\n uuid1 = auto()\n uuid2 = auto()\n uuid3 = auto()\n uuid4 = auto()\n uuid5 = auto()\n uri = auto()\n hostname = auto()\n ipv4 = auto()\n ipv4_network = auto()\n ipv6 = auto()\n ipv6_network = auto()\n boolean = auto()\n object = auto()\n null = auto()\n array = auto()\n any = auto()\n\n\nclass DataTypeManager(ABC):\n def __init__(\n self,\n python_version: PythonVersion = PythonVersion.PY_37,\n use_standard_collections: bool = False,\n use_generic_container_types: bool = False,\n strict_types: Optional[Sequence[StrictTypes]] = None,\n use_non_positive_negative_number_constrained_types: bool = False,\n use_union_operator: bool = False,\n ) -> None:\n self.python_version = python_version\n self.use_standard_collections: bool = use_standard_collections\n self.use_generic_container_types: bool = use_generic_container_types\n self.strict_types: Sequence[StrictTypes] = strict_types or ()\n self.use_non_positive_negative_number_constrained_types: bool = (\n use_non_positive_negative_number_constrained_types\n )\n self.use_union_operator: bool = use_union_operator\n\n if (\n use_generic_container_types and python_version == PythonVersion.PY_36\n ): # pragma: no cover\n raise Exception(\n 'use_generic_container_types can not be used with target_python_version 3.6.\\n'\n ' The version will be not supported in a future version'\n )\n\n if TYPE_CHECKING:\n self.data_type: Type[DataType]\n else:\n self.data_type: Type[DataType] = create_model(\n 'ContextDataType',\n python_version=(PythonVersion, python_version),\n use_standard_collections=(bool, use_standard_collections),\n use_generic_container=(bool, use_generic_container_types),\n use_union_operator=(bool, use_union_operator),\n __base__=DataType,\n )\n\n @abstractmethod\n def get_data_type(self, types: Types, **kwargs: Any) -> DataType:\n raise NotImplementedError\n\n def get_data_type_from_full_path(\n self, full_path: str, is_custom_type: bool\n ) -> DataType:\n return self.data_type.from_import(\n Import.from_full_path(full_path), is_custom_type=is_custom_type\n )\n\n def get_data_type_from_value(self, value: Any) -> DataType:\n type_: Optional[Types] = None\n if isinstance(value, str):\n type_ = Types.string\n elif isinstance(value, bool):\n type_ = Types.boolean\n elif isinstance(value, int):\n type_ = Types.integer\n elif isinstance(value, float):\n type_ = Types.float\n elif isinstance(value, dict):\n return self.data_type.from_import(IMPORT_DICT)\n elif isinstance(value, list):\n return self.data_type.from_import(IMPORT_LIST)\n else:\n type_ = Types.any\n return self.get_data_type(type_)\n", "path": "datamodel_code_generator/types.py"}]} |
gh_patches_debug_1529 | rasdani/github-patches | git_diff | keras-team__keras-13342 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
NameError: name 'math_ops' is not defined
**System information**
- Have I written custom code (as opposed to using example directory):
- OS Platform and Distribution (e.g., Linux Ubuntu 16.04):
- TensorFlow backend (yes / no):
- TensorFlow version: 1.14.0
- Keras version: 2.3.0
- Python version:
- CUDA/cuDNN version:
- GPU model and memory:
**Describe the current behavior**
File "/usr/local/lib/python3.6/dist-packages/keras/engine/training.py", line 222, in compile
masks=masks)
File "/usr/local/lib/python3.6/dist-packages/keras/engine/training.py", line 871, in _handle_metrics
self._per_output_metrics[i], target, output, output_mask)
File "/usr/local/lib/python3.6/dist-packages/keras/engine/training.py", line 842, in _handle_per_output_metrics
metric_fn, y_true, y_pred, weights=weights, mask=mask)
File "/usr/local/lib/python3.6/dist-packages/keras/engine/training_utils.py", line 1022, in call_metric_function
mask = math_ops.cast(mask, y_pred.dtype)
NameError: name 'math_ops' is not defined
**Describe the expected behavior**
**Code to reproduce the issue**
**Other info / logs**
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `keras/engine/training_utils.py`
Content:
```
1 """Training-related utilities.
2 """
3 from __future__ import absolute_import
4 from __future__ import division
5 from __future__ import print_function
6
7 import inspect
8 import collections
9 import copy
10 import numpy as np
11 import six
12 import warnings
13 from collections import OrderedDict
14
15 from .. import backend as K
16 from .. import losses
17 from .. import metrics as metrics_module
18 from ..utils import Sequence
19 from ..utils import generic_utils
20 from ..utils import losses_utils
21
22
23 def standardize_single_array(x):
24 if x is None:
25 return None
26 elif K.is_tensor(x):
27 shape = K.int_shape(x)
28 if shape is None or shape[0] is None:
29 raise ValueError(
30 'When feeding symbolic tensors to a model, we expect the '
31 'tensors to have a static batch size. '
32 'Got tensor with shape: %s' % str(shape))
33 return x
34 elif x.ndim == 1:
35 x = np.expand_dims(x, 1)
36 return x
37
38
39 def standardize_input_data(data,
40 names,
41 shapes=None,
42 check_batch_axis=True,
43 exception_prefix=''):
44 """Normalizes inputs and targets provided by users.
45
46 Users may pass data as a list of arrays, dictionary of arrays,
47 or as a single array. We normalize this to an ordered list of
48 arrays (same order as `names`), while checking that the provided
49 arrays have shapes that match the network's expectations.
50
51 # Arguments
52 data: User-provided input data (polymorphic).
53 names: List of expected array names.
54 shapes: Optional list of expected array shapes.
55 check_batch_axis: Boolean; whether to check that
56 the batch axis of the arrays matches the expected
57 value found in `shapes`.
58 exception_prefix: String prefix used for exception formatting.
59
60 # Returns
61 List of standardized input arrays (one array per model input).
62
63 # Raises
64 ValueError: in case of improperly formatted user-provided data.
65 """
66 if not names:
67 if data is not None and hasattr(data, '__len__') and len(data):
68 raise ValueError('Error when checking model ' +
69 exception_prefix + ': '
70 'expected no data, but got:', data)
71 return []
72 if data is None:
73 return [None for _ in range(len(names))]
74
75 if isinstance(data, dict):
76 try:
77 data = [
78 data[x].values
79 if data[x].__class__.__name__ == 'DataFrame' else data[x]
80 for x in names
81 ]
82 except KeyError as e:
83 raise ValueError('No data provided for "' + e.args[0] +
84 '". Need data '
85 'for each key in: ' + str(names))
86 elif isinstance(data, list):
87 if isinstance(data[0], list):
88 data = [np.asarray(d) for d in data]
89 elif len(names) == 1 and isinstance(data[0], (float, int)):
90 data = [np.asarray(data)]
91 else:
92 data = [
93 x.values if x.__class__.__name__ == 'DataFrame'
94 else x for x in data
95 ]
96 else:
97 data = data.values if data.__class__.__name__ == 'DataFrame' else data
98 data = [data]
99 data = [standardize_single_array(x) for x in data]
100
101 if len(data) != len(names):
102 if data and hasattr(data[0], 'shape'):
103 raise ValueError(
104 'Error when checking model ' + exception_prefix +
105 ': the list of Numpy arrays that you are passing to '
106 'your model is not the size the model expected. '
107 'Expected to see ' + str(len(names)) + ' array(s), '
108 'but instead got the following list of ' +
109 str(len(data)) + ' arrays: ' + str(data)[:200] + '...')
110 elif len(names) > 1:
111 raise ValueError(
112 'Error when checking model ' + exception_prefix +
113 ': you are passing a list as input to your model, '
114 'but the model expects a list of ' + str(len(names)) +
115 ' Numpy arrays instead. '
116 'The list you passed was: ' + str(data)[:200])
117 elif len(data) == 1 and not hasattr(data[0], 'shape'):
118 raise TypeError('Error when checking model ' + exception_prefix +
119 ': data should be a Numpy array, or list/dict of '
120 'Numpy arrays. Found: ' + str(data)[:200] + '...')
121 elif len(names) == 1:
122 data = [np.asarray(data)]
123
124 # Check shapes compatibility.
125 if shapes:
126 for i in range(len(names)):
127 if shapes[i] is not None and not K.is_tensor(data[i]):
128 data_shape = data[i].shape
129 shape = shapes[i]
130 if data[i].ndim != len(shape):
131 raise ValueError(
132 'Error when checking ' + exception_prefix +
133 ': expected ' + names[i] + ' to have ' +
134 str(len(shape)) + ' dimensions, but got array '
135 'with shape ' + str(data_shape))
136 if not check_batch_axis:
137 data_shape = data_shape[1:]
138 shape = shape[1:]
139 for dim, ref_dim in zip(data_shape, shape):
140 if ref_dim != dim and ref_dim:
141 raise ValueError(
142 'Error when checking ' + exception_prefix +
143 ': expected ' + names[i] + ' to have shape ' +
144 str(shape) + ' but got array with shape ' +
145 str(data_shape))
146 return data
147
148
149 def standardize_sample_or_class_weights(x_weight,
150 output_names,
151 weight_type):
152 """Maps `sample_weight` or `class_weight` to model outputs.
153
154 # Arguments
155 x_weight: User-provided `sample_weight` or `class_weight` argument.
156 output_names: List of output names (strings) in the model.
157 weight_type: A string used purely for exception printing.
158
159 # Returns
160 A list of `sample_weight` or `class_weight` where there are exactly
161 one element per model output.
162
163 # Raises
164 ValueError: In case of invalid user-provided argument.
165 """
166 if x_weight is None or len(x_weight) == 0:
167 return [None for _ in output_names]
168 if len(output_names) == 1:
169 if isinstance(x_weight, list) and len(x_weight) == 1:
170 return x_weight
171 if isinstance(x_weight, dict) and output_names[0] in x_weight:
172 return [x_weight[output_names[0]]]
173 else:
174 return [x_weight]
175 if isinstance(x_weight, list):
176 if len(x_weight) != len(output_names):
177 raise ValueError('Provided `' + weight_type + '` was a list of ' +
178 str(len(x_weight)) +
179 ' elements, but the model has ' +
180 str(len(output_names)) + ' outputs. '
181 'You should provide one `' + weight_type + '`'
182 'array per model output.')
183 return x_weight
184 if isinstance(x_weight, dict):
185 x_weights = []
186 for name in output_names:
187 x_weights.append(x_weight.get(name))
188 return x_weights
189 else:
190 raise TypeError('The model has multiple outputs, so `' +
191 weight_type + '` '
192 'should be either a list or a dict. '
193 'Provided `' + weight_type +
194 '` type not understood: ' +
195 str(x_weight))
196
197
198 def standardize_class_weights(class_weight, output_names):
199 return standardize_sample_or_class_weights(class_weight,
200 output_names,
201 'class_weight')
202
203
204 def standardize_sample_weights(sample_weight, output_names):
205 return standardize_sample_or_class_weights(sample_weight,
206 output_names,
207 'sample_weight')
208
209
210 def check_array_length_consistency(inputs, targets, weights=None):
211 """Checks if batch axes are the same for Numpy arrays.
212
213 # Arguments
214 inputs: list of Numpy arrays of inputs.
215 targets: list of Numpy arrays of targets.
216 weights: list of Numpy arrays of sample weights.
217
218 # Raises
219 ValueError: in case of incorrectly formatted data.
220 """
221 def set_of_lengths(x):
222 # return a set with the variation between
223 # different shapes, with None => 0
224 if x is None:
225 return {0}
226 else:
227 return set([0 if y is None else int(y.shape[0]) for y in x])
228
229 set_x = set_of_lengths(inputs)
230 set_y = set_of_lengths(targets)
231 set_w = set_of_lengths(weights)
232 if len(set_x) > 1:
233 raise ValueError('All input arrays (x) should have '
234 'the same number of samples. Got array shapes: ' +
235 str([x.shape for x in inputs]))
236 if len(set_y) > 1:
237 raise ValueError('All target arrays (y) should have '
238 'the same number of samples. Got array shapes: ' +
239 str([y.shape for y in targets]))
240 if set_x and set_y and list(set_x)[0] != list(set_y)[0]:
241 raise ValueError('Input arrays should have '
242 'the same number of samples as target arrays. '
243 'Found ' + str(list(set_x)[0]) + ' input samples '
244 'and ' + str(list(set_y)[0]) + ' target samples.')
245 if len(set_w) > 1:
246 raise ValueError('All sample_weight arrays should have '
247 'the same number of samples. Got array shapes: ' +
248 str([w.shape for w in weights]))
249 if set_y and set_w and list(set_y)[0] != list(set_w)[0]:
250 raise ValueError('Sample_weight arrays should have '
251 'the same number of samples as target arrays. Got ' +
252 str(list(set_y)[0]) + ' input samples and ' +
253 str(list(set_w)[0]) + ' target samples.')
254
255
256 def check_loss_and_target_compatibility(targets, loss_fns, output_shapes):
257 """Does validation on the compatibility of targets and loss functions.
258
259 This helps prevent users from using loss functions incorrectly. This check
260 is purely for UX purposes.
261
262 # Arguments
263 targets: list of Numpy arrays of targets.
264 loss_fns: list of loss functions.
265 output_shapes: list of shapes of model outputs.
266
267 # Raises
268 ValueError: if a loss function or target array
269 is incompatible with an output.
270 """
271 key_loss_fns = {
272 losses.mean_squared_error, losses.binary_crossentropy,
273 losses.categorical_crossentropy
274 }
275 key_loss_classes = (losses.MeanSquaredError, losses.BinaryCrossentropy,
276 losses.CategoricalCrossentropy)
277 for y, loss, shape in zip(targets, loss_fns, output_shapes):
278 if y is None or loss is None:
279 continue
280 if losses.is_categorical_crossentropy(loss):
281 if y.shape[-1] == 1:
282 raise ValueError(
283 'You are passing a target array of shape ' + str(y.shape) +
284 ' while using as loss `categorical_crossentropy`. '
285 '`categorical_crossentropy` expects '
286 'targets to be binary matrices (1s and 0s) '
287 'of shape (samples, classes). '
288 'If your targets are integer classes, '
289 'you can convert them to the expected format via:\n'
290 '```\n'
291 'from keras.utils import to_categorical\n'
292 'y_binary = to_categorical(y_int)\n'
293 '```\n'
294 '\n'
295 'Alternatively, you can use the loss function '
296 '`sparse_categorical_crossentropy` instead, '
297 'which does expect integer targets.')
298 is_loss_wrapper = isinstance(loss, losses.LossFunctionWrapper)
299 if (isinstance(loss, key_loss_classes) or (is_loss_wrapper and
300 (loss.fn in key_loss_fns))):
301 for target_dim, out_dim in zip(y.shape[1:], shape[1:]):
302 if out_dim is not None and target_dim != out_dim:
303 loss_name = loss.name
304 if loss_name is None:
305 loss_type = loss.fn if is_loss_wrapper else type(loss)
306 loss_name = loss_type.__name__
307 raise ValueError(
308 'A target array with shape ' + str(y.shape) +
309 ' was passed for an output of shape ' + str(shape) +
310 ' while using as loss `' + loss_name + '`. '
311 'This loss expects targets to have the same shape '
312 'as the output.')
313
314
315 def check_generator_arguments(y=None, sample_weight=None,
316 validation_split=None):
317 """Validates arguments passed when using a generator."""
318 if y is not None:
319 raise ValueError('`y` argument is not supported when data is'
320 'a generator or Sequence instance. Instead pass targets'
321 ' as the second element of the generator.')
322 if sample_weight is not None:
323 raise ValueError('`sample_weight` argument is not supported when data is'
324 'a generator or Sequence instance. Instead pass sample'
325 ' weights as the third element of the generator.')
326 if validation_split:
327 raise ValueError('If your data is in the form of a Python generator, '
328 'you cannot use `validation_split`.')
329
330
331 def batch_shuffle(index_array, batch_size):
332 """Shuffles an array in a batch-wise fashion.
333
334 Useful for shuffling HDF5 arrays
335 (where one cannot access arbitrary indices).
336
337 # Arguments
338 index_array: array of indices to be shuffled.
339 batch_size: integer.
340
341 # Returns
342 The `index_array` array, shuffled in a batch-wise fashion.
343 """
344 batch_count = int(len(index_array) / batch_size)
345 # to reshape we need to be cleanly divisible by batch size
346 # we stash extra items and reappend them after shuffling
347 last_batch = index_array[batch_count * batch_size:]
348 index_array = index_array[:batch_count * batch_size]
349 index_array = index_array.reshape((batch_count, batch_size))
350 np.random.shuffle(index_array)
351 index_array = index_array.flatten()
352 return np.append(index_array, last_batch)
353
354
355 def make_batches(size, batch_size):
356 """Returns a list of batch indices (tuples of indices).
357
358 # Arguments
359 size: Integer, total size of the data to slice into batches.
360 batch_size: Integer, batch size.
361
362 # Returns
363 A list of tuples of array indices.
364 """
365 num_batches = (size + batch_size - 1) // batch_size # round up
366 return [(i * batch_size, min(size, (i + 1) * batch_size))
367 for i in range(num_batches)]
368
369
370 def weighted_masked_objective(fn):
371 """Adds support for masking and sample-weighting to an objective function.
372
373 It transforms an objective function `fn(y_true, y_pred)`
374 into a sample-weighted, cost-masked objective function
375 `fn(y_true, y_pred, weights, mask)`.
376
377 # Arguments
378 fn: The objective function to wrap,
379 with signature `fn(y_true, y_pred)`.
380
381 # Returns
382 A function with signature `fn(y_true, y_pred, weights, mask)`.
383 """
384 if fn is None:
385 return None
386
387 def weighted(y_true, y_pred, weights, mask=None):
388 """Wrapper function.
389
390 # Arguments
391 y_true: `y_true` argument of `fn`.
392 y_pred: `y_pred` argument of `fn`.
393 weights: Weights tensor.
394 mask: Mask tensor.
395
396 # Returns
397 Scalar tensor.
398 """
399 # score_array has ndim >= 2
400 score_array = fn(y_true, y_pred)
401 if mask is not None:
402 # Cast the mask to floatX to avoid float64 upcasting in Theano
403 mask = K.cast(mask, K.floatx())
404 # mask should have the same shape as score_array
405 score_array *= mask
406 # the loss per batch should be proportional
407 # to the number of unmasked samples.
408 score_array /= K.mean(mask) + K.epsilon()
409
410 # apply sample weighting
411 if weights is not None:
412 # reduce score_array to same ndim as weight array
413 ndim = K.ndim(score_array)
414 weight_ndim = K.ndim(weights)
415 score_array = K.mean(score_array,
416 axis=list(range(weight_ndim, ndim)))
417 score_array *= weights
418 score_array /= K.mean(K.cast(K.not_equal(weights, 0), K.floatx()))
419 return K.mean(score_array)
420 return weighted
421
422
423 def standardize_weights(y,
424 sample_weight=None,
425 class_weight=None,
426 sample_weight_mode=None):
427 """Performs sample weight validation and standardization.
428
429 Everything gets normalized to a single sample-wise (or timestep-wise)
430 weight array. If both `sample_weights` and `class_weights` are provided,
431 the weights are multiplied together.
432
433 # Arguments
434 y: Numpy array of model targets to be weighted.
435 sample_weight: User-provided `sample_weight` argument.
436 class_weight: User-provided `class_weight` argument.
437 sample_weight_mode: One of `None` or `"temporal"`.
438 `"temporal"` indicated that we expect 2D weight data
439 that will be applied to the last 2 dimensions of
440 the targets (i.e. we are weighting timesteps, not samples).
441
442 # Returns
443 A Numpy array of target weights, one entry per sample to weight.
444
445 # Raises
446 ValueError: In case of invalid user-provided arguments.
447 """
448 if sample_weight_mode is not None:
449 if sample_weight_mode != 'temporal':
450 raise ValueError('"sample_weight_mode '
451 'should be None or "temporal". '
452 'Found: ' + str(sample_weight_mode))
453 if len(y.shape) < 3:
454 raise ValueError('Found a sample_weight array for '
455 'an input with shape ' +
456 str(y.shape) + '. '
457 'Timestep-wise sample weighting (use of '
458 'sample_weight_mode="temporal") is restricted to '
459 'outputs that are at least 3D, i.e. that have '
460 'a time dimension.')
461 if sample_weight is not None and len(sample_weight.shape) != 2:
462 raise ValueError('Found a sample_weight array with shape ' +
463 str(sample_weight.shape) + '. '
464 'In order to use timestep-wise sample weighting, '
465 'you should pass a 2D sample_weight array.')
466 else:
467 if sample_weight is not None and len(sample_weight.shape) != 1:
468 raise ValueError('Found a sample_weight array with shape ' +
469 str(sample_weight.shape) + '. '
470 'In order to use timestep-wise sample weights, '
471 'you should specify '
472 'sample_weight_mode="temporal" '
473 'in compile(). If you just mean to use '
474 'sample-wise weights, make sure your '
475 'sample_weight array is 1D.')
476
477 if sample_weight is not None:
478 if len(sample_weight.shape) > len(y.shape):
479 raise ValueError('Found a sample_weight with shape' +
480 str(sample_weight.shape) + '.'
481 'Expected sample_weight with rank '
482 'less than or equal to ' + str(len(y.shape)))
483
484 if y.shape[:sample_weight.ndim] != sample_weight.shape:
485 raise ValueError('Found a sample_weight array with shape ' +
486 str(sample_weight.shape) +
487 ' for an input with shape ' +
488 str(y.shape) + '. '
489 'sample_weight cannot be broadcast.')
490
491 class_sample_weight = None
492 if isinstance(class_weight, dict):
493 if len(y.shape) > 2:
494 raise ValueError('`class_weight` not supported for '
495 '3+ dimensional targets.')
496 if len(y.shape) == 2:
497 if y.shape[1] > 1:
498 y_classes = np.argmax(y, axis=1)
499 elif y.shape[1] == 1:
500 y_classes = np.reshape(y, y.shape[0])
501 else:
502 y_classes = y
503
504 class_sample_weight = np.asarray(
505 [class_weight[cls] for cls in y_classes if cls in class_weight])
506
507 if len(class_sample_weight) != len(y_classes):
508 # subtract the sets to pick all missing classes
509 existing_classes = set(y_classes)
510 existing_class_weight = set(class_weight.keys())
511 raise ValueError('`class_weight` must contain '
512 'all classes in the data.'
513 ' The classes %s exist in the data but not in '
514 '`class_weight`.'
515 % (existing_classes - existing_class_weight))
516
517 if sample_weight is not None and class_sample_weight is not None:
518 return sample_weight * class_sample_weight
519 if sample_weight is not None:
520 return sample_weight
521 if class_sample_weight is not None:
522 return class_sample_weight
523
524 # Everything has weight 1 by default.
525 if sample_weight_mode is None:
526 return np.ones((y.shape[0],), dtype=K.floatx())
527 else:
528 return np.ones((y.shape[0], y.shape[1]), dtype=K.floatx())
529
530
531 def check_num_samples(ins,
532 batch_size=None,
533 steps=None,
534 steps_name='steps'):
535 """Checks the number of samples provided for training and evaluation.
536
537 The number of samples is not defined when running with `steps`,
538 in which case the number of samples is set to `None`.
539
540 # Arguments
541 ins: List of tensors to be fed to the Keras function.
542 batch_size: Integer batch size or `None` if not defined.
543 steps: Total number of steps (batches of samples)
544 before declaring `predict_loop` finished.
545 Ignored with the default value of `None`.
546 steps_name: The public API's parameter name for `steps`.
547
548 # Raises
549 ValueError: when `steps` is `None` and the attribute `ins.shape`
550 does not exist. Also raises ValueError when `steps` is not `None`
551 and `batch_size` is not `None` because they are mutually
552 exclusive.
553
554 # Returns
555 When `steps` is `None`, returns the number of samples to be
556 processed based on the size of the first dimension of the
557 first input Numpy array. When `steps` is not `None` and
558 `batch_size` is `None`, returns `None`.
559
560 # Raises
561 ValueError: In case of invalid arguments.
562 """
563 if steps is not None and batch_size is not None:
564 raise ValueError(
565 'If ' + steps_name + ' is set, the `batch_size` must be None.')
566
567 if not ins or any(K.is_tensor(x) for x in ins):
568 if steps is None:
569 raise ValueError(
570 'If your data is in the form of symbolic tensors, '
571 'you should specify the `' + steps_name + '` argument '
572 '(instead of the `batch_size` argument, '
573 'because symbolic tensors are expected to produce '
574 'batches of input data).')
575 return None
576
577 if hasattr(ins[0], 'shape'):
578 return int(ins[0].shape[0])
579 return None # Edge case where ins == [static_learning_phase]
580
581
582 def iter_sequence_infinite(seq):
583 """Iterate indefinitely over a Sequence.
584
585 # Arguments
586 seq: Sequence object
587
588 # Returns
589 Generator yielding batches.
590 """
591 while True:
592 for item in seq:
593 yield item
594
595
596 def is_sequence(seq):
597 """Determine if an object follows the Sequence API.
598
599 # Arguments
600 seq: a possible Sequence object
601
602 # Returns
603 boolean, whether the object follows the Sequence API.
604 """
605 # TODO Dref360: Decide which pattern to follow. First needs a new TF Version.
606 return (getattr(seq, 'use_sequence_api', False)
607 or set(dir(Sequence())).issubset(set(dir(seq) + ['use_sequence_api'])))
608
609
610 def is_generator_or_sequence(x):
611 """Check if `x` is a Keras generator type."""
612 return inspect.isgenerator(x) or is_sequence(x)
613
614
615 def should_run_validation(validation_freq, epoch):
616 """Checks if validation should be run this epoch.
617
618 # Arguments
619 validation_freq: Integer or list. If an integer, specifies how many training
620 epochs to run before a new validation run is performed. If a list,
621 specifies the epochs on which to run validation.
622 epoch: Integer, the number of the training epoch just completed.
623
624 # Returns
625 Bool, True if validation should be run.
626
627 # Raises
628 ValueError: if `validation_freq` is an Integer and less than 1, or if
629 it is neither an Integer nor a Sequence.
630 """
631 # `epoch` is 0-indexed internally but 1-indexed in the public API.
632 one_indexed_epoch = epoch + 1
633
634 if isinstance(validation_freq, int):
635 if validation_freq < 1:
636 raise ValueError('`validation_freq` can not be less than 1.')
637 return one_indexed_epoch % validation_freq == 0
638
639 if not isinstance(validation_freq, collections.Container):
640 raise ValueError('`validation_freq` must be an Integer or '
641 '`collections.Container` (e.g. list, tuple, etc.)')
642 return one_indexed_epoch in validation_freq
643
644
645 def get_static_batch_size(layer):
646 """Gets the static batch size of a Layer.
647
648 # Arguments
649 layer: a `Layer` instance.
650
651 # Returns
652 The static batch size of a Layer.
653 """
654 batch_input_shape, _ = get_input_shape_and_dtype(layer)
655 if batch_input_shape is not None:
656 return batch_input_shape[0]
657 return None
658
659
660 def get_input_shape_and_dtype(layer):
661 """Retrieves input shape and input dtype of layer if applicable.
662
663 # Arguments
664 layer: Layer (or model) instance.
665
666 # Returns
667 Tuple (input_shape, input_dtype). Both could be None if the layer
668 does not have a defined input shape.
669
670 # Raises
671 ValueError: in case an empty Sequential or Functional model is passed.
672 """
673 def _is_graph_model(layer):
674 return ((hasattr(layer, '_is_graph_network') and layer._is_graph_network) or
675 layer.__class__.__name__ == 'Sequential')
676
677 # In case of nested models: recover the first layer
678 # of the deepest model to infer input shape and dtype.
679 # Subclassed Models may not have been built so can't be checked.
680 while _is_graph_model(layer):
681 if not layer.layers:
682 raise ValueError('An empty Model cannot be used as a Layer.')
683 layer = layer.layers[0]
684
685 if hasattr(layer, '_batch_input_shape'):
686 return layer._batch_input_shape, layer.dtype
687 return None, None
688
689
690 def get_loss_function(loss):
691 """Returns the loss corresponding to the loss input in `compile` API."""
692 if loss is None or isinstance(loss, losses.Loss):
693 return loss
694
695 # Deserialize loss configuration, if needed.
696 if isinstance(loss, collections.Mapping):
697 loss = losses.get(loss)
698
699 # Custom callable class.
700 if callable(loss) and not hasattr(loss, '__name__'):
701 return loss
702
703 # Wrap loss function with signature `(y_true, y_pred, **kwargs)`
704 # in `LossFunctionWrapper` class.
705 loss_fn = losses.get(loss)
706
707 # For losses which are given as strings/functions in the compile API,
708 # we always set the loss reduction type to be `SUM_OVER_BATCH_SIZE`..
709 return losses.LossFunctionWrapper(
710 loss_fn,
711 name=loss_fn.__name__,
712 reduction=losses_utils.Reduction.SUM_OVER_BATCH_SIZE)
713
714
715 def get_output_sample_weight_and_mode(skip_target_weighing_indices,
716 sample_weight_mode, output_name,
717 output_index):
718 """Returns the sample weight and weight mode for a single output."""
719 if output_index in skip_target_weighing_indices:
720 return None, None
721
722 if sample_weight_mode == 'temporal':
723 shape = [None, None]
724 mode = 'temporal'
725 else:
726 shape = [None]
727 mode = None
728 weight = K.placeholder(
729 shape=shape,
730 name=output_name + '_sample_weights')
731 return weight, mode
732
733
734 def prepare_sample_weights(output_names, sample_weight_mode,
735 skip_target_weighing_indices):
736 """Prepares sample weights for the model.
737
738 # Arguments
739 output_names: List of model output names.
740 sample_weight_mode: sample weight mode user input passed from compile API.
741 skip_target_weighing_indices: Indices of output for which sample weights
742 should be skipped.
743
744 # Returns
745 A pair of list of sample weights and sample weight modes
746 (one for each output).
747
748 # Raises
749 ValueError: In case of invalid `sample_weight_mode` input.
750 """
751 sample_weights = []
752 sample_weight_modes = []
753 if isinstance(sample_weight_mode, dict):
754 unknown_output = set(sample_weight_mode.keys()) - set(output_names)
755 if unknown_output:
756 raise ValueError(
757 'Unknown entry in '
758 'sample_weight_mode dictionary: "' + str(unknown_output) +
759 '". Only expected the following keys: ' + str(output_names))
760 for i, name in enumerate(output_names):
761 if (i not in skip_target_weighing_indices and
762 name not in sample_weight_mode):
763 raise ValueError(
764 'Output missing from sample_weight_modes dictionary')
765 weight, mode = get_output_sample_weight_and_mode(
766 skip_target_weighing_indices,
767 sample_weight_mode.get(name),
768 name,
769 i)
770 sample_weights.append(weight)
771 sample_weight_modes.append(mode)
772 elif isinstance(sample_weight_mode, list):
773 if len(sample_weight_mode) != len(output_names):
774 raise ValueError('When passing a list as sample_weight_mode, '
775 'it should have one entry per model output. '
776 'The model has ' + str(len(output_names)) +
777 ' outputs, but you passed ' +
778 str(len(sample_weight_mode)) + 'sample_weight_modes')
779 for i, name in enumerate(output_names):
780 weight, mode = get_output_sample_weight_and_mode(
781 skip_target_weighing_indices, sample_weight_mode[i], name, i)
782 sample_weights.append(weight)
783 sample_weight_modes.append(mode)
784 else:
785 for i, name in enumerate(output_names):
786 weight, mode = get_output_sample_weight_and_mode(
787 skip_target_weighing_indices, sample_weight_mode, name, i)
788 sample_weights.append(weight)
789 sample_weight_modes.append(mode)
790 return sample_weights, sample_weight_modes
791
792
793 def prepare_loss_functions(loss, output_names):
794 """Converts loss to a list of loss functions.
795
796 # Arguments
797 loss: String (name of objective function), objective function or
798 `Loss` instance. If the model has multiple outputs, you can use
799 a different loss on each output by passing a dictionary or a
800 list of losses. The loss value that will be minimized by the model
801 will then be the sum of all individual losses.
802 output_names: List of model output names.
803
804 # Returns
805 A list of loss objective functions.
806
807 # Raises:
808 ValueError: If loss is a dict with keys not in model output names,
809 or if loss is a list with len not equal to model outputs.
810 """
811 if isinstance(loss, collections.Mapping):
812 generic_utils.check_for_unexpected_keys('loss', loss, output_names)
813 loss_functions = []
814 for name in output_names:
815 if name not in loss:
816 warnings.warn(
817 'Output {0} missing from loss dictionary. We assume '
818 'this was done on purpose. The fit and evaluate APIs will not '
819 'be expecting any data to be passed to {0}.'.format(name))
820 loss_functions.append(get_loss_function(loss.get(name, None)))
821 elif isinstance(loss, six.string_types):
822 loss_functions = [get_loss_function(loss) for _ in output_names]
823 elif isinstance(loss, collections.Sequence):
824 if len(loss) != len(output_names):
825 raise ValueError('When passing a list as loss, it should have one entry '
826 'per model outputs. The model has {} outputs, but you '
827 'passed loss={}'.format(len(output_names), loss))
828 loss_functions = [get_loss_function(l) for l in loss]
829 else:
830 loss_functions = [get_loss_function(loss) for _ in range(len(output_names))]
831
832 return loss_functions
833
834
835 def prepare_loss_weights(output_names, loss_weights=None):
836 """Converts loss weights to a list of loss weights.
837
838 # Arguments
839 output_names: List of model output names.
840 loss_weights: Optional list or dictionary specifying scalar coefficients
841 (Python floats) to weight the loss contributions of different model
842 outputs. The loss value that will be minimized by the model will then be
843 the *weighted sum* of all individual losses, weighted by the
844 `loss_weights` coefficients. If a list, it is expected to have a 1:1
845 mapping to the model's outputs. If a dict, it is expected to map
846 output names (strings) to scalar coefficients.
847
848 # Returns
849 A list of loss weights of python floats.
850
851 # Raises
852 ValueError: If loss weight is a dict with key not in model output names,
853 or if loss is a list with len not equal to model outputs.
854 """
855 if loss_weights is None:
856 weights_list = [1.] * len(output_names)
857 elif isinstance(loss_weights, collections.Mapping):
858 generic_utils.check_for_unexpected_keys('loss_weights', loss_weights,
859 output_names)
860 weights_list = [loss_weights.get(name, 1.) for name in output_names]
861 elif isinstance(loss_weights, list):
862 if len(loss_weights) != len(output_names):
863 raise ValueError('When passing a list as loss_weights, '
864 'it should have one entry per model output. '
865 'The model has ' + str(len(output_names)) +
866 ' outputs, but you passed loss_weights=' +
867 str(loss_weights))
868 weights_list = loss_weights
869 else:
870 raise TypeError('Could not interpret loss_weights argument: ' +
871 str(loss_weights) + ' - expected a list of dicts.')
872
873 return weights_list
874
875
876 def collect_per_output_metric_info(metrics,
877 output_names,
878 output_shapes,
879 loss_fns,
880 is_weighted=False):
881 """Maps metric names and functions to model outputs.
882
883 # Arguments
884 metrics: a list or a list of lists or a dict of metric functions.
885 output_names: a list of the names (strings) of model outputs.
886 output_shapes: a list of the shapes (strings) of model outputs.
887 loss_fns: a list of the loss functions corresponding to the model outputs.
888 is_weighted: Boolean indicating whether the given metrics are weighted.
889
890 # Returns
891 A list (one entry per model output) of dicts.
892 For instance, if the model has 2 outputs, and for the first output
893 we want to compute "binary_accuracy" and "binary_crossentropy",
894 and just "binary_accuracy" for the second output,
895 the list would look like: `[{
896 'acc': binary_accuracy(),
897 'ce': binary_crossentropy(),
898 }, {
899 'acc': binary_accuracy(),
900 }]`
901
902 # Raises
903 TypeError: if an incorrect type is passed for the `metrics` argument.
904 """
905 if not metrics:
906 return [{} for _ in output_names]
907
908 if isinstance(metrics, list):
909 any_sub_list = any(isinstance(m, list) for m in metrics)
910 if any_sub_list:
911 if len(metrics) != len(output_names):
912 raise ValueError('When passing a list of lists as `metrics`, '
913 'it should have one entry per model output. '
914 'The model has ' + str(len(output_names)) +
915 ' outputs, but you passed metrics=' + str(metrics))
916 # User has provided a list of len = len(outputs).
917 nested_metrics = [generic_utils.to_list(m) for m in metrics]
918 else:
919 # If it is a single list we then apply all metrics to all outputs.
920 if len(output_names) > 1:
921 nested_metrics = []
922 for _ in output_names:
923 nested_metrics.append(
924 [metrics_module.clone_metric(m) for m in metrics])
925 else:
926 nested_metrics = [metrics]
927 elif isinstance(metrics, collections.Mapping):
928 generic_utils.check_for_unexpected_keys('metrics', metrics, output_names)
929 nested_metrics = []
930 for name in output_names:
931 output_metrics = generic_utils.to_list(metrics.get(name, []))
932 nested_metrics.append(output_metrics)
933 else:
934 raise TypeError('Type of `metrics` argument not understood. '
935 'Expected a list or dictionary, found: ' + str(metrics))
936
937 per_output_metrics = []
938 for i, metrics in enumerate(nested_metrics):
939 metrics_dict = OrderedDict()
940 for metric in metrics:
941 metric_name = get_metric_name(metric, is_weighted)
942 metric_fn = get_metric_function(
943 metric, output_shape=output_shapes[i], loss_fn=loss_fns[i])
944
945 # If the metric function is not stateful, we create a stateful version.
946 if not isinstance(metric_fn, metrics_module.Metric):
947 metric_fn = metrics_module.MeanMetricWrapper(
948 metric_fn, name=metric_name)
949 metrics_dict[metric_name] = metric_fn
950 per_output_metrics.append(metrics_dict)
951
952 return per_output_metrics
953
954
955 def get_metric_name(metric, weighted=False):
956 """Returns the name corresponding to the given metric input.
957
958 # Arguments
959 metric: Metric function name or reference.
960 weighted: Boolean indicating if the given metric is weighted.
961
962 # Returns
963 The metric name.
964 """
965 # We keep the string that the user has set in compile as the metric name.
966 if isinstance(metric, six.string_types):
967 return metric
968
969 metric = metrics_module.get(metric)
970 return metric.name if hasattr(metric, 'name') else metric.__name__
971
972
973 def get_metric_function(metric, output_shape=None, loss_fn=None):
974 """Returns the metric function corresponding to the given metric input.
975
976 # Arguments
977 metric: Metric function name or reference.
978 output_shape: The shape of the output that this metric will be calculated
979 for.
980 loss_fn: The loss function used.
981
982 # Returns
983 The metric function.
984 """
985 if metric not in ['accuracy', 'acc', 'crossentropy', 'ce']:
986 return metrics_module.get(metric)
987
988 is_sparse_categorical_crossentropy = (
989 isinstance(loss_fn, losses.SparseCategoricalCrossentropy) or
990 (isinstance(loss_fn, losses.LossFunctionWrapper) and
991 loss_fn.fn == losses.sparse_categorical_crossentropy))
992
993 is_binary_crossentropy = (
994 isinstance(loss_fn, losses.BinaryCrossentropy) or
995 (isinstance(loss_fn, losses.LossFunctionWrapper) and
996 loss_fn.fn == losses.binary_crossentropy))
997
998 if metric in ['accuracy', 'acc']:
999 if output_shape[-1] == 1 or is_binary_crossentropy:
1000 return metrics_module.binary_accuracy
1001 elif is_sparse_categorical_crossentropy:
1002 return metrics_module.sparse_categorical_accuracy
1003 # If the output_shape[-1] is not 1, then we know output is `categorical`.
1004 # We assume it is sparse categorical only if loss is explicitly given
1005 # as sparse categorical crossentropy loss.
1006 return metrics_module.categorical_accuracy
1007 else:
1008 if output_shape[-1] == 1 or is_binary_crossentropy:
1009 return metrics_module.binary_crossentropy
1010 elif is_sparse_categorical_crossentropy:
1011 return metrics_module.sparse_categorical_crossentropy
1012 return metrics_module.categorical_crossentropy
1013
1014
1015 def call_metric_function(metric_fn,
1016 y_true,
1017 y_pred=None,
1018 weights=None,
1019 mask=None):
1020 """Invokes metric function and returns the metric result tensor."""
1021 if mask is not None:
1022 mask = math_ops.cast(mask, y_pred.dtype)
1023 if weights is None:
1024 # Use mask as sample weight.
1025 weights = mask
1026 else:
1027 # Update dimensions of weights to match with mask.
1028 mask, _, weights = losses_utils.squeeze_or_expand_dimensions(
1029 mask, sample_weight=weights)
1030 weights *= mask
1031
1032 if y_pred is not None:
1033 update_ops = metric_fn.update_state(y_true, y_pred, sample_weight=weights)
1034 with K.control_dependencies(update_ops): # For TF
1035 metric_fn.result()
1036 else:
1037 # `Mean` metric only takes a single value.
1038 update_ops = metric_fn.update_state(y_true, sample_weight=weights)
1039 with K.control_dependencies(update_ops): # For TF
1040 metric_fn.result()
1041
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/keras/engine/training_utils.py b/keras/engine/training_utils.py
--- a/keras/engine/training_utils.py
+++ b/keras/engine/training_utils.py
@@ -1019,7 +1019,7 @@
mask=None):
"""Invokes metric function and returns the metric result tensor."""
if mask is not None:
- mask = math_ops.cast(mask, y_pred.dtype)
+ mask = K.cast(mask, y_pred.dtype)
if weights is None:
# Use mask as sample weight.
weights = mask
| {"golden_diff": "diff --git a/keras/engine/training_utils.py b/keras/engine/training_utils.py\n--- a/keras/engine/training_utils.py\n+++ b/keras/engine/training_utils.py\n@@ -1019,7 +1019,7 @@\n mask=None):\n \"\"\"Invokes metric function and returns the metric result tensor.\"\"\"\n if mask is not None:\n- mask = math_ops.cast(mask, y_pred.dtype)\n+ mask = K.cast(mask, y_pred.dtype)\n if weights is None:\n # Use mask as sample weight.\n weights = mask\n", "issue": "NameError: name 'math_ops' is not defined\n**System information** \r\n- Have I written custom code (as opposed to using example directory): \r\n- OS Platform and Distribution (e.g., Linux Ubuntu 16.04): \r\n- TensorFlow backend (yes / no): \r\n- TensorFlow version: 1.14.0 \r\n- Keras version: 2.3.0\r\n- Python version: \r\n- CUDA/cuDNN version: \r\n- GPU model and memory: \r\n\r\n\r\n**Describe the current behavior** \r\n\r\n File \"/usr/local/lib/python3.6/dist-packages/keras/engine/training.py\", line 222, in compile\r\n masks=masks)\r\n File \"/usr/local/lib/python3.6/dist-packages/keras/engine/training.py\", line 871, in _handle_metrics\r\n self._per_output_metrics[i], target, output, output_mask)\r\n File \"/usr/local/lib/python3.6/dist-packages/keras/engine/training.py\", line 842, in _handle_per_output_metrics\r\n metric_fn, y_true, y_pred, weights=weights, mask=mask)\r\n File \"/usr/local/lib/python3.6/dist-packages/keras/engine/training_utils.py\", line 1022, in call_metric_function\r\n mask = math_ops.cast(mask, y_pred.dtype)\r\nNameError: name 'math_ops' is not defined\r\n\r\n**Describe the expected behavior** \r\n\r\n**Code to reproduce the issue** \r\n\r\n**Other info / logs** \r\n\n", "before_files": [{"content": "\"\"\"Training-related utilities.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport inspect\nimport collections\nimport copy\nimport numpy as np\nimport six\nimport warnings\nfrom collections import OrderedDict\n\nfrom .. import backend as K\nfrom .. import losses\nfrom .. import metrics as metrics_module\nfrom ..utils import Sequence\nfrom ..utils import generic_utils\nfrom ..utils import losses_utils\n\n\ndef standardize_single_array(x):\n if x is None:\n return None\n elif K.is_tensor(x):\n shape = K.int_shape(x)\n if shape is None or shape[0] is None:\n raise ValueError(\n 'When feeding symbolic tensors to a model, we expect the '\n 'tensors to have a static batch size. '\n 'Got tensor with shape: %s' % str(shape))\n return x\n elif x.ndim == 1:\n x = np.expand_dims(x, 1)\n return x\n\n\ndef standardize_input_data(data,\n names,\n shapes=None,\n check_batch_axis=True,\n exception_prefix=''):\n \"\"\"Normalizes inputs and targets provided by users.\n\n Users may pass data as a list of arrays, dictionary of arrays,\n or as a single array. We normalize this to an ordered list of\n arrays (same order as `names`), while checking that the provided\n arrays have shapes that match the network's expectations.\n\n # Arguments\n data: User-provided input data (polymorphic).\n names: List of expected array names.\n shapes: Optional list of expected array shapes.\n check_batch_axis: Boolean; whether to check that\n the batch axis of the arrays matches the expected\n value found in `shapes`.\n exception_prefix: String prefix used for exception formatting.\n\n # Returns\n List of standardized input arrays (one array per model input).\n\n # Raises\n ValueError: in case of improperly formatted user-provided data.\n \"\"\"\n if not names:\n if data is not None and hasattr(data, '__len__') and len(data):\n raise ValueError('Error when checking model ' +\n exception_prefix + ': '\n 'expected no data, but got:', data)\n return []\n if data is None:\n return [None for _ in range(len(names))]\n\n if isinstance(data, dict):\n try:\n data = [\n data[x].values\n if data[x].__class__.__name__ == 'DataFrame' else data[x]\n for x in names\n ]\n except KeyError as e:\n raise ValueError('No data provided for \"' + e.args[0] +\n '\". Need data '\n 'for each key in: ' + str(names))\n elif isinstance(data, list):\n if isinstance(data[0], list):\n data = [np.asarray(d) for d in data]\n elif len(names) == 1 and isinstance(data[0], (float, int)):\n data = [np.asarray(data)]\n else:\n data = [\n x.values if x.__class__.__name__ == 'DataFrame'\n else x for x in data\n ]\n else:\n data = data.values if data.__class__.__name__ == 'DataFrame' else data\n data = [data]\n data = [standardize_single_array(x) for x in data]\n\n if len(data) != len(names):\n if data and hasattr(data[0], 'shape'):\n raise ValueError(\n 'Error when checking model ' + exception_prefix +\n ': the list of Numpy arrays that you are passing to '\n 'your model is not the size the model expected. '\n 'Expected to see ' + str(len(names)) + ' array(s), '\n 'but instead got the following list of ' +\n str(len(data)) + ' arrays: ' + str(data)[:200] + '...')\n elif len(names) > 1:\n raise ValueError(\n 'Error when checking model ' + exception_prefix +\n ': you are passing a list as input to your model, '\n 'but the model expects a list of ' + str(len(names)) +\n ' Numpy arrays instead. '\n 'The list you passed was: ' + str(data)[:200])\n elif len(data) == 1 and not hasattr(data[0], 'shape'):\n raise TypeError('Error when checking model ' + exception_prefix +\n ': data should be a Numpy array, or list/dict of '\n 'Numpy arrays. Found: ' + str(data)[:200] + '...')\n elif len(names) == 1:\n data = [np.asarray(data)]\n\n # Check shapes compatibility.\n if shapes:\n for i in range(len(names)):\n if shapes[i] is not None and not K.is_tensor(data[i]):\n data_shape = data[i].shape\n shape = shapes[i]\n if data[i].ndim != len(shape):\n raise ValueError(\n 'Error when checking ' + exception_prefix +\n ': expected ' + names[i] + ' to have ' +\n str(len(shape)) + ' dimensions, but got array '\n 'with shape ' + str(data_shape))\n if not check_batch_axis:\n data_shape = data_shape[1:]\n shape = shape[1:]\n for dim, ref_dim in zip(data_shape, shape):\n if ref_dim != dim and ref_dim:\n raise ValueError(\n 'Error when checking ' + exception_prefix +\n ': expected ' + names[i] + ' to have shape ' +\n str(shape) + ' but got array with shape ' +\n str(data_shape))\n return data\n\n\ndef standardize_sample_or_class_weights(x_weight,\n output_names,\n weight_type):\n \"\"\"Maps `sample_weight` or `class_weight` to model outputs.\n\n # Arguments\n x_weight: User-provided `sample_weight` or `class_weight` argument.\n output_names: List of output names (strings) in the model.\n weight_type: A string used purely for exception printing.\n\n # Returns\n A list of `sample_weight` or `class_weight` where there are exactly\n one element per model output.\n\n # Raises\n ValueError: In case of invalid user-provided argument.\n \"\"\"\n if x_weight is None or len(x_weight) == 0:\n return [None for _ in output_names]\n if len(output_names) == 1:\n if isinstance(x_weight, list) and len(x_weight) == 1:\n return x_weight\n if isinstance(x_weight, dict) and output_names[0] in x_weight:\n return [x_weight[output_names[0]]]\n else:\n return [x_weight]\n if isinstance(x_weight, list):\n if len(x_weight) != len(output_names):\n raise ValueError('Provided `' + weight_type + '` was a list of ' +\n str(len(x_weight)) +\n ' elements, but the model has ' +\n str(len(output_names)) + ' outputs. '\n 'You should provide one `' + weight_type + '`'\n 'array per model output.')\n return x_weight\n if isinstance(x_weight, dict):\n x_weights = []\n for name in output_names:\n x_weights.append(x_weight.get(name))\n return x_weights\n else:\n raise TypeError('The model has multiple outputs, so `' +\n weight_type + '` '\n 'should be either a list or a dict. '\n 'Provided `' + weight_type +\n '` type not understood: ' +\n str(x_weight))\n\n\ndef standardize_class_weights(class_weight, output_names):\n return standardize_sample_or_class_weights(class_weight,\n output_names,\n 'class_weight')\n\n\ndef standardize_sample_weights(sample_weight, output_names):\n return standardize_sample_or_class_weights(sample_weight,\n output_names,\n 'sample_weight')\n\n\ndef check_array_length_consistency(inputs, targets, weights=None):\n \"\"\"Checks if batch axes are the same for Numpy arrays.\n\n # Arguments\n inputs: list of Numpy arrays of inputs.\n targets: list of Numpy arrays of targets.\n weights: list of Numpy arrays of sample weights.\n\n # Raises\n ValueError: in case of incorrectly formatted data.\n \"\"\"\n def set_of_lengths(x):\n # return a set with the variation between\n # different shapes, with None => 0\n if x is None:\n return {0}\n else:\n return set([0 if y is None else int(y.shape[0]) for y in x])\n\n set_x = set_of_lengths(inputs)\n set_y = set_of_lengths(targets)\n set_w = set_of_lengths(weights)\n if len(set_x) > 1:\n raise ValueError('All input arrays (x) should have '\n 'the same number of samples. Got array shapes: ' +\n str([x.shape for x in inputs]))\n if len(set_y) > 1:\n raise ValueError('All target arrays (y) should have '\n 'the same number of samples. Got array shapes: ' +\n str([y.shape for y in targets]))\n if set_x and set_y and list(set_x)[0] != list(set_y)[0]:\n raise ValueError('Input arrays should have '\n 'the same number of samples as target arrays. '\n 'Found ' + str(list(set_x)[0]) + ' input samples '\n 'and ' + str(list(set_y)[0]) + ' target samples.')\n if len(set_w) > 1:\n raise ValueError('All sample_weight arrays should have '\n 'the same number of samples. Got array shapes: ' +\n str([w.shape for w in weights]))\n if set_y and set_w and list(set_y)[0] != list(set_w)[0]:\n raise ValueError('Sample_weight arrays should have '\n 'the same number of samples as target arrays. Got ' +\n str(list(set_y)[0]) + ' input samples and ' +\n str(list(set_w)[0]) + ' target samples.')\n\n\ndef check_loss_and_target_compatibility(targets, loss_fns, output_shapes):\n \"\"\"Does validation on the compatibility of targets and loss functions.\n\n This helps prevent users from using loss functions incorrectly. This check\n is purely for UX purposes.\n\n # Arguments\n targets: list of Numpy arrays of targets.\n loss_fns: list of loss functions.\n output_shapes: list of shapes of model outputs.\n\n # Raises\n ValueError: if a loss function or target array\n is incompatible with an output.\n \"\"\"\n key_loss_fns = {\n losses.mean_squared_error, losses.binary_crossentropy,\n losses.categorical_crossentropy\n }\n key_loss_classes = (losses.MeanSquaredError, losses.BinaryCrossentropy,\n losses.CategoricalCrossentropy)\n for y, loss, shape in zip(targets, loss_fns, output_shapes):\n if y is None or loss is None:\n continue\n if losses.is_categorical_crossentropy(loss):\n if y.shape[-1] == 1:\n raise ValueError(\n 'You are passing a target array of shape ' + str(y.shape) +\n ' while using as loss `categorical_crossentropy`. '\n '`categorical_crossentropy` expects '\n 'targets to be binary matrices (1s and 0s) '\n 'of shape (samples, classes). '\n 'If your targets are integer classes, '\n 'you can convert them to the expected format via:\\n'\n '```\\n'\n 'from keras.utils import to_categorical\\n'\n 'y_binary = to_categorical(y_int)\\n'\n '```\\n'\n '\\n'\n 'Alternatively, you can use the loss function '\n '`sparse_categorical_crossentropy` instead, '\n 'which does expect integer targets.')\n is_loss_wrapper = isinstance(loss, losses.LossFunctionWrapper)\n if (isinstance(loss, key_loss_classes) or (is_loss_wrapper and\n (loss.fn in key_loss_fns))):\n for target_dim, out_dim in zip(y.shape[1:], shape[1:]):\n if out_dim is not None and target_dim != out_dim:\n loss_name = loss.name\n if loss_name is None:\n loss_type = loss.fn if is_loss_wrapper else type(loss)\n loss_name = loss_type.__name__\n raise ValueError(\n 'A target array with shape ' + str(y.shape) +\n ' was passed for an output of shape ' + str(shape) +\n ' while using as loss `' + loss_name + '`. '\n 'This loss expects targets to have the same shape '\n 'as the output.')\n\n\ndef check_generator_arguments(y=None, sample_weight=None,\n validation_split=None):\n \"\"\"Validates arguments passed when using a generator.\"\"\"\n if y is not None:\n raise ValueError('`y` argument is not supported when data is'\n 'a generator or Sequence instance. Instead pass targets'\n ' as the second element of the generator.')\n if sample_weight is not None:\n raise ValueError('`sample_weight` argument is not supported when data is'\n 'a generator or Sequence instance. Instead pass sample'\n ' weights as the third element of the generator.')\n if validation_split:\n raise ValueError('If your data is in the form of a Python generator, '\n 'you cannot use `validation_split`.')\n\n\ndef batch_shuffle(index_array, batch_size):\n \"\"\"Shuffles an array in a batch-wise fashion.\n\n Useful for shuffling HDF5 arrays\n (where one cannot access arbitrary indices).\n\n # Arguments\n index_array: array of indices to be shuffled.\n batch_size: integer.\n\n # Returns\n The `index_array` array, shuffled in a batch-wise fashion.\n \"\"\"\n batch_count = int(len(index_array) / batch_size)\n # to reshape we need to be cleanly divisible by batch size\n # we stash extra items and reappend them after shuffling\n last_batch = index_array[batch_count * batch_size:]\n index_array = index_array[:batch_count * batch_size]\n index_array = index_array.reshape((batch_count, batch_size))\n np.random.shuffle(index_array)\n index_array = index_array.flatten()\n return np.append(index_array, last_batch)\n\n\ndef make_batches(size, batch_size):\n \"\"\"Returns a list of batch indices (tuples of indices).\n\n # Arguments\n size: Integer, total size of the data to slice into batches.\n batch_size: Integer, batch size.\n\n # Returns\n A list of tuples of array indices.\n \"\"\"\n num_batches = (size + batch_size - 1) // batch_size # round up\n return [(i * batch_size, min(size, (i + 1) * batch_size))\n for i in range(num_batches)]\n\n\ndef weighted_masked_objective(fn):\n \"\"\"Adds support for masking and sample-weighting to an objective function.\n\n It transforms an objective function `fn(y_true, y_pred)`\n into a sample-weighted, cost-masked objective function\n `fn(y_true, y_pred, weights, mask)`.\n\n # Arguments\n fn: The objective function to wrap,\n with signature `fn(y_true, y_pred)`.\n\n # Returns\n A function with signature `fn(y_true, y_pred, weights, mask)`.\n \"\"\"\n if fn is None:\n return None\n\n def weighted(y_true, y_pred, weights, mask=None):\n \"\"\"Wrapper function.\n\n # Arguments\n y_true: `y_true` argument of `fn`.\n y_pred: `y_pred` argument of `fn`.\n weights: Weights tensor.\n mask: Mask tensor.\n\n # Returns\n Scalar tensor.\n \"\"\"\n # score_array has ndim >= 2\n score_array = fn(y_true, y_pred)\n if mask is not None:\n # Cast the mask to floatX to avoid float64 upcasting in Theano\n mask = K.cast(mask, K.floatx())\n # mask should have the same shape as score_array\n score_array *= mask\n # the loss per batch should be proportional\n # to the number of unmasked samples.\n score_array /= K.mean(mask) + K.epsilon()\n\n # apply sample weighting\n if weights is not None:\n # reduce score_array to same ndim as weight array\n ndim = K.ndim(score_array)\n weight_ndim = K.ndim(weights)\n score_array = K.mean(score_array,\n axis=list(range(weight_ndim, ndim)))\n score_array *= weights\n score_array /= K.mean(K.cast(K.not_equal(weights, 0), K.floatx()))\n return K.mean(score_array)\n return weighted\n\n\ndef standardize_weights(y,\n sample_weight=None,\n class_weight=None,\n sample_weight_mode=None):\n \"\"\"Performs sample weight validation and standardization.\n\n Everything gets normalized to a single sample-wise (or timestep-wise)\n weight array. If both `sample_weights` and `class_weights` are provided,\n the weights are multiplied together.\n\n # Arguments\n y: Numpy array of model targets to be weighted.\n sample_weight: User-provided `sample_weight` argument.\n class_weight: User-provided `class_weight` argument.\n sample_weight_mode: One of `None` or `\"temporal\"`.\n `\"temporal\"` indicated that we expect 2D weight data\n that will be applied to the last 2 dimensions of\n the targets (i.e. we are weighting timesteps, not samples).\n\n # Returns\n A Numpy array of target weights, one entry per sample to weight.\n\n # Raises\n ValueError: In case of invalid user-provided arguments.\n \"\"\"\n if sample_weight_mode is not None:\n if sample_weight_mode != 'temporal':\n raise ValueError('\"sample_weight_mode '\n 'should be None or \"temporal\". '\n 'Found: ' + str(sample_weight_mode))\n if len(y.shape) < 3:\n raise ValueError('Found a sample_weight array for '\n 'an input with shape ' +\n str(y.shape) + '. '\n 'Timestep-wise sample weighting (use of '\n 'sample_weight_mode=\"temporal\") is restricted to '\n 'outputs that are at least 3D, i.e. that have '\n 'a time dimension.')\n if sample_weight is not None and len(sample_weight.shape) != 2:\n raise ValueError('Found a sample_weight array with shape ' +\n str(sample_weight.shape) + '. '\n 'In order to use timestep-wise sample weighting, '\n 'you should pass a 2D sample_weight array.')\n else:\n if sample_weight is not None and len(sample_weight.shape) != 1:\n raise ValueError('Found a sample_weight array with shape ' +\n str(sample_weight.shape) + '. '\n 'In order to use timestep-wise sample weights, '\n 'you should specify '\n 'sample_weight_mode=\"temporal\" '\n 'in compile(). If you just mean to use '\n 'sample-wise weights, make sure your '\n 'sample_weight array is 1D.')\n\n if sample_weight is not None:\n if len(sample_weight.shape) > len(y.shape):\n raise ValueError('Found a sample_weight with shape' +\n str(sample_weight.shape) + '.'\n 'Expected sample_weight with rank '\n 'less than or equal to ' + str(len(y.shape)))\n\n if y.shape[:sample_weight.ndim] != sample_weight.shape:\n raise ValueError('Found a sample_weight array with shape ' +\n str(sample_weight.shape) +\n ' for an input with shape ' +\n str(y.shape) + '. '\n 'sample_weight cannot be broadcast.')\n\n class_sample_weight = None\n if isinstance(class_weight, dict):\n if len(y.shape) > 2:\n raise ValueError('`class_weight` not supported for '\n '3+ dimensional targets.')\n if len(y.shape) == 2:\n if y.shape[1] > 1:\n y_classes = np.argmax(y, axis=1)\n elif y.shape[1] == 1:\n y_classes = np.reshape(y, y.shape[0])\n else:\n y_classes = y\n\n class_sample_weight = np.asarray(\n [class_weight[cls] for cls in y_classes if cls in class_weight])\n\n if len(class_sample_weight) != len(y_classes):\n # subtract the sets to pick all missing classes\n existing_classes = set(y_classes)\n existing_class_weight = set(class_weight.keys())\n raise ValueError('`class_weight` must contain '\n 'all classes in the data.'\n ' The classes %s exist in the data but not in '\n '`class_weight`.'\n % (existing_classes - existing_class_weight))\n\n if sample_weight is not None and class_sample_weight is not None:\n return sample_weight * class_sample_weight\n if sample_weight is not None:\n return sample_weight\n if class_sample_weight is not None:\n return class_sample_weight\n\n # Everything has weight 1 by default.\n if sample_weight_mode is None:\n return np.ones((y.shape[0],), dtype=K.floatx())\n else:\n return np.ones((y.shape[0], y.shape[1]), dtype=K.floatx())\n\n\ndef check_num_samples(ins,\n batch_size=None,\n steps=None,\n steps_name='steps'):\n \"\"\"Checks the number of samples provided for training and evaluation.\n\n The number of samples is not defined when running with `steps`,\n in which case the number of samples is set to `None`.\n\n # Arguments\n ins: List of tensors to be fed to the Keras function.\n batch_size: Integer batch size or `None` if not defined.\n steps: Total number of steps (batches of samples)\n before declaring `predict_loop` finished.\n Ignored with the default value of `None`.\n steps_name: The public API's parameter name for `steps`.\n\n # Raises\n ValueError: when `steps` is `None` and the attribute `ins.shape`\n does not exist. Also raises ValueError when `steps` is not `None`\n and `batch_size` is not `None` because they are mutually\n exclusive.\n\n # Returns\n When `steps` is `None`, returns the number of samples to be\n processed based on the size of the first dimension of the\n first input Numpy array. When `steps` is not `None` and\n `batch_size` is `None`, returns `None`.\n\n # Raises\n ValueError: In case of invalid arguments.\n \"\"\"\n if steps is not None and batch_size is not None:\n raise ValueError(\n 'If ' + steps_name + ' is set, the `batch_size` must be None.')\n\n if not ins or any(K.is_tensor(x) for x in ins):\n if steps is None:\n raise ValueError(\n 'If your data is in the form of symbolic tensors, '\n 'you should specify the `' + steps_name + '` argument '\n '(instead of the `batch_size` argument, '\n 'because symbolic tensors are expected to produce '\n 'batches of input data).')\n return None\n\n if hasattr(ins[0], 'shape'):\n return int(ins[0].shape[0])\n return None # Edge case where ins == [static_learning_phase]\n\n\ndef iter_sequence_infinite(seq):\n \"\"\"Iterate indefinitely over a Sequence.\n\n # Arguments\n seq: Sequence object\n\n # Returns\n Generator yielding batches.\n \"\"\"\n while True:\n for item in seq:\n yield item\n\n\ndef is_sequence(seq):\n \"\"\"Determine if an object follows the Sequence API.\n\n # Arguments\n seq: a possible Sequence object\n\n # Returns\n boolean, whether the object follows the Sequence API.\n \"\"\"\n # TODO Dref360: Decide which pattern to follow. First needs a new TF Version.\n return (getattr(seq, 'use_sequence_api', False)\n or set(dir(Sequence())).issubset(set(dir(seq) + ['use_sequence_api'])))\n\n\ndef is_generator_or_sequence(x):\n \"\"\"Check if `x` is a Keras generator type.\"\"\"\n return inspect.isgenerator(x) or is_sequence(x)\n\n\ndef should_run_validation(validation_freq, epoch):\n \"\"\"Checks if validation should be run this epoch.\n\n # Arguments\n validation_freq: Integer or list. If an integer, specifies how many training\n epochs to run before a new validation run is performed. If a list,\n specifies the epochs on which to run validation.\n epoch: Integer, the number of the training epoch just completed.\n\n # Returns\n Bool, True if validation should be run.\n\n # Raises\n ValueError: if `validation_freq` is an Integer and less than 1, or if\n it is neither an Integer nor a Sequence.\n \"\"\"\n # `epoch` is 0-indexed internally but 1-indexed in the public API.\n one_indexed_epoch = epoch + 1\n\n if isinstance(validation_freq, int):\n if validation_freq < 1:\n raise ValueError('`validation_freq` can not be less than 1.')\n return one_indexed_epoch % validation_freq == 0\n\n if not isinstance(validation_freq, collections.Container):\n raise ValueError('`validation_freq` must be an Integer or '\n '`collections.Container` (e.g. list, tuple, etc.)')\n return one_indexed_epoch in validation_freq\n\n\ndef get_static_batch_size(layer):\n \"\"\"Gets the static batch size of a Layer.\n\n # Arguments\n layer: a `Layer` instance.\n\n # Returns\n The static batch size of a Layer.\n \"\"\"\n batch_input_shape, _ = get_input_shape_and_dtype(layer)\n if batch_input_shape is not None:\n return batch_input_shape[0]\n return None\n\n\ndef get_input_shape_and_dtype(layer):\n \"\"\"Retrieves input shape and input dtype of layer if applicable.\n\n # Arguments\n layer: Layer (or model) instance.\n\n # Returns\n Tuple (input_shape, input_dtype). Both could be None if the layer\n does not have a defined input shape.\n\n # Raises\n ValueError: in case an empty Sequential or Functional model is passed.\n \"\"\"\n def _is_graph_model(layer):\n return ((hasattr(layer, '_is_graph_network') and layer._is_graph_network) or\n layer.__class__.__name__ == 'Sequential')\n\n # In case of nested models: recover the first layer\n # of the deepest model to infer input shape and dtype.\n # Subclassed Models may not have been built so can't be checked.\n while _is_graph_model(layer):\n if not layer.layers:\n raise ValueError('An empty Model cannot be used as a Layer.')\n layer = layer.layers[0]\n\n if hasattr(layer, '_batch_input_shape'):\n return layer._batch_input_shape, layer.dtype\n return None, None\n\n\ndef get_loss_function(loss):\n \"\"\"Returns the loss corresponding to the loss input in `compile` API.\"\"\"\n if loss is None or isinstance(loss, losses.Loss):\n return loss\n\n # Deserialize loss configuration, if needed.\n if isinstance(loss, collections.Mapping):\n loss = losses.get(loss)\n\n # Custom callable class.\n if callable(loss) and not hasattr(loss, '__name__'):\n return loss\n\n # Wrap loss function with signature `(y_true, y_pred, **kwargs)`\n # in `LossFunctionWrapper` class.\n loss_fn = losses.get(loss)\n\n # For losses which are given as strings/functions in the compile API,\n # we always set the loss reduction type to be `SUM_OVER_BATCH_SIZE`..\n return losses.LossFunctionWrapper(\n loss_fn,\n name=loss_fn.__name__,\n reduction=losses_utils.Reduction.SUM_OVER_BATCH_SIZE)\n\n\ndef get_output_sample_weight_and_mode(skip_target_weighing_indices,\n sample_weight_mode, output_name,\n output_index):\n \"\"\"Returns the sample weight and weight mode for a single output.\"\"\"\n if output_index in skip_target_weighing_indices:\n return None, None\n\n if sample_weight_mode == 'temporal':\n shape = [None, None]\n mode = 'temporal'\n else:\n shape = [None]\n mode = None\n weight = K.placeholder(\n shape=shape,\n name=output_name + '_sample_weights')\n return weight, mode\n\n\ndef prepare_sample_weights(output_names, sample_weight_mode,\n skip_target_weighing_indices):\n \"\"\"Prepares sample weights for the model.\n\n # Arguments\n output_names: List of model output names.\n sample_weight_mode: sample weight mode user input passed from compile API.\n skip_target_weighing_indices: Indices of output for which sample weights\n should be skipped.\n\n # Returns\n A pair of list of sample weights and sample weight modes\n (one for each output).\n\n # Raises\n ValueError: In case of invalid `sample_weight_mode` input.\n \"\"\"\n sample_weights = []\n sample_weight_modes = []\n if isinstance(sample_weight_mode, dict):\n unknown_output = set(sample_weight_mode.keys()) - set(output_names)\n if unknown_output:\n raise ValueError(\n 'Unknown entry in '\n 'sample_weight_mode dictionary: \"' + str(unknown_output) +\n '\". Only expected the following keys: ' + str(output_names))\n for i, name in enumerate(output_names):\n if (i not in skip_target_weighing_indices and\n name not in sample_weight_mode):\n raise ValueError(\n 'Output missing from sample_weight_modes dictionary')\n weight, mode = get_output_sample_weight_and_mode(\n skip_target_weighing_indices,\n sample_weight_mode.get(name),\n name,\n i)\n sample_weights.append(weight)\n sample_weight_modes.append(mode)\n elif isinstance(sample_weight_mode, list):\n if len(sample_weight_mode) != len(output_names):\n raise ValueError('When passing a list as sample_weight_mode, '\n 'it should have one entry per model output. '\n 'The model has ' + str(len(output_names)) +\n ' outputs, but you passed ' +\n str(len(sample_weight_mode)) + 'sample_weight_modes')\n for i, name in enumerate(output_names):\n weight, mode = get_output_sample_weight_and_mode(\n skip_target_weighing_indices, sample_weight_mode[i], name, i)\n sample_weights.append(weight)\n sample_weight_modes.append(mode)\n else:\n for i, name in enumerate(output_names):\n weight, mode = get_output_sample_weight_and_mode(\n skip_target_weighing_indices, sample_weight_mode, name, i)\n sample_weights.append(weight)\n sample_weight_modes.append(mode)\n return sample_weights, sample_weight_modes\n\n\ndef prepare_loss_functions(loss, output_names):\n \"\"\"Converts loss to a list of loss functions.\n\n # Arguments\n loss: String (name of objective function), objective function or\n `Loss` instance. If the model has multiple outputs, you can use\n a different loss on each output by passing a dictionary or a\n list of losses. The loss value that will be minimized by the model\n will then be the sum of all individual losses.\n output_names: List of model output names.\n\n # Returns\n A list of loss objective functions.\n\n # Raises:\n ValueError: If loss is a dict with keys not in model output names,\n or if loss is a list with len not equal to model outputs.\n \"\"\"\n if isinstance(loss, collections.Mapping):\n generic_utils.check_for_unexpected_keys('loss', loss, output_names)\n loss_functions = []\n for name in output_names:\n if name not in loss:\n warnings.warn(\n 'Output {0} missing from loss dictionary. We assume '\n 'this was done on purpose. The fit and evaluate APIs will not '\n 'be expecting any data to be passed to {0}.'.format(name))\n loss_functions.append(get_loss_function(loss.get(name, None)))\n elif isinstance(loss, six.string_types):\n loss_functions = [get_loss_function(loss) for _ in output_names]\n elif isinstance(loss, collections.Sequence):\n if len(loss) != len(output_names):\n raise ValueError('When passing a list as loss, it should have one entry '\n 'per model outputs. The model has {} outputs, but you '\n 'passed loss={}'.format(len(output_names), loss))\n loss_functions = [get_loss_function(l) for l in loss]\n else:\n loss_functions = [get_loss_function(loss) for _ in range(len(output_names))]\n\n return loss_functions\n\n\ndef prepare_loss_weights(output_names, loss_weights=None):\n \"\"\"Converts loss weights to a list of loss weights.\n\n # Arguments\n output_names: List of model output names.\n loss_weights: Optional list or dictionary specifying scalar coefficients\n (Python floats) to weight the loss contributions of different model\n outputs. The loss value that will be minimized by the model will then be\n the *weighted sum* of all individual losses, weighted by the\n `loss_weights` coefficients. If a list, it is expected to have a 1:1\n mapping to the model's outputs. If a dict, it is expected to map\n output names (strings) to scalar coefficients.\n\n # Returns\n A list of loss weights of python floats.\n\n # Raises\n ValueError: If loss weight is a dict with key not in model output names,\n or if loss is a list with len not equal to model outputs.\n \"\"\"\n if loss_weights is None:\n weights_list = [1.] * len(output_names)\n elif isinstance(loss_weights, collections.Mapping):\n generic_utils.check_for_unexpected_keys('loss_weights', loss_weights,\n output_names)\n weights_list = [loss_weights.get(name, 1.) for name in output_names]\n elif isinstance(loss_weights, list):\n if len(loss_weights) != len(output_names):\n raise ValueError('When passing a list as loss_weights, '\n 'it should have one entry per model output. '\n 'The model has ' + str(len(output_names)) +\n ' outputs, but you passed loss_weights=' +\n str(loss_weights))\n weights_list = loss_weights\n else:\n raise TypeError('Could not interpret loss_weights argument: ' +\n str(loss_weights) + ' - expected a list of dicts.')\n\n return weights_list\n\n\ndef collect_per_output_metric_info(metrics,\n output_names,\n output_shapes,\n loss_fns,\n is_weighted=False):\n \"\"\"Maps metric names and functions to model outputs.\n\n # Arguments\n metrics: a list or a list of lists or a dict of metric functions.\n output_names: a list of the names (strings) of model outputs.\n output_shapes: a list of the shapes (strings) of model outputs.\n loss_fns: a list of the loss functions corresponding to the model outputs.\n is_weighted: Boolean indicating whether the given metrics are weighted.\n\n # Returns\n A list (one entry per model output) of dicts.\n For instance, if the model has 2 outputs, and for the first output\n we want to compute \"binary_accuracy\" and \"binary_crossentropy\",\n and just \"binary_accuracy\" for the second output,\n the list would look like: `[{\n 'acc': binary_accuracy(),\n 'ce': binary_crossentropy(),\n }, {\n 'acc': binary_accuracy(),\n }]`\n\n # Raises\n TypeError: if an incorrect type is passed for the `metrics` argument.\n \"\"\"\n if not metrics:\n return [{} for _ in output_names]\n\n if isinstance(metrics, list):\n any_sub_list = any(isinstance(m, list) for m in metrics)\n if any_sub_list:\n if len(metrics) != len(output_names):\n raise ValueError('When passing a list of lists as `metrics`, '\n 'it should have one entry per model output. '\n 'The model has ' + str(len(output_names)) +\n ' outputs, but you passed metrics=' + str(metrics))\n # User has provided a list of len = len(outputs).\n nested_metrics = [generic_utils.to_list(m) for m in metrics]\n else:\n # If it is a single list we then apply all metrics to all outputs.\n if len(output_names) > 1:\n nested_metrics = []\n for _ in output_names:\n nested_metrics.append(\n [metrics_module.clone_metric(m) for m in metrics])\n else:\n nested_metrics = [metrics]\n elif isinstance(metrics, collections.Mapping):\n generic_utils.check_for_unexpected_keys('metrics', metrics, output_names)\n nested_metrics = []\n for name in output_names:\n output_metrics = generic_utils.to_list(metrics.get(name, []))\n nested_metrics.append(output_metrics)\n else:\n raise TypeError('Type of `metrics` argument not understood. '\n 'Expected a list or dictionary, found: ' + str(metrics))\n\n per_output_metrics = []\n for i, metrics in enumerate(nested_metrics):\n metrics_dict = OrderedDict()\n for metric in metrics:\n metric_name = get_metric_name(metric, is_weighted)\n metric_fn = get_metric_function(\n metric, output_shape=output_shapes[i], loss_fn=loss_fns[i])\n\n # If the metric function is not stateful, we create a stateful version.\n if not isinstance(metric_fn, metrics_module.Metric):\n metric_fn = metrics_module.MeanMetricWrapper(\n metric_fn, name=metric_name)\n metrics_dict[metric_name] = metric_fn\n per_output_metrics.append(metrics_dict)\n\n return per_output_metrics\n\n\ndef get_metric_name(metric, weighted=False):\n \"\"\"Returns the name corresponding to the given metric input.\n\n # Arguments\n metric: Metric function name or reference.\n weighted: Boolean indicating if the given metric is weighted.\n\n # Returns\n The metric name.\n \"\"\"\n # We keep the string that the user has set in compile as the metric name.\n if isinstance(metric, six.string_types):\n return metric\n\n metric = metrics_module.get(metric)\n return metric.name if hasattr(metric, 'name') else metric.__name__\n\n\ndef get_metric_function(metric, output_shape=None, loss_fn=None):\n \"\"\"Returns the metric function corresponding to the given metric input.\n\n # Arguments\n metric: Metric function name or reference.\n output_shape: The shape of the output that this metric will be calculated\n for.\n loss_fn: The loss function used.\n\n # Returns\n The metric function.\n \"\"\"\n if metric not in ['accuracy', 'acc', 'crossentropy', 'ce']:\n return metrics_module.get(metric)\n\n is_sparse_categorical_crossentropy = (\n isinstance(loss_fn, losses.SparseCategoricalCrossentropy) or\n (isinstance(loss_fn, losses.LossFunctionWrapper) and\n loss_fn.fn == losses.sparse_categorical_crossentropy))\n\n is_binary_crossentropy = (\n isinstance(loss_fn, losses.BinaryCrossentropy) or\n (isinstance(loss_fn, losses.LossFunctionWrapper) and\n loss_fn.fn == losses.binary_crossentropy))\n\n if metric in ['accuracy', 'acc']:\n if output_shape[-1] == 1 or is_binary_crossentropy:\n return metrics_module.binary_accuracy\n elif is_sparse_categorical_crossentropy:\n return metrics_module.sparse_categorical_accuracy\n # If the output_shape[-1] is not 1, then we know output is `categorical`.\n # We assume it is sparse categorical only if loss is explicitly given\n # as sparse categorical crossentropy loss.\n return metrics_module.categorical_accuracy\n else:\n if output_shape[-1] == 1 or is_binary_crossentropy:\n return metrics_module.binary_crossentropy\n elif is_sparse_categorical_crossentropy:\n return metrics_module.sparse_categorical_crossentropy\n return metrics_module.categorical_crossentropy\n\n\ndef call_metric_function(metric_fn,\n y_true,\n y_pred=None,\n weights=None,\n mask=None):\n \"\"\"Invokes metric function and returns the metric result tensor.\"\"\"\n if mask is not None:\n mask = math_ops.cast(mask, y_pred.dtype)\n if weights is None:\n # Use mask as sample weight.\n weights = mask\n else:\n # Update dimensions of weights to match with mask.\n mask, _, weights = losses_utils.squeeze_or_expand_dimensions(\n mask, sample_weight=weights)\n weights *= mask\n\n if y_pred is not None:\n update_ops = metric_fn.update_state(y_true, y_pred, sample_weight=weights)\n with K.control_dependencies(update_ops): # For TF\n metric_fn.result()\n else:\n # `Mean` metric only takes a single value.\n update_ops = metric_fn.update_state(y_true, sample_weight=weights)\n with K.control_dependencies(update_ops): # For TF\n metric_fn.result()\n", "path": "keras/engine/training_utils.py"}], "after_files": [{"content": "\"\"\"Training-related utilities.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport inspect\nimport collections\nimport copy\nimport numpy as np\nimport six\nimport warnings\nfrom collections import OrderedDict\n\nfrom .. import backend as K\nfrom .. import losses\nfrom .. import metrics as metrics_module\nfrom ..utils import Sequence\nfrom ..utils import generic_utils\nfrom ..utils import losses_utils\n\n\ndef standardize_single_array(x):\n if x is None:\n return None\n elif K.is_tensor(x):\n shape = K.int_shape(x)\n if shape is None or shape[0] is None:\n raise ValueError(\n 'When feeding symbolic tensors to a model, we expect the '\n 'tensors to have a static batch size. '\n 'Got tensor with shape: %s' % str(shape))\n return x\n elif x.ndim == 1:\n x = np.expand_dims(x, 1)\n return x\n\n\ndef standardize_input_data(data,\n names,\n shapes=None,\n check_batch_axis=True,\n exception_prefix=''):\n \"\"\"Normalizes inputs and targets provided by users.\n\n Users may pass data as a list of arrays, dictionary of arrays,\n or as a single array. We normalize this to an ordered list of\n arrays (same order as `names`), while checking that the provided\n arrays have shapes that match the network's expectations.\n\n # Arguments\n data: User-provided input data (polymorphic).\n names: List of expected array names.\n shapes: Optional list of expected array shapes.\n check_batch_axis: Boolean; whether to check that\n the batch axis of the arrays matches the expected\n value found in `shapes`.\n exception_prefix: String prefix used for exception formatting.\n\n # Returns\n List of standardized input arrays (one array per model input).\n\n # Raises\n ValueError: in case of improperly formatted user-provided data.\n \"\"\"\n if not names:\n if data is not None and hasattr(data, '__len__') and len(data):\n raise ValueError('Error when checking model ' +\n exception_prefix + ': '\n 'expected no data, but got:', data)\n return []\n if data is None:\n return [None for _ in range(len(names))]\n\n if isinstance(data, dict):\n try:\n data = [\n data[x].values\n if data[x].__class__.__name__ == 'DataFrame' else data[x]\n for x in names\n ]\n except KeyError as e:\n raise ValueError('No data provided for \"' + e.args[0] +\n '\". Need data '\n 'for each key in: ' + str(names))\n elif isinstance(data, list):\n if isinstance(data[0], list):\n data = [np.asarray(d) for d in data]\n elif len(names) == 1 and isinstance(data[0], (float, int)):\n data = [np.asarray(data)]\n else:\n data = [\n x.values if x.__class__.__name__ == 'DataFrame'\n else x for x in data\n ]\n else:\n data = data.values if data.__class__.__name__ == 'DataFrame' else data\n data = [data]\n data = [standardize_single_array(x) for x in data]\n\n if len(data) != len(names):\n if data and hasattr(data[0], 'shape'):\n raise ValueError(\n 'Error when checking model ' + exception_prefix +\n ': the list of Numpy arrays that you are passing to '\n 'your model is not the size the model expected. '\n 'Expected to see ' + str(len(names)) + ' array(s), '\n 'but instead got the following list of ' +\n str(len(data)) + ' arrays: ' + str(data)[:200] + '...')\n elif len(names) > 1:\n raise ValueError(\n 'Error when checking model ' + exception_prefix +\n ': you are passing a list as input to your model, '\n 'but the model expects a list of ' + str(len(names)) +\n ' Numpy arrays instead. '\n 'The list you passed was: ' + str(data)[:200])\n elif len(data) == 1 and not hasattr(data[0], 'shape'):\n raise TypeError('Error when checking model ' + exception_prefix +\n ': data should be a Numpy array, or list/dict of '\n 'Numpy arrays. Found: ' + str(data)[:200] + '...')\n elif len(names) == 1:\n data = [np.asarray(data)]\n\n # Check shapes compatibility.\n if shapes:\n for i in range(len(names)):\n if shapes[i] is not None and not K.is_tensor(data[i]):\n data_shape = data[i].shape\n shape = shapes[i]\n if data[i].ndim != len(shape):\n raise ValueError(\n 'Error when checking ' + exception_prefix +\n ': expected ' + names[i] + ' to have ' +\n str(len(shape)) + ' dimensions, but got array '\n 'with shape ' + str(data_shape))\n if not check_batch_axis:\n data_shape = data_shape[1:]\n shape = shape[1:]\n for dim, ref_dim in zip(data_shape, shape):\n if ref_dim != dim and ref_dim:\n raise ValueError(\n 'Error when checking ' + exception_prefix +\n ': expected ' + names[i] + ' to have shape ' +\n str(shape) + ' but got array with shape ' +\n str(data_shape))\n return data\n\n\ndef standardize_sample_or_class_weights(x_weight,\n output_names,\n weight_type):\n \"\"\"Maps `sample_weight` or `class_weight` to model outputs.\n\n # Arguments\n x_weight: User-provided `sample_weight` or `class_weight` argument.\n output_names: List of output names (strings) in the model.\n weight_type: A string used purely for exception printing.\n\n # Returns\n A list of `sample_weight` or `class_weight` where there are exactly\n one element per model output.\n\n # Raises\n ValueError: In case of invalid user-provided argument.\n \"\"\"\n if x_weight is None or len(x_weight) == 0:\n return [None for _ in output_names]\n if len(output_names) == 1:\n if isinstance(x_weight, list) and len(x_weight) == 1:\n return x_weight\n if isinstance(x_weight, dict) and output_names[0] in x_weight:\n return [x_weight[output_names[0]]]\n else:\n return [x_weight]\n if isinstance(x_weight, list):\n if len(x_weight) != len(output_names):\n raise ValueError('Provided `' + weight_type + '` was a list of ' +\n str(len(x_weight)) +\n ' elements, but the model has ' +\n str(len(output_names)) + ' outputs. '\n 'You should provide one `' + weight_type + '`'\n 'array per model output.')\n return x_weight\n if isinstance(x_weight, dict):\n x_weights = []\n for name in output_names:\n x_weights.append(x_weight.get(name))\n return x_weights\n else:\n raise TypeError('The model has multiple outputs, so `' +\n weight_type + '` '\n 'should be either a list or a dict. '\n 'Provided `' + weight_type +\n '` type not understood: ' +\n str(x_weight))\n\n\ndef standardize_class_weights(class_weight, output_names):\n return standardize_sample_or_class_weights(class_weight,\n output_names,\n 'class_weight')\n\n\ndef standardize_sample_weights(sample_weight, output_names):\n return standardize_sample_or_class_weights(sample_weight,\n output_names,\n 'sample_weight')\n\n\ndef check_array_length_consistency(inputs, targets, weights=None):\n \"\"\"Checks if batch axes are the same for Numpy arrays.\n\n # Arguments\n inputs: list of Numpy arrays of inputs.\n targets: list of Numpy arrays of targets.\n weights: list of Numpy arrays of sample weights.\n\n # Raises\n ValueError: in case of incorrectly formatted data.\n \"\"\"\n def set_of_lengths(x):\n # return a set with the variation between\n # different shapes, with None => 0\n if x is None:\n return {0}\n else:\n return set([0 if y is None else int(y.shape[0]) for y in x])\n\n set_x = set_of_lengths(inputs)\n set_y = set_of_lengths(targets)\n set_w = set_of_lengths(weights)\n if len(set_x) > 1:\n raise ValueError('All input arrays (x) should have '\n 'the same number of samples. Got array shapes: ' +\n str([x.shape for x in inputs]))\n if len(set_y) > 1:\n raise ValueError('All target arrays (y) should have '\n 'the same number of samples. Got array shapes: ' +\n str([y.shape for y in targets]))\n if set_x and set_y and list(set_x)[0] != list(set_y)[0]:\n raise ValueError('Input arrays should have '\n 'the same number of samples as target arrays. '\n 'Found ' + str(list(set_x)[0]) + ' input samples '\n 'and ' + str(list(set_y)[0]) + ' target samples.')\n if len(set_w) > 1:\n raise ValueError('All sample_weight arrays should have '\n 'the same number of samples. Got array shapes: ' +\n str([w.shape for w in weights]))\n if set_y and set_w and list(set_y)[0] != list(set_w)[0]:\n raise ValueError('Sample_weight arrays should have '\n 'the same number of samples as target arrays. Got ' +\n str(list(set_y)[0]) + ' input samples and ' +\n str(list(set_w)[0]) + ' target samples.')\n\n\ndef check_loss_and_target_compatibility(targets, loss_fns, output_shapes):\n \"\"\"Does validation on the compatibility of targets and loss functions.\n\n This helps prevent users from using loss functions incorrectly. This check\n is purely for UX purposes.\n\n # Arguments\n targets: list of Numpy arrays of targets.\n loss_fns: list of loss functions.\n output_shapes: list of shapes of model outputs.\n\n # Raises\n ValueError: if a loss function or target array\n is incompatible with an output.\n \"\"\"\n key_loss_fns = {\n losses.mean_squared_error, losses.binary_crossentropy,\n losses.categorical_crossentropy\n }\n key_loss_classes = (losses.MeanSquaredError, losses.BinaryCrossentropy,\n losses.CategoricalCrossentropy)\n for y, loss, shape in zip(targets, loss_fns, output_shapes):\n if y is None or loss is None:\n continue\n if losses.is_categorical_crossentropy(loss):\n if y.shape[-1] == 1:\n raise ValueError(\n 'You are passing a target array of shape ' + str(y.shape) +\n ' while using as loss `categorical_crossentropy`. '\n '`categorical_crossentropy` expects '\n 'targets to be binary matrices (1s and 0s) '\n 'of shape (samples, classes). '\n 'If your targets are integer classes, '\n 'you can convert them to the expected format via:\\n'\n '```\\n'\n 'from keras.utils import to_categorical\\n'\n 'y_binary = to_categorical(y_int)\\n'\n '```\\n'\n '\\n'\n 'Alternatively, you can use the loss function '\n '`sparse_categorical_crossentropy` instead, '\n 'which does expect integer targets.')\n is_loss_wrapper = isinstance(loss, losses.LossFunctionWrapper)\n if (isinstance(loss, key_loss_classes) or (is_loss_wrapper and\n (loss.fn in key_loss_fns))):\n for target_dim, out_dim in zip(y.shape[1:], shape[1:]):\n if out_dim is not None and target_dim != out_dim:\n loss_name = loss.name\n if loss_name is None:\n loss_type = loss.fn if is_loss_wrapper else type(loss)\n loss_name = loss_type.__name__\n raise ValueError(\n 'A target array with shape ' + str(y.shape) +\n ' was passed for an output of shape ' + str(shape) +\n ' while using as loss `' + loss_name + '`. '\n 'This loss expects targets to have the same shape '\n 'as the output.')\n\n\ndef check_generator_arguments(y=None, sample_weight=None,\n validation_split=None):\n \"\"\"Validates arguments passed when using a generator.\"\"\"\n if y is not None:\n raise ValueError('`y` argument is not supported when data is'\n 'a generator or Sequence instance. Instead pass targets'\n ' as the second element of the generator.')\n if sample_weight is not None:\n raise ValueError('`sample_weight` argument is not supported when data is'\n 'a generator or Sequence instance. Instead pass sample'\n ' weights as the third element of the generator.')\n if validation_split:\n raise ValueError('If your data is in the form of a Python generator, '\n 'you cannot use `validation_split`.')\n\n\ndef batch_shuffle(index_array, batch_size):\n \"\"\"Shuffles an array in a batch-wise fashion.\n\n Useful for shuffling HDF5 arrays\n (where one cannot access arbitrary indices).\n\n # Arguments\n index_array: array of indices to be shuffled.\n batch_size: integer.\n\n # Returns\n The `index_array` array, shuffled in a batch-wise fashion.\n \"\"\"\n batch_count = int(len(index_array) / batch_size)\n # to reshape we need to be cleanly divisible by batch size\n # we stash extra items and reappend them after shuffling\n last_batch = index_array[batch_count * batch_size:]\n index_array = index_array[:batch_count * batch_size]\n index_array = index_array.reshape((batch_count, batch_size))\n np.random.shuffle(index_array)\n index_array = index_array.flatten()\n return np.append(index_array, last_batch)\n\n\ndef make_batches(size, batch_size):\n \"\"\"Returns a list of batch indices (tuples of indices).\n\n # Arguments\n size: Integer, total size of the data to slice into batches.\n batch_size: Integer, batch size.\n\n # Returns\n A list of tuples of array indices.\n \"\"\"\n num_batches = (size + batch_size - 1) // batch_size # round up\n return [(i * batch_size, min(size, (i + 1) * batch_size))\n for i in range(num_batches)]\n\n\ndef weighted_masked_objective(fn):\n \"\"\"Adds support for masking and sample-weighting to an objective function.\n\n It transforms an objective function `fn(y_true, y_pred)`\n into a sample-weighted, cost-masked objective function\n `fn(y_true, y_pred, weights, mask)`.\n\n # Arguments\n fn: The objective function to wrap,\n with signature `fn(y_true, y_pred)`.\n\n # Returns\n A function with signature `fn(y_true, y_pred, weights, mask)`.\n \"\"\"\n if fn is None:\n return None\n\n def weighted(y_true, y_pred, weights, mask=None):\n \"\"\"Wrapper function.\n\n # Arguments\n y_true: `y_true` argument of `fn`.\n y_pred: `y_pred` argument of `fn`.\n weights: Weights tensor.\n mask: Mask tensor.\n\n # Returns\n Scalar tensor.\n \"\"\"\n # score_array has ndim >= 2\n score_array = fn(y_true, y_pred)\n if mask is not None:\n # Cast the mask to floatX to avoid float64 upcasting in Theano\n mask = K.cast(mask, K.floatx())\n # mask should have the same shape as score_array\n score_array *= mask\n # the loss per batch should be proportional\n # to the number of unmasked samples.\n score_array /= K.mean(mask) + K.epsilon()\n\n # apply sample weighting\n if weights is not None:\n # reduce score_array to same ndim as weight array\n ndim = K.ndim(score_array)\n weight_ndim = K.ndim(weights)\n score_array = K.mean(score_array,\n axis=list(range(weight_ndim, ndim)))\n score_array *= weights\n score_array /= K.mean(K.cast(K.not_equal(weights, 0), K.floatx()))\n return K.mean(score_array)\n return weighted\n\n\ndef standardize_weights(y,\n sample_weight=None,\n class_weight=None,\n sample_weight_mode=None):\n \"\"\"Performs sample weight validation and standardization.\n\n Everything gets normalized to a single sample-wise (or timestep-wise)\n weight array. If both `sample_weights` and `class_weights` are provided,\n the weights are multiplied together.\n\n # Arguments\n y: Numpy array of model targets to be weighted.\n sample_weight: User-provided `sample_weight` argument.\n class_weight: User-provided `class_weight` argument.\n sample_weight_mode: One of `None` or `\"temporal\"`.\n `\"temporal\"` indicated that we expect 2D weight data\n that will be applied to the last 2 dimensions of\n the targets (i.e. we are weighting timesteps, not samples).\n\n # Returns\n A Numpy array of target weights, one entry per sample to weight.\n\n # Raises\n ValueError: In case of invalid user-provided arguments.\n \"\"\"\n if sample_weight_mode is not None:\n if sample_weight_mode != 'temporal':\n raise ValueError('\"sample_weight_mode '\n 'should be None or \"temporal\". '\n 'Found: ' + str(sample_weight_mode))\n if len(y.shape) < 3:\n raise ValueError('Found a sample_weight array for '\n 'an input with shape ' +\n str(y.shape) + '. '\n 'Timestep-wise sample weighting (use of '\n 'sample_weight_mode=\"temporal\") is restricted to '\n 'outputs that are at least 3D, i.e. that have '\n 'a time dimension.')\n if sample_weight is not None and len(sample_weight.shape) != 2:\n raise ValueError('Found a sample_weight array with shape ' +\n str(sample_weight.shape) + '. '\n 'In order to use timestep-wise sample weighting, '\n 'you should pass a 2D sample_weight array.')\n else:\n if sample_weight is not None and len(sample_weight.shape) != 1:\n raise ValueError('Found a sample_weight array with shape ' +\n str(sample_weight.shape) + '. '\n 'In order to use timestep-wise sample weights, '\n 'you should specify '\n 'sample_weight_mode=\"temporal\" '\n 'in compile(). If you just mean to use '\n 'sample-wise weights, make sure your '\n 'sample_weight array is 1D.')\n\n if sample_weight is not None:\n if len(sample_weight.shape) > len(y.shape):\n raise ValueError('Found a sample_weight with shape' +\n str(sample_weight.shape) + '.'\n 'Expected sample_weight with rank '\n 'less than or equal to ' + str(len(y.shape)))\n\n if y.shape[:sample_weight.ndim] != sample_weight.shape:\n raise ValueError('Found a sample_weight array with shape ' +\n str(sample_weight.shape) +\n ' for an input with shape ' +\n str(y.shape) + '. '\n 'sample_weight cannot be broadcast.')\n\n class_sample_weight = None\n if isinstance(class_weight, dict):\n if len(y.shape) > 2:\n raise ValueError('`class_weight` not supported for '\n '3+ dimensional targets.')\n if len(y.shape) == 2:\n if y.shape[1] > 1:\n y_classes = np.argmax(y, axis=1)\n elif y.shape[1] == 1:\n y_classes = np.reshape(y, y.shape[0])\n else:\n y_classes = y\n\n class_sample_weight = np.asarray(\n [class_weight[cls] for cls in y_classes if cls in class_weight])\n\n if len(class_sample_weight) != len(y_classes):\n # subtract the sets to pick all missing classes\n existing_classes = set(y_classes)\n existing_class_weight = set(class_weight.keys())\n raise ValueError('`class_weight` must contain '\n 'all classes in the data.'\n ' The classes %s exist in the data but not in '\n '`class_weight`.'\n % (existing_classes - existing_class_weight))\n\n if sample_weight is not None and class_sample_weight is not None:\n return sample_weight * class_sample_weight\n if sample_weight is not None:\n return sample_weight\n if class_sample_weight is not None:\n return class_sample_weight\n\n # Everything has weight 1 by default.\n if sample_weight_mode is None:\n return np.ones((y.shape[0],), dtype=K.floatx())\n else:\n return np.ones((y.shape[0], y.shape[1]), dtype=K.floatx())\n\n\ndef check_num_samples(ins,\n batch_size=None,\n steps=None,\n steps_name='steps'):\n \"\"\"Checks the number of samples provided for training and evaluation.\n\n The number of samples is not defined when running with `steps`,\n in which case the number of samples is set to `None`.\n\n # Arguments\n ins: List of tensors to be fed to the Keras function.\n batch_size: Integer batch size or `None` if not defined.\n steps: Total number of steps (batches of samples)\n before declaring `predict_loop` finished.\n Ignored with the default value of `None`.\n steps_name: The public API's parameter name for `steps`.\n\n # Raises\n ValueError: when `steps` is `None` and the attribute `ins.shape`\n does not exist. Also raises ValueError when `steps` is not `None`\n and `batch_size` is not `None` because they are mutually\n exclusive.\n\n # Returns\n When `steps` is `None`, returns the number of samples to be\n processed based on the size of the first dimension of the\n first input Numpy array. When `steps` is not `None` and\n `batch_size` is `None`, returns `None`.\n\n # Raises\n ValueError: In case of invalid arguments.\n \"\"\"\n if steps is not None and batch_size is not None:\n raise ValueError(\n 'If ' + steps_name + ' is set, the `batch_size` must be None.')\n\n if not ins or any(K.is_tensor(x) for x in ins):\n if steps is None:\n raise ValueError(\n 'If your data is in the form of symbolic tensors, '\n 'you should specify the `' + steps_name + '` argument '\n '(instead of the `batch_size` argument, '\n 'because symbolic tensors are expected to produce '\n 'batches of input data).')\n return None\n\n if hasattr(ins[0], 'shape'):\n return int(ins[0].shape[0])\n return None # Edge case where ins == [static_learning_phase]\n\n\ndef iter_sequence_infinite(seq):\n \"\"\"Iterate indefinitely over a Sequence.\n\n # Arguments\n seq: Sequence object\n\n # Returns\n Generator yielding batches.\n \"\"\"\n while True:\n for item in seq:\n yield item\n\n\ndef is_sequence(seq):\n \"\"\"Determine if an object follows the Sequence API.\n\n # Arguments\n seq: a possible Sequence object\n\n # Returns\n boolean, whether the object follows the Sequence API.\n \"\"\"\n # TODO Dref360: Decide which pattern to follow. First needs a new TF Version.\n return (getattr(seq, 'use_sequence_api', False)\n or set(dir(Sequence())).issubset(set(dir(seq) + ['use_sequence_api'])))\n\n\ndef is_generator_or_sequence(x):\n \"\"\"Check if `x` is a Keras generator type.\"\"\"\n return inspect.isgenerator(x) or is_sequence(x)\n\n\ndef should_run_validation(validation_freq, epoch):\n \"\"\"Checks if validation should be run this epoch.\n\n # Arguments\n validation_freq: Integer or list. If an integer, specifies how many training\n epochs to run before a new validation run is performed. If a list,\n specifies the epochs on which to run validation.\n epoch: Integer, the number of the training epoch just completed.\n\n # Returns\n Bool, True if validation should be run.\n\n # Raises\n ValueError: if `validation_freq` is an Integer and less than 1, or if\n it is neither an Integer nor a Sequence.\n \"\"\"\n # `epoch` is 0-indexed internally but 1-indexed in the public API.\n one_indexed_epoch = epoch + 1\n\n if isinstance(validation_freq, int):\n if validation_freq < 1:\n raise ValueError('`validation_freq` can not be less than 1.')\n return one_indexed_epoch % validation_freq == 0\n\n if not isinstance(validation_freq, collections.Container):\n raise ValueError('`validation_freq` must be an Integer or '\n '`collections.Container` (e.g. list, tuple, etc.)')\n return one_indexed_epoch in validation_freq\n\n\ndef get_static_batch_size(layer):\n \"\"\"Gets the static batch size of a Layer.\n\n # Arguments\n layer: a `Layer` instance.\n\n # Returns\n The static batch size of a Layer.\n \"\"\"\n batch_input_shape, _ = get_input_shape_and_dtype(layer)\n if batch_input_shape is not None:\n return batch_input_shape[0]\n return None\n\n\ndef get_input_shape_and_dtype(layer):\n \"\"\"Retrieves input shape and input dtype of layer if applicable.\n\n # Arguments\n layer: Layer (or model) instance.\n\n # Returns\n Tuple (input_shape, input_dtype). Both could be None if the layer\n does not have a defined input shape.\n\n # Raises\n ValueError: in case an empty Sequential or Functional model is passed.\n \"\"\"\n def _is_graph_model(layer):\n return ((hasattr(layer, '_is_graph_network') and layer._is_graph_network) or\n layer.__class__.__name__ == 'Sequential')\n\n # In case of nested models: recover the first layer\n # of the deepest model to infer input shape and dtype.\n # Subclassed Models may not have been built so can't be checked.\n while _is_graph_model(layer):\n if not layer.layers:\n raise ValueError('An empty Model cannot be used as a Layer.')\n layer = layer.layers[0]\n\n if hasattr(layer, '_batch_input_shape'):\n return layer._batch_input_shape, layer.dtype\n return None, None\n\n\ndef get_loss_function(loss):\n \"\"\"Returns the loss corresponding to the loss input in `compile` API.\"\"\"\n if loss is None or isinstance(loss, losses.Loss):\n return loss\n\n # Deserialize loss configuration, if needed.\n if isinstance(loss, collections.Mapping):\n loss = losses.get(loss)\n\n # Custom callable class.\n if callable(loss) and not hasattr(loss, '__name__'):\n return loss\n\n # Wrap loss function with signature `(y_true, y_pred, **kwargs)`\n # in `LossFunctionWrapper` class.\n loss_fn = losses.get(loss)\n\n # For losses which are given as strings/functions in the compile API,\n # we always set the loss reduction type to be `SUM_OVER_BATCH_SIZE`..\n return losses.LossFunctionWrapper(\n loss_fn,\n name=loss_fn.__name__,\n reduction=losses_utils.Reduction.SUM_OVER_BATCH_SIZE)\n\n\ndef get_output_sample_weight_and_mode(skip_target_weighing_indices,\n sample_weight_mode, output_name,\n output_index):\n \"\"\"Returns the sample weight and weight mode for a single output.\"\"\"\n if output_index in skip_target_weighing_indices:\n return None, None\n\n if sample_weight_mode == 'temporal':\n shape = [None, None]\n mode = 'temporal'\n else:\n shape = [None]\n mode = None\n weight = K.placeholder(\n shape=shape,\n name=output_name + '_sample_weights')\n return weight, mode\n\n\ndef prepare_sample_weights(output_names, sample_weight_mode,\n skip_target_weighing_indices):\n \"\"\"Prepares sample weights for the model.\n\n # Arguments\n output_names: List of model output names.\n sample_weight_mode: sample weight mode user input passed from compile API.\n skip_target_weighing_indices: Indices of output for which sample weights\n should be skipped.\n\n # Returns\n A pair of list of sample weights and sample weight modes\n (one for each output).\n\n # Raises\n ValueError: In case of invalid `sample_weight_mode` input.\n \"\"\"\n sample_weights = []\n sample_weight_modes = []\n if isinstance(sample_weight_mode, dict):\n unknown_output = set(sample_weight_mode.keys()) - set(output_names)\n if unknown_output:\n raise ValueError(\n 'Unknown entry in '\n 'sample_weight_mode dictionary: \"' + str(unknown_output) +\n '\". Only expected the following keys: ' + str(output_names))\n for i, name in enumerate(output_names):\n if (i not in skip_target_weighing_indices and\n name not in sample_weight_mode):\n raise ValueError(\n 'Output missing from sample_weight_modes dictionary')\n weight, mode = get_output_sample_weight_and_mode(\n skip_target_weighing_indices,\n sample_weight_mode.get(name),\n name,\n i)\n sample_weights.append(weight)\n sample_weight_modes.append(mode)\n elif isinstance(sample_weight_mode, list):\n if len(sample_weight_mode) != len(output_names):\n raise ValueError('When passing a list as sample_weight_mode, '\n 'it should have one entry per model output. '\n 'The model has ' + str(len(output_names)) +\n ' outputs, but you passed ' +\n str(len(sample_weight_mode)) + 'sample_weight_modes')\n for i, name in enumerate(output_names):\n weight, mode = get_output_sample_weight_and_mode(\n skip_target_weighing_indices, sample_weight_mode[i], name, i)\n sample_weights.append(weight)\n sample_weight_modes.append(mode)\n else:\n for i, name in enumerate(output_names):\n weight, mode = get_output_sample_weight_and_mode(\n skip_target_weighing_indices, sample_weight_mode, name, i)\n sample_weights.append(weight)\n sample_weight_modes.append(mode)\n return sample_weights, sample_weight_modes\n\n\ndef prepare_loss_functions(loss, output_names):\n \"\"\"Converts loss to a list of loss functions.\n\n # Arguments\n loss: String (name of objective function), objective function or\n `Loss` instance. If the model has multiple outputs, you can use\n a different loss on each output by passing a dictionary or a\n list of losses. The loss value that will be minimized by the model\n will then be the sum of all individual losses.\n output_names: List of model output names.\n\n # Returns\n A list of loss objective functions.\n\n # Raises:\n ValueError: If loss is a dict with keys not in model output names,\n or if loss is a list with len not equal to model outputs.\n \"\"\"\n if isinstance(loss, collections.Mapping):\n generic_utils.check_for_unexpected_keys('loss', loss, output_names)\n loss_functions = []\n for name in output_names:\n if name not in loss:\n warnings.warn(\n 'Output {0} missing from loss dictionary. We assume '\n 'this was done on purpose. The fit and evaluate APIs will not '\n 'be expecting any data to be passed to {0}.'.format(name))\n loss_functions.append(get_loss_function(loss.get(name, None)))\n elif isinstance(loss, six.string_types):\n loss_functions = [get_loss_function(loss) for _ in output_names]\n elif isinstance(loss, collections.Sequence):\n if len(loss) != len(output_names):\n raise ValueError('When passing a list as loss, it should have one entry '\n 'per model outputs. The model has {} outputs, but you '\n 'passed loss={}'.format(len(output_names), loss))\n loss_functions = [get_loss_function(l) for l in loss]\n else:\n loss_functions = [get_loss_function(loss) for _ in range(len(output_names))]\n\n return loss_functions\n\n\ndef prepare_loss_weights(output_names, loss_weights=None):\n \"\"\"Converts loss weights to a list of loss weights.\n\n # Arguments\n output_names: List of model output names.\n loss_weights: Optional list or dictionary specifying scalar coefficients\n (Python floats) to weight the loss contributions of different model\n outputs. The loss value that will be minimized by the model will then be\n the *weighted sum* of all individual losses, weighted by the\n `loss_weights` coefficients. If a list, it is expected to have a 1:1\n mapping to the model's outputs. If a dict, it is expected to map\n output names (strings) to scalar coefficients.\n\n # Returns\n A list of loss weights of python floats.\n\n # Raises\n ValueError: If loss weight is a dict with key not in model output names,\n or if loss is a list with len not equal to model outputs.\n \"\"\"\n if loss_weights is None:\n weights_list = [1.] * len(output_names)\n elif isinstance(loss_weights, collections.Mapping):\n generic_utils.check_for_unexpected_keys('loss_weights', loss_weights,\n output_names)\n weights_list = [loss_weights.get(name, 1.) for name in output_names]\n elif isinstance(loss_weights, list):\n if len(loss_weights) != len(output_names):\n raise ValueError('When passing a list as loss_weights, '\n 'it should have one entry per model output. '\n 'The model has ' + str(len(output_names)) +\n ' outputs, but you passed loss_weights=' +\n str(loss_weights))\n weights_list = loss_weights\n else:\n raise TypeError('Could not interpret loss_weights argument: ' +\n str(loss_weights) + ' - expected a list of dicts.')\n\n return weights_list\n\n\ndef collect_per_output_metric_info(metrics,\n output_names,\n output_shapes,\n loss_fns,\n is_weighted=False):\n \"\"\"Maps metric names and functions to model outputs.\n\n # Arguments\n metrics: a list or a list of lists or a dict of metric functions.\n output_names: a list of the names (strings) of model outputs.\n output_shapes: a list of the shapes (strings) of model outputs.\n loss_fns: a list of the loss functions corresponding to the model outputs.\n is_weighted: Boolean indicating whether the given metrics are weighted.\n\n # Returns\n A list (one entry per model output) of dicts.\n For instance, if the model has 2 outputs, and for the first output\n we want to compute \"binary_accuracy\" and \"binary_crossentropy\",\n and just \"binary_accuracy\" for the second output,\n the list would look like: `[{\n 'acc': binary_accuracy(),\n 'ce': binary_crossentropy(),\n }, {\n 'acc': binary_accuracy(),\n }]`\n\n # Raises\n TypeError: if an incorrect type is passed for the `metrics` argument.\n \"\"\"\n if not metrics:\n return [{} for _ in output_names]\n\n if isinstance(metrics, list):\n any_sub_list = any(isinstance(m, list) for m in metrics)\n if any_sub_list:\n if len(metrics) != len(output_names):\n raise ValueError('When passing a list of lists as `metrics`, '\n 'it should have one entry per model output. '\n 'The model has ' + str(len(output_names)) +\n ' outputs, but you passed metrics=' + str(metrics))\n # User has provided a list of len = len(outputs).\n nested_metrics = [generic_utils.to_list(m) for m in metrics]\n else:\n # If it is a single list we then apply all metrics to all outputs.\n if len(output_names) > 1:\n nested_metrics = []\n for _ in output_names:\n nested_metrics.append(\n [metrics_module.clone_metric(m) for m in metrics])\n else:\n nested_metrics = [metrics]\n elif isinstance(metrics, collections.Mapping):\n generic_utils.check_for_unexpected_keys('metrics', metrics, output_names)\n nested_metrics = []\n for name in output_names:\n output_metrics = generic_utils.to_list(metrics.get(name, []))\n nested_metrics.append(output_metrics)\n else:\n raise TypeError('Type of `metrics` argument not understood. '\n 'Expected a list or dictionary, found: ' + str(metrics))\n\n per_output_metrics = []\n for i, metrics in enumerate(nested_metrics):\n metrics_dict = OrderedDict()\n for metric in metrics:\n metric_name = get_metric_name(metric, is_weighted)\n metric_fn = get_metric_function(\n metric, output_shape=output_shapes[i], loss_fn=loss_fns[i])\n\n # If the metric function is not stateful, we create a stateful version.\n if not isinstance(metric_fn, metrics_module.Metric):\n metric_fn = metrics_module.MeanMetricWrapper(\n metric_fn, name=metric_name)\n metrics_dict[metric_name] = metric_fn\n per_output_metrics.append(metrics_dict)\n\n return per_output_metrics\n\n\ndef get_metric_name(metric, weighted=False):\n \"\"\"Returns the name corresponding to the given metric input.\n\n # Arguments\n metric: Metric function name or reference.\n weighted: Boolean indicating if the given metric is weighted.\n\n # Returns\n The metric name.\n \"\"\"\n # We keep the string that the user has set in compile as the metric name.\n if isinstance(metric, six.string_types):\n return metric\n\n metric = metrics_module.get(metric)\n return metric.name if hasattr(metric, 'name') else metric.__name__\n\n\ndef get_metric_function(metric, output_shape=None, loss_fn=None):\n \"\"\"Returns the metric function corresponding to the given metric input.\n\n # Arguments\n metric: Metric function name or reference.\n output_shape: The shape of the output that this metric will be calculated\n for.\n loss_fn: The loss function used.\n\n # Returns\n The metric function.\n \"\"\"\n if metric not in ['accuracy', 'acc', 'crossentropy', 'ce']:\n return metrics_module.get(metric)\n\n is_sparse_categorical_crossentropy = (\n isinstance(loss_fn, losses.SparseCategoricalCrossentropy) or\n (isinstance(loss_fn, losses.LossFunctionWrapper) and\n loss_fn.fn == losses.sparse_categorical_crossentropy))\n\n is_binary_crossentropy = (\n isinstance(loss_fn, losses.BinaryCrossentropy) or\n (isinstance(loss_fn, losses.LossFunctionWrapper) and\n loss_fn.fn == losses.binary_crossentropy))\n\n if metric in ['accuracy', 'acc']:\n if output_shape[-1] == 1 or is_binary_crossentropy:\n return metrics_module.binary_accuracy\n elif is_sparse_categorical_crossentropy:\n return metrics_module.sparse_categorical_accuracy\n # If the output_shape[-1] is not 1, then we know output is `categorical`.\n # We assume it is sparse categorical only if loss is explicitly given\n # as sparse categorical crossentropy loss.\n return metrics_module.categorical_accuracy\n else:\n if output_shape[-1] == 1 or is_binary_crossentropy:\n return metrics_module.binary_crossentropy\n elif is_sparse_categorical_crossentropy:\n return metrics_module.sparse_categorical_crossentropy\n return metrics_module.categorical_crossentropy\n\n\ndef call_metric_function(metric_fn,\n y_true,\n y_pred=None,\n weights=None,\n mask=None):\n \"\"\"Invokes metric function and returns the metric result tensor.\"\"\"\n if mask is not None:\n mask = K.cast(mask, y_pred.dtype)\n if weights is None:\n # Use mask as sample weight.\n weights = mask\n else:\n # Update dimensions of weights to match with mask.\n mask, _, weights = losses_utils.squeeze_or_expand_dimensions(\n mask, sample_weight=weights)\n weights *= mask\n\n if y_pred is not None:\n update_ops = metric_fn.update_state(y_true, y_pred, sample_weight=weights)\n with K.control_dependencies(update_ops): # For TF\n metric_fn.result()\n else:\n # `Mean` metric only takes a single value.\n update_ops = metric_fn.update_state(y_true, sample_weight=weights)\n with K.control_dependencies(update_ops): # For TF\n metric_fn.result()\n", "path": "keras/engine/training_utils.py"}]} |
gh_patches_debug_1530 | rasdani/github-patches | git_diff | aio-libs-abandoned__aioredis-py-658 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Transaction data decoding is incorrect
Transaction data always decoded two times if connection encoding was defined:
Test case:
```python
@pytest.mark.run_loop
async def test_global_encoding(redis, create_redis, server, loop):
redis = await create_redis(
server.tcp_address,
loop=loop, encoding='utf-8')
res = await redis.set('key', 'value')
assert res is True
res = await redis.hmset(
'hash-key', 'foo', 'val1', 'bar', 'val2')
assert res is True
tr = redis.multi_exec()
fut1 = tr.get('key')
fut2 = tr.get('key', encoding='utf-8')
fut3 = tr.get('key', encoding=None)
fut4 = tr.hgetall('hash-key', encoding='utf-8')
await tr.execute()
res = await fut1
assert res == 'value'
res = await fut2
assert res == 'value'
res = await fut3
assert res == b'value'
res = await fut4
assert res == {'foo': 'val1', 'bar': 'val2'}
```
Tracing for `util.decode`
```python
decode(b'PONG', utf-8)
decode(b'OK', utf-8)
decode(b'OK', utf-8)
decode(b'OK', utf-8)
decode(b'QUEUED', utf-8)
decode(b'QUEUED', utf-8)
decode(b'QUEUED', utf-8)
decode([b'value', b'value', b'value', [b'foo', b'val1', b'bar', b'val2']], utf-8)
decode(b'value', utf-8)
decode(b'value', utf-8)
decode(b'value', utf-8)
decode([b'foo', b'val1', b'bar', b'val2'], utf-8)
decode(b'foo', utf-8)
decode(b'val1', utf-8)
decode(b'bar', utf-8)
decode(b'val2', utf-8)
decode(value, utf-8)
decode(value, utf-8)
decode(['foo', 'val1', 'bar', 'val2'], utf-8)
decode(foo, utf-8)
decode(val1, utf-8)
decode(bar, utf-8)
decode(val2, utf-8)
```
You can see that `multi-exec` response `[b'value', b'value', b'value', [b'foo', b'val1', b'bar', b'val2']]` was decoded twice. In this case decoding in `RedisConnection._end_transaction` is not make sense because we have already decoded it in the `RedisConnection._process_data`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `aioredis/connection.py`
Content:
```
1 import types
2 import asyncio
3 import socket
4 from functools import partial
5 from collections import deque
6 from contextlib import contextmanager
7
8 from .util import (
9 encode_command,
10 wait_ok,
11 _NOTSET,
12 _set_result,
13 _set_exception,
14 coerced_keys_dict,
15 decode,
16 parse_url,
17 )
18 from .parser import Reader
19 from .stream import open_connection, open_unix_connection
20 from .errors import (
21 ConnectionClosedError,
22 ConnectionForcedCloseError,
23 RedisError,
24 ProtocolError,
25 ReplyError,
26 WatchVariableError,
27 ReadOnlyError,
28 MaxClientsError
29 )
30 from .pubsub import Channel
31 from .abc import AbcChannel
32 from .abc import AbcConnection
33 from .log import logger
34
35
36 __all__ = ['create_connection', 'RedisConnection']
37
38 MAX_CHUNK_SIZE = 65536
39
40 _PUBSUB_COMMANDS = (
41 'SUBSCRIBE', b'SUBSCRIBE',
42 'PSUBSCRIBE', b'PSUBSCRIBE',
43 'UNSUBSCRIBE', b'UNSUBSCRIBE',
44 'PUNSUBSCRIBE', b'PUNSUBSCRIBE',
45 )
46
47
48 async def create_connection(address, *, db=None, password=None, ssl=None,
49 encoding=None, parser=None, loop=None,
50 timeout=None, connection_cls=None):
51 """Creates redis connection.
52
53 Opens connection to Redis server specified by address argument.
54 Address argument can be one of the following:
55 * A tuple representing (host, port) pair for TCP connections;
56 * A string representing either Redis URI or unix domain socket path.
57
58 SSL argument is passed through to asyncio.create_connection.
59 By default SSL/TLS is not used.
60
61 By default any timeout is applied at the connection stage, however
62 you can set a limitted time used trying to open a connection via
63 the `timeout` Kw.
64
65 Encoding argument can be used to decode byte-replies to strings.
66 By default no decoding is done.
67
68 Parser parameter can be used to pass custom Redis protocol parser class.
69 By default hiredis.Reader is used (unless it is missing or platform
70 is not CPython).
71
72 Return value is RedisConnection instance or a connection_cls if it is
73 given.
74
75 This function is a coroutine.
76 """
77 assert isinstance(address, (tuple, list, str)), "tuple or str expected"
78 if isinstance(address, str):
79 address, options = parse_url(address)
80 logger.debug("Parsed Redis URI %r", address)
81 db = options.setdefault('db', db)
82 password = options.setdefault('password', password)
83 encoding = options.setdefault('encoding', encoding)
84 timeout = options.setdefault('timeout', timeout)
85 if 'ssl' in options:
86 assert options['ssl'] or (not options['ssl'] and not ssl), (
87 "Conflicting ssl options are set", options['ssl'], ssl)
88 ssl = ssl or options['ssl']
89
90 if timeout is not None and timeout <= 0:
91 raise ValueError("Timeout has to be None or a number greater than 0")
92
93 if connection_cls:
94 assert issubclass(connection_cls, AbcConnection),\
95 "connection_class does not meet the AbcConnection contract"
96 cls = connection_cls
97 else:
98 cls = RedisConnection
99
100 if loop is None:
101 loop = asyncio.get_event_loop()
102
103 if isinstance(address, (list, tuple)):
104 host, port = address
105 logger.debug("Creating tcp connection to %r", address)
106 reader, writer = await asyncio.wait_for(open_connection(
107 host, port, limit=MAX_CHUNK_SIZE, ssl=ssl, loop=loop),
108 timeout, loop=loop)
109 sock = writer.transport.get_extra_info('socket')
110 if sock is not None:
111 sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
112 address = sock.getpeername()
113 address = tuple(address[:2])
114 else:
115 logger.debug("Creating unix connection to %r", address)
116 reader, writer = await asyncio.wait_for(open_unix_connection(
117 address, ssl=ssl, limit=MAX_CHUNK_SIZE, loop=loop),
118 timeout, loop=loop)
119 sock = writer.transport.get_extra_info('socket')
120 if sock is not None:
121 address = sock.getpeername()
122
123 conn = cls(reader, writer, encoding=encoding,
124 address=address, parser=parser,
125 loop=loop)
126
127 try:
128 if password is not None:
129 await conn.auth(password)
130 if db is not None:
131 await conn.select(db)
132 except Exception:
133 conn.close()
134 await conn.wait_closed()
135 raise
136 return conn
137
138
139 class RedisConnection(AbcConnection):
140 """Redis connection."""
141
142 def __init__(self, reader, writer, *, address, encoding=None,
143 parser=None, loop=None):
144 if loop is None:
145 loop = asyncio.get_event_loop()
146 if parser is None:
147 parser = Reader
148 assert callable(parser), (
149 "Parser argument is not callable", parser)
150 self._reader = reader
151 self._writer = writer
152 self._address = address
153 self._loop = loop
154 self._waiters = deque()
155 self._reader.set_parser(
156 parser(protocolError=ProtocolError, replyError=ReplyError)
157 )
158 self._reader_task = asyncio.ensure_future(self._read_data(),
159 loop=self._loop)
160 self._close_msg = None
161 self._db = 0
162 self._closing = False
163 self._closed = False
164 self._close_state = asyncio.Event()
165 self._reader_task.add_done_callback(lambda x: self._close_state.set())
166 self._in_transaction = None
167 self._transaction_error = None # XXX: never used?
168 self._in_pubsub = 0
169 self._pubsub_channels = coerced_keys_dict()
170 self._pubsub_patterns = coerced_keys_dict()
171 self._encoding = encoding
172 self._pipeline_buffer = None
173
174 def __repr__(self):
175 return '<RedisConnection [db:{}]>'.format(self._db)
176
177 async def _read_data(self):
178 """Response reader task."""
179 last_error = ConnectionClosedError(
180 "Connection has been closed by server")
181 while not self._reader.at_eof():
182 try:
183 obj = await self._reader.readobj()
184 except asyncio.CancelledError:
185 # NOTE: reader can get cancelled from `close()` method only.
186 last_error = RuntimeError('this is unexpected')
187 break
188 except ProtocolError as exc:
189 # ProtocolError is fatal
190 # so connection must be closed
191 if self._in_transaction is not None:
192 self._transaction_error = exc
193 last_error = exc
194 break
195 except Exception as exc:
196 # NOTE: for QUIT command connection error can be received
197 # before response
198 last_error = exc
199 break
200 else:
201 if (obj == b'' or obj is None) and self._reader.at_eof():
202 logger.debug("Connection has been closed by server,"
203 " response: %r", obj)
204 last_error = ConnectionClosedError("Reader at end of file")
205 break
206
207 if isinstance(obj, MaxClientsError):
208 last_error = obj
209 break
210 if self._in_pubsub:
211 self._process_pubsub(obj)
212 else:
213 self._process_data(obj)
214 self._closing = True
215 self._loop.call_soon(self._do_close, last_error)
216
217 def _process_data(self, obj):
218 """Processes command results."""
219 assert len(self._waiters) > 0, (type(obj), obj)
220 waiter, encoding, cb = self._waiters.popleft()
221 if isinstance(obj, RedisError):
222 if isinstance(obj, ReplyError):
223 if obj.args[0].startswith('READONLY'):
224 obj = ReadOnlyError(obj.args[0])
225 _set_exception(waiter, obj)
226 if self._in_transaction is not None:
227 self._transaction_error = obj
228 else:
229 if encoding is not None:
230 try:
231 obj = decode(obj, encoding)
232 except Exception as exc:
233 _set_exception(waiter, exc)
234 return
235 if cb is not None:
236 try:
237 obj = cb(obj)
238 except Exception as exc:
239 _set_exception(waiter, exc)
240 return
241 _set_result(waiter, obj)
242 if self._in_transaction is not None:
243 self._in_transaction.append((encoding, cb))
244
245 def _process_pubsub(self, obj, *, process_waiters=True):
246 """Processes pubsub messages."""
247 kind, *args, data = obj
248 if kind in (b'subscribe', b'unsubscribe'):
249 chan, = args
250 if process_waiters and self._in_pubsub and self._waiters:
251 self._process_data(obj)
252 if kind == b'unsubscribe':
253 ch = self._pubsub_channels.pop(chan, None)
254 if ch:
255 ch.close()
256 self._in_pubsub = data
257 elif kind in (b'psubscribe', b'punsubscribe'):
258 chan, = args
259 if process_waiters and self._in_pubsub and self._waiters:
260 self._process_data(obj)
261 if kind == b'punsubscribe':
262 ch = self._pubsub_patterns.pop(chan, None)
263 if ch:
264 ch.close()
265 self._in_pubsub = data
266 elif kind == b'message':
267 chan, = args
268 self._pubsub_channels[chan].put_nowait(data)
269 elif kind == b'pmessage':
270 pattern, chan = args
271 self._pubsub_patterns[pattern].put_nowait((chan, data))
272 elif kind == b'pong':
273 if process_waiters and self._in_pubsub and self._waiters:
274 self._process_data(data or b'PONG')
275 else:
276 logger.warning("Unknown pubsub message received %r", obj)
277
278 @contextmanager
279 def _buffered(self):
280 # XXX: we must ensure that no await happens
281 # as long as we buffer commands.
282 # Probably we can set some error-raising callback on enter
283 # and remove it on exit
284 # if some await happens in between -> throw an error.
285 # This is creepy solution, 'cause some one might want to await
286 # on some other source except redis.
287 # So we must only raise error we someone tries to await
288 # pending aioredis future
289 # One of solutions is to return coroutine instead of a future
290 # in `execute` method.
291 # In a coroutine we can check if buffering is enabled and raise error.
292
293 # TODO: describe in docs difference in pipeline mode for
294 # conn.execute vs pipeline.execute()
295 if self._pipeline_buffer is None:
296 self._pipeline_buffer = bytearray()
297 try:
298 yield self
299 buf = self._pipeline_buffer
300 self._writer.write(buf)
301 finally:
302 self._pipeline_buffer = None
303 else:
304 yield self
305
306 def execute(self, command, *args, encoding=_NOTSET):
307 """Executes redis command and returns Future waiting for the answer.
308
309 Raises:
310 * TypeError if any of args can not be encoded as bytes.
311 * ReplyError on redis '-ERR' responses.
312 * ProtocolError when response can not be decoded meaning connection
313 is broken.
314 * ConnectionClosedError when either client or server has closed the
315 connection.
316 """
317 if self._reader is None or self._reader.at_eof():
318 msg = self._close_msg or "Connection closed or corrupted"
319 raise ConnectionClosedError(msg)
320 if command is None:
321 raise TypeError("command must not be None")
322 if None in args:
323 raise TypeError("args must not contain None")
324 command = command.upper().strip()
325 is_pubsub = command in _PUBSUB_COMMANDS
326 is_ping = command in ('PING', b'PING')
327 if self._in_pubsub and not (is_pubsub or is_ping):
328 raise RedisError("Connection in SUBSCRIBE mode")
329 elif is_pubsub:
330 logger.warning("Deprecated. Use `execute_pubsub` method directly")
331 return self.execute_pubsub(command, *args)
332
333 if command in ('SELECT', b'SELECT'):
334 cb = partial(self._set_db, args=args)
335 elif command in ('MULTI', b'MULTI'):
336 cb = self._start_transaction
337 elif command in ('EXEC', b'EXEC'):
338 cb = partial(self._end_transaction, discard=False)
339 elif command in ('DISCARD', b'DISCARD'):
340 cb = partial(self._end_transaction, discard=True)
341 else:
342 cb = None
343 if encoding is _NOTSET:
344 encoding = self._encoding
345 fut = self._loop.create_future()
346 if self._pipeline_buffer is None:
347 self._writer.write(encode_command(command, *args))
348 else:
349 encode_command(command, *args, buf=self._pipeline_buffer)
350 self._waiters.append((fut, encoding, cb))
351 return fut
352
353 def execute_pubsub(self, command, *channels):
354 """Executes redis (p)subscribe/(p)unsubscribe commands.
355
356 Returns asyncio.gather coroutine waiting for all channels/patterns
357 to receive answers.
358 """
359 command = command.upper().strip()
360 assert command in _PUBSUB_COMMANDS, (
361 "Pub/Sub command expected", command)
362 if self._reader is None or self._reader.at_eof():
363 raise ConnectionClosedError("Connection closed or corrupted")
364 if None in set(channels):
365 raise TypeError("args must not contain None")
366 if not len(channels):
367 raise TypeError("No channels/patterns supplied")
368 is_pattern = len(command) in (10, 12)
369 mkchannel = partial(Channel, is_pattern=is_pattern, loop=self._loop)
370 channels = [ch if isinstance(ch, AbcChannel) else mkchannel(ch)
371 for ch in channels]
372 if not all(ch.is_pattern == is_pattern for ch in channels):
373 raise ValueError("Not all channels {} match command {}"
374 .format(channels, command))
375 cmd = encode_command(command, *(ch.name for ch in channels))
376 res = []
377 for ch in channels:
378 fut = self._loop.create_future()
379 res.append(fut)
380 cb = partial(self._update_pubsub, ch=ch)
381 self._waiters.append((fut, None, cb))
382 if self._pipeline_buffer is None:
383 self._writer.write(cmd)
384 else:
385 self._pipeline_buffer.extend(cmd)
386 return asyncio.gather(*res, loop=self._loop)
387
388 def close(self):
389 """Close connection."""
390 self._do_close(ConnectionForcedCloseError())
391
392 def _do_close(self, exc):
393 if self._closed:
394 return
395 self._closed = True
396 self._closing = False
397 self._writer.transport.close()
398 self._reader_task.cancel()
399 self._reader_task = None
400 self._writer = None
401 self._reader = None
402 self._pipeline_buffer = None
403
404 if exc is not None:
405 self._close_msg = str(exc)
406
407 while self._waiters:
408 waiter, *spam = self._waiters.popleft()
409 logger.debug("Cancelling waiter %r", (waiter, spam))
410 if exc is None:
411 _set_exception(waiter, ConnectionForcedCloseError())
412 else:
413 _set_exception(waiter, exc)
414 while self._pubsub_channels:
415 _, ch = self._pubsub_channels.popitem()
416 logger.debug("Closing pubsub channel %r", ch)
417 ch.close(exc)
418 while self._pubsub_patterns:
419 _, ch = self._pubsub_patterns.popitem()
420 logger.debug("Closing pubsub pattern %r", ch)
421 ch.close(exc)
422
423 @property
424 def closed(self):
425 """True if connection is closed."""
426 closed = self._closing or self._closed
427 if not closed and self._reader and self._reader.at_eof():
428 self._closing = closed = True
429 self._loop.call_soon(self._do_close, None)
430 return closed
431
432 async def wait_closed(self):
433 """Coroutine waiting until connection is closed."""
434 await self._close_state.wait()
435
436 @property
437 def db(self):
438 """Currently selected db index."""
439 return self._db
440
441 @property
442 def encoding(self):
443 """Current set codec or None."""
444 return self._encoding
445
446 @property
447 def address(self):
448 """Redis server address, either host-port tuple or str."""
449 return self._address
450
451 def select(self, db):
452 """Change the selected database for the current connection."""
453 if not isinstance(db, int):
454 raise TypeError("DB must be of int type, not {!r}".format(db))
455 if db < 0:
456 raise ValueError("DB must be greater or equal 0, got {!r}"
457 .format(db))
458 fut = self.execute('SELECT', db)
459 return wait_ok(fut)
460
461 def _set_db(self, ok, args):
462 assert ok in {b'OK', 'OK'}, ("Unexpected result of SELECT", ok)
463 self._db = args[0]
464 return ok
465
466 def _start_transaction(self, ok):
467 assert self._in_transaction is None, (
468 "Connection is already in transaction", self._in_transaction)
469 self._in_transaction = deque()
470 self._transaction_error = None
471 return ok
472
473 def _end_transaction(self, obj, discard):
474 assert self._in_transaction is not None, (
475 "Connection is not in transaction", obj)
476 self._transaction_error = None
477 recall, self._in_transaction = self._in_transaction, None
478 recall.popleft() # ignore first (its _start_transaction)
479 if discard:
480 return obj
481 assert isinstance(obj, list) or (obj is None and not discard), (
482 "Unexpected MULTI/EXEC result", obj, recall)
483 # TODO: need to be able to re-try transaction
484 if obj is None:
485 err = WatchVariableError("WATCH variable has changed")
486 obj = [err] * len(recall)
487 assert len(obj) == len(recall), (
488 "Wrong number of result items in mutli-exec", obj, recall)
489 res = []
490 for o, (encoding, cb) in zip(obj, recall):
491 if not isinstance(o, RedisError):
492 try:
493 if encoding:
494 o = decode(o, encoding)
495 if cb:
496 o = cb(o)
497 except Exception as err:
498 res.append(err)
499 continue
500 res.append(o)
501 return res
502
503 def _update_pubsub(self, obj, *, ch):
504 kind, *pattern, channel, subscriptions = obj
505 self._in_pubsub, was_in_pubsub = subscriptions, self._in_pubsub
506 # XXX: the channels/patterns storage should be refactored.
507 # if code which supposed to read from channel/pattern
508 # failed (exception in reader or else) than
509 # the channel object will still reside in memory
510 # and leak memory (messages will be put in queue).
511 if kind == b'subscribe' and channel not in self._pubsub_channels:
512 self._pubsub_channels[channel] = ch
513 elif kind == b'psubscribe' and channel not in self._pubsub_patterns:
514 self._pubsub_patterns[channel] = ch
515 if not was_in_pubsub:
516 self._process_pubsub(obj, process_waiters=False)
517 return obj
518
519 @property
520 def in_transaction(self):
521 """Set to True when MULTI command was issued."""
522 return self._in_transaction is not None
523
524 @property
525 def in_pubsub(self):
526 """Indicates that connection is in PUB/SUB mode.
527
528 Provides the number of subscribed channels.
529 """
530 return self._in_pubsub
531
532 @property
533 def pubsub_channels(self):
534 """Returns read-only channels dict."""
535 return types.MappingProxyType(self._pubsub_channels)
536
537 @property
538 def pubsub_patterns(self):
539 """Returns read-only patterns dict."""
540 return types.MappingProxyType(self._pubsub_patterns)
541
542 def auth(self, password):
543 """Authenticate to server."""
544 fut = self.execute('AUTH', password)
545 return wait_ok(fut)
546
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/aioredis/connection.py b/aioredis/connection.py
--- a/aioredis/connection.py
+++ b/aioredis/connection.py
@@ -336,6 +336,7 @@
cb = self._start_transaction
elif command in ('EXEC', b'EXEC'):
cb = partial(self._end_transaction, discard=False)
+ encoding = None
elif command in ('DISCARD', b'DISCARD'):
cb = partial(self._end_transaction, discard=True)
else:
| {"golden_diff": "diff --git a/aioredis/connection.py b/aioredis/connection.py\n--- a/aioredis/connection.py\n+++ b/aioredis/connection.py\n@@ -336,6 +336,7 @@\n cb = self._start_transaction\n elif command in ('EXEC', b'EXEC'):\n cb = partial(self._end_transaction, discard=False)\n+ encoding = None\n elif command in ('DISCARD', b'DISCARD'):\n cb = partial(self._end_transaction, discard=True)\n else:\n", "issue": "Transaction data decoding is incorrect\nTransaction data always decoded two times if connection encoding was defined:\r\n\r\nTest case:\r\n```python\r\[email protected]_loop\r\nasync def test_global_encoding(redis, create_redis, server, loop):\r\n redis = await create_redis(\r\n server.tcp_address,\r\n loop=loop, encoding='utf-8')\r\n res = await redis.set('key', 'value')\r\n assert res is True\r\n res = await redis.hmset(\r\n 'hash-key', 'foo', 'val1', 'bar', 'val2')\r\n assert res is True\r\n\r\n tr = redis.multi_exec()\r\n fut1 = tr.get('key')\r\n fut2 = tr.get('key', encoding='utf-8')\r\n fut3 = tr.get('key', encoding=None)\r\n fut4 = tr.hgetall('hash-key', encoding='utf-8')\r\n await tr.execute()\r\n res = await fut1\r\n assert res == 'value'\r\n res = await fut2\r\n assert res == 'value'\r\n res = await fut3\r\n assert res == b'value'\r\n res = await fut4\r\n assert res == {'foo': 'val1', 'bar': 'val2'}\r\n```\r\n\r\nTracing for `util.decode`\r\n```python\r\ndecode(b'PONG', utf-8)\r\ndecode(b'OK', utf-8)\r\ndecode(b'OK', utf-8)\r\ndecode(b'OK', utf-8)\r\ndecode(b'QUEUED', utf-8)\r\ndecode(b'QUEUED', utf-8)\r\ndecode(b'QUEUED', utf-8)\r\ndecode([b'value', b'value', b'value', [b'foo', b'val1', b'bar', b'val2']], utf-8)\r\ndecode(b'value', utf-8)\r\ndecode(b'value', utf-8)\r\ndecode(b'value', utf-8)\r\ndecode([b'foo', b'val1', b'bar', b'val2'], utf-8)\r\ndecode(b'foo', utf-8)\r\ndecode(b'val1', utf-8)\r\ndecode(b'bar', utf-8)\r\ndecode(b'val2', utf-8)\r\ndecode(value, utf-8)\r\ndecode(value, utf-8)\r\ndecode(['foo', 'val1', 'bar', 'val2'], utf-8)\r\ndecode(foo, utf-8)\r\ndecode(val1, utf-8)\r\ndecode(bar, utf-8)\r\ndecode(val2, utf-8)\r\n```\r\n\r\nYou can see that `multi-exec` response `[b'value', b'value', b'value', [b'foo', b'val1', b'bar', b'val2']]` was decoded twice. In this case decoding in `RedisConnection._end_transaction` is not make sense because we have already decoded it in the `RedisConnection._process_data`.\n", "before_files": [{"content": "import types\nimport asyncio\nimport socket\nfrom functools import partial\nfrom collections import deque\nfrom contextlib import contextmanager\n\nfrom .util import (\n encode_command,\n wait_ok,\n _NOTSET,\n _set_result,\n _set_exception,\n coerced_keys_dict,\n decode,\n parse_url,\n )\nfrom .parser import Reader\nfrom .stream import open_connection, open_unix_connection\nfrom .errors import (\n ConnectionClosedError,\n ConnectionForcedCloseError,\n RedisError,\n ProtocolError,\n ReplyError,\n WatchVariableError,\n ReadOnlyError,\n MaxClientsError\n )\nfrom .pubsub import Channel\nfrom .abc import AbcChannel\nfrom .abc import AbcConnection\nfrom .log import logger\n\n\n__all__ = ['create_connection', 'RedisConnection']\n\nMAX_CHUNK_SIZE = 65536\n\n_PUBSUB_COMMANDS = (\n 'SUBSCRIBE', b'SUBSCRIBE',\n 'PSUBSCRIBE', b'PSUBSCRIBE',\n 'UNSUBSCRIBE', b'UNSUBSCRIBE',\n 'PUNSUBSCRIBE', b'PUNSUBSCRIBE',\n )\n\n\nasync def create_connection(address, *, db=None, password=None, ssl=None,\n encoding=None, parser=None, loop=None,\n timeout=None, connection_cls=None):\n \"\"\"Creates redis connection.\n\n Opens connection to Redis server specified by address argument.\n Address argument can be one of the following:\n * A tuple representing (host, port) pair for TCP connections;\n * A string representing either Redis URI or unix domain socket path.\n\n SSL argument is passed through to asyncio.create_connection.\n By default SSL/TLS is not used.\n\n By default any timeout is applied at the connection stage, however\n you can set a limitted time used trying to open a connection via\n the `timeout` Kw.\n\n Encoding argument can be used to decode byte-replies to strings.\n By default no decoding is done.\n\n Parser parameter can be used to pass custom Redis protocol parser class.\n By default hiredis.Reader is used (unless it is missing or platform\n is not CPython).\n\n Return value is RedisConnection instance or a connection_cls if it is\n given.\n\n This function is a coroutine.\n \"\"\"\n assert isinstance(address, (tuple, list, str)), \"tuple or str expected\"\n if isinstance(address, str):\n address, options = parse_url(address)\n logger.debug(\"Parsed Redis URI %r\", address)\n db = options.setdefault('db', db)\n password = options.setdefault('password', password)\n encoding = options.setdefault('encoding', encoding)\n timeout = options.setdefault('timeout', timeout)\n if 'ssl' in options:\n assert options['ssl'] or (not options['ssl'] and not ssl), (\n \"Conflicting ssl options are set\", options['ssl'], ssl)\n ssl = ssl or options['ssl']\n\n if timeout is not None and timeout <= 0:\n raise ValueError(\"Timeout has to be None or a number greater than 0\")\n\n if connection_cls:\n assert issubclass(connection_cls, AbcConnection),\\\n \"connection_class does not meet the AbcConnection contract\"\n cls = connection_cls\n else:\n cls = RedisConnection\n\n if loop is None:\n loop = asyncio.get_event_loop()\n\n if isinstance(address, (list, tuple)):\n host, port = address\n logger.debug(\"Creating tcp connection to %r\", address)\n reader, writer = await asyncio.wait_for(open_connection(\n host, port, limit=MAX_CHUNK_SIZE, ssl=ssl, loop=loop),\n timeout, loop=loop)\n sock = writer.transport.get_extra_info('socket')\n if sock is not None:\n sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n address = sock.getpeername()\n address = tuple(address[:2])\n else:\n logger.debug(\"Creating unix connection to %r\", address)\n reader, writer = await asyncio.wait_for(open_unix_connection(\n address, ssl=ssl, limit=MAX_CHUNK_SIZE, loop=loop),\n timeout, loop=loop)\n sock = writer.transport.get_extra_info('socket')\n if sock is not None:\n address = sock.getpeername()\n\n conn = cls(reader, writer, encoding=encoding,\n address=address, parser=parser,\n loop=loop)\n\n try:\n if password is not None:\n await conn.auth(password)\n if db is not None:\n await conn.select(db)\n except Exception:\n conn.close()\n await conn.wait_closed()\n raise\n return conn\n\n\nclass RedisConnection(AbcConnection):\n \"\"\"Redis connection.\"\"\"\n\n def __init__(self, reader, writer, *, address, encoding=None,\n parser=None, loop=None):\n if loop is None:\n loop = asyncio.get_event_loop()\n if parser is None:\n parser = Reader\n assert callable(parser), (\n \"Parser argument is not callable\", parser)\n self._reader = reader\n self._writer = writer\n self._address = address\n self._loop = loop\n self._waiters = deque()\n self._reader.set_parser(\n parser(protocolError=ProtocolError, replyError=ReplyError)\n )\n self._reader_task = asyncio.ensure_future(self._read_data(),\n loop=self._loop)\n self._close_msg = None\n self._db = 0\n self._closing = False\n self._closed = False\n self._close_state = asyncio.Event()\n self._reader_task.add_done_callback(lambda x: self._close_state.set())\n self._in_transaction = None\n self._transaction_error = None # XXX: never used?\n self._in_pubsub = 0\n self._pubsub_channels = coerced_keys_dict()\n self._pubsub_patterns = coerced_keys_dict()\n self._encoding = encoding\n self._pipeline_buffer = None\n\n def __repr__(self):\n return '<RedisConnection [db:{}]>'.format(self._db)\n\n async def _read_data(self):\n \"\"\"Response reader task.\"\"\"\n last_error = ConnectionClosedError(\n \"Connection has been closed by server\")\n while not self._reader.at_eof():\n try:\n obj = await self._reader.readobj()\n except asyncio.CancelledError:\n # NOTE: reader can get cancelled from `close()` method only.\n last_error = RuntimeError('this is unexpected')\n break\n except ProtocolError as exc:\n # ProtocolError is fatal\n # so connection must be closed\n if self._in_transaction is not None:\n self._transaction_error = exc\n last_error = exc\n break\n except Exception as exc:\n # NOTE: for QUIT command connection error can be received\n # before response\n last_error = exc\n break\n else:\n if (obj == b'' or obj is None) and self._reader.at_eof():\n logger.debug(\"Connection has been closed by server,\"\n \" response: %r\", obj)\n last_error = ConnectionClosedError(\"Reader at end of file\")\n break\n\n if isinstance(obj, MaxClientsError):\n last_error = obj\n break\n if self._in_pubsub:\n self._process_pubsub(obj)\n else:\n self._process_data(obj)\n self._closing = True\n self._loop.call_soon(self._do_close, last_error)\n\n def _process_data(self, obj):\n \"\"\"Processes command results.\"\"\"\n assert len(self._waiters) > 0, (type(obj), obj)\n waiter, encoding, cb = self._waiters.popleft()\n if isinstance(obj, RedisError):\n if isinstance(obj, ReplyError):\n if obj.args[0].startswith('READONLY'):\n obj = ReadOnlyError(obj.args[0])\n _set_exception(waiter, obj)\n if self._in_transaction is not None:\n self._transaction_error = obj\n else:\n if encoding is not None:\n try:\n obj = decode(obj, encoding)\n except Exception as exc:\n _set_exception(waiter, exc)\n return\n if cb is not None:\n try:\n obj = cb(obj)\n except Exception as exc:\n _set_exception(waiter, exc)\n return\n _set_result(waiter, obj)\n if self._in_transaction is not None:\n self._in_transaction.append((encoding, cb))\n\n def _process_pubsub(self, obj, *, process_waiters=True):\n \"\"\"Processes pubsub messages.\"\"\"\n kind, *args, data = obj\n if kind in (b'subscribe', b'unsubscribe'):\n chan, = args\n if process_waiters and self._in_pubsub and self._waiters:\n self._process_data(obj)\n if kind == b'unsubscribe':\n ch = self._pubsub_channels.pop(chan, None)\n if ch:\n ch.close()\n self._in_pubsub = data\n elif kind in (b'psubscribe', b'punsubscribe'):\n chan, = args\n if process_waiters and self._in_pubsub and self._waiters:\n self._process_data(obj)\n if kind == b'punsubscribe':\n ch = self._pubsub_patterns.pop(chan, None)\n if ch:\n ch.close()\n self._in_pubsub = data\n elif kind == b'message':\n chan, = args\n self._pubsub_channels[chan].put_nowait(data)\n elif kind == b'pmessage':\n pattern, chan = args\n self._pubsub_patterns[pattern].put_nowait((chan, data))\n elif kind == b'pong':\n if process_waiters and self._in_pubsub and self._waiters:\n self._process_data(data or b'PONG')\n else:\n logger.warning(\"Unknown pubsub message received %r\", obj)\n\n @contextmanager\n def _buffered(self):\n # XXX: we must ensure that no await happens\n # as long as we buffer commands.\n # Probably we can set some error-raising callback on enter\n # and remove it on exit\n # if some await happens in between -> throw an error.\n # This is creepy solution, 'cause some one might want to await\n # on some other source except redis.\n # So we must only raise error we someone tries to await\n # pending aioredis future\n # One of solutions is to return coroutine instead of a future\n # in `execute` method.\n # In a coroutine we can check if buffering is enabled and raise error.\n\n # TODO: describe in docs difference in pipeline mode for\n # conn.execute vs pipeline.execute()\n if self._pipeline_buffer is None:\n self._pipeline_buffer = bytearray()\n try:\n yield self\n buf = self._pipeline_buffer\n self._writer.write(buf)\n finally:\n self._pipeline_buffer = None\n else:\n yield self\n\n def execute(self, command, *args, encoding=_NOTSET):\n \"\"\"Executes redis command and returns Future waiting for the answer.\n\n Raises:\n * TypeError if any of args can not be encoded as bytes.\n * ReplyError on redis '-ERR' responses.\n * ProtocolError when response can not be decoded meaning connection\n is broken.\n * ConnectionClosedError when either client or server has closed the\n connection.\n \"\"\"\n if self._reader is None or self._reader.at_eof():\n msg = self._close_msg or \"Connection closed or corrupted\"\n raise ConnectionClosedError(msg)\n if command is None:\n raise TypeError(\"command must not be None\")\n if None in args:\n raise TypeError(\"args must not contain None\")\n command = command.upper().strip()\n is_pubsub = command in _PUBSUB_COMMANDS\n is_ping = command in ('PING', b'PING')\n if self._in_pubsub and not (is_pubsub or is_ping):\n raise RedisError(\"Connection in SUBSCRIBE mode\")\n elif is_pubsub:\n logger.warning(\"Deprecated. Use `execute_pubsub` method directly\")\n return self.execute_pubsub(command, *args)\n\n if command in ('SELECT', b'SELECT'):\n cb = partial(self._set_db, args=args)\n elif command in ('MULTI', b'MULTI'):\n cb = self._start_transaction\n elif command in ('EXEC', b'EXEC'):\n cb = partial(self._end_transaction, discard=False)\n elif command in ('DISCARD', b'DISCARD'):\n cb = partial(self._end_transaction, discard=True)\n else:\n cb = None\n if encoding is _NOTSET:\n encoding = self._encoding\n fut = self._loop.create_future()\n if self._pipeline_buffer is None:\n self._writer.write(encode_command(command, *args))\n else:\n encode_command(command, *args, buf=self._pipeline_buffer)\n self._waiters.append((fut, encoding, cb))\n return fut\n\n def execute_pubsub(self, command, *channels):\n \"\"\"Executes redis (p)subscribe/(p)unsubscribe commands.\n\n Returns asyncio.gather coroutine waiting for all channels/patterns\n to receive answers.\n \"\"\"\n command = command.upper().strip()\n assert command in _PUBSUB_COMMANDS, (\n \"Pub/Sub command expected\", command)\n if self._reader is None or self._reader.at_eof():\n raise ConnectionClosedError(\"Connection closed or corrupted\")\n if None in set(channels):\n raise TypeError(\"args must not contain None\")\n if not len(channels):\n raise TypeError(\"No channels/patterns supplied\")\n is_pattern = len(command) in (10, 12)\n mkchannel = partial(Channel, is_pattern=is_pattern, loop=self._loop)\n channels = [ch if isinstance(ch, AbcChannel) else mkchannel(ch)\n for ch in channels]\n if not all(ch.is_pattern == is_pattern for ch in channels):\n raise ValueError(\"Not all channels {} match command {}\"\n .format(channels, command))\n cmd = encode_command(command, *(ch.name for ch in channels))\n res = []\n for ch in channels:\n fut = self._loop.create_future()\n res.append(fut)\n cb = partial(self._update_pubsub, ch=ch)\n self._waiters.append((fut, None, cb))\n if self._pipeline_buffer is None:\n self._writer.write(cmd)\n else:\n self._pipeline_buffer.extend(cmd)\n return asyncio.gather(*res, loop=self._loop)\n\n def close(self):\n \"\"\"Close connection.\"\"\"\n self._do_close(ConnectionForcedCloseError())\n\n def _do_close(self, exc):\n if self._closed:\n return\n self._closed = True\n self._closing = False\n self._writer.transport.close()\n self._reader_task.cancel()\n self._reader_task = None\n self._writer = None\n self._reader = None\n self._pipeline_buffer = None\n\n if exc is not None:\n self._close_msg = str(exc)\n\n while self._waiters:\n waiter, *spam = self._waiters.popleft()\n logger.debug(\"Cancelling waiter %r\", (waiter, spam))\n if exc is None:\n _set_exception(waiter, ConnectionForcedCloseError())\n else:\n _set_exception(waiter, exc)\n while self._pubsub_channels:\n _, ch = self._pubsub_channels.popitem()\n logger.debug(\"Closing pubsub channel %r\", ch)\n ch.close(exc)\n while self._pubsub_patterns:\n _, ch = self._pubsub_patterns.popitem()\n logger.debug(\"Closing pubsub pattern %r\", ch)\n ch.close(exc)\n\n @property\n def closed(self):\n \"\"\"True if connection is closed.\"\"\"\n closed = self._closing or self._closed\n if not closed and self._reader and self._reader.at_eof():\n self._closing = closed = True\n self._loop.call_soon(self._do_close, None)\n return closed\n\n async def wait_closed(self):\n \"\"\"Coroutine waiting until connection is closed.\"\"\"\n await self._close_state.wait()\n\n @property\n def db(self):\n \"\"\"Currently selected db index.\"\"\"\n return self._db\n\n @property\n def encoding(self):\n \"\"\"Current set codec or None.\"\"\"\n return self._encoding\n\n @property\n def address(self):\n \"\"\"Redis server address, either host-port tuple or str.\"\"\"\n return self._address\n\n def select(self, db):\n \"\"\"Change the selected database for the current connection.\"\"\"\n if not isinstance(db, int):\n raise TypeError(\"DB must be of int type, not {!r}\".format(db))\n if db < 0:\n raise ValueError(\"DB must be greater or equal 0, got {!r}\"\n .format(db))\n fut = self.execute('SELECT', db)\n return wait_ok(fut)\n\n def _set_db(self, ok, args):\n assert ok in {b'OK', 'OK'}, (\"Unexpected result of SELECT\", ok)\n self._db = args[0]\n return ok\n\n def _start_transaction(self, ok):\n assert self._in_transaction is None, (\n \"Connection is already in transaction\", self._in_transaction)\n self._in_transaction = deque()\n self._transaction_error = None\n return ok\n\n def _end_transaction(self, obj, discard):\n assert self._in_transaction is not None, (\n \"Connection is not in transaction\", obj)\n self._transaction_error = None\n recall, self._in_transaction = self._in_transaction, None\n recall.popleft() # ignore first (its _start_transaction)\n if discard:\n return obj\n assert isinstance(obj, list) or (obj is None and not discard), (\n \"Unexpected MULTI/EXEC result\", obj, recall)\n # TODO: need to be able to re-try transaction\n if obj is None:\n err = WatchVariableError(\"WATCH variable has changed\")\n obj = [err] * len(recall)\n assert len(obj) == len(recall), (\n \"Wrong number of result items in mutli-exec\", obj, recall)\n res = []\n for o, (encoding, cb) in zip(obj, recall):\n if not isinstance(o, RedisError):\n try:\n if encoding:\n o = decode(o, encoding)\n if cb:\n o = cb(o)\n except Exception as err:\n res.append(err)\n continue\n res.append(o)\n return res\n\n def _update_pubsub(self, obj, *, ch):\n kind, *pattern, channel, subscriptions = obj\n self._in_pubsub, was_in_pubsub = subscriptions, self._in_pubsub\n # XXX: the channels/patterns storage should be refactored.\n # if code which supposed to read from channel/pattern\n # failed (exception in reader or else) than\n # the channel object will still reside in memory\n # and leak memory (messages will be put in queue).\n if kind == b'subscribe' and channel not in self._pubsub_channels:\n self._pubsub_channels[channel] = ch\n elif kind == b'psubscribe' and channel not in self._pubsub_patterns:\n self._pubsub_patterns[channel] = ch\n if not was_in_pubsub:\n self._process_pubsub(obj, process_waiters=False)\n return obj\n\n @property\n def in_transaction(self):\n \"\"\"Set to True when MULTI command was issued.\"\"\"\n return self._in_transaction is not None\n\n @property\n def in_pubsub(self):\n \"\"\"Indicates that connection is in PUB/SUB mode.\n\n Provides the number of subscribed channels.\n \"\"\"\n return self._in_pubsub\n\n @property\n def pubsub_channels(self):\n \"\"\"Returns read-only channels dict.\"\"\"\n return types.MappingProxyType(self._pubsub_channels)\n\n @property\n def pubsub_patterns(self):\n \"\"\"Returns read-only patterns dict.\"\"\"\n return types.MappingProxyType(self._pubsub_patterns)\n\n def auth(self, password):\n \"\"\"Authenticate to server.\"\"\"\n fut = self.execute('AUTH', password)\n return wait_ok(fut)\n", "path": "aioredis/connection.py"}], "after_files": [{"content": "import types\nimport asyncio\nimport socket\nfrom functools import partial\nfrom collections import deque\nfrom contextlib import contextmanager\n\nfrom .util import (\n encode_command,\n wait_ok,\n _NOTSET,\n _set_result,\n _set_exception,\n coerced_keys_dict,\n decode,\n parse_url,\n )\nfrom .parser import Reader\nfrom .stream import open_connection, open_unix_connection\nfrom .errors import (\n ConnectionClosedError,\n ConnectionForcedCloseError,\n RedisError,\n ProtocolError,\n ReplyError,\n WatchVariableError,\n ReadOnlyError,\n MaxClientsError\n )\nfrom .pubsub import Channel\nfrom .abc import AbcChannel\nfrom .abc import AbcConnection\nfrom .log import logger\n\n\n__all__ = ['create_connection', 'RedisConnection']\n\nMAX_CHUNK_SIZE = 65536\n\n_PUBSUB_COMMANDS = (\n 'SUBSCRIBE', b'SUBSCRIBE',\n 'PSUBSCRIBE', b'PSUBSCRIBE',\n 'UNSUBSCRIBE', b'UNSUBSCRIBE',\n 'PUNSUBSCRIBE', b'PUNSUBSCRIBE',\n )\n\n\nasync def create_connection(address, *, db=None, password=None, ssl=None,\n encoding=None, parser=None, loop=None,\n timeout=None, connection_cls=None):\n \"\"\"Creates redis connection.\n\n Opens connection to Redis server specified by address argument.\n Address argument can be one of the following:\n * A tuple representing (host, port) pair for TCP connections;\n * A string representing either Redis URI or unix domain socket path.\n\n SSL argument is passed through to asyncio.create_connection.\n By default SSL/TLS is not used.\n\n By default any timeout is applied at the connection stage, however\n you can set a limitted time used trying to open a connection via\n the `timeout` Kw.\n\n Encoding argument can be used to decode byte-replies to strings.\n By default no decoding is done.\n\n Parser parameter can be used to pass custom Redis protocol parser class.\n By default hiredis.Reader is used (unless it is missing or platform\n is not CPython).\n\n Return value is RedisConnection instance or a connection_cls if it is\n given.\n\n This function is a coroutine.\n \"\"\"\n assert isinstance(address, (tuple, list, str)), \"tuple or str expected\"\n if isinstance(address, str):\n address, options = parse_url(address)\n logger.debug(\"Parsed Redis URI %r\", address)\n db = options.setdefault('db', db)\n password = options.setdefault('password', password)\n encoding = options.setdefault('encoding', encoding)\n timeout = options.setdefault('timeout', timeout)\n if 'ssl' in options:\n assert options['ssl'] or (not options['ssl'] and not ssl), (\n \"Conflicting ssl options are set\", options['ssl'], ssl)\n ssl = ssl or options['ssl']\n\n if timeout is not None and timeout <= 0:\n raise ValueError(\"Timeout has to be None or a number greater than 0\")\n\n if connection_cls:\n assert issubclass(connection_cls, AbcConnection),\\\n \"connection_class does not meet the AbcConnection contract\"\n cls = connection_cls\n else:\n cls = RedisConnection\n\n if loop is None:\n loop = asyncio.get_event_loop()\n\n if isinstance(address, (list, tuple)):\n host, port = address\n logger.debug(\"Creating tcp connection to %r\", address)\n reader, writer = await asyncio.wait_for(open_connection(\n host, port, limit=MAX_CHUNK_SIZE, ssl=ssl, loop=loop),\n timeout, loop=loop)\n sock = writer.transport.get_extra_info('socket')\n if sock is not None:\n sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n address = sock.getpeername()\n address = tuple(address[:2])\n else:\n logger.debug(\"Creating unix connection to %r\", address)\n reader, writer = await asyncio.wait_for(open_unix_connection(\n address, ssl=ssl, limit=MAX_CHUNK_SIZE, loop=loop),\n timeout, loop=loop)\n sock = writer.transport.get_extra_info('socket')\n if sock is not None:\n address = sock.getpeername()\n\n conn = cls(reader, writer, encoding=encoding,\n address=address, parser=parser,\n loop=loop)\n\n try:\n if password is not None:\n await conn.auth(password)\n if db is not None:\n await conn.select(db)\n except Exception:\n conn.close()\n await conn.wait_closed()\n raise\n return conn\n\n\nclass RedisConnection(AbcConnection):\n \"\"\"Redis connection.\"\"\"\n\n def __init__(self, reader, writer, *, address, encoding=None,\n parser=None, loop=None):\n if loop is None:\n loop = asyncio.get_event_loop()\n if parser is None:\n parser = Reader\n assert callable(parser), (\n \"Parser argument is not callable\", parser)\n self._reader = reader\n self._writer = writer\n self._address = address\n self._loop = loop\n self._waiters = deque()\n self._reader.set_parser(\n parser(protocolError=ProtocolError, replyError=ReplyError)\n )\n self._reader_task = asyncio.ensure_future(self._read_data(),\n loop=self._loop)\n self._close_msg = None\n self._db = 0\n self._closing = False\n self._closed = False\n self._close_state = asyncio.Event()\n self._reader_task.add_done_callback(lambda x: self._close_state.set())\n self._in_transaction = None\n self._transaction_error = None # XXX: never used?\n self._in_pubsub = 0\n self._pubsub_channels = coerced_keys_dict()\n self._pubsub_patterns = coerced_keys_dict()\n self._encoding = encoding\n self._pipeline_buffer = None\n\n def __repr__(self):\n return '<RedisConnection [db:{}]>'.format(self._db)\n\n async def _read_data(self):\n \"\"\"Response reader task.\"\"\"\n last_error = ConnectionClosedError(\n \"Connection has been closed by server\")\n while not self._reader.at_eof():\n try:\n obj = await self._reader.readobj()\n except asyncio.CancelledError:\n # NOTE: reader can get cancelled from `close()` method only.\n last_error = RuntimeError('this is unexpected')\n break\n except ProtocolError as exc:\n # ProtocolError is fatal\n # so connection must be closed\n if self._in_transaction is not None:\n self._transaction_error = exc\n last_error = exc\n break\n except Exception as exc:\n # NOTE: for QUIT command connection error can be received\n # before response\n last_error = exc\n break\n else:\n if (obj == b'' or obj is None) and self._reader.at_eof():\n logger.debug(\"Connection has been closed by server,\"\n \" response: %r\", obj)\n last_error = ConnectionClosedError(\"Reader at end of file\")\n break\n\n if isinstance(obj, MaxClientsError):\n last_error = obj\n break\n if self._in_pubsub:\n self._process_pubsub(obj)\n else:\n self._process_data(obj)\n self._closing = True\n self._loop.call_soon(self._do_close, last_error)\n\n def _process_data(self, obj):\n \"\"\"Processes command results.\"\"\"\n assert len(self._waiters) > 0, (type(obj), obj)\n waiter, encoding, cb = self._waiters.popleft()\n if isinstance(obj, RedisError):\n if isinstance(obj, ReplyError):\n if obj.args[0].startswith('READONLY'):\n obj = ReadOnlyError(obj.args[0])\n _set_exception(waiter, obj)\n if self._in_transaction is not None:\n self._transaction_error = obj\n else:\n if encoding is not None:\n try:\n obj = decode(obj, encoding)\n except Exception as exc:\n _set_exception(waiter, exc)\n return\n if cb is not None:\n try:\n obj = cb(obj)\n except Exception as exc:\n _set_exception(waiter, exc)\n return\n _set_result(waiter, obj)\n if self._in_transaction is not None:\n self._in_transaction.append((encoding, cb))\n\n def _process_pubsub(self, obj, *, process_waiters=True):\n \"\"\"Processes pubsub messages.\"\"\"\n kind, *args, data = obj\n if kind in (b'subscribe', b'unsubscribe'):\n chan, = args\n if process_waiters and self._in_pubsub and self._waiters:\n self._process_data(obj)\n if kind == b'unsubscribe':\n ch = self._pubsub_channels.pop(chan, None)\n if ch:\n ch.close()\n self._in_pubsub = data\n elif kind in (b'psubscribe', b'punsubscribe'):\n chan, = args\n if process_waiters and self._in_pubsub and self._waiters:\n self._process_data(obj)\n if kind == b'punsubscribe':\n ch = self._pubsub_patterns.pop(chan, None)\n if ch:\n ch.close()\n self._in_pubsub = data\n elif kind == b'message':\n chan, = args\n self._pubsub_channels[chan].put_nowait(data)\n elif kind == b'pmessage':\n pattern, chan = args\n self._pubsub_patterns[pattern].put_nowait((chan, data))\n elif kind == b'pong':\n if process_waiters and self._in_pubsub and self._waiters:\n self._process_data(data or b'PONG')\n else:\n logger.warning(\"Unknown pubsub message received %r\", obj)\n\n @contextmanager\n def _buffered(self):\n # XXX: we must ensure that no await happens\n # as long as we buffer commands.\n # Probably we can set some error-raising callback on enter\n # and remove it on exit\n # if some await happens in between -> throw an error.\n # This is creepy solution, 'cause some one might want to await\n # on some other source except redis.\n # So we must only raise error we someone tries to await\n # pending aioredis future\n # One of solutions is to return coroutine instead of a future\n # in `execute` method.\n # In a coroutine we can check if buffering is enabled and raise error.\n\n # TODO: describe in docs difference in pipeline mode for\n # conn.execute vs pipeline.execute()\n if self._pipeline_buffer is None:\n self._pipeline_buffer = bytearray()\n try:\n yield self\n buf = self._pipeline_buffer\n self._writer.write(buf)\n finally:\n self._pipeline_buffer = None\n else:\n yield self\n\n def execute(self, command, *args, encoding=_NOTSET):\n \"\"\"Executes redis command and returns Future waiting for the answer.\n\n Raises:\n * TypeError if any of args can not be encoded as bytes.\n * ReplyError on redis '-ERR' responses.\n * ProtocolError when response can not be decoded meaning connection\n is broken.\n * ConnectionClosedError when either client or server has closed the\n connection.\n \"\"\"\n if self._reader is None or self._reader.at_eof():\n msg = self._close_msg or \"Connection closed or corrupted\"\n raise ConnectionClosedError(msg)\n if command is None:\n raise TypeError(\"command must not be None\")\n if None in args:\n raise TypeError(\"args must not contain None\")\n command = command.upper().strip()\n is_pubsub = command in _PUBSUB_COMMANDS\n is_ping = command in ('PING', b'PING')\n if self._in_pubsub and not (is_pubsub or is_ping):\n raise RedisError(\"Connection in SUBSCRIBE mode\")\n elif is_pubsub:\n logger.warning(\"Deprecated. Use `execute_pubsub` method directly\")\n return self.execute_pubsub(command, *args)\n\n if command in ('SELECT', b'SELECT'):\n cb = partial(self._set_db, args=args)\n elif command in ('MULTI', b'MULTI'):\n cb = self._start_transaction\n elif command in ('EXEC', b'EXEC'):\n cb = partial(self._end_transaction, discard=False)\n encoding = None\n elif command in ('DISCARD', b'DISCARD'):\n cb = partial(self._end_transaction, discard=True)\n else:\n cb = None\n if encoding is _NOTSET:\n encoding = self._encoding\n fut = self._loop.create_future()\n if self._pipeline_buffer is None:\n self._writer.write(encode_command(command, *args))\n else:\n encode_command(command, *args, buf=self._pipeline_buffer)\n self._waiters.append((fut, encoding, cb))\n return fut\n\n def execute_pubsub(self, command, *channels):\n \"\"\"Executes redis (p)subscribe/(p)unsubscribe commands.\n\n Returns asyncio.gather coroutine waiting for all channels/patterns\n to receive answers.\n \"\"\"\n command = command.upper().strip()\n assert command in _PUBSUB_COMMANDS, (\n \"Pub/Sub command expected\", command)\n if self._reader is None or self._reader.at_eof():\n raise ConnectionClosedError(\"Connection closed or corrupted\")\n if None in set(channels):\n raise TypeError(\"args must not contain None\")\n if not len(channels):\n raise TypeError(\"No channels/patterns supplied\")\n is_pattern = len(command) in (10, 12)\n mkchannel = partial(Channel, is_pattern=is_pattern, loop=self._loop)\n channels = [ch if isinstance(ch, AbcChannel) else mkchannel(ch)\n for ch in channels]\n if not all(ch.is_pattern == is_pattern for ch in channels):\n raise ValueError(\"Not all channels {} match command {}\"\n .format(channels, command))\n cmd = encode_command(command, *(ch.name for ch in channels))\n res = []\n for ch in channels:\n fut = self._loop.create_future()\n res.append(fut)\n cb = partial(self._update_pubsub, ch=ch)\n self._waiters.append((fut, None, cb))\n if self._pipeline_buffer is None:\n self._writer.write(cmd)\n else:\n self._pipeline_buffer.extend(cmd)\n return asyncio.gather(*res, loop=self._loop)\n\n def close(self):\n \"\"\"Close connection.\"\"\"\n self._do_close(ConnectionForcedCloseError())\n\n def _do_close(self, exc):\n if self._closed:\n return\n self._closed = True\n self._closing = False\n self._writer.transport.close()\n self._reader_task.cancel()\n self._reader_task = None\n self._writer = None\n self._reader = None\n self._pipeline_buffer = None\n\n if exc is not None:\n self._close_msg = str(exc)\n\n while self._waiters:\n waiter, *spam = self._waiters.popleft()\n logger.debug(\"Cancelling waiter %r\", (waiter, spam))\n if exc is None:\n _set_exception(waiter, ConnectionForcedCloseError())\n else:\n _set_exception(waiter, exc)\n while self._pubsub_channels:\n _, ch = self._pubsub_channels.popitem()\n logger.debug(\"Closing pubsub channel %r\", ch)\n ch.close(exc)\n while self._pubsub_patterns:\n _, ch = self._pubsub_patterns.popitem()\n logger.debug(\"Closing pubsub pattern %r\", ch)\n ch.close(exc)\n\n @property\n def closed(self):\n \"\"\"True if connection is closed.\"\"\"\n closed = self._closing or self._closed\n if not closed and self._reader and self._reader.at_eof():\n self._closing = closed = True\n self._loop.call_soon(self._do_close, None)\n return closed\n\n async def wait_closed(self):\n \"\"\"Coroutine waiting until connection is closed.\"\"\"\n await self._close_state.wait()\n\n @property\n def db(self):\n \"\"\"Currently selected db index.\"\"\"\n return self._db\n\n @property\n def encoding(self):\n \"\"\"Current set codec or None.\"\"\"\n return self._encoding\n\n @property\n def address(self):\n \"\"\"Redis server address, either host-port tuple or str.\"\"\"\n return self._address\n\n def select(self, db):\n \"\"\"Change the selected database for the current connection.\"\"\"\n if not isinstance(db, int):\n raise TypeError(\"DB must be of int type, not {!r}\".format(db))\n if db < 0:\n raise ValueError(\"DB must be greater or equal 0, got {!r}\"\n .format(db))\n fut = self.execute('SELECT', db)\n return wait_ok(fut)\n\n def _set_db(self, ok, args):\n assert ok in {b'OK', 'OK'}, (\"Unexpected result of SELECT\", ok)\n self._db = args[0]\n return ok\n\n def _start_transaction(self, ok):\n assert self._in_transaction is None, (\n \"Connection is already in transaction\", self._in_transaction)\n self._in_transaction = deque()\n self._transaction_error = None\n return ok\n\n def _end_transaction(self, obj, discard):\n assert self._in_transaction is not None, (\n \"Connection is not in transaction\", obj)\n self._transaction_error = None\n recall, self._in_transaction = self._in_transaction, None\n recall.popleft() # ignore first (its _start_transaction)\n if discard:\n return obj\n assert isinstance(obj, list) or (obj is None and not discard), (\n \"Unexpected MULTI/EXEC result\", obj, recall)\n # TODO: need to be able to re-try transaction\n if obj is None:\n err = WatchVariableError(\"WATCH variable has changed\")\n obj = [err] * len(recall)\n assert len(obj) == len(recall), (\n \"Wrong number of result items in mutli-exec\", obj, recall)\n res = []\n for o, (encoding, cb) in zip(obj, recall):\n if not isinstance(o, RedisError):\n try:\n if encoding:\n o = decode(o, encoding)\n if cb:\n o = cb(o)\n except Exception as err:\n res.append(err)\n continue\n res.append(o)\n return res\n\n def _update_pubsub(self, obj, *, ch):\n kind, *pattern, channel, subscriptions = obj\n self._in_pubsub, was_in_pubsub = subscriptions, self._in_pubsub\n # XXX: the channels/patterns storage should be refactored.\n # if code which supposed to read from channel/pattern\n # failed (exception in reader or else) than\n # the channel object will still reside in memory\n # and leak memory (messages will be put in queue).\n if kind == b'subscribe' and channel not in self._pubsub_channels:\n self._pubsub_channels[channel] = ch\n elif kind == b'psubscribe' and channel not in self._pubsub_patterns:\n self._pubsub_patterns[channel] = ch\n if not was_in_pubsub:\n self._process_pubsub(obj, process_waiters=False)\n return obj\n\n @property\n def in_transaction(self):\n \"\"\"Set to True when MULTI command was issued.\"\"\"\n return self._in_transaction is not None\n\n @property\n def in_pubsub(self):\n \"\"\"Indicates that connection is in PUB/SUB mode.\n\n Provides the number of subscribed channels.\n \"\"\"\n return self._in_pubsub\n\n @property\n def pubsub_channels(self):\n \"\"\"Returns read-only channels dict.\"\"\"\n return types.MappingProxyType(self._pubsub_channels)\n\n @property\n def pubsub_patterns(self):\n \"\"\"Returns read-only patterns dict.\"\"\"\n return types.MappingProxyType(self._pubsub_patterns)\n\n def auth(self, password):\n \"\"\"Authenticate to server.\"\"\"\n fut = self.execute('AUTH', password)\n return wait_ok(fut)\n", "path": "aioredis/connection.py"}]} |
gh_patches_debug_1531 | rasdani/github-patches | git_diff | google__personfinder-328 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Load third party libraries with PIP
We should probably follow "Using pip requirements files" section here to install third-party libraries:
https://cloud.google.com/appengine/docs/python/tools/using-libraries-python-27
instead of directly putting third party library *.py files under "app" directory.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/main.py`
Content:
```
1 #!/usr/bin/python2.7
2 # Copyright 2010 Google Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 """The main request handler. All dynamic requests except for remote_api are
17 handled by this handler, which dispatches to all other dynamic handlers."""
18
19 import django_setup # always keep this first
20
21 import mimetypes
22 import re
23 import os
24 import urlparse
25
26 from google.appengine.api import memcache
27 from google.appengine.api import users
28 from google.appengine.ext import webapp
29
30 import config
31 import const
32 import django.utils.html
33 import logging
34 import model
35 import pfif
36 import resources
37 import utils
38 import user_agents
39 import setup_pf
40
41
42 class AdminEnv(object):
43 """Template variables for admin pages."""
44
45 def __init__(self, request):
46 self.request = request
47 self.user = users.get_current_user()
48 self.logout_url = users.create_logout_url(self.request.url)
49
50 @property
51 def repo_options(self):
52 """This is different from env.repo_options because this contains all
53 repositories including deactivated ones.
54
55 This is defined as a property so that it is evaluated lazily only
56 when necessary.
57 """
58 try:
59 return [
60 utils.Struct(
61 repo=repo,
62 url=utils.get_repo_url(self.request, repo) + '/admin')
63 for repo in sorted(model.Repo.list())]
64 except:
65 # Logs the exception here because exceptions thrown during template
66 # variable evaluation is silently ignored. Note that
67 # logging.exception() logs the current exception by default.
68 logging.exception('Exception thrown')
69 return None
70
71
72 # When no action or repo is specified, redirect to this action.
73 HOME_ACTION = 'home.html'
74
75 # Map of URL actions to Python module and class names.
76 # TODO(kpy): Remove the need for this configuration information, either by
77 # regularizing the module and class names or adding a URL attribute to handlers.
78 HANDLER_CLASSES = dict((x, x.replace('/', '_') + '.Handler') for x in [
79 'start',
80 'query',
81 'results',
82 'create',
83 'view',
84 'multiview',
85 'reveal',
86 'photo',
87 'embed',
88 'extend',
89 'gadget',
90 'delete',
91 'flag_note',
92 'restore',
93 'subscribe',
94 'unsubscribe',
95 'disable_notes',
96 'confirm_disable_notes',
97 'enable_notes',
98 'confirm_enable_notes',
99 'post_flagged_note',
100 'confirm_post_flagged_note',
101 'third_party_search',
102 'admin',
103 'admin/create_repo',
104 'admin/dashboard',
105 'admin/delete_record',
106 'admin/resources',
107 'admin/review',
108 'admin/statistics',
109 'css',
110 'add_note',
111 'tos',
112 ])
113
114 # Exceptional cases where the module name doesn't match the URL.
115 HANDLER_CLASSES[''] = 'start.Handler'
116 HANDLER_CLASSES['admin/api_keys'] = 'admin_api_keys.CreateOrUpdateApiKey'
117 HANDLER_CLASSES['admin/api_keys/list'] = 'admin_api_keys.ListApiKeys'
118 HANDLER_CLASSES['api/import'] = 'api.Import'
119 HANDLER_CLASSES['api/import/notes'] = 'api.Import'
120 HANDLER_CLASSES['api/import/persons'] = 'api.Import'
121 HANDLER_CLASSES['api/read'] = 'api.Read'
122 HANDLER_CLASSES['api/write'] = 'api.Write'
123 HANDLER_CLASSES['api/search'] = 'api.Search'
124 HANDLER_CLASSES['api/subscribe'] = 'api.Subscribe'
125 HANDLER_CLASSES['api/unsubscribe'] = 'api.Unsubscribe'
126 HANDLER_CLASSES['api/stats'] = 'api.Stats'
127 HANDLER_CLASSES['api/handle_sms'] = 'api.HandleSMS'
128 HANDLER_CLASSES['api/photo_upload'] = 'api.PhotoUpload'
129 HANDLER_CLASSES['feeds/repo'] = 'feeds.Repo'
130 HANDLER_CLASSES['feeds/note'] = 'feeds.Note'
131 HANDLER_CLASSES['feeds/person'] = 'feeds.Person'
132 HANDLER_CLASSES['sitemap'] = 'sitemap.SiteMap'
133 HANDLER_CLASSES['sitemap/ping'] = 'sitemap.SiteMapPing'
134 HANDLER_CLASSES['tasks/count/note'] = 'tasks.CountNote'
135 HANDLER_CLASSES['tasks/count/person'] = 'tasks.CountPerson'
136 HANDLER_CLASSES['tasks/count/reindex'] = 'tasks.Reindex'
137 HANDLER_CLASSES['tasks/count/update_dead_status'] = 'tasks.UpdateDeadStatus'
138 HANDLER_CLASSES['tasks/count/update_status'] = 'tasks.UpdateStatus'
139 HANDLER_CLASSES['tasks/delete_expired'] = 'tasks.DeleteExpired'
140 HANDLER_CLASSES['tasks/delete_old'] = 'tasks.DeleteOld'
141 HANDLER_CLASSES['tasks/clean_up_in_test_mode'] = 'tasks.CleanUpInTestMode'
142 HANDLER_CLASSES['tasks/notify_many_unreviewed_notes'] = 'tasks.NotifyManyUnreviewedNotes'
143
144 def is_development_server():
145 """Returns True if the app is running in development."""
146 server = os.environ.get('SERVER_SOFTWARE', '')
147 return 'Development' in server
148
149 def is_cron_task(request):
150 """Returns True if the request is from appengine cron."""
151 return 'X-AppEngine-Cron' in request.headers
152
153 def is_task_queue_task(request):
154 """Returns True if the request is from the appengine task queue."""
155 return 'X-AppEngine-TaskName' in request.headers
156
157 def get_repo_and_action(request):
158 """Determines the repo and action for a request. The action is the part
159 of the URL path after the repo, with no leading or trailing slashes."""
160 scheme, netloc, path, _, _ = urlparse.urlsplit(request.url)
161 parts = path.lstrip('/').split('/')
162
163 # Depending on whether we're serving from appspot directly or
164 # google.org/personfinder we could have /global or /personfinder/global
165 # as the 'global' prefix.
166 if parts[0] == 'personfinder':
167 parts.pop(0)
168 repo = parts and parts.pop(0) or None
169 action = '/'.join(parts)
170 if repo == 'global':
171 repo = None
172 return repo, action
173
174 def select_charset(request):
175 """Given a request, chooses a charset for encoding the response.
176
177 If the selected charset is UTF-8, it always returns
178 'utf-8' (const.CHARSET_UTF8), not 'utf8', 'UTF-8', etc.
179 """
180 # We assume that any client that doesn't support UTF-8 will specify a
181 # preferred encoding in the Accept-Charset header, and will use this
182 # encoding for content, query parameters, and form data. We make this
183 # assumption across all repositories.
184
185 # Get a list of the charsets that the client supports.
186 if request.get('charsets'):
187 charsets = request.get('charsets').split(',')
188 elif user_agents.prefer_sjis_charset(request):
189 # Some Japanese feature phones don't (fully) support UTF-8.
190 # They only support Shift_JIS. But they may not send Accept-Charset
191 # header. Also, we haven't confirmed, but there may be phones whose
192 # Accept-Charset header includes UTF-8 but its UTF-8 support is buggy.
193 # So we always use Shift_JIS regardless of Accept-Charset header.
194 charsets = ['Shift_JIS']
195 else:
196 charsets = request.accept_charset.best_matches()
197
198 # Always prefer UTF-8 if the client supports it.
199 for charset in charsets:
200 if charset.lower().replace('_', '-') in ['utf8', 'utf-8']:
201 return const.CHARSET_UTF8
202
203 # Otherwise, look for a requested charset that Python supports.
204 for charset in charsets:
205 try:
206 'xyz'.encode(charset, 'replace') # test if charset is known
207 return charset
208 except:
209 continue
210
211 # If Python doesn't know any of the requested charsets, use UTF-8.
212 return const.CHARSET_UTF8
213
214 def select_lang(request, config=None):
215 """Selects the best language to use for a given request. The 'lang' query
216 parameter has priority, then the django_language cookie, then the first
217 language in the language menu, then the default setting."""
218 default_lang = (config and
219 config.language_menu_options and
220 config.language_menu_options[0])
221 lang = (request.get('lang') or
222 request.cookies.get('django_language', None) or
223 default_lang or
224 django_setup.LANGUAGE_CODE)
225 lang = re.sub('[^A-Za-z0-9-]', '', lang)
226 return const.LANGUAGE_SYNONYMS.get(lang, lang)
227
228 def get_repo_options(request, lang):
229 """Returns a list of the names and titles of the launched repositories."""
230 options = []
231 for repo in model.Repo.list_launched():
232 titles = config.get_for_repo(repo, 'repo_titles', {})
233 default_title = (titles.values() or ['?'])[0]
234 title = titles.get(lang, titles.get('en', default_title))
235 url = utils.get_repo_url(request, repo)
236 test_mode = config.get_for_repo(repo, 'test_mode')
237 options.append(utils.Struct(repo=repo, title=title, url=url,
238 test_mode=test_mode))
239 return options
240
241 def get_language_options(request, config, current_lang):
242 """Returns a list of information needed to generate the language menu."""
243 primary_langs = (config and config.language_menu_options) or ['en']
244 all_langs = sorted(
245 const.LANGUAGE_ENDONYMS.keys(),
246 key=lambda s: const.LANGUAGE_ENDONYMS[s])
247 return {
248 'primary':
249 [get_language_option(request, lang, lang == current_lang)
250 for lang in primary_langs],
251 'all':
252 # We put both 'primary' and 'all' languages into a single <select>
253 # box (See app/resources/language-menu.html.template).
254 # If current_lang is in the primary languages, we mark the
255 # language as is_selected in 'primary', not in 'all', to make sure
256 # a single option is selected in the <select> box.
257 [get_language_option(
258 request, lang,
259 lang == current_lang and lang not in primary_langs)
260 for lang in all_langs],
261 }
262
263 def get_language_option(request, lang, is_selected):
264 return {
265 'lang': lang,
266 'endonym': const.LANGUAGE_ENDONYMS.get(lang, '?'),
267 'url': utils.set_url_param(request.url, 'lang', lang),
268 'is_selected': is_selected,
269 }
270
271 def get_localized_message(localized_messages, lang, default):
272 """Gets the localized message for lang from a dictionary that maps language
273 codes to localized messages. Falls back to English if language 'lang' is
274 not available, or to a default message if English is not available."""
275 if not isinstance(localized_messages, dict):
276 return default
277 return localized_messages.get(lang, localized_messages.get('en', default))
278
279 def get_hidden_input_tags_for_preserved_query_params(request):
280 """Gets HTML with <input type="hidden"> tags to preserve query parameters
281 listed in utils.PRESERVED_QUERY_PARAM_NAMES e.g. "ui"."""
282 tags_str = ''
283 for name in utils.PRESERVED_QUERY_PARAM_NAMES:
284 value = request.get(name)
285 if value:
286 tags_str += '<input type="hidden" name="%s" value="%s">\n' % (
287 django.utils.html.escape(name),
288 django.utils.html.escape(value))
289 return tags_str
290
291 def setup_env(request):
292 """Constructs the 'env' object, which contains various template variables
293 that are commonly used by most handlers."""
294 env = utils.Struct()
295 env.repo, env.action = get_repo_and_action(request)
296 env.config = config.Configuration(env.repo or '*')
297 # TODO(ryok): Rename to local_test_mode or something alike to disambiguate
298 # better from repository's test_mode.
299 env.test_mode = (request.remote_addr == '127.0.0.1' and
300 request.get('test_mode'))
301
302 env.analytics_id = config.get('analytics_id')
303 env.maps_api_key = config.get('maps_api_key')
304
305 # Internationalization-related stuff.
306 env.charset = select_charset(request)
307 env.lang = select_lang(request, env.config)
308 env.rtl = env.lang in const.LANGUAGES_BIDI
309 env.virtual_keyboard_layout = const.VIRTUAL_KEYBOARD_LAYOUTS.get(env.lang)
310
311 # Used for parsing query params. This must be done before accessing any
312 # query params which may have multi-byte value, such as "given_name" below
313 # in this function.
314 request.charset = env.charset
315
316 # Determine the resource bundle to use.
317 env.default_resource_bundle = config.get('default_resource_bundle', '1')
318 env.resource_bundle = (request.cookies.get('resource_bundle', '') or
319 env.default_resource_bundle)
320
321 # Information about the request.
322 env.url = utils.set_url_param(request.url, 'lang', env.lang)
323 env.scheme, env.netloc, env.path, _, _ = urlparse.urlsplit(request.url)
324 env.force_https = False
325 env.domain = env.netloc.split(':')[0]
326 env.global_url = utils.get_repo_url(request, 'global')
327
328 # Commonly used information that's rendered or localized for templates.
329 env.language_options = get_language_options(request, env.config, env.lang)
330 env.repo_options = get_repo_options(request, env.lang)
331 env.expiry_options = [
332 utils.Struct(value=value, text=const.PERSON_EXPIRY_TEXT[value])
333 for value in sorted(const.PERSON_EXPIRY_TEXT.keys(), key=int)
334 ]
335 env.status_options = [
336 utils.Struct(value=value, text=const.NOTE_STATUS_TEXT[value])
337 for value in pfif.NOTE_STATUS_VALUES
338 if (value != 'believed_dead' or
339 not env.config or env.config.allow_believed_dead_via_ui)
340 ]
341 env.hidden_input_tags_for_preserved_query_params = (
342 get_hidden_input_tags_for_preserved_query_params(request))
343
344 ui_param = request.get('ui', '').strip().lower()
345
346 # Interprets "small" and "style" parameters for backward compatibility.
347 # TODO(ichikawa): Delete these in near future when we decide to drop
348 # support of these parameters.
349 small_param = request.get('small', '').strip().lower()
350 style_param = request.get('style', '').strip().lower()
351 if not ui_param and small_param == 'yes':
352 ui_param = 'small'
353 elif not ui_param and style_param:
354 ui_param = style_param
355
356 if ui_param:
357 env.ui = ui_param
358 elif user_agents.is_jp_tier2_mobile_phone(request):
359 env.ui = 'light'
360 else:
361 env.ui = 'default'
362
363 # UI configurations.
364 #
365 # Enables features which require JavaScript.
366 env.enable_javascript = True
367 # Enables operations which requires Captcha.
368 env.enable_captcha = True
369 # Enables photo upload.
370 env.enable_photo_upload = True
371 # Enables to flag/unflag notes as spam, and to reveal spam notes.
372 env.enable_spam_ops = True
373 # Enables duplicate marking mode.
374 env.enable_dup_mode = True
375 # Shows a logo on top of the page.
376 env.show_logo = True
377 # Shows language menu.
378 env.show_language_menu = True
379 # Uses short labels for buttons.
380 env.use_short_buttons = False
381 # Optional "target" attribute for links to non-small pages.
382 env.target_attr = ''
383 # Shows record IDs in the results page.
384 env.show_record_ids_in_results = True
385
386 if env.ui == 'small':
387 env.show_logo = False
388 env.target_attr = ' target="_blank" '
389
390 elif env.ui == 'light':
391 # Disables features which requires JavaScript. Some feature phones
392 # doesn't support JavaScript.
393 env.enable_javascript = False
394 # Disables operations which requires Captcha because Captcha requires
395 # JavaScript.
396 env.enable_captcha = False
397 # Uploading is often not supported in feature phones.
398 env.enable_photo_upload = False
399 # Disables spam operations because it requires JavaScript and
400 # supporting more pages on ui=light.
401 env.enable_spam_ops = False
402 # Disables duplicate marking mode because it doesn't support
403 # small screens and it requires JavaScript.
404 env.enable_dup_mode = False
405 # Hides the logo on the top to save the space. Also, the logo links
406 # to the global page which doesn't support small screens.
407 env.show_logo = False
408 # Hides language menu because the menu in the current position is
409 # annoying in feature phones.
410 # TODO(ichikawa): Consider layout of the language menu.
411 env.show_language_menu = False
412 # Too long buttons are not fully shown in some feature phones.
413 env.use_short_buttons = True
414 # To make it simple.
415 env.show_record_ids_in_results = False
416
417 env.back_chevron = u'\xab'
418 back_chevron_in_charset = True
419 try:
420 env.back_chevron.encode(env.charset)
421 except UnicodeEncodeError:
422 # u'\xab' is not in the charset (e.g. Shift_JIS).
423 back_chevron_in_charset = False
424 if not back_chevron_in_charset or env.ui == 'light':
425 # Use ASCII characters on ui=light too because some feature phones
426 # support UTF-8 but don't render UTF-8 symbols such as u'\xab'.
427 env.back_chevron = u'<<'
428
429 env.enable_maps = (
430 env.enable_javascript
431 and not env.config.zero_rating_mode
432 and env.maps_api_key)
433 env.enable_analytics = (
434 env.enable_javascript
435 and not env.config.zero_rating_mode
436 and env.analytics_id)
437 env.enable_translate = (
438 env.enable_javascript
439 and not env.config.zero_rating_mode
440 and env.config.translate_api_key)
441
442 env.admin = AdminEnv(request)
443
444 # Repo-specific information.
445 if env.repo:
446 # repo_url is the root URL for the repository.
447 env.repo_url = utils.get_repo_url(request, env.repo)
448 # start_url is like repo_url but preserves parameters such as 'ui'.
449 env.start_url = utils.get_url(request, env.repo, '')
450 # URL of the link in the heading. The link on ui=small links to the
451 # normal UI.
452 env.repo_title_url = (
453 env.repo_url if env.ui == 'small' else env.start_url)
454 # URL to force default UI. Note that we show ui=light version in some
455 # user agents when ui parameter is not specified.
456 env.default_ui_url = utils.get_url(request, env.repo, '', ui='default')
457 env.repo_path = urlparse.urlsplit(env.repo_url)[2]
458 env.repo_title = get_localized_message(
459 env.config.repo_titles, env.lang, '?')
460 env.start_page_custom_html = get_localized_message(
461 env.config.start_page_custom_htmls, env.lang, '')
462 env.results_page_custom_html = get_localized_message(
463 env.config.results_page_custom_htmls, env.lang, '')
464 env.view_page_custom_html = get_localized_message(
465 env.config.view_page_custom_htmls, env.lang, '')
466 env.seek_query_form_custom_html = get_localized_message(
467 env.config.seek_query_form_custom_htmls, env.lang, '')
468 env.footer_custom_html = get_localized_message(
469 env.config.footer_custom_htmls, env.lang, '')
470 # If the repository is deactivated, we should not show test mode
471 # notification.
472 env.repo_test_mode = (
473 env.config.test_mode and not env.config.deactivated)
474 env.force_https = env.config.force_https
475
476 env.params_full_name = request.get('full_name', '').strip()
477 if not env.params_full_name:
478 # Preformat the name from 'given_name' and 'family_name' parameters.
479 given_name = request.get('given_name', '').strip()
480 family_name = request.get('family_name', '').strip()
481 env.params_full_name = utils.get_full_name(
482 given_name, family_name, env.config)
483
484 return env
485
486 def flush_caches(*keywords):
487 """Flushes the specified set of caches. Pass '*' to flush everything."""
488 if '*' in keywords or 'resource' in keywords:
489 resources.clear_caches()
490 if '*' in keywords or 'memcache' in keywords:
491 memcache.flush_all()
492 if '*' in keywords or 'config' in keywords:
493 config.cache.flush()
494 for keyword in keywords:
495 if keyword.startswith('config/'):
496 config.cache.delete(keyword[7:])
497
498
499 class Main(webapp.RequestHandler):
500 """The main request handler. All dynamic requests except for remote_api are
501 handled by this handler, which dispatches to all other dynamic handlers."""
502
503 def initialize(self, request, response):
504 webapp.RequestHandler.initialize(self, request, response)
505
506 # If requested, set the clock before doing anything clock-related.
507 # Only works on localhost for testing. Specify ?utcnow=1293840000 to
508 # set the clock to 2011-01-01, or ?utcnow=real to revert to real time.
509 utcnow = request.get('utcnow')
510 if request.remote_addr == '127.0.0.1' and utcnow:
511 if utcnow == 'real':
512 utils.set_utcnow_for_test(None)
513 else:
514 utils.set_utcnow_for_test(float(utcnow))
515
516 # If requested, flush caches before we touch anything that uses them.
517 flush_caches(*request.get('flush', '').split(','))
518
519 # Gather commonly used information into self.env.
520 self.env = setup_env(request)
521
522 # Force a redirect if requested, except where https is not supported:
523 # - for cron jobs
524 # - for task queue jobs
525 # - in development
526 if (self.env.force_https and self.env.scheme == 'http'
527 and not is_cron_task(self.request)
528 and not is_task_queue_task(self.request)
529 and not is_development_server()):
530 self.redirect(self.env.url.replace('http:', 'https:'))
531
532 # Activate the selected language.
533 response.headers['Content-Language'] = self.env.lang
534 response.headers['Set-Cookie'] = \
535 'django_language=%s; path=/' % self.env.lang
536 django_setup.activate(self.env.lang)
537
538 # Activate the appropriate resource bundle.
539 resources.set_active_bundle_name(self.env.resource_bundle)
540
541 def serve(self):
542 request, response, env = self.request, self.response, self.env
543
544 # If the Person Finder instance has not been initialized yet,
545 # prepend to any served page a warning and a link to the admin
546 # page where the datastore can be initialized.
547 if not config.get('initialized'):
548 if request.get('operation') == 'setup_datastore':
549 setup_pf.setup_datastore()
550 self.redirect(env.global_url + '/')
551 return
552 else:
553 get_vars = lambda: {'env': env}
554 content = resources.get_rendered('setup_datastore.html', env.lang,
555 (env.repo, env.charset), get_vars)
556 response.out.write(content)
557
558 if not env.action and not env.repo:
559 # Redirect to the default home page.
560 self.redirect(env.global_url + '/' + HOME_ACTION)
561 elif env.action in HANDLER_CLASSES:
562 # Dispatch to the handler for the specified action.
563 module_name, class_name = HANDLER_CLASSES[env.action].split('.')
564 handler = getattr(__import__(module_name), class_name)(
565 request, response, env)
566 getattr(handler, request.method.lower())() # get() or post()
567 elif env.action.endswith('.template'):
568 # Don't serve template source code.
569 response.set_status(404)
570 response.out.write('Not found')
571 else:
572 # Serve a static page or file.
573 env.robots_ok = True
574 get_vars = lambda: {'env': env, 'config': env.config}
575 content = resources.get_rendered(
576 env.action, env.lang, (env.repo, env.charset), get_vars)
577 if content is None:
578 response.set_status(404)
579 response.out.write('Not found')
580 else:
581 content_type, encoding = mimetypes.guess_type(env.action)
582 response.headers['Content-Type'] = (
583 (content_type or 'text/plain') +
584 ('; charset=%s' % encoding if encoding else ''))
585 response.out.write(content)
586
587 def get(self):
588 self.serve()
589
590 def post(self):
591 self.serve()
592
593 def head(self):
594 self.request.method = 'GET'
595 self.serve()
596 self.response.clear()
597
598 if __name__ == '__main__':
599 webapp.util.run_wsgi_app(webapp.WSGIApplication([('.*', Main)]))
600
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/main.py b/app/main.py
--- a/app/main.py
+++ b/app/main.py
@@ -21,6 +21,9 @@
import mimetypes
import re
import os
+import sys
+sys.path.append(os.path.join(os.path.dirname(__file__), 'vendors'))
+
import urlparse
from google.appengine.api import memcache
@@ -597,4 +600,4 @@
if __name__ == '__main__':
webapp.util.run_wsgi_app(webapp.WSGIApplication([('.*', Main)]))
-
\ No newline at end of file
+
| {"golden_diff": "diff --git a/app/main.py b/app/main.py\n--- a/app/main.py\n+++ b/app/main.py\n@@ -21,6 +21,9 @@\n import mimetypes\n import re\n import os\n+import sys\n+sys.path.append(os.path.join(os.path.dirname(__file__), 'vendors'))\n+\n import urlparse\n \n from google.appengine.api import memcache\n@@ -597,4 +600,4 @@\n \n if __name__ == '__main__':\n webapp.util.run_wsgi_app(webapp.WSGIApplication([('.*', Main)]))\n- \n\\ No newline at end of file\n+\n", "issue": "Load third party libraries with PIP\nWe should probably follow \"Using pip requirements files\" section here to install third-party libraries:\nhttps://cloud.google.com/appengine/docs/python/tools/using-libraries-python-27\ninstead of directly putting third party library *.py files under \"app\" directory.\n\n", "before_files": [{"content": "#!/usr/bin/python2.7\n# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The main request handler. All dynamic requests except for remote_api are\nhandled by this handler, which dispatches to all other dynamic handlers.\"\"\"\n\nimport django_setup # always keep this first\n\nimport mimetypes\nimport re\nimport os\nimport urlparse\n\nfrom google.appengine.api import memcache\nfrom google.appengine.api import users\nfrom google.appengine.ext import webapp\n\nimport config\nimport const\nimport django.utils.html\nimport logging\nimport model\nimport pfif\nimport resources\nimport utils\nimport user_agents\nimport setup_pf\n\n\nclass AdminEnv(object):\n \"\"\"Template variables for admin pages.\"\"\"\n\n def __init__(self, request):\n self.request = request\n self.user = users.get_current_user()\n self.logout_url = users.create_logout_url(self.request.url)\n\n @property\n def repo_options(self):\n \"\"\"This is different from env.repo_options because this contains all\n repositories including deactivated ones.\n\n This is defined as a property so that it is evaluated lazily only\n when necessary.\n \"\"\"\n try:\n return [\n utils.Struct(\n repo=repo,\n url=utils.get_repo_url(self.request, repo) + '/admin')\n for repo in sorted(model.Repo.list())]\n except:\n # Logs the exception here because exceptions thrown during template\n # variable evaluation is silently ignored. Note that\n # logging.exception() logs the current exception by default.\n logging.exception('Exception thrown')\n return None\n\n\n# When no action or repo is specified, redirect to this action.\nHOME_ACTION = 'home.html'\n\n# Map of URL actions to Python module and class names.\n# TODO(kpy): Remove the need for this configuration information, either by\n# regularizing the module and class names or adding a URL attribute to handlers.\nHANDLER_CLASSES = dict((x, x.replace('/', '_') + '.Handler') for x in [\n 'start',\n 'query',\n 'results',\n 'create',\n 'view',\n 'multiview',\n 'reveal',\n 'photo',\n 'embed',\n 'extend',\n 'gadget',\n 'delete',\n 'flag_note',\n 'restore',\n 'subscribe',\n 'unsubscribe',\n 'disable_notes',\n 'confirm_disable_notes',\n 'enable_notes',\n 'confirm_enable_notes',\n 'post_flagged_note',\n 'confirm_post_flagged_note',\n 'third_party_search',\n 'admin',\n 'admin/create_repo',\n 'admin/dashboard',\n 'admin/delete_record',\n 'admin/resources',\n 'admin/review',\n 'admin/statistics',\n 'css',\n 'add_note',\n 'tos',\n])\n\n# Exceptional cases where the module name doesn't match the URL.\nHANDLER_CLASSES[''] = 'start.Handler'\nHANDLER_CLASSES['admin/api_keys'] = 'admin_api_keys.CreateOrUpdateApiKey'\nHANDLER_CLASSES['admin/api_keys/list'] = 'admin_api_keys.ListApiKeys'\nHANDLER_CLASSES['api/import'] = 'api.Import'\nHANDLER_CLASSES['api/import/notes'] = 'api.Import'\nHANDLER_CLASSES['api/import/persons'] = 'api.Import'\nHANDLER_CLASSES['api/read'] = 'api.Read'\nHANDLER_CLASSES['api/write'] = 'api.Write'\nHANDLER_CLASSES['api/search'] = 'api.Search'\nHANDLER_CLASSES['api/subscribe'] = 'api.Subscribe'\nHANDLER_CLASSES['api/unsubscribe'] = 'api.Unsubscribe'\nHANDLER_CLASSES['api/stats'] = 'api.Stats'\nHANDLER_CLASSES['api/handle_sms'] = 'api.HandleSMS'\nHANDLER_CLASSES['api/photo_upload'] = 'api.PhotoUpload'\nHANDLER_CLASSES['feeds/repo'] = 'feeds.Repo'\nHANDLER_CLASSES['feeds/note'] = 'feeds.Note'\nHANDLER_CLASSES['feeds/person'] = 'feeds.Person'\nHANDLER_CLASSES['sitemap'] = 'sitemap.SiteMap'\nHANDLER_CLASSES['sitemap/ping'] = 'sitemap.SiteMapPing'\nHANDLER_CLASSES['tasks/count/note'] = 'tasks.CountNote'\nHANDLER_CLASSES['tasks/count/person'] = 'tasks.CountPerson'\nHANDLER_CLASSES['tasks/count/reindex'] = 'tasks.Reindex'\nHANDLER_CLASSES['tasks/count/update_dead_status'] = 'tasks.UpdateDeadStatus'\nHANDLER_CLASSES['tasks/count/update_status'] = 'tasks.UpdateStatus'\nHANDLER_CLASSES['tasks/delete_expired'] = 'tasks.DeleteExpired'\nHANDLER_CLASSES['tasks/delete_old'] = 'tasks.DeleteOld'\nHANDLER_CLASSES['tasks/clean_up_in_test_mode'] = 'tasks.CleanUpInTestMode'\nHANDLER_CLASSES['tasks/notify_many_unreviewed_notes'] = 'tasks.NotifyManyUnreviewedNotes'\n\ndef is_development_server():\n \"\"\"Returns True if the app is running in development.\"\"\"\n server = os.environ.get('SERVER_SOFTWARE', '')\n return 'Development' in server\n\ndef is_cron_task(request):\n \"\"\"Returns True if the request is from appengine cron.\"\"\"\n return 'X-AppEngine-Cron' in request.headers\n\ndef is_task_queue_task(request):\n \"\"\"Returns True if the request is from the appengine task queue.\"\"\"\n return 'X-AppEngine-TaskName' in request.headers\n\ndef get_repo_and_action(request):\n \"\"\"Determines the repo and action for a request. The action is the part\n of the URL path after the repo, with no leading or trailing slashes.\"\"\"\n scheme, netloc, path, _, _ = urlparse.urlsplit(request.url)\n parts = path.lstrip('/').split('/')\n\n # Depending on whether we're serving from appspot directly or\n # google.org/personfinder we could have /global or /personfinder/global\n # as the 'global' prefix.\n if parts[0] == 'personfinder':\n parts.pop(0)\n repo = parts and parts.pop(0) or None\n action = '/'.join(parts)\n if repo == 'global':\n repo = None\n return repo, action\n\ndef select_charset(request):\n \"\"\"Given a request, chooses a charset for encoding the response.\n\n If the selected charset is UTF-8, it always returns\n 'utf-8' (const.CHARSET_UTF8), not 'utf8', 'UTF-8', etc.\n \"\"\"\n # We assume that any client that doesn't support UTF-8 will specify a\n # preferred encoding in the Accept-Charset header, and will use this\n # encoding for content, query parameters, and form data. We make this\n # assumption across all repositories.\n\n # Get a list of the charsets that the client supports.\n if request.get('charsets'):\n charsets = request.get('charsets').split(',')\n elif user_agents.prefer_sjis_charset(request):\n # Some Japanese feature phones don't (fully) support UTF-8.\n # They only support Shift_JIS. But they may not send Accept-Charset\n # header. Also, we haven't confirmed, but there may be phones whose\n # Accept-Charset header includes UTF-8 but its UTF-8 support is buggy.\n # So we always use Shift_JIS regardless of Accept-Charset header.\n charsets = ['Shift_JIS']\n else:\n charsets = request.accept_charset.best_matches()\n\n # Always prefer UTF-8 if the client supports it.\n for charset in charsets:\n if charset.lower().replace('_', '-') in ['utf8', 'utf-8']:\n return const.CHARSET_UTF8\n\n # Otherwise, look for a requested charset that Python supports.\n for charset in charsets:\n try:\n 'xyz'.encode(charset, 'replace') # test if charset is known\n return charset\n except:\n continue\n\n # If Python doesn't know any of the requested charsets, use UTF-8.\n return const.CHARSET_UTF8\n\ndef select_lang(request, config=None):\n \"\"\"Selects the best language to use for a given request. The 'lang' query\n parameter has priority, then the django_language cookie, then the first\n language in the language menu, then the default setting.\"\"\"\n default_lang = (config and\n config.language_menu_options and\n config.language_menu_options[0])\n lang = (request.get('lang') or\n request.cookies.get('django_language', None) or\n default_lang or\n django_setup.LANGUAGE_CODE)\n lang = re.sub('[^A-Za-z0-9-]', '', lang)\n return const.LANGUAGE_SYNONYMS.get(lang, lang)\n\ndef get_repo_options(request, lang):\n \"\"\"Returns a list of the names and titles of the launched repositories.\"\"\"\n options = []\n for repo in model.Repo.list_launched():\n titles = config.get_for_repo(repo, 'repo_titles', {})\n default_title = (titles.values() or ['?'])[0]\n title = titles.get(lang, titles.get('en', default_title))\n url = utils.get_repo_url(request, repo)\n test_mode = config.get_for_repo(repo, 'test_mode')\n options.append(utils.Struct(repo=repo, title=title, url=url,\n test_mode=test_mode))\n return options\n\ndef get_language_options(request, config, current_lang):\n \"\"\"Returns a list of information needed to generate the language menu.\"\"\"\n primary_langs = (config and config.language_menu_options) or ['en']\n all_langs = sorted(\n const.LANGUAGE_ENDONYMS.keys(),\n key=lambda s: const.LANGUAGE_ENDONYMS[s])\n return {\n 'primary':\n [get_language_option(request, lang, lang == current_lang)\n for lang in primary_langs],\n 'all':\n # We put both 'primary' and 'all' languages into a single <select>\n # box (See app/resources/language-menu.html.template).\n # If current_lang is in the primary languages, we mark the\n # language as is_selected in 'primary', not in 'all', to make sure\n # a single option is selected in the <select> box.\n [get_language_option(\n request, lang,\n lang == current_lang and lang not in primary_langs)\n for lang in all_langs],\n }\n\ndef get_language_option(request, lang, is_selected):\n return {\n 'lang': lang,\n 'endonym': const.LANGUAGE_ENDONYMS.get(lang, '?'),\n 'url': utils.set_url_param(request.url, 'lang', lang),\n 'is_selected': is_selected,\n }\n\ndef get_localized_message(localized_messages, lang, default):\n \"\"\"Gets the localized message for lang from a dictionary that maps language\n codes to localized messages. Falls back to English if language 'lang' is\n not available, or to a default message if English is not available.\"\"\"\n if not isinstance(localized_messages, dict):\n return default\n return localized_messages.get(lang, localized_messages.get('en', default))\n\ndef get_hidden_input_tags_for_preserved_query_params(request):\n \"\"\"Gets HTML with <input type=\"hidden\"> tags to preserve query parameters\n listed in utils.PRESERVED_QUERY_PARAM_NAMES e.g. \"ui\".\"\"\"\n tags_str = ''\n for name in utils.PRESERVED_QUERY_PARAM_NAMES:\n value = request.get(name)\n if value:\n tags_str += '<input type=\"hidden\" name=\"%s\" value=\"%s\">\\n' % (\n django.utils.html.escape(name),\n django.utils.html.escape(value))\n return tags_str\n\ndef setup_env(request):\n \"\"\"Constructs the 'env' object, which contains various template variables\n that are commonly used by most handlers.\"\"\"\n env = utils.Struct()\n env.repo, env.action = get_repo_and_action(request)\n env.config = config.Configuration(env.repo or '*')\n # TODO(ryok): Rename to local_test_mode or something alike to disambiguate\n # better from repository's test_mode.\n env.test_mode = (request.remote_addr == '127.0.0.1' and\n request.get('test_mode'))\n\n env.analytics_id = config.get('analytics_id')\n env.maps_api_key = config.get('maps_api_key')\n\n # Internationalization-related stuff.\n env.charset = select_charset(request)\n env.lang = select_lang(request, env.config)\n env.rtl = env.lang in const.LANGUAGES_BIDI\n env.virtual_keyboard_layout = const.VIRTUAL_KEYBOARD_LAYOUTS.get(env.lang)\n\n # Used for parsing query params. This must be done before accessing any\n # query params which may have multi-byte value, such as \"given_name\" below\n # in this function.\n request.charset = env.charset\n\n # Determine the resource bundle to use.\n env.default_resource_bundle = config.get('default_resource_bundle', '1')\n env.resource_bundle = (request.cookies.get('resource_bundle', '') or\n env.default_resource_bundle)\n\n # Information about the request.\n env.url = utils.set_url_param(request.url, 'lang', env.lang)\n env.scheme, env.netloc, env.path, _, _ = urlparse.urlsplit(request.url)\n env.force_https = False\n env.domain = env.netloc.split(':')[0]\n env.global_url = utils.get_repo_url(request, 'global')\n\n # Commonly used information that's rendered or localized for templates.\n env.language_options = get_language_options(request, env.config, env.lang)\n env.repo_options = get_repo_options(request, env.lang)\n env.expiry_options = [\n utils.Struct(value=value, text=const.PERSON_EXPIRY_TEXT[value])\n for value in sorted(const.PERSON_EXPIRY_TEXT.keys(), key=int)\n ]\n env.status_options = [\n utils.Struct(value=value, text=const.NOTE_STATUS_TEXT[value])\n for value in pfif.NOTE_STATUS_VALUES\n if (value != 'believed_dead' or\n not env.config or env.config.allow_believed_dead_via_ui)\n ]\n env.hidden_input_tags_for_preserved_query_params = (\n get_hidden_input_tags_for_preserved_query_params(request))\n\n ui_param = request.get('ui', '').strip().lower()\n\n # Interprets \"small\" and \"style\" parameters for backward compatibility.\n # TODO(ichikawa): Delete these in near future when we decide to drop\n # support of these parameters.\n small_param = request.get('small', '').strip().lower()\n style_param = request.get('style', '').strip().lower()\n if not ui_param and small_param == 'yes':\n ui_param = 'small'\n elif not ui_param and style_param:\n ui_param = style_param\n\n if ui_param:\n env.ui = ui_param\n elif user_agents.is_jp_tier2_mobile_phone(request):\n env.ui = 'light'\n else:\n env.ui = 'default'\n\n # UI configurations.\n #\n # Enables features which require JavaScript.\n env.enable_javascript = True\n # Enables operations which requires Captcha.\n env.enable_captcha = True\n # Enables photo upload.\n env.enable_photo_upload = True\n # Enables to flag/unflag notes as spam, and to reveal spam notes.\n env.enable_spam_ops = True\n # Enables duplicate marking mode.\n env.enable_dup_mode = True\n # Shows a logo on top of the page.\n env.show_logo = True\n # Shows language menu.\n env.show_language_menu = True\n # Uses short labels for buttons.\n env.use_short_buttons = False\n # Optional \"target\" attribute for links to non-small pages.\n env.target_attr = ''\n # Shows record IDs in the results page.\n env.show_record_ids_in_results = True\n\n if env.ui == 'small':\n env.show_logo = False\n env.target_attr = ' target=\"_blank\" '\n\n elif env.ui == 'light':\n # Disables features which requires JavaScript. Some feature phones\n # doesn't support JavaScript.\n env.enable_javascript = False\n # Disables operations which requires Captcha because Captcha requires\n # JavaScript.\n env.enable_captcha = False\n # Uploading is often not supported in feature phones.\n env.enable_photo_upload = False\n # Disables spam operations because it requires JavaScript and\n # supporting more pages on ui=light.\n env.enable_spam_ops = False\n # Disables duplicate marking mode because it doesn't support\n # small screens and it requires JavaScript.\n env.enable_dup_mode = False\n # Hides the logo on the top to save the space. Also, the logo links\n # to the global page which doesn't support small screens.\n env.show_logo = False\n # Hides language menu because the menu in the current position is\n # annoying in feature phones.\n # TODO(ichikawa): Consider layout of the language menu.\n env.show_language_menu = False\n # Too long buttons are not fully shown in some feature phones.\n env.use_short_buttons = True\n # To make it simple.\n env.show_record_ids_in_results = False\n\n env.back_chevron = u'\\xab'\n back_chevron_in_charset = True\n try:\n env.back_chevron.encode(env.charset)\n except UnicodeEncodeError:\n # u'\\xab' is not in the charset (e.g. Shift_JIS).\n back_chevron_in_charset = False\n if not back_chevron_in_charset or env.ui == 'light':\n # Use ASCII characters on ui=light too because some feature phones\n # support UTF-8 but don't render UTF-8 symbols such as u'\\xab'.\n env.back_chevron = u'<<'\n\n env.enable_maps = (\n env.enable_javascript\n and not env.config.zero_rating_mode\n and env.maps_api_key)\n env.enable_analytics = (\n env.enable_javascript\n and not env.config.zero_rating_mode\n and env.analytics_id)\n env.enable_translate = (\n env.enable_javascript\n and not env.config.zero_rating_mode\n and env.config.translate_api_key)\n\n env.admin = AdminEnv(request)\n\n # Repo-specific information.\n if env.repo:\n # repo_url is the root URL for the repository.\n env.repo_url = utils.get_repo_url(request, env.repo)\n # start_url is like repo_url but preserves parameters such as 'ui'.\n env.start_url = utils.get_url(request, env.repo, '')\n # URL of the link in the heading. The link on ui=small links to the\n # normal UI.\n env.repo_title_url = (\n env.repo_url if env.ui == 'small' else env.start_url)\n # URL to force default UI. Note that we show ui=light version in some\n # user agents when ui parameter is not specified.\n env.default_ui_url = utils.get_url(request, env.repo, '', ui='default')\n env.repo_path = urlparse.urlsplit(env.repo_url)[2]\n env.repo_title = get_localized_message(\n env.config.repo_titles, env.lang, '?')\n env.start_page_custom_html = get_localized_message(\n env.config.start_page_custom_htmls, env.lang, '')\n env.results_page_custom_html = get_localized_message(\n env.config.results_page_custom_htmls, env.lang, '')\n env.view_page_custom_html = get_localized_message(\n env.config.view_page_custom_htmls, env.lang, '')\n env.seek_query_form_custom_html = get_localized_message(\n env.config.seek_query_form_custom_htmls, env.lang, '')\n env.footer_custom_html = get_localized_message(\n env.config.footer_custom_htmls, env.lang, '')\n # If the repository is deactivated, we should not show test mode\n # notification.\n env.repo_test_mode = (\n env.config.test_mode and not env.config.deactivated)\n env.force_https = env.config.force_https\n\n env.params_full_name = request.get('full_name', '').strip()\n if not env.params_full_name:\n # Preformat the name from 'given_name' and 'family_name' parameters.\n given_name = request.get('given_name', '').strip()\n family_name = request.get('family_name', '').strip()\n env.params_full_name = utils.get_full_name(\n given_name, family_name, env.config)\n\n return env\n\ndef flush_caches(*keywords):\n \"\"\"Flushes the specified set of caches. Pass '*' to flush everything.\"\"\"\n if '*' in keywords or 'resource' in keywords:\n resources.clear_caches()\n if '*' in keywords or 'memcache' in keywords:\n memcache.flush_all()\n if '*' in keywords or 'config' in keywords:\n config.cache.flush()\n for keyword in keywords:\n if keyword.startswith('config/'):\n config.cache.delete(keyword[7:])\n\n\nclass Main(webapp.RequestHandler):\n \"\"\"The main request handler. All dynamic requests except for remote_api are\n handled by this handler, which dispatches to all other dynamic handlers.\"\"\"\n\n def initialize(self, request, response):\n webapp.RequestHandler.initialize(self, request, response)\n\n # If requested, set the clock before doing anything clock-related.\n # Only works on localhost for testing. Specify ?utcnow=1293840000 to\n # set the clock to 2011-01-01, or ?utcnow=real to revert to real time.\n utcnow = request.get('utcnow')\n if request.remote_addr == '127.0.0.1' and utcnow:\n if utcnow == 'real':\n utils.set_utcnow_for_test(None)\n else:\n utils.set_utcnow_for_test(float(utcnow))\n\n # If requested, flush caches before we touch anything that uses them.\n flush_caches(*request.get('flush', '').split(','))\n\n # Gather commonly used information into self.env.\n self.env = setup_env(request)\n\n # Force a redirect if requested, except where https is not supported:\n # - for cron jobs\n # - for task queue jobs\n # - in development\n if (self.env.force_https and self.env.scheme == 'http'\n and not is_cron_task(self.request)\n and not is_task_queue_task(self.request)\n and not is_development_server()):\n self.redirect(self.env.url.replace('http:', 'https:'))\n\n # Activate the selected language.\n response.headers['Content-Language'] = self.env.lang\n response.headers['Set-Cookie'] = \\\n 'django_language=%s; path=/' % self.env.lang\n django_setup.activate(self.env.lang)\n\n # Activate the appropriate resource bundle.\n resources.set_active_bundle_name(self.env.resource_bundle)\n\n def serve(self):\n request, response, env = self.request, self.response, self.env\n\n # If the Person Finder instance has not been initialized yet,\n # prepend to any served page a warning and a link to the admin\n # page where the datastore can be initialized.\n if not config.get('initialized'):\n if request.get('operation') == 'setup_datastore':\n setup_pf.setup_datastore()\n self.redirect(env.global_url + '/')\n return\n else:\n get_vars = lambda: {'env': env}\n content = resources.get_rendered('setup_datastore.html', env.lang,\n (env.repo, env.charset), get_vars)\n response.out.write(content)\n\n if not env.action and not env.repo:\n # Redirect to the default home page.\n self.redirect(env.global_url + '/' + HOME_ACTION)\n elif env.action in HANDLER_CLASSES:\n # Dispatch to the handler for the specified action.\n module_name, class_name = HANDLER_CLASSES[env.action].split('.')\n handler = getattr(__import__(module_name), class_name)(\n request, response, env)\n getattr(handler, request.method.lower())() # get() or post()\n elif env.action.endswith('.template'):\n # Don't serve template source code.\n response.set_status(404)\n response.out.write('Not found')\n else:\n # Serve a static page or file.\n env.robots_ok = True\n get_vars = lambda: {'env': env, 'config': env.config}\n content = resources.get_rendered(\n env.action, env.lang, (env.repo, env.charset), get_vars)\n if content is None:\n response.set_status(404)\n response.out.write('Not found')\n else:\n content_type, encoding = mimetypes.guess_type(env.action)\n response.headers['Content-Type'] = (\n (content_type or 'text/plain') +\n ('; charset=%s' % encoding if encoding else ''))\n response.out.write(content)\n\n def get(self):\n self.serve()\n\n def post(self):\n self.serve()\n\n def head(self):\n self.request.method = 'GET'\n self.serve()\n self.response.clear()\n\nif __name__ == '__main__':\n webapp.util.run_wsgi_app(webapp.WSGIApplication([('.*', Main)]))\n", "path": "app/main.py"}], "after_files": [{"content": "#!/usr/bin/python2.7\n# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The main request handler. All dynamic requests except for remote_api are\nhandled by this handler, which dispatches to all other dynamic handlers.\"\"\"\n\nimport django_setup # always keep this first\n\nimport mimetypes\nimport re\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), 'vendors'))\n\nimport urlparse\n\nfrom google.appengine.api import memcache\nfrom google.appengine.api import users\nfrom google.appengine.ext import webapp\n\nimport config\nimport const\nimport django.utils.html\nimport logging\nimport model\nimport pfif\nimport resources\nimport utils\nimport user_agents\nimport setup_pf\n\n\nclass AdminEnv(object):\n \"\"\"Template variables for admin pages.\"\"\"\n\n def __init__(self, request):\n self.request = request\n self.user = users.get_current_user()\n self.logout_url = users.create_logout_url(self.request.url)\n\n @property\n def repo_options(self):\n \"\"\"This is different from env.repo_options because this contains all\n repositories including deactivated ones.\n\n This is defined as a property so that it is evaluated lazily only\n when necessary.\n \"\"\"\n try:\n return [\n utils.Struct(\n repo=repo,\n url=utils.get_repo_url(self.request, repo) + '/admin')\n for repo in sorted(model.Repo.list())]\n except:\n # Logs the exception here because exceptions thrown during template\n # variable evaluation is silently ignored. Note that\n # logging.exception() logs the current exception by default.\n logging.exception('Exception thrown')\n return None\n\n\n# When no action or repo is specified, redirect to this action.\nHOME_ACTION = 'home.html'\n\n# Map of URL actions to Python module and class names.\n# TODO(kpy): Remove the need for this configuration information, either by\n# regularizing the module and class names or adding a URL attribute to handlers.\nHANDLER_CLASSES = dict((x, x.replace('/', '_') + '.Handler') for x in [\n 'start',\n 'query',\n 'results',\n 'create',\n 'view',\n 'multiview',\n 'reveal',\n 'photo',\n 'embed',\n 'extend',\n 'gadget',\n 'delete',\n 'flag_note',\n 'restore',\n 'subscribe',\n 'unsubscribe',\n 'disable_notes',\n 'confirm_disable_notes',\n 'enable_notes',\n 'confirm_enable_notes',\n 'post_flagged_note',\n 'confirm_post_flagged_note',\n 'third_party_search',\n 'admin',\n 'admin/create_repo',\n 'admin/dashboard',\n 'admin/delete_record',\n 'admin/resources',\n 'admin/review',\n 'admin/statistics',\n 'css',\n 'add_note',\n 'tos',\n])\n\n# Exceptional cases where the module name doesn't match the URL.\nHANDLER_CLASSES[''] = 'start.Handler'\nHANDLER_CLASSES['admin/api_keys'] = 'admin_api_keys.CreateOrUpdateApiKey'\nHANDLER_CLASSES['admin/api_keys/list'] = 'admin_api_keys.ListApiKeys'\nHANDLER_CLASSES['api/import'] = 'api.Import'\nHANDLER_CLASSES['api/import/notes'] = 'api.Import'\nHANDLER_CLASSES['api/import/persons'] = 'api.Import'\nHANDLER_CLASSES['api/read'] = 'api.Read'\nHANDLER_CLASSES['api/write'] = 'api.Write'\nHANDLER_CLASSES['api/search'] = 'api.Search'\nHANDLER_CLASSES['api/subscribe'] = 'api.Subscribe'\nHANDLER_CLASSES['api/unsubscribe'] = 'api.Unsubscribe'\nHANDLER_CLASSES['api/stats'] = 'api.Stats'\nHANDLER_CLASSES['api/handle_sms'] = 'api.HandleSMS'\nHANDLER_CLASSES['api/photo_upload'] = 'api.PhotoUpload'\nHANDLER_CLASSES['feeds/repo'] = 'feeds.Repo'\nHANDLER_CLASSES['feeds/note'] = 'feeds.Note'\nHANDLER_CLASSES['feeds/person'] = 'feeds.Person'\nHANDLER_CLASSES['sitemap'] = 'sitemap.SiteMap'\nHANDLER_CLASSES['sitemap/ping'] = 'sitemap.SiteMapPing'\nHANDLER_CLASSES['tasks/count/note'] = 'tasks.CountNote'\nHANDLER_CLASSES['tasks/count/person'] = 'tasks.CountPerson'\nHANDLER_CLASSES['tasks/count/reindex'] = 'tasks.Reindex'\nHANDLER_CLASSES['tasks/count/update_dead_status'] = 'tasks.UpdateDeadStatus'\nHANDLER_CLASSES['tasks/count/update_status'] = 'tasks.UpdateStatus'\nHANDLER_CLASSES['tasks/delete_expired'] = 'tasks.DeleteExpired'\nHANDLER_CLASSES['tasks/delete_old'] = 'tasks.DeleteOld'\nHANDLER_CLASSES['tasks/clean_up_in_test_mode'] = 'tasks.CleanUpInTestMode'\nHANDLER_CLASSES['tasks/notify_many_unreviewed_notes'] = 'tasks.NotifyManyUnreviewedNotes'\n\ndef is_development_server():\n \"\"\"Returns True if the app is running in development.\"\"\"\n server = os.environ.get('SERVER_SOFTWARE', '')\n return 'Development' in server\n\ndef is_cron_task(request):\n \"\"\"Returns True if the request is from appengine cron.\"\"\"\n return 'X-AppEngine-Cron' in request.headers\n\ndef is_task_queue_task(request):\n \"\"\"Returns True if the request is from the appengine task queue.\"\"\"\n return 'X-AppEngine-TaskName' in request.headers\n\ndef get_repo_and_action(request):\n \"\"\"Determines the repo and action for a request. The action is the part\n of the URL path after the repo, with no leading or trailing slashes.\"\"\"\n scheme, netloc, path, _, _ = urlparse.urlsplit(request.url)\n parts = path.lstrip('/').split('/')\n\n # Depending on whether we're serving from appspot directly or\n # google.org/personfinder we could have /global or /personfinder/global\n # as the 'global' prefix.\n if parts[0] == 'personfinder':\n parts.pop(0)\n repo = parts and parts.pop(0) or None\n action = '/'.join(parts)\n if repo == 'global':\n repo = None\n return repo, action\n\ndef select_charset(request):\n \"\"\"Given a request, chooses a charset for encoding the response.\n\n If the selected charset is UTF-8, it always returns\n 'utf-8' (const.CHARSET_UTF8), not 'utf8', 'UTF-8', etc.\n \"\"\"\n # We assume that any client that doesn't support UTF-8 will specify a\n # preferred encoding in the Accept-Charset header, and will use this\n # encoding for content, query parameters, and form data. We make this\n # assumption across all repositories.\n\n # Get a list of the charsets that the client supports.\n if request.get('charsets'):\n charsets = request.get('charsets').split(',')\n elif user_agents.prefer_sjis_charset(request):\n # Some Japanese feature phones don't (fully) support UTF-8.\n # They only support Shift_JIS. But they may not send Accept-Charset\n # header. Also, we haven't confirmed, but there may be phones whose\n # Accept-Charset header includes UTF-8 but its UTF-8 support is buggy.\n # So we always use Shift_JIS regardless of Accept-Charset header.\n charsets = ['Shift_JIS']\n else:\n charsets = request.accept_charset.best_matches()\n\n # Always prefer UTF-8 if the client supports it.\n for charset in charsets:\n if charset.lower().replace('_', '-') in ['utf8', 'utf-8']:\n return const.CHARSET_UTF8\n\n # Otherwise, look for a requested charset that Python supports.\n for charset in charsets:\n try:\n 'xyz'.encode(charset, 'replace') # test if charset is known\n return charset\n except:\n continue\n\n # If Python doesn't know any of the requested charsets, use UTF-8.\n return const.CHARSET_UTF8\n\ndef select_lang(request, config=None):\n \"\"\"Selects the best language to use for a given request. The 'lang' query\n parameter has priority, then the django_language cookie, then the first\n language in the language menu, then the default setting.\"\"\"\n default_lang = (config and\n config.language_menu_options and\n config.language_menu_options[0])\n lang = (request.get('lang') or\n request.cookies.get('django_language', None) or\n default_lang or\n django_setup.LANGUAGE_CODE)\n lang = re.sub('[^A-Za-z0-9-]', '', lang)\n return const.LANGUAGE_SYNONYMS.get(lang, lang)\n\ndef get_repo_options(request, lang):\n \"\"\"Returns a list of the names and titles of the launched repositories.\"\"\"\n options = []\n for repo in model.Repo.list_launched():\n titles = config.get_for_repo(repo, 'repo_titles', {})\n default_title = (titles.values() or ['?'])[0]\n title = titles.get(lang, titles.get('en', default_title))\n url = utils.get_repo_url(request, repo)\n test_mode = config.get_for_repo(repo, 'test_mode')\n options.append(utils.Struct(repo=repo, title=title, url=url,\n test_mode=test_mode))\n return options\n\ndef get_language_options(request, config, current_lang):\n \"\"\"Returns a list of information needed to generate the language menu.\"\"\"\n primary_langs = (config and config.language_menu_options) or ['en']\n all_langs = sorted(\n const.LANGUAGE_ENDONYMS.keys(),\n key=lambda s: const.LANGUAGE_ENDONYMS[s])\n return {\n 'primary':\n [get_language_option(request, lang, lang == current_lang)\n for lang in primary_langs],\n 'all':\n # We put both 'primary' and 'all' languages into a single <select>\n # box (See app/resources/language-menu.html.template).\n # If current_lang is in the primary languages, we mark the\n # language as is_selected in 'primary', not in 'all', to make sure\n # a single option is selected in the <select> box.\n [get_language_option(\n request, lang,\n lang == current_lang and lang not in primary_langs)\n for lang in all_langs],\n }\n\ndef get_language_option(request, lang, is_selected):\n return {\n 'lang': lang,\n 'endonym': const.LANGUAGE_ENDONYMS.get(lang, '?'),\n 'url': utils.set_url_param(request.url, 'lang', lang),\n 'is_selected': is_selected,\n }\n\ndef get_localized_message(localized_messages, lang, default):\n \"\"\"Gets the localized message for lang from a dictionary that maps language\n codes to localized messages. Falls back to English if language 'lang' is\n not available, or to a default message if English is not available.\"\"\"\n if not isinstance(localized_messages, dict):\n return default\n return localized_messages.get(lang, localized_messages.get('en', default))\n\ndef get_hidden_input_tags_for_preserved_query_params(request):\n \"\"\"Gets HTML with <input type=\"hidden\"> tags to preserve query parameters\n listed in utils.PRESERVED_QUERY_PARAM_NAMES e.g. \"ui\".\"\"\"\n tags_str = ''\n for name in utils.PRESERVED_QUERY_PARAM_NAMES:\n value = request.get(name)\n if value:\n tags_str += '<input type=\"hidden\" name=\"%s\" value=\"%s\">\\n' % (\n django.utils.html.escape(name),\n django.utils.html.escape(value))\n return tags_str\n\ndef setup_env(request):\n \"\"\"Constructs the 'env' object, which contains various template variables\n that are commonly used by most handlers.\"\"\"\n env = utils.Struct()\n env.repo, env.action = get_repo_and_action(request)\n env.config = config.Configuration(env.repo or '*')\n # TODO(ryok): Rename to local_test_mode or something alike to disambiguate\n # better from repository's test_mode.\n env.test_mode = (request.remote_addr == '127.0.0.1' and\n request.get('test_mode'))\n\n env.analytics_id = config.get('analytics_id')\n env.maps_api_key = config.get('maps_api_key')\n\n # Internationalization-related stuff.\n env.charset = select_charset(request)\n env.lang = select_lang(request, env.config)\n env.rtl = env.lang in const.LANGUAGES_BIDI\n env.virtual_keyboard_layout = const.VIRTUAL_KEYBOARD_LAYOUTS.get(env.lang)\n\n # Used for parsing query params. This must be done before accessing any\n # query params which may have multi-byte value, such as \"given_name\" below\n # in this function.\n request.charset = env.charset\n\n # Determine the resource bundle to use.\n env.default_resource_bundle = config.get('default_resource_bundle', '1')\n env.resource_bundle = (request.cookies.get('resource_bundle', '') or\n env.default_resource_bundle)\n\n # Information about the request.\n env.url = utils.set_url_param(request.url, 'lang', env.lang)\n env.scheme, env.netloc, env.path, _, _ = urlparse.urlsplit(request.url)\n env.force_https = False\n env.domain = env.netloc.split(':')[0]\n env.global_url = utils.get_repo_url(request, 'global')\n\n # Commonly used information that's rendered or localized for templates.\n env.language_options = get_language_options(request, env.config, env.lang)\n env.repo_options = get_repo_options(request, env.lang)\n env.expiry_options = [\n utils.Struct(value=value, text=const.PERSON_EXPIRY_TEXT[value])\n for value in sorted(const.PERSON_EXPIRY_TEXT.keys(), key=int)\n ]\n env.status_options = [\n utils.Struct(value=value, text=const.NOTE_STATUS_TEXT[value])\n for value in pfif.NOTE_STATUS_VALUES\n if (value != 'believed_dead' or\n not env.config or env.config.allow_believed_dead_via_ui)\n ]\n env.hidden_input_tags_for_preserved_query_params = (\n get_hidden_input_tags_for_preserved_query_params(request))\n\n ui_param = request.get('ui', '').strip().lower()\n\n # Interprets \"small\" and \"style\" parameters for backward compatibility.\n # TODO(ichikawa): Delete these in near future when we decide to drop\n # support of these parameters.\n small_param = request.get('small', '').strip().lower()\n style_param = request.get('style', '').strip().lower()\n if not ui_param and small_param == 'yes':\n ui_param = 'small'\n elif not ui_param and style_param:\n ui_param = style_param\n\n if ui_param:\n env.ui = ui_param\n elif user_agents.is_jp_tier2_mobile_phone(request):\n env.ui = 'light'\n else:\n env.ui = 'default'\n\n # UI configurations.\n #\n # Enables features which require JavaScript.\n env.enable_javascript = True\n # Enables operations which requires Captcha.\n env.enable_captcha = True\n # Enables photo upload.\n env.enable_photo_upload = True\n # Enables to flag/unflag notes as spam, and to reveal spam notes.\n env.enable_spam_ops = True\n # Enables duplicate marking mode.\n env.enable_dup_mode = True\n # Shows a logo on top of the page.\n env.show_logo = True\n # Shows language menu.\n env.show_language_menu = True\n # Uses short labels for buttons.\n env.use_short_buttons = False\n # Optional \"target\" attribute for links to non-small pages.\n env.target_attr = ''\n # Shows record IDs in the results page.\n env.show_record_ids_in_results = True\n\n if env.ui == 'small':\n env.show_logo = False\n env.target_attr = ' target=\"_blank\" '\n\n elif env.ui == 'light':\n # Disables features which requires JavaScript. Some feature phones\n # doesn't support JavaScript.\n env.enable_javascript = False\n # Disables operations which requires Captcha because Captcha requires\n # JavaScript.\n env.enable_captcha = False\n # Uploading is often not supported in feature phones.\n env.enable_photo_upload = False\n # Disables spam operations because it requires JavaScript and\n # supporting more pages on ui=light.\n env.enable_spam_ops = False\n # Disables duplicate marking mode because it doesn't support\n # small screens and it requires JavaScript.\n env.enable_dup_mode = False\n # Hides the logo on the top to save the space. Also, the logo links\n # to the global page which doesn't support small screens.\n env.show_logo = False\n # Hides language menu because the menu in the current position is\n # annoying in feature phones.\n # TODO(ichikawa): Consider layout of the language menu.\n env.show_language_menu = False\n # Too long buttons are not fully shown in some feature phones.\n env.use_short_buttons = True\n # To make it simple.\n env.show_record_ids_in_results = False\n\n env.back_chevron = u'\\xab'\n back_chevron_in_charset = True\n try:\n env.back_chevron.encode(env.charset)\n except UnicodeEncodeError:\n # u'\\xab' is not in the charset (e.g. Shift_JIS).\n back_chevron_in_charset = False\n if not back_chevron_in_charset or env.ui == 'light':\n # Use ASCII characters on ui=light too because some feature phones\n # support UTF-8 but don't render UTF-8 symbols such as u'\\xab'.\n env.back_chevron = u'<<'\n\n env.enable_maps = (\n env.enable_javascript\n and not env.config.zero_rating_mode\n and env.maps_api_key)\n env.enable_analytics = (\n env.enable_javascript\n and not env.config.zero_rating_mode\n and env.analytics_id)\n env.enable_translate = (\n env.enable_javascript\n and not env.config.zero_rating_mode\n and env.config.translate_api_key)\n\n env.admin = AdminEnv(request)\n\n # Repo-specific information.\n if env.repo:\n # repo_url is the root URL for the repository.\n env.repo_url = utils.get_repo_url(request, env.repo)\n # start_url is like repo_url but preserves parameters such as 'ui'.\n env.start_url = utils.get_url(request, env.repo, '')\n # URL of the link in the heading. The link on ui=small links to the\n # normal UI.\n env.repo_title_url = (\n env.repo_url if env.ui == 'small' else env.start_url)\n # URL to force default UI. Note that we show ui=light version in some\n # user agents when ui parameter is not specified.\n env.default_ui_url = utils.get_url(request, env.repo, '', ui='default')\n env.repo_path = urlparse.urlsplit(env.repo_url)[2]\n env.repo_title = get_localized_message(\n env.config.repo_titles, env.lang, '?')\n env.start_page_custom_html = get_localized_message(\n env.config.start_page_custom_htmls, env.lang, '')\n env.results_page_custom_html = get_localized_message(\n env.config.results_page_custom_htmls, env.lang, '')\n env.view_page_custom_html = get_localized_message(\n env.config.view_page_custom_htmls, env.lang, '')\n env.seek_query_form_custom_html = get_localized_message(\n env.config.seek_query_form_custom_htmls, env.lang, '')\n env.footer_custom_html = get_localized_message(\n env.config.footer_custom_htmls, env.lang, '')\n # If the repository is deactivated, we should not show test mode\n # notification.\n env.repo_test_mode = (\n env.config.test_mode and not env.config.deactivated)\n env.force_https = env.config.force_https\n\n env.params_full_name = request.get('full_name', '').strip()\n if not env.params_full_name:\n # Preformat the name from 'given_name' and 'family_name' parameters.\n given_name = request.get('given_name', '').strip()\n family_name = request.get('family_name', '').strip()\n env.params_full_name = utils.get_full_name(\n given_name, family_name, env.config)\n\n return env\n\ndef flush_caches(*keywords):\n \"\"\"Flushes the specified set of caches. Pass '*' to flush everything.\"\"\"\n if '*' in keywords or 'resource' in keywords:\n resources.clear_caches()\n if '*' in keywords or 'memcache' in keywords:\n memcache.flush_all()\n if '*' in keywords or 'config' in keywords:\n config.cache.flush()\n for keyword in keywords:\n if keyword.startswith('config/'):\n config.cache.delete(keyword[7:])\n\n\nclass Main(webapp.RequestHandler):\n \"\"\"The main request handler. All dynamic requests except for remote_api are\n handled by this handler, which dispatches to all other dynamic handlers.\"\"\"\n\n def initialize(self, request, response):\n webapp.RequestHandler.initialize(self, request, response)\n\n # If requested, set the clock before doing anything clock-related.\n # Only works on localhost for testing. Specify ?utcnow=1293840000 to\n # set the clock to 2011-01-01, or ?utcnow=real to revert to real time.\n utcnow = request.get('utcnow')\n if request.remote_addr == '127.0.0.1' and utcnow:\n if utcnow == 'real':\n utils.set_utcnow_for_test(None)\n else:\n utils.set_utcnow_for_test(float(utcnow))\n\n # If requested, flush caches before we touch anything that uses them.\n flush_caches(*request.get('flush', '').split(','))\n\n # Gather commonly used information into self.env.\n self.env = setup_env(request)\n\n # Force a redirect if requested, except where https is not supported:\n # - for cron jobs\n # - for task queue jobs\n # - in development\n if (self.env.force_https and self.env.scheme == 'http'\n and not is_cron_task(self.request)\n and not is_task_queue_task(self.request)\n and not is_development_server()):\n self.redirect(self.env.url.replace('http:', 'https:'))\n\n # Activate the selected language.\n response.headers['Content-Language'] = self.env.lang\n response.headers['Set-Cookie'] = \\\n 'django_language=%s; path=/' % self.env.lang\n django_setup.activate(self.env.lang)\n\n # Activate the appropriate resource bundle.\n resources.set_active_bundle_name(self.env.resource_bundle)\n\n def serve(self):\n request, response, env = self.request, self.response, self.env\n\n # If the Person Finder instance has not been initialized yet,\n # prepend to any served page a warning and a link to the admin\n # page where the datastore can be initialized.\n if not config.get('initialized'):\n if request.get('operation') == 'setup_datastore':\n setup_pf.setup_datastore()\n self.redirect(env.global_url + '/')\n return\n else:\n get_vars = lambda: {'env': env}\n content = resources.get_rendered('setup_datastore.html', env.lang,\n (env.repo, env.charset), get_vars)\n response.out.write(content)\n\n if not env.action and not env.repo:\n # Redirect to the default home page.\n self.redirect(env.global_url + '/' + HOME_ACTION)\n elif env.action in HANDLER_CLASSES:\n # Dispatch to the handler for the specified action.\n module_name, class_name = HANDLER_CLASSES[env.action].split('.')\n handler = getattr(__import__(module_name), class_name)(\n request, response, env)\n getattr(handler, request.method.lower())() # get() or post()\n elif env.action.endswith('.template'):\n # Don't serve template source code.\n response.set_status(404)\n response.out.write('Not found')\n else:\n # Serve a static page or file.\n env.robots_ok = True\n get_vars = lambda: {'env': env, 'config': env.config}\n content = resources.get_rendered(\n env.action, env.lang, (env.repo, env.charset), get_vars)\n if content is None:\n response.set_status(404)\n response.out.write('Not found')\n else:\n content_type, encoding = mimetypes.guess_type(env.action)\n response.headers['Content-Type'] = (\n (content_type or 'text/plain') +\n ('; charset=%s' % encoding if encoding else ''))\n response.out.write(content)\n\n def get(self):\n self.serve()\n\n def post(self):\n self.serve()\n\n def head(self):\n self.request.method = 'GET'\n self.serve()\n self.response.clear()\n\nif __name__ == '__main__':\n webapp.util.run_wsgi_app(webapp.WSGIApplication([('.*', Main)]))\n \n", "path": "app/main.py"}]} |
gh_patches_debug_1532 | rasdani/github-patches | git_diff | joke2k__faker-640 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MacOS 10.13 OSError: [Errno 24] Too many open files
```shell
return [i for i in os.listdir(path) if os.path.isdir(os.path.join(path, i)) and not i.startswith('_')]
OSError: [Errno 24] Too many open files: '/Users/abcdefg/.pyenv/versions/3.6.3/envs/weixin3/lib/python3.6/site-packages/faker/providers/address'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `faker/utils/loading.py`
Content:
```
1 import os
2 from importlib import import_module
3 import pkgutil
4
5
6 def list_module(module):
7 path = os.path.dirname(module.__file__)
8 modules = [name for finder, name, is_pkg in pkgutil.iter_modules([path]) if is_pkg]
9 if len(modules) > 0:
10 return modules
11 return [i for i in os.listdir(path) if os.path.isdir(os.path.join(path, i)) and not i.startswith('_')]
12
13
14 def find_available_locales(providers):
15 available_locales = set()
16
17 for provider_path in providers:
18
19 provider_module = import_module(provider_path)
20 if getattr(provider_module, 'localized', False):
21 langs = list_module(provider_module)
22 available_locales.update(langs)
23 return available_locales
24
25
26 def find_available_providers(modules):
27 available_providers = set()
28 for providers_mod in modules:
29 providers = ['.'.join([providers_mod.__package__, mod]) for mod in list_module(providers_mod)]
30 available_providers.update(providers)
31 return sorted(available_providers)
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/faker/utils/loading.py b/faker/utils/loading.py
--- a/faker/utils/loading.py
+++ b/faker/utils/loading.py
@@ -6,9 +6,7 @@
def list_module(module):
path = os.path.dirname(module.__file__)
modules = [name for finder, name, is_pkg in pkgutil.iter_modules([path]) if is_pkg]
- if len(modules) > 0:
- return modules
- return [i for i in os.listdir(path) if os.path.isdir(os.path.join(path, i)) and not i.startswith('_')]
+ return modules
def find_available_locales(providers):
| {"golden_diff": "diff --git a/faker/utils/loading.py b/faker/utils/loading.py\n--- a/faker/utils/loading.py\n+++ b/faker/utils/loading.py\n@@ -6,9 +6,7 @@\n def list_module(module):\n path = os.path.dirname(module.__file__)\n modules = [name for finder, name, is_pkg in pkgutil.iter_modules([path]) if is_pkg]\n- if len(modules) > 0:\n- return modules\n- return [i for i in os.listdir(path) if os.path.isdir(os.path.join(path, i)) and not i.startswith('_')]\n+ return modules\n \n \n def find_available_locales(providers):\n", "issue": "MacOS 10.13 OSError: [Errno 24] Too many open files\n```shell\r\n return [i for i in os.listdir(path) if os.path.isdir(os.path.join(path, i)) and not i.startswith('_')]\r\nOSError: [Errno 24] Too many open files: '/Users/abcdefg/.pyenv/versions/3.6.3/envs/weixin3/lib/python3.6/site-packages/faker/providers/address'\r\n```\r\n\r\n\n", "before_files": [{"content": "import os\nfrom importlib import import_module\nimport pkgutil\n\n\ndef list_module(module):\n path = os.path.dirname(module.__file__)\n modules = [name for finder, name, is_pkg in pkgutil.iter_modules([path]) if is_pkg]\n if len(modules) > 0:\n return modules\n return [i for i in os.listdir(path) if os.path.isdir(os.path.join(path, i)) and not i.startswith('_')]\n\n\ndef find_available_locales(providers):\n available_locales = set()\n\n for provider_path in providers:\n\n provider_module = import_module(provider_path)\n if getattr(provider_module, 'localized', False):\n langs = list_module(provider_module)\n available_locales.update(langs)\n return available_locales\n\n\ndef find_available_providers(modules):\n available_providers = set()\n for providers_mod in modules:\n providers = ['.'.join([providers_mod.__package__, mod]) for mod in list_module(providers_mod)]\n available_providers.update(providers)\n return sorted(available_providers)\n", "path": "faker/utils/loading.py"}], "after_files": [{"content": "import os\nfrom importlib import import_module\nimport pkgutil\n\n\ndef list_module(module):\n path = os.path.dirname(module.__file__)\n modules = [name for finder, name, is_pkg in pkgutil.iter_modules([path]) if is_pkg]\n return modules\n\n\ndef find_available_locales(providers):\n available_locales = set()\n\n for provider_path in providers:\n\n provider_module = import_module(provider_path)\n if getattr(provider_module, 'localized', False):\n langs = list_module(provider_module)\n available_locales.update(langs)\n return available_locales\n\n\ndef find_available_providers(modules):\n available_providers = set()\n for providers_mod in modules:\n providers = ['.'.join([providers_mod.__package__, mod]) for mod in list_module(providers_mod)]\n available_providers.update(providers)\n return sorted(available_providers)\n", "path": "faker/utils/loading.py"}]} |
gh_patches_debug_1533 | rasdani/github-patches | git_diff | microsoft__AzureTRE-524 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Service bus message times out on deployment of workspace template
**Describe the bug**
When deploying a template that takes > 10 minutes, although deployment is successful the status is not updated.
**Steps to reproduce**
1. Register and deploy the `azureml_devtestlabs` workspace
2. Log on to the VMSS resource processor using bastion
3. View the docker logs, wait until deployment is complete, and see similar to:
`LinkDetach("ErrorCodes.LinkDetachForced: The link 'G3:5725658:sender-link-bd7b69d4-9ad4-4b9b-b9f6-2e311be400a3' is force detached. Code: publisher(link3135). Details: AmqpMessagePublisher.IdleTimerExpired: Idle timeout: 00:10:00.")`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `processor_function/vm_porter/runner.py`
Content:
```
1 import os
2 import sys
3 import json
4 import socket
5 import asyncio
6 import logging
7 from shared.logging import disable_unwanted_loggers, initialize_logging # pylint: disable=import-error # noqa
8 from resources import strings # pylint: disable=import-error # noqa
9 from contextlib import asynccontextmanager
10 from azure.servicebus import ServiceBusMessage
11 from azure.servicebus.aio import ServiceBusClient, AutoLockRenewer
12 from azure.identity.aio import DefaultAzureCredential
13
14 logger_adapter = initialize_logging(logging.INFO, socket.gethostname())
15 disable_unwanted_loggers()
16
17
18 @asynccontextmanager
19 async def default_credentials(msi_id):
20 """
21 Context manager which yields the default credentials.
22 """
23 credential = DefaultAzureCredential(managed_identity_client_id=msi_id) if msi_id else DefaultAzureCredential()
24 yield credential
25 await credential.close()
26
27
28 async def receive_message(env_vars, service_bus_client):
29 """
30 This method is an async generator which receives messages from service bus
31 and yields those messages. If the yielded function return True the message is
32 marked complete.
33 """
34 async with service_bus_client:
35 q_name = env_vars["resource_request_queue"]
36 renewer = AutoLockRenewer()
37 receiver = service_bus_client.get_queue_receiver(queue_name=q_name, auto_lock_renewer=renewer)
38
39 async with receiver:
40 received_msgs = await receiver.receive_messages(max_message_count=10, max_wait_time=5)
41
42 for msg in received_msgs:
43 result = True
44 message = ""
45
46 try:
47 message = json.loads(str(msg))
48 result = (yield message)
49 except (json.JSONDecodeError) as e:
50 logging.error(f"Received bad service bus resource request message: {e}")
51 if result:
52 logging.info(f"Resource request for {message} is complete")
53 else:
54 logging.error('Message processing failed!')
55 logger_adapter.info(f"Message with id = {message['id']} processed as {result} and marked complete.")
56 await receiver.complete_message(msg)
57
58
59 def azure_login_command(env_vars):
60 local_login = f"az login --service-principal --username {env_vars['arm_client_id']} --password {env_vars['arm_client_secret']} --tenant {env_vars['arm_tenant_id']}"
61 vmss_login = f"az login --identity -u {env_vars['vmss_msi_id']}"
62 command = vmss_login if env_vars['vmss_msi_id'] else local_login
63 return command
64
65
66 def build_porter_command(msg_body, env_vars):
67 porter_parameters = ""
68 for parameter in msg_body['parameters']:
69 porter_parameters = porter_parameters + f" --param {parameter}={msg_body['parameters'][parameter]}"
70
71 installation_id = msg_body['parameters']['tre_id'] + "-" + msg_body['parameters']['workspace_id']
72
73 porter_parameters = porter_parameters + f" --param tfstate_container_name={env_vars['tfstate_container_name']}"
74 porter_parameters = porter_parameters + f" --param tfstate_resource_group_name={env_vars['tfstate_resource_group_name']}"
75 porter_parameters = porter_parameters + f" --param tfstate_storage_account_name={env_vars['tfstate_storage_account_name']}"
76 porter_parameters = porter_parameters + f" --param arm_use_msi={env_vars['arm_use_msi']}"
77
78 command_line = [f"{azure_login_command(env_vars)} && az acr login --name {env_vars['registry_server'].replace('.azurecr.io','')} && porter "
79 f"{msg_body['action']} {installation_id} "
80 f" --reference {env_vars['registry_server']}/{msg_body['name']}:v{msg_body['version']}"
81 f" {porter_parameters} --cred ./vm_porter/azure.json --allow-docker-host-access"
82 f" && porter show {installation_id}"]
83 return command_line
84
85
86 def porter_envs(env_var):
87 porter_env_vars = {}
88 porter_env_vars["HOME"] = os.environ['HOME']
89 porter_env_vars["PATH"] = os.environ['PATH']
90 porter_env_vars["ARM_CLIENT_ID"] = env_var["arm_client_id"]
91 porter_env_vars["ARM_CLIENT_SECRET"] = env_var["arm_client_secret"]
92 porter_env_vars["ARM_SUBSCRIPTION_ID"] = env_var["arm_subscription_id"]
93 porter_env_vars["ARM_TENANT_ID"] = env_var["arm_tenant_id"]
94
95 return porter_env_vars
96
97
98 async def run_porter(command, env_vars):
99 proc = await asyncio.create_subprocess_shell(
100 ''.join(command),
101 stdout=asyncio.subprocess.PIPE,
102 stderr=asyncio.subprocess.PIPE,
103 env=porter_envs(env_vars))
104
105 stdout, stderr = await proc.communicate()
106 logging.info(f'[{command!r} exited with {proc.returncode}]')
107 result_stdout = None
108 result_stderr = None
109 if stdout:
110 result_stdout = stdout.decode()
111 logger_adapter.info('[stdout]')
112 for string in result_stdout.split('\n'):
113 if len(string) != 0:
114 logger_adapter.info(str(string))
115 if stderr:
116 result_stderr = stderr.decode()
117 logger_adapter.info('[stderr]')
118 for string in result_stderr.split('\n'):
119 if len(string) != 0:
120 logger_adapter.info(str(string))
121
122 return (proc.returncode, result_stdout, result_stderr)
123
124
125 def service_bus_message_generator(sb_message, status, deployment_message):
126 installation_id = sb_message['parameters']['tre_id'] + "-" + sb_message['parameters']['workspace_id']
127 resource_request_message = json.dumps({
128 "id": sb_message["id"],
129 "status": status,
130 "message": f"{installation_id}: {deployment_message}"
131 })
132 return resource_request_message
133
134
135 async def deploy_porter_bundle(msg_body, sb_client, env_vars, message_logger_adapter):
136 installation_id = msg_body['parameters']['tre_id'] + "-" + msg_body['parameters']['workspace_id']
137 message_logger_adapter.info(f"{installation_id}: Deployment job configuration starting")
138 sb_sender = sb_client.get_queue_sender(queue_name=env_vars["deployment_status_queue"])
139 resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_DEPLOYING, "Deployment job starting")
140 await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body["id"]))
141
142 returncode, _, err = await run_porter(build_porter_command(msg_body, env_vars), env_vars)
143 if returncode != 0:
144 error_message = "Error context message = " + " ".join(err.split('\n'))
145 resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_FAILED, error_message)
146 await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body["id"]))
147 message_logger_adapter.info(f"{installation_id}: Deployment job configuration failed error = {error_message}")
148 return False
149 else:
150 success_message = "Workspace was deployed successfully..."
151 resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_DEPLOYED, success_message)
152 await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body["id"]))
153 message_logger_adapter.info(f"{installation_id}: {success_message}")
154 return True
155
156
157 async def runner(env_vars):
158 msi_id = env_vars["vmss_msi_id"]
159 service_bus_namespace = env_vars["service_bus_namespace"]
160 async with default_credentials(msi_id) as credential:
161 service_bus_client = ServiceBusClient(service_bus_namespace, credential)
162 logger_adapter.info("Starting message receiving loop...")
163 while True:
164 logger_adapter.info("Checking for new messages...")
165 receive_message_gen = receive_message(env_vars, service_bus_client)
166 try:
167 async for message in receive_message_gen:
168 logger_adapter.info(f"Message received for id={message['id']}")
169 message_logger_adapter = initialize_logging(logging.INFO, message['id'])
170 result = await deploy_porter_bundle(message, service_bus_client, env_vars, message_logger_adapter)
171 await receive_message_gen.asend(result)
172 except StopAsyncIteration: # the async generator when finished signals end with this exception.
173 pass
174 logger_adapter.info("All messages done sleeping...")
175 await asyncio.sleep(60)
176
177
178 def read_env_vars():
179 env_vars = {
180 # Needed for local dev
181 "app_id": os.environ.get("AZURE_CLIENT_ID", None),
182 "app_password": os.environ.get("AZURE_CLIENT_SECRET", None),
183
184 "registry_server": os.environ["REGISTRY_SERVER"],
185 "tfstate_container_name": os.environ['TERRAFORM_STATE_CONTAINER_NAME'],
186 "tfstate_resource_group_name": os.environ['MGMT_RESOURCE_GROUP_NAME'],
187 "tfstate_storage_account_name": os.environ['MGMT_STORAGE_ACCOUNT_NAME'],
188 "deployment_status_queue": os.environ['SERVICE_BUS_DEPLOYMENT_STATUS_UPDATE_QUEUE'],
189 "resource_request_queue": os.environ['SERVICE_BUS_RESOURCE_REQUEST_QUEUE'],
190 "service_bus_namespace": os.environ['SERVICE_BUS_FULLY_QUALIFIED_NAMESPACE'],
191 "vmss_msi_id": os.environ.get('VMSS_MSI_ID', None),
192
193 # Needed for running porter
194 "arm_use_msi": os.environ["ARM_USE_MSI"],
195 "arm_subscription_id": os.environ['ARM_SUBSCRIPTION_ID'],
196 "arm_client_id": os.environ["ARM_CLIENT_ID"],
197 "arm_tenant_id": os.environ["ARM_TENANT_ID"]
198 }
199
200 env_vars["arm_client_secret"] = os.environ["ARM_CLIENT_SECRET"] if env_vars["arm_use_msi"] == "false" else ""
201
202 return env_vars
203
204
205 if __name__ == "__main__":
206 try:
207 env_vars = read_env_vars()
208 except KeyError as e:
209 logger_adapter.error(f"Environment variable {e} is not set correctly...Exiting")
210 sys.exit(1)
211 logger_adapter.info("Started processor")
212 asyncio.run(runner(env_vars))
213
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/processor_function/vm_porter/runner.py b/processor_function/vm_porter/runner.py
--- a/processor_function/vm_porter/runner.py
+++ b/processor_function/vm_porter/runner.py
@@ -33,7 +33,7 @@
"""
async with service_bus_client:
q_name = env_vars["resource_request_queue"]
- renewer = AutoLockRenewer()
+ renewer = AutoLockRenewer(max_lock_renewal_duration=1800)
receiver = service_bus_client.get_queue_receiver(queue_name=q_name, auto_lock_renewer=renewer)
async with receiver:
| {"golden_diff": "diff --git a/processor_function/vm_porter/runner.py b/processor_function/vm_porter/runner.py\n--- a/processor_function/vm_porter/runner.py\n+++ b/processor_function/vm_porter/runner.py\n@@ -33,7 +33,7 @@\n \"\"\"\n async with service_bus_client:\n q_name = env_vars[\"resource_request_queue\"]\n- renewer = AutoLockRenewer()\n+ renewer = AutoLockRenewer(max_lock_renewal_duration=1800)\n receiver = service_bus_client.get_queue_receiver(queue_name=q_name, auto_lock_renewer=renewer)\n \n async with receiver:\n", "issue": "[BUG] Service bus message times out on deployment of workspace template \n**Describe the bug**\r\nWhen deploying a template that takes > 10 minutes, although deployment is successful the status is not updated.\r\n\r\n**Steps to reproduce**\r\n\r\n1. Register and deploy the `azureml_devtestlabs` workspace\r\n2. Log on to the VMSS resource processor using bastion\r\n3. View the docker logs, wait until deployment is complete, and see similar to:\r\n\r\n`LinkDetach(\"ErrorCodes.LinkDetachForced: The link 'G3:5725658:sender-link-bd7b69d4-9ad4-4b9b-b9f6-2e311be400a3' is force detached. Code: publisher(link3135). Details: AmqpMessagePublisher.IdleTimerExpired: Idle timeout: 00:10:00.\")`\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import os\nimport sys\nimport json\nimport socket\nimport asyncio\nimport logging\nfrom shared.logging import disable_unwanted_loggers, initialize_logging # pylint: disable=import-error # noqa\nfrom resources import strings # pylint: disable=import-error # noqa\nfrom contextlib import asynccontextmanager\nfrom azure.servicebus import ServiceBusMessage\nfrom azure.servicebus.aio import ServiceBusClient, AutoLockRenewer\nfrom azure.identity.aio import DefaultAzureCredential\n\nlogger_adapter = initialize_logging(logging.INFO, socket.gethostname())\ndisable_unwanted_loggers()\n\n\n@asynccontextmanager\nasync def default_credentials(msi_id):\n \"\"\"\n Context manager which yields the default credentials.\n \"\"\"\n credential = DefaultAzureCredential(managed_identity_client_id=msi_id) if msi_id else DefaultAzureCredential()\n yield credential\n await credential.close()\n\n\nasync def receive_message(env_vars, service_bus_client):\n \"\"\"\n This method is an async generator which receives messages from service bus\n and yields those messages. If the yielded function return True the message is\n marked complete.\n \"\"\"\n async with service_bus_client:\n q_name = env_vars[\"resource_request_queue\"]\n renewer = AutoLockRenewer()\n receiver = service_bus_client.get_queue_receiver(queue_name=q_name, auto_lock_renewer=renewer)\n\n async with receiver:\n received_msgs = await receiver.receive_messages(max_message_count=10, max_wait_time=5)\n\n for msg in received_msgs:\n result = True\n message = \"\"\n\n try:\n message = json.loads(str(msg))\n result = (yield message)\n except (json.JSONDecodeError) as e:\n logging.error(f\"Received bad service bus resource request message: {e}\")\n if result:\n logging.info(f\"Resource request for {message} is complete\")\n else:\n logging.error('Message processing failed!')\n logger_adapter.info(f\"Message with id = {message['id']} processed as {result} and marked complete.\")\n await receiver.complete_message(msg)\n\n\ndef azure_login_command(env_vars):\n local_login = f\"az login --service-principal --username {env_vars['arm_client_id']} --password {env_vars['arm_client_secret']} --tenant {env_vars['arm_tenant_id']}\"\n vmss_login = f\"az login --identity -u {env_vars['vmss_msi_id']}\"\n command = vmss_login if env_vars['vmss_msi_id'] else local_login\n return command\n\n\ndef build_porter_command(msg_body, env_vars):\n porter_parameters = \"\"\n for parameter in msg_body['parameters']:\n porter_parameters = porter_parameters + f\" --param {parameter}={msg_body['parameters'][parameter]}\"\n\n installation_id = msg_body['parameters']['tre_id'] + \"-\" + msg_body['parameters']['workspace_id']\n\n porter_parameters = porter_parameters + f\" --param tfstate_container_name={env_vars['tfstate_container_name']}\"\n porter_parameters = porter_parameters + f\" --param tfstate_resource_group_name={env_vars['tfstate_resource_group_name']}\"\n porter_parameters = porter_parameters + f\" --param tfstate_storage_account_name={env_vars['tfstate_storage_account_name']}\"\n porter_parameters = porter_parameters + f\" --param arm_use_msi={env_vars['arm_use_msi']}\"\n\n command_line = [f\"{azure_login_command(env_vars)} && az acr login --name {env_vars['registry_server'].replace('.azurecr.io','')} && porter \"\n f\"{msg_body['action']} {installation_id} \"\n f\" --reference {env_vars['registry_server']}/{msg_body['name']}:v{msg_body['version']}\"\n f\" {porter_parameters} --cred ./vm_porter/azure.json --allow-docker-host-access\"\n f\" && porter show {installation_id}\"]\n return command_line\n\n\ndef porter_envs(env_var):\n porter_env_vars = {}\n porter_env_vars[\"HOME\"] = os.environ['HOME']\n porter_env_vars[\"PATH\"] = os.environ['PATH']\n porter_env_vars[\"ARM_CLIENT_ID\"] = env_var[\"arm_client_id\"]\n porter_env_vars[\"ARM_CLIENT_SECRET\"] = env_var[\"arm_client_secret\"]\n porter_env_vars[\"ARM_SUBSCRIPTION_ID\"] = env_var[\"arm_subscription_id\"]\n porter_env_vars[\"ARM_TENANT_ID\"] = env_var[\"arm_tenant_id\"]\n\n return porter_env_vars\n\n\nasync def run_porter(command, env_vars):\n proc = await asyncio.create_subprocess_shell(\n ''.join(command),\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n env=porter_envs(env_vars))\n\n stdout, stderr = await proc.communicate()\n logging.info(f'[{command!r} exited with {proc.returncode}]')\n result_stdout = None\n result_stderr = None\n if stdout:\n result_stdout = stdout.decode()\n logger_adapter.info('[stdout]')\n for string in result_stdout.split('\\n'):\n if len(string) != 0:\n logger_adapter.info(str(string))\n if stderr:\n result_stderr = stderr.decode()\n logger_adapter.info('[stderr]')\n for string in result_stderr.split('\\n'):\n if len(string) != 0:\n logger_adapter.info(str(string))\n\n return (proc.returncode, result_stdout, result_stderr)\n\n\ndef service_bus_message_generator(sb_message, status, deployment_message):\n installation_id = sb_message['parameters']['tre_id'] + \"-\" + sb_message['parameters']['workspace_id']\n resource_request_message = json.dumps({\n \"id\": sb_message[\"id\"],\n \"status\": status,\n \"message\": f\"{installation_id}: {deployment_message}\"\n })\n return resource_request_message\n\n\nasync def deploy_porter_bundle(msg_body, sb_client, env_vars, message_logger_adapter):\n installation_id = msg_body['parameters']['tre_id'] + \"-\" + msg_body['parameters']['workspace_id']\n message_logger_adapter.info(f\"{installation_id}: Deployment job configuration starting\")\n sb_sender = sb_client.get_queue_sender(queue_name=env_vars[\"deployment_status_queue\"])\n resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_DEPLOYING, \"Deployment job starting\")\n await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body[\"id\"]))\n\n returncode, _, err = await run_porter(build_porter_command(msg_body, env_vars), env_vars)\n if returncode != 0:\n error_message = \"Error context message = \" + \" \".join(err.split('\\n'))\n resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_FAILED, error_message)\n await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body[\"id\"]))\n message_logger_adapter.info(f\"{installation_id}: Deployment job configuration failed error = {error_message}\")\n return False\n else:\n success_message = \"Workspace was deployed successfully...\"\n resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_DEPLOYED, success_message)\n await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body[\"id\"]))\n message_logger_adapter.info(f\"{installation_id}: {success_message}\")\n return True\n\n\nasync def runner(env_vars):\n msi_id = env_vars[\"vmss_msi_id\"]\n service_bus_namespace = env_vars[\"service_bus_namespace\"]\n async with default_credentials(msi_id) as credential:\n service_bus_client = ServiceBusClient(service_bus_namespace, credential)\n logger_adapter.info(\"Starting message receiving loop...\")\n while True:\n logger_adapter.info(\"Checking for new messages...\")\n receive_message_gen = receive_message(env_vars, service_bus_client)\n try:\n async for message in receive_message_gen:\n logger_adapter.info(f\"Message received for id={message['id']}\")\n message_logger_adapter = initialize_logging(logging.INFO, message['id'])\n result = await deploy_porter_bundle(message, service_bus_client, env_vars, message_logger_adapter)\n await receive_message_gen.asend(result)\n except StopAsyncIteration: # the async generator when finished signals end with this exception.\n pass\n logger_adapter.info(\"All messages done sleeping...\")\n await asyncio.sleep(60)\n\n\ndef read_env_vars():\n env_vars = {\n # Needed for local dev\n \"app_id\": os.environ.get(\"AZURE_CLIENT_ID\", None),\n \"app_password\": os.environ.get(\"AZURE_CLIENT_SECRET\", None),\n\n \"registry_server\": os.environ[\"REGISTRY_SERVER\"],\n \"tfstate_container_name\": os.environ['TERRAFORM_STATE_CONTAINER_NAME'],\n \"tfstate_resource_group_name\": os.environ['MGMT_RESOURCE_GROUP_NAME'],\n \"tfstate_storage_account_name\": os.environ['MGMT_STORAGE_ACCOUNT_NAME'],\n \"deployment_status_queue\": os.environ['SERVICE_BUS_DEPLOYMENT_STATUS_UPDATE_QUEUE'],\n \"resource_request_queue\": os.environ['SERVICE_BUS_RESOURCE_REQUEST_QUEUE'],\n \"service_bus_namespace\": os.environ['SERVICE_BUS_FULLY_QUALIFIED_NAMESPACE'],\n \"vmss_msi_id\": os.environ.get('VMSS_MSI_ID', None),\n\n # Needed for running porter\n \"arm_use_msi\": os.environ[\"ARM_USE_MSI\"],\n \"arm_subscription_id\": os.environ['ARM_SUBSCRIPTION_ID'],\n \"arm_client_id\": os.environ[\"ARM_CLIENT_ID\"],\n \"arm_tenant_id\": os.environ[\"ARM_TENANT_ID\"]\n }\n\n env_vars[\"arm_client_secret\"] = os.environ[\"ARM_CLIENT_SECRET\"] if env_vars[\"arm_use_msi\"] == \"false\" else \"\"\n\n return env_vars\n\n\nif __name__ == \"__main__\":\n try:\n env_vars = read_env_vars()\n except KeyError as e:\n logger_adapter.error(f\"Environment variable {e} is not set correctly...Exiting\")\n sys.exit(1)\n logger_adapter.info(\"Started processor\")\n asyncio.run(runner(env_vars))\n", "path": "processor_function/vm_porter/runner.py"}], "after_files": [{"content": "import os\nimport sys\nimport json\nimport socket\nimport asyncio\nimport logging\nfrom shared.logging import disable_unwanted_loggers, initialize_logging # pylint: disable=import-error # noqa\nfrom resources import strings # pylint: disable=import-error # noqa\nfrom contextlib import asynccontextmanager\nfrom azure.servicebus import ServiceBusMessage\nfrom azure.servicebus.aio import ServiceBusClient, AutoLockRenewer\nfrom azure.identity.aio import DefaultAzureCredential\n\nlogger_adapter = initialize_logging(logging.INFO, socket.gethostname())\ndisable_unwanted_loggers()\n\n\n@asynccontextmanager\nasync def default_credentials(msi_id):\n \"\"\"\n Context manager which yields the default credentials.\n \"\"\"\n credential = DefaultAzureCredential(managed_identity_client_id=msi_id) if msi_id else DefaultAzureCredential()\n yield credential\n await credential.close()\n\n\nasync def receive_message(env_vars, service_bus_client):\n \"\"\"\n This method is an async generator which receives messages from service bus\n and yields those messages. If the yielded function return True the message is\n marked complete.\n \"\"\"\n async with service_bus_client:\n q_name = env_vars[\"resource_request_queue\"]\n renewer = AutoLockRenewer(max_lock_renewal_duration=1800)\n receiver = service_bus_client.get_queue_receiver(queue_name=q_name, auto_lock_renewer=renewer)\n\n async with receiver:\n received_msgs = await receiver.receive_messages(max_message_count=10, max_wait_time=5)\n\n for msg in received_msgs:\n result = True\n message = \"\"\n\n try:\n message = json.loads(str(msg))\n result = (yield message)\n except (json.JSONDecodeError) as e:\n logging.error(f\"Received bad service bus resource request message: {e}\")\n if result:\n logging.info(f\"Resource request for {message} is complete\")\n else:\n logging.error('Message processing failed!')\n logger_adapter.info(f\"Message with id = {message['id']} processed as {result} and marked complete.\")\n await receiver.complete_message(msg)\n\n\ndef azure_login_command(env_vars):\n local_login = f\"az login --service-principal --username {env_vars['arm_client_id']} --password {env_vars['arm_client_secret']} --tenant {env_vars['arm_tenant_id']}\"\n vmss_login = f\"az login --identity -u {env_vars['vmss_msi_id']}\"\n command = vmss_login if env_vars['vmss_msi_id'] else local_login\n return command\n\n\ndef build_porter_command(msg_body, env_vars):\n porter_parameters = \"\"\n for parameter in msg_body['parameters']:\n porter_parameters = porter_parameters + f\" --param {parameter}={msg_body['parameters'][parameter]}\"\n\n installation_id = msg_body['parameters']['tre_id'] + \"-\" + msg_body['parameters']['workspace_id']\n\n porter_parameters = porter_parameters + f\" --param tfstate_container_name={env_vars['tfstate_container_name']}\"\n porter_parameters = porter_parameters + f\" --param tfstate_resource_group_name={env_vars['tfstate_resource_group_name']}\"\n porter_parameters = porter_parameters + f\" --param tfstate_storage_account_name={env_vars['tfstate_storage_account_name']}\"\n porter_parameters = porter_parameters + f\" --param arm_use_msi={env_vars['arm_use_msi']}\"\n\n command_line = [f\"{azure_login_command(env_vars)} && az acr login --name {env_vars['registry_server'].replace('.azurecr.io','')} && porter \"\n f\"{msg_body['action']} {installation_id} \"\n f\" --reference {env_vars['registry_server']}/{msg_body['name']}:v{msg_body['version']}\"\n f\" {porter_parameters} --cred ./vm_porter/azure.json --allow-docker-host-access\"\n f\" && porter show {installation_id}\"]\n return command_line\n\n\ndef porter_envs(env_var):\n porter_env_vars = {}\n porter_env_vars[\"HOME\"] = os.environ['HOME']\n porter_env_vars[\"PATH\"] = os.environ['PATH']\n porter_env_vars[\"ARM_CLIENT_ID\"] = env_var[\"arm_client_id\"]\n porter_env_vars[\"ARM_CLIENT_SECRET\"] = env_var[\"arm_client_secret\"]\n porter_env_vars[\"ARM_SUBSCRIPTION_ID\"] = env_var[\"arm_subscription_id\"]\n porter_env_vars[\"ARM_TENANT_ID\"] = env_var[\"arm_tenant_id\"]\n\n return porter_env_vars\n\n\nasync def run_porter(command, env_vars):\n proc = await asyncio.create_subprocess_shell(\n ''.join(command),\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n env=porter_envs(env_vars))\n\n stdout, stderr = await proc.communicate()\n logging.info(f'[{command!r} exited with {proc.returncode}]')\n result_stdout = None\n result_stderr = None\n if stdout:\n result_stdout = stdout.decode()\n logger_adapter.info('[stdout]')\n for string in result_stdout.split('\\n'):\n if len(string) != 0:\n logger_adapter.info(str(string))\n if stderr:\n result_stderr = stderr.decode()\n logger_adapter.info('[stderr]')\n for string in result_stderr.split('\\n'):\n if len(string) != 0:\n logger_adapter.info(str(string))\n\n return (proc.returncode, result_stdout, result_stderr)\n\n\ndef service_bus_message_generator(sb_message, status, deployment_message):\n installation_id = sb_message['parameters']['tre_id'] + \"-\" + sb_message['parameters']['workspace_id']\n resource_request_message = json.dumps({\n \"id\": sb_message[\"id\"],\n \"status\": status,\n \"message\": f\"{installation_id}: {deployment_message}\"\n })\n return resource_request_message\n\n\nasync def deploy_porter_bundle(msg_body, sb_client, env_vars, message_logger_adapter):\n installation_id = msg_body['parameters']['tre_id'] + \"-\" + msg_body['parameters']['workspace_id']\n message_logger_adapter.info(f\"{installation_id}: Deployment job configuration starting\")\n sb_sender = sb_client.get_queue_sender(queue_name=env_vars[\"deployment_status_queue\"])\n resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_DEPLOYING, \"Deployment job starting\")\n await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body[\"id\"]))\n\n returncode, _, err = await run_porter(build_porter_command(msg_body, env_vars), env_vars)\n if returncode != 0:\n error_message = \"Error context message = \" + \" \".join(err.split('\\n'))\n resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_FAILED, error_message)\n await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body[\"id\"]))\n message_logger_adapter.info(f\"{installation_id}: Deployment job configuration failed error = {error_message}\")\n return False\n else:\n success_message = \"Workspace was deployed successfully...\"\n resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_DEPLOYED, success_message)\n await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body[\"id\"]))\n message_logger_adapter.info(f\"{installation_id}: {success_message}\")\n return True\n\n\nasync def runner(env_vars):\n msi_id = env_vars[\"vmss_msi_id\"]\n service_bus_namespace = env_vars[\"service_bus_namespace\"]\n async with default_credentials(msi_id) as credential:\n service_bus_client = ServiceBusClient(service_bus_namespace, credential)\n logger_adapter.info(\"Starting message receiving loop...\")\n while True:\n logger_adapter.info(\"Checking for new messages...\")\n receive_message_gen = receive_message(env_vars, service_bus_client)\n try:\n async for message in receive_message_gen:\n logger_adapter.info(f\"Message received for id={message['id']}\")\n message_logger_adapter = initialize_logging(logging.INFO, message['id'])\n result = await deploy_porter_bundle(message, service_bus_client, env_vars, message_logger_adapter)\n await receive_message_gen.asend(result)\n except StopAsyncIteration: # the async generator when finished signals end with this exception.\n pass\n logger_adapter.info(\"All messages done sleeping...\")\n await asyncio.sleep(60)\n\n\ndef read_env_vars():\n env_vars = {\n # Needed for local dev\n \"app_id\": os.environ.get(\"AZURE_CLIENT_ID\", None),\n \"app_password\": os.environ.get(\"AZURE_CLIENT_SECRET\", None),\n\n \"registry_server\": os.environ[\"REGISTRY_SERVER\"],\n \"tfstate_container_name\": os.environ['TERRAFORM_STATE_CONTAINER_NAME'],\n \"tfstate_resource_group_name\": os.environ['MGMT_RESOURCE_GROUP_NAME'],\n \"tfstate_storage_account_name\": os.environ['MGMT_STORAGE_ACCOUNT_NAME'],\n \"deployment_status_queue\": os.environ['SERVICE_BUS_DEPLOYMENT_STATUS_UPDATE_QUEUE'],\n \"resource_request_queue\": os.environ['SERVICE_BUS_RESOURCE_REQUEST_QUEUE'],\n \"service_bus_namespace\": os.environ['SERVICE_BUS_FULLY_QUALIFIED_NAMESPACE'],\n \"vmss_msi_id\": os.environ.get('VMSS_MSI_ID', None),\n\n # Needed for running porter\n \"arm_use_msi\": os.environ[\"ARM_USE_MSI\"],\n \"arm_subscription_id\": os.environ['ARM_SUBSCRIPTION_ID'],\n \"arm_client_id\": os.environ[\"ARM_CLIENT_ID\"],\n \"arm_tenant_id\": os.environ[\"ARM_TENANT_ID\"]\n }\n\n env_vars[\"arm_client_secret\"] = os.environ[\"ARM_CLIENT_SECRET\"] if env_vars[\"arm_use_msi\"] == \"false\" else \"\"\n\n return env_vars\n\n\nif __name__ == \"__main__\":\n try:\n env_vars = read_env_vars()\n except KeyError as e:\n logger_adapter.error(f\"Environment variable {e} is not set correctly...Exiting\")\n sys.exit(1)\n logger_adapter.info(\"Started processor\")\n asyncio.run(runner(env_vars))\n", "path": "processor_function/vm_porter/runner.py"}]} |
gh_patches_debug_1534 | rasdani/github-patches | git_diff | pypa__pip-8124 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'pip cache info' fails when no-cache-dir set
pip version: pip 20.1b1
Python version: CPython 3.8.1
OS: Win 10 64
Testing 20.1 beta, execute 'pip cache info' and crashes. I'm guessing it's due to pip.ini turning off caching.
pip.ini:
```
[global]
no-cache-dir = false
```
Command execution:
```
> pip cache info
ERROR: Exception:
Traceback (most recent call last):
File "c:\program files\python38\lib\site-packages\pip\_internal\cli\base_command.py", line 188, in _main
status = self.run(options, args)
File "c:\program files\python38\lib\site-packages\pip\_internal\commands\cache.py", line 62, in run
handlers[action](options, args[1:])
File "c:\program files\python38\lib\site-packages\pip\_internal\commands\cache.py", line 74, in get_cache_info
num_packages = len(self._find_wheels(options, '*'))
File "c:\program files\python38\lib\site-packages\pip\_internal\commands\cache.py", line 145, in _find_wheels
wheel_dir = self._wheels_cache_dir(options)
File "c:\program files\python38\lib\site-packages\pip\_internal\commands\cache.py", line 141, in _wheels_cache_dir
return os.path.join(options.cache_dir, 'wheels')
File "c:\program files\python38\lib\ntpath.py", line 78, in join
path = os.fspath(path)
TypeError: expected str, bytes or os.PathLike object, not bool
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_internal/commands/cache.py`
Content:
```
1 from __future__ import absolute_import
2
3 import logging
4 import os
5 import textwrap
6
7 import pip._internal.utils.filesystem as filesystem
8 from pip._internal.cli.base_command import Command
9 from pip._internal.cli.status_codes import ERROR, SUCCESS
10 from pip._internal.exceptions import CommandError, PipError
11 from pip._internal.utils.typing import MYPY_CHECK_RUNNING
12
13 if MYPY_CHECK_RUNNING:
14 from optparse import Values
15 from typing import Any, List
16
17
18 logger = logging.getLogger(__name__)
19
20
21 class CacheCommand(Command):
22 """
23 Inspect and manage pip's wheel cache.
24
25 Subcommands:
26
27 info: Show information about the cache.
28 list: List filenames of packages stored in the cache.
29 remove: Remove one or more package from the cache.
30 purge: Remove all items from the cache.
31
32 <pattern> can be a glob expression or a package name.
33 """
34
35 usage = """
36 %prog info
37 %prog list [<pattern>]
38 %prog remove <pattern>
39 %prog purge
40 """
41
42 def run(self, options, args):
43 # type: (Values, List[Any]) -> int
44 handlers = {
45 "info": self.get_cache_info,
46 "list": self.list_cache_items,
47 "remove": self.remove_cache_items,
48 "purge": self.purge_cache,
49 }
50
51 # Determine action
52 if not args or args[0] not in handlers:
53 logger.error("Need an action ({}) to perform.".format(
54 ", ".join(sorted(handlers)))
55 )
56 return ERROR
57
58 action = args[0]
59
60 # Error handling happens here, not in the action-handlers.
61 try:
62 handlers[action](options, args[1:])
63 except PipError as e:
64 logger.error(e.args[0])
65 return ERROR
66
67 return SUCCESS
68
69 def get_cache_info(self, options, args):
70 # type: (Values, List[Any]) -> None
71 if args:
72 raise CommandError('Too many arguments')
73
74 num_packages = len(self._find_wheels(options, '*'))
75
76 cache_location = self._wheels_cache_dir(options)
77 cache_size = filesystem.format_directory_size(cache_location)
78
79 message = textwrap.dedent("""
80 Location: {location}
81 Size: {size}
82 Number of wheels: {package_count}
83 """).format(
84 location=cache_location,
85 package_count=num_packages,
86 size=cache_size,
87 ).strip()
88
89 logger.info(message)
90
91 def list_cache_items(self, options, args):
92 # type: (Values, List[Any]) -> None
93 if len(args) > 1:
94 raise CommandError('Too many arguments')
95
96 if args:
97 pattern = args[0]
98 else:
99 pattern = '*'
100
101 files = self._find_wheels(options, pattern)
102
103 if not files:
104 logger.info('Nothing cached.')
105 return
106
107 results = []
108 for filename in files:
109 wheel = os.path.basename(filename)
110 size = filesystem.format_file_size(filename)
111 results.append(' - {} ({})'.format(wheel, size))
112 logger.info('Cache contents:\n')
113 logger.info('\n'.join(sorted(results)))
114
115 def remove_cache_items(self, options, args):
116 # type: (Values, List[Any]) -> None
117 if len(args) > 1:
118 raise CommandError('Too many arguments')
119
120 if not args:
121 raise CommandError('Please provide a pattern')
122
123 files = self._find_wheels(options, args[0])
124 if not files:
125 raise CommandError('No matching packages')
126
127 for filename in files:
128 os.unlink(filename)
129 logger.debug('Removed %s', filename)
130 logger.info('Files removed: %s', len(files))
131
132 def purge_cache(self, options, args):
133 # type: (Values, List[Any]) -> None
134 if args:
135 raise CommandError('Too many arguments')
136
137 return self.remove_cache_items(options, ['*'])
138
139 def _wheels_cache_dir(self, options):
140 # type: (Values) -> str
141 return os.path.join(options.cache_dir, 'wheels')
142
143 def _find_wheels(self, options, pattern):
144 # type: (Values, str) -> List[str]
145 wheel_dir = self._wheels_cache_dir(options)
146
147 # The wheel filename format, as specified in PEP 427, is:
148 # {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl
149 #
150 # Additionally, non-alphanumeric values in the distribution are
151 # normalized to underscores (_), meaning hyphens can never occur
152 # before `-{version}`.
153 #
154 # Given that information:
155 # - If the pattern we're given contains a hyphen (-), the user is
156 # providing at least the version. Thus, we can just append `*.whl`
157 # to match the rest of it.
158 # - If the pattern we're given doesn't contain a hyphen (-), the
159 # user is only providing the name. Thus, we append `-*.whl` to
160 # match the hyphen before the version, followed by anything else.
161 #
162 # PEP 427: https://www.python.org/dev/peps/pep-0427/
163 pattern = pattern + ("*.whl" if "-" in pattern else "-*.whl")
164
165 return filesystem.find_files(wheel_dir, pattern)
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pip/_internal/commands/cache.py b/src/pip/_internal/commands/cache.py
--- a/src/pip/_internal/commands/cache.py
+++ b/src/pip/_internal/commands/cache.py
@@ -48,6 +48,11 @@
"purge": self.purge_cache,
}
+ if not options.cache_dir:
+ logger.error("pip cache commands can not "
+ "function since cache is disabled.")
+ return ERROR
+
# Determine action
if not args or args[0] not in handlers:
logger.error("Need an action ({}) to perform.".format(
| {"golden_diff": "diff --git a/src/pip/_internal/commands/cache.py b/src/pip/_internal/commands/cache.py\n--- a/src/pip/_internal/commands/cache.py\n+++ b/src/pip/_internal/commands/cache.py\n@@ -48,6 +48,11 @@\n \"purge\": self.purge_cache,\n }\n \n+ if not options.cache_dir:\n+ logger.error(\"pip cache commands can not \"\n+ \"function since cache is disabled.\")\n+ return ERROR\n+\n # Determine action\n if not args or args[0] not in handlers:\n logger.error(\"Need an action ({}) to perform.\".format(\n", "issue": "'pip cache info' fails when no-cache-dir set\npip version: pip 20.1b1\r\nPython version: CPython 3.8.1\r\nOS: Win 10 64\r\n\r\nTesting 20.1 beta, execute 'pip cache info' and crashes. I'm guessing it's due to pip.ini turning off caching.\r\n\r\npip.ini:\r\n```\r\n[global]\r\nno-cache-dir = false\r\n```\r\n\r\nCommand execution:\r\n```\r\n> pip cache info\r\nERROR: Exception:\r\nTraceback (most recent call last):\r\n File \"c:\\program files\\python38\\lib\\site-packages\\pip\\_internal\\cli\\base_command.py\", line 188, in _main\r\n status = self.run(options, args)\r\n File \"c:\\program files\\python38\\lib\\site-packages\\pip\\_internal\\commands\\cache.py\", line 62, in run\r\n handlers[action](options, args[1:])\r\n File \"c:\\program files\\python38\\lib\\site-packages\\pip\\_internal\\commands\\cache.py\", line 74, in get_cache_info\r\n num_packages = len(self._find_wheels(options, '*'))\r\n File \"c:\\program files\\python38\\lib\\site-packages\\pip\\_internal\\commands\\cache.py\", line 145, in _find_wheels\r\n wheel_dir = self._wheels_cache_dir(options)\r\n File \"c:\\program files\\python38\\lib\\site-packages\\pip\\_internal\\commands\\cache.py\", line 141, in _wheels_cache_dir\r\n return os.path.join(options.cache_dir, 'wheels')\r\n File \"c:\\program files\\python38\\lib\\ntpath.py\", line 78, in join\r\n path = os.fspath(path)\r\nTypeError: expected str, bytes or os.PathLike object, not bool\r\n```\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport os\nimport textwrap\n\nimport pip._internal.utils.filesystem as filesystem\nfrom pip._internal.cli.base_command import Command\nfrom pip._internal.cli.status_codes import ERROR, SUCCESS\nfrom pip._internal.exceptions import CommandError, PipError\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nif MYPY_CHECK_RUNNING:\n from optparse import Values\n from typing import Any, List\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CacheCommand(Command):\n \"\"\"\n Inspect and manage pip's wheel cache.\n\n Subcommands:\n\n info: Show information about the cache.\n list: List filenames of packages stored in the cache.\n remove: Remove one or more package from the cache.\n purge: Remove all items from the cache.\n\n <pattern> can be a glob expression or a package name.\n \"\"\"\n\n usage = \"\"\"\n %prog info\n %prog list [<pattern>]\n %prog remove <pattern>\n %prog purge\n \"\"\"\n\n def run(self, options, args):\n # type: (Values, List[Any]) -> int\n handlers = {\n \"info\": self.get_cache_info,\n \"list\": self.list_cache_items,\n \"remove\": self.remove_cache_items,\n \"purge\": self.purge_cache,\n }\n\n # Determine action\n if not args or args[0] not in handlers:\n logger.error(\"Need an action ({}) to perform.\".format(\n \", \".join(sorted(handlers)))\n )\n return ERROR\n\n action = args[0]\n\n # Error handling happens here, not in the action-handlers.\n try:\n handlers[action](options, args[1:])\n except PipError as e:\n logger.error(e.args[0])\n return ERROR\n\n return SUCCESS\n\n def get_cache_info(self, options, args):\n # type: (Values, List[Any]) -> None\n if args:\n raise CommandError('Too many arguments')\n\n num_packages = len(self._find_wheels(options, '*'))\n\n cache_location = self._wheels_cache_dir(options)\n cache_size = filesystem.format_directory_size(cache_location)\n\n message = textwrap.dedent(\"\"\"\n Location: {location}\n Size: {size}\n Number of wheels: {package_count}\n \"\"\").format(\n location=cache_location,\n package_count=num_packages,\n size=cache_size,\n ).strip()\n\n logger.info(message)\n\n def list_cache_items(self, options, args):\n # type: (Values, List[Any]) -> None\n if len(args) > 1:\n raise CommandError('Too many arguments')\n\n if args:\n pattern = args[0]\n else:\n pattern = '*'\n\n files = self._find_wheels(options, pattern)\n\n if not files:\n logger.info('Nothing cached.')\n return\n\n results = []\n for filename in files:\n wheel = os.path.basename(filename)\n size = filesystem.format_file_size(filename)\n results.append(' - {} ({})'.format(wheel, size))\n logger.info('Cache contents:\\n')\n logger.info('\\n'.join(sorted(results)))\n\n def remove_cache_items(self, options, args):\n # type: (Values, List[Any]) -> None\n if len(args) > 1:\n raise CommandError('Too many arguments')\n\n if not args:\n raise CommandError('Please provide a pattern')\n\n files = self._find_wheels(options, args[0])\n if not files:\n raise CommandError('No matching packages')\n\n for filename in files:\n os.unlink(filename)\n logger.debug('Removed %s', filename)\n logger.info('Files removed: %s', len(files))\n\n def purge_cache(self, options, args):\n # type: (Values, List[Any]) -> None\n if args:\n raise CommandError('Too many arguments')\n\n return self.remove_cache_items(options, ['*'])\n\n def _wheels_cache_dir(self, options):\n # type: (Values) -> str\n return os.path.join(options.cache_dir, 'wheels')\n\n def _find_wheels(self, options, pattern):\n # type: (Values, str) -> List[str]\n wheel_dir = self._wheels_cache_dir(options)\n\n # The wheel filename format, as specified in PEP 427, is:\n # {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl\n #\n # Additionally, non-alphanumeric values in the distribution are\n # normalized to underscores (_), meaning hyphens can never occur\n # before `-{version}`.\n #\n # Given that information:\n # - If the pattern we're given contains a hyphen (-), the user is\n # providing at least the version. Thus, we can just append `*.whl`\n # to match the rest of it.\n # - If the pattern we're given doesn't contain a hyphen (-), the\n # user is only providing the name. Thus, we append `-*.whl` to\n # match the hyphen before the version, followed by anything else.\n #\n # PEP 427: https://www.python.org/dev/peps/pep-0427/\n pattern = pattern + (\"*.whl\" if \"-\" in pattern else \"-*.whl\")\n\n return filesystem.find_files(wheel_dir, pattern)\n", "path": "src/pip/_internal/commands/cache.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport os\nimport textwrap\n\nimport pip._internal.utils.filesystem as filesystem\nfrom pip._internal.cli.base_command import Command\nfrom pip._internal.cli.status_codes import ERROR, SUCCESS\nfrom pip._internal.exceptions import CommandError, PipError\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nif MYPY_CHECK_RUNNING:\n from optparse import Values\n from typing import Any, List\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CacheCommand(Command):\n \"\"\"\n Inspect and manage pip's wheel cache.\n\n Subcommands:\n\n info: Show information about the cache.\n list: List filenames of packages stored in the cache.\n remove: Remove one or more package from the cache.\n purge: Remove all items from the cache.\n\n <pattern> can be a glob expression or a package name.\n \"\"\"\n\n usage = \"\"\"\n %prog info\n %prog list [<pattern>]\n %prog remove <pattern>\n %prog purge\n \"\"\"\n\n def run(self, options, args):\n # type: (Values, List[Any]) -> int\n handlers = {\n \"info\": self.get_cache_info,\n \"list\": self.list_cache_items,\n \"remove\": self.remove_cache_items,\n \"purge\": self.purge_cache,\n }\n\n if not options.cache_dir:\n logger.error(\"pip cache commands can not \"\n \"function since cache is disabled.\")\n return ERROR\n\n # Determine action\n if not args or args[0] not in handlers:\n logger.error(\"Need an action ({}) to perform.\".format(\n \", \".join(sorted(handlers)))\n )\n return ERROR\n\n action = args[0]\n\n # Error handling happens here, not in the action-handlers.\n try:\n handlers[action](options, args[1:])\n except PipError as e:\n logger.error(e.args[0])\n return ERROR\n\n return SUCCESS\n\n def get_cache_info(self, options, args):\n # type: (Values, List[Any]) -> None\n if args:\n raise CommandError('Too many arguments')\n\n num_packages = len(self._find_wheels(options, '*'))\n\n cache_location = self._wheels_cache_dir(options)\n cache_size = filesystem.format_directory_size(cache_location)\n\n message = textwrap.dedent(\"\"\"\n Location: {location}\n Size: {size}\n Number of wheels: {package_count}\n \"\"\").format(\n location=cache_location,\n package_count=num_packages,\n size=cache_size,\n ).strip()\n\n logger.info(message)\n\n def list_cache_items(self, options, args):\n # type: (Values, List[Any]) -> None\n if len(args) > 1:\n raise CommandError('Too many arguments')\n\n if args:\n pattern = args[0]\n else:\n pattern = '*'\n\n files = self._find_wheels(options, pattern)\n\n if not files:\n logger.info('Nothing cached.')\n return\n\n results = []\n for filename in files:\n wheel = os.path.basename(filename)\n size = filesystem.format_file_size(filename)\n results.append(' - {} ({})'.format(wheel, size))\n logger.info('Cache contents:\\n')\n logger.info('\\n'.join(sorted(results)))\n\n def remove_cache_items(self, options, args):\n # type: (Values, List[Any]) -> None\n if len(args) > 1:\n raise CommandError('Too many arguments')\n\n if not args:\n raise CommandError('Please provide a pattern')\n\n files = self._find_wheels(options, args[0])\n if not files:\n raise CommandError('No matching packages')\n\n for filename in files:\n os.unlink(filename)\n logger.debug('Removed %s', filename)\n logger.info('Files removed: %s', len(files))\n\n def purge_cache(self, options, args):\n # type: (Values, List[Any]) -> None\n if args:\n raise CommandError('Too many arguments')\n\n return self.remove_cache_items(options, ['*'])\n\n def _wheels_cache_dir(self, options):\n # type: (Values) -> str\n return os.path.join(options.cache_dir, 'wheels')\n\n def _find_wheels(self, options, pattern):\n # type: (Values, str) -> List[str]\n wheel_dir = self._wheels_cache_dir(options)\n\n # The wheel filename format, as specified in PEP 427, is:\n # {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl\n #\n # Additionally, non-alphanumeric values in the distribution are\n # normalized to underscores (_), meaning hyphens can never occur\n # before `-{version}`.\n #\n # Given that information:\n # - If the pattern we're given contains a hyphen (-), the user is\n # providing at least the version. Thus, we can just append `*.whl`\n # to match the rest of it.\n # - If the pattern we're given doesn't contain a hyphen (-), the\n # user is only providing the name. Thus, we append `-*.whl` to\n # match the hyphen before the version, followed by anything else.\n #\n # PEP 427: https://www.python.org/dev/peps/pep-0427/\n pattern = pattern + (\"*.whl\" if \"-\" in pattern else \"-*.whl\")\n\n return filesystem.find_files(wheel_dir, pattern)\n", "path": "src/pip/_internal/commands/cache.py"}]} |
gh_patches_debug_1535 | rasdani/github-patches | git_diff | edgedb__edgedb-3087 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
EdgeDB server doesn't always correctly recognize PostgreSQL server startup
<!-- Please search existing issues to avoid creating duplicates. -->
- EdgeDB Version: 1.0-rc.2+dev6093.d2021102414.gec7d60ef3.cv202110240000
- OS Version: Arch Linux under WSL2
When the EdgeDB server starts up during `edb server` or `edb test` commands and tries to read the `postmaster.pid` file, sometimes it doesn't do it quite correctly, as it seems to me.
I printed the content of the file and parsed host and port in `_connection_addr_from_pidfile` method and got the following:
```
157186
/home/nik/.local/share/edgedb/_localdev
1635094601
5432
27256 262160
('', 5432)
```
This results in parsing the host to an empty string, trying to connect to it in `pgcon._connect` and and getting a `ConnectionRefusedError` in the process. I suspect that the `postmaster.pid` file itself did not have time to initialize fully because when I ran `edgedb-server --botstrap-only` my computer already had almost 100% CPU/memory load. Also, the problem disappeared when I tried to repeat the steps, but my PC had free resources. But perhaps the `edb.server.pgcluster` module itself should have a check that the `postmaster.pid` file hasn't been fully loaded or something like that anyway.
Steps to Reproduce:
1. Load PC resources to the limit (?)
2. Run `edgedb-server --bootstrap-only`
Full logs of server startup:
```
INFO 159251 2021-10-24T20:04:35.317 edb.server: EdgeDB server (version: 1.0-rc.2+dev6093.d2021102414.gec7d60ef3.cv202110240000) is starting in DEV mode.
INFO 159251 2021-10-24T20:04:36.056 edb.server: Using 127 max backend connections based on total memory.
159273
/home/nik/.local/share/edgedb/_localdev
1635095076
5432
27256 262207
('', 5432)
INFO 159273 2021-10-24T20:04:36.229 postgres: starting PostgreSQL 13.4 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 11.1.0, 64-bit
INFO 159273 2021-10-24T20:04:37.240 postgres: listening on Unix socket "/home/nik/.local/share/edgedb/_localdev/.s.PGSQL.5432"
INFO 159273 2021-10-24T20:04:37.240 postgres: database system was shut down at 2021-10-24 20:04:33 MSK
INFO 159273 2021-10-24T20:04:37.241 postgres: database system is ready to accept connections
INFO 159273 2021-10-24T20:05:37.071 postgres: received fast shutdown request
INFO 159273 2021-10-24T20:05:37.106 postgres: aborting any active transactions
INFO 159273 2021-10-24T20:05:37.107 postgres: background worker "logical replication launcher" (PID 159284) exited with exit code 1
INFO 159273 2021-10-24T20:05:37.107 postgres: shutting down
INFO 159273 2021-10-24T20:05:37.239 postgres: database system is shut down
======================================================== Exception occurred: [Errno 111] Connection refused =========================================================
1. ConnectionRefusedError: [Errno 111] Connection refused
----------------------------------------------------------------------------- Traceback -----------------------------------------------------------------------------
/home/nik/.virtualenvs/edgedb/bin/edb, line 33, in <module>
> sys.exit(load_entry_point('edgedb-server', 'console_scripts', 'edb')())
/home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/click/core.py, line 829, in __call__
> return self.main(*args, **kwargs)
/home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/click/core.py, line 782, in main
> rv = self.invoke(ctx)
/home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/click/core.py, line 1259, in invoke
> return _process_result(sub_ctx.command.invoke(sub_ctx))
/home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/click/core.py, line 1066, in invoke
> return ctx.invoke(self.callback, **ctx.params)
/home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/click/core.py, line 610, in invoke
> return callback(*args, **kwargs)
/home/nik/projects/edgedb/edgedb/edb/tools/edb.py, line 55, in server
> srv_main.server_main(**kwargs)
/home/nik/projects/edgedb/edgedb/edb/server/main.py, line 556, in server_main
> asyncio.run(run_server(server_args))
/home/nik/.asdf/installs/python/3.10.0/lib/python3.10/asyncio/runners.py, line 44, in run
> return loop.run_until_complete(main)
uvloop/loop.pyx, line 1501, in uvloop.loop.Loop.run_until_complete
/home/nik/projects/edgedb/edgedb/edb/server/main.py, line 459, in run_server
> need_cluster_restart = await _init_cluster(cluster, args)
/home/nik/projects/edgedb/edgedb/edb/server/main.py, line 141, in _init_cluster
> need_restart = await bootstrap.ensure_bootstrapped(cluster, args)
/home/nik/projects/edgedb/edgedb/edb/server/bootstrap.py, line 1386, in ensure_bootstrapped
> pgconn = await cluster.connect()
/home/nik/projects/edgedb/edgedb/edb/server/pgcluster.py, line 184, in connect
> conn = await asyncpg.connect(**conn_info)
/home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/asyncpg/connection.py, line 2045, in connect
> return await connect_utils._connect(
/home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/asyncpg/connect_utils.py, line 790, in _connect
> raise last_error
/home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/asyncpg/connect_utils.py, line 776, in _connect
> return await _connect_addr(
/home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/asyncpg/connect_utils.py, line 676, in _connect_addr
> return await __connect_addr(params, timeout, True, *args)
/home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/asyncpg/connect_utils.py, line 720, in __connect_addr
> tr, pr = await compat.wait_for(connector, timeout=timeout)
/home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/asyncpg/compat.py, line 66, in wait_for
> return await asyncio.wait_for(fut, timeout)
/home/nik/.asdf/installs/python/3.10.0/lib/python3.10/asyncio/tasks.py, line 447, in wait_for
> return fut.result()
/home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/asyncpg/connect_utils.py, line 586, in _create_ssl_connection
> tr, pr = await loop.create_connection(
uvloop/loop.pyx, line 2024, in create_connection
uvloop/loop.pyx, line 2001, in uvloop.loop.Loop.create_connection
ConnectionRefusedError: [Errno 111] Connection refused
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `edb/server/pgcluster.py`
Content:
```
1 # Copyright (C) 2016-present MagicStack Inc. and the EdgeDB authors.
2 # Copyright (C) 2016-present the asyncpg authors and contributors
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 #
16
17 """PostgreSQL cluster management."""
18
19 from __future__ import annotations
20 from typing import *
21
22 import asyncio
23 import enum
24 import functools
25 import locale
26 import logging
27 import os
28 import os.path
29 import pathlib
30 import re
31 import shlex
32 import shutil
33 import textwrap
34 import time
35 import urllib.parse
36
37 import asyncpg
38
39 from edb import buildmeta
40 from edb.common import supervisor
41 from edb.common import uuidgen
42
43 from edb.server import defines
44 from edb.server.ha import base as ha_base
45 from edb.pgsql import common as pgcommon
46
47 from . import pgconnparams
48
49
50 logger = logging.getLogger('edb.pgcluster')
51 pg_dump_logger = logging.getLogger('pg_dump')
52 pg_ctl_logger = logging.getLogger('pg_ctl')
53 pg_config_logger = logging.getLogger('pg_config')
54 initdb_logger = logging.getLogger('initdb')
55 postgres_logger = logging.getLogger('postgres')
56
57 get_database_backend_name = pgcommon.get_database_backend_name
58 get_role_backend_name = pgcommon.get_role_backend_name
59
60
61 def _is_c_utf8_locale_present() -> bool:
62 try:
63 locale.setlocale(locale.LC_CTYPE, 'C.UTF-8')
64 except Exception:
65 return False
66 else:
67 # We specifically don't use locale.getlocale(), because
68 # it can lie and return a non-existent locale due to PEP 538.
69 locale.setlocale(locale.LC_CTYPE, '')
70 return True
71
72
73 class ClusterError(Exception):
74 pass
75
76
77 class PostgresPidFileNotReadyError(Exception):
78 """Raised on an attempt to read non-existent or bad Postgres PID file"""
79
80
81 class BackendCapabilities(enum.IntFlag):
82
83 NONE = 0
84 #: Whether CREATE ROLE .. SUPERUSER is allowed
85 SUPERUSER_ACCESS = 1 << 0
86 #: Whether reading PostgreSQL configuration files
87 #: via pg_file_settings is allowed
88 CONFIGFILE_ACCESS = 1 << 1
89 #: Whether the PostgreSQL server supports the C.UTF-8 locale
90 C_UTF8_LOCALE = 1 << 2
91
92
93 ALL_BACKEND_CAPABILITIES = (
94 BackendCapabilities.SUPERUSER_ACCESS
95 | BackendCapabilities.CONFIGFILE_ACCESS
96 | BackendCapabilities.C_UTF8_LOCALE
97 )
98
99
100 class BackendInstanceParams(NamedTuple):
101
102 capabilities: BackendCapabilities
103 tenant_id: str
104 base_superuser: Optional[str] = None
105 max_connections: int = 500
106 reserved_connections: int = 0
107
108
109 class BackendRuntimeParams(NamedTuple):
110
111 instance_params: BackendInstanceParams
112 session_authorization_role: Optional[str] = None
113
114
115 @functools.lru_cache
116 def get_default_runtime_params(**instance_params: Any) -> BackendRuntimeParams:
117 capabilities = ALL_BACKEND_CAPABILITIES
118 if not _is_c_utf8_locale_present():
119 capabilities &= ~BackendCapabilities.C_UTF8_LOCALE
120 instance_params.setdefault('capabilities', capabilities)
121 if 'tenant_id' not in instance_params:
122 instance_params = dict(
123 tenant_id=buildmeta.get_default_tenant_id(),
124 **instance_params,
125 )
126
127 return BackendRuntimeParams(
128 instance_params=BackendInstanceParams(**instance_params),
129 )
130
131
132 class BaseCluster:
133
134 def __init__(
135 self,
136 *,
137 instance_params: Optional[BackendInstanceParams] = None,
138 ) -> None:
139 self._connection_addr: Optional[Tuple[str, int]] = None
140 self._connection_params: Optional[
141 pgconnparams.ConnectionParameters
142 ] = None
143 self._default_session_auth: Optional[str] = None
144 self._pg_config_data: Dict[str, str] = {}
145 self._pg_bin_dir: Optional[pathlib.Path] = None
146 if instance_params is None:
147 self._instance_params = (
148 get_default_runtime_params().instance_params)
149 else:
150 self._instance_params = instance_params
151
152 def get_db_name(self, db_name: str) -> str:
153 return get_database_backend_name(
154 db_name,
155 tenant_id=self._instance_params.tenant_id,
156 )
157
158 def get_role_name(self, role_name: str) -> str:
159 return get_database_backend_name(
160 role_name,
161 tenant_id=self._instance_params.tenant_id,
162 )
163
164 async def start(
165 self,
166 wait: int = 60,
167 *,
168 server_settings: Optional[Mapping[str, str]] = None,
169 **opts: Any,
170 ) -> None:
171 raise NotImplementedError
172
173 async def stop(self, wait: int = 60) -> None:
174 raise NotImplementedError
175
176 def destroy(self) -> None:
177 raise NotImplementedError
178
179 async def connect(self, **kwargs: Any) -> asyncpg.Connection:
180 conn_info = self.get_connection_spec()
181 conn_info.update(kwargs)
182 if 'sslmode' in conn_info:
183 conn_info['ssl'] = conn_info.pop('sslmode').name
184 conn = await asyncpg.connect(**conn_info)
185
186 if (not kwargs.get('user')
187 and self._default_session_auth
188 and conn_info.get('user') != self._default_session_auth):
189 # No explicit user given, and the default
190 # SESSION AUTHORIZATION is different from the user
191 # used to connect.
192 await conn.execute(
193 f'SET ROLE {pgcommon.quote_ident(self._default_session_auth)}'
194 )
195
196 return conn
197
198 async def start_watching(
199 self, cluster_protocol: Optional[ha_base.ClusterProtocol] = None
200 ) -> None:
201 pass
202
203 def stop_watching(self) -> None:
204 pass
205
206 def get_runtime_params(self) -> BackendRuntimeParams:
207 params = self.get_connection_params()
208 login_role: Optional[str] = params.user
209 sup_role = self.get_role_name(defines.EDGEDB_SUPERUSER)
210 return BackendRuntimeParams(
211 instance_params=self._instance_params,
212 session_authorization_role=(
213 None if login_role == sup_role else login_role
214 ),
215 )
216
217 def get_connection_addr(self) -> Optional[Tuple[str, int]]:
218 return self._get_connection_addr()
219
220 def set_default_session_authorization(self, rolename: str) -> None:
221 self._default_session_auth = rolename
222
223 def set_connection_params(
224 self,
225 params: pgconnparams.ConnectionParameters,
226 ) -> None:
227 self._connection_params = params
228
229 def get_connection_params(
230 self,
231 ) -> pgconnparams.ConnectionParameters:
232 assert self._connection_params is not None
233 return self._connection_params
234
235 def get_connection_spec(self) -> Dict[str, Any]:
236 conn_dict: Dict[str, Any] = {}
237 addr = self.get_connection_addr()
238 assert addr is not None
239 conn_dict['host'] = addr[0]
240 conn_dict['port'] = addr[1]
241 params = self.get_connection_params()
242 for k in (
243 'user',
244 'password',
245 'database',
246 'ssl',
247 'sslmode',
248 'server_settings',
249 ):
250 v = getattr(params, k)
251 if v is not None:
252 conn_dict[k] = v
253
254 cluster_settings = conn_dict.get('server_settings', {})
255
256 edgedb_settings = {
257 'client_encoding': 'utf-8',
258 'search_path': 'edgedb',
259 'timezone': 'UTC',
260 'intervalstyle': 'iso_8601',
261 'jit': 'off',
262 }
263
264 conn_dict['server_settings'] = {**cluster_settings, **edgedb_settings}
265
266 return conn_dict
267
268 def _get_connection_addr(self) -> Optional[Tuple[str, int]]:
269 return self._connection_addr
270
271 def is_managed(self) -> bool:
272 raise NotImplementedError
273
274 async def get_status(self) -> str:
275 raise NotImplementedError
276
277 async def dump_database(
278 self,
279 dbname: str,
280 *,
281 exclude_schemas: Iterable[str] = (),
282 dump_object_owners: bool = True,
283 ) -> bytes:
284 status = await self.get_status()
285 if status != 'running':
286 raise ClusterError('cannot dump: cluster is not running')
287
288 if self._pg_bin_dir is None:
289 await self.lookup_postgres()
290 pg_dump = self._find_pg_binary('pg_dump')
291 conn_spec = self.get_connection_spec()
292
293 args = [
294 pg_dump,
295 '--inserts',
296 f'--dbname={dbname}',
297 f'--host={conn_spec["host"]}',
298 f'--port={conn_spec["port"]}',
299 f'--username={conn_spec["user"]}',
300 ]
301
302 if not dump_object_owners:
303 args.append('--no-owner')
304
305 env = os.environ.copy()
306 if conn_spec.get("password"):
307 env['PGPASSWORD'] = conn_spec["password"]
308
309 if exclude_schemas:
310 for exclude_schema in exclude_schemas:
311 args.append(f'--exclude-schema={exclude_schema}')
312
313 stdout_lines, _, _ = await _run_logged_subprocess(
314 args,
315 logger=pg_dump_logger,
316 log_stdout=False,
317 env=env,
318 )
319 return b'\n'.join(stdout_lines)
320
321 def _find_pg_binary(self, binary: str) -> str:
322 assert self._pg_bin_dir is not None
323 bpath = self._pg_bin_dir / binary
324 if not bpath.is_file():
325 raise ClusterError(
326 'could not find {} executable: '.format(binary) +
327 '{!r} does not exist or is not a file'.format(bpath))
328
329 return str(bpath)
330
331 def _subprocess_error(
332 self,
333 name: str,
334 exitcode: int,
335 stderr: Optional[bytes],
336 ) -> ClusterError:
337 if stderr:
338 return ClusterError(
339 f'{name} exited with status {exitcode}:\n'
340 + textwrap.indent(stderr.decode(), ' ' * 4),
341 )
342 else:
343 return ClusterError(
344 f'{name} exited with status {exitcode}',
345 )
346
347 async def lookup_postgres(self) -> None:
348 self._pg_bin_dir = await get_pg_bin_dir()
349
350
351 class Cluster(BaseCluster):
352 def __init__(
353 self,
354 data_dir: pathlib.Path,
355 *,
356 runstate_dir: Optional[pathlib.Path] = None,
357 instance_params: Optional[BackendInstanceParams] = None,
358 log_level: str = 'i',
359 ):
360 super().__init__(instance_params=instance_params)
361 self._data_dir = data_dir
362 self._runstate_dir = (
363 runstate_dir if runstate_dir is not None else data_dir)
364 self._daemon_pid: Optional[int] = None
365 self._daemon_process: Optional[asyncio.subprocess.Process] = None
366 self._daemon_supervisor: Optional[supervisor.Supervisor] = None
367 self._log_level = log_level
368
369 def is_managed(self) -> bool:
370 return True
371
372 def get_data_dir(self) -> pathlib.Path:
373 return self._data_dir
374
375 async def get_status(self) -> str:
376 stdout_lines, stderr_lines, exit_code = (
377 await _run_logged_text_subprocess(
378 [self._pg_ctl, 'status', '-D', str(self._data_dir)],
379 logger=pg_ctl_logger,
380 check=False,
381 )
382 )
383
384 if (
385 exit_code == 4
386 or not os.path.exists(self._data_dir)
387 or not os.listdir(self._data_dir)
388 ):
389 return 'not-initialized'
390 elif exit_code == 3:
391 return 'stopped'
392 elif exit_code == 0:
393 output = '\n'.join(stdout_lines)
394 r = re.match(r'.*PID\s?:\s+(\d+).*', output)
395 if not r:
396 raise ClusterError(
397 f'could not parse pg_ctl status output: {output}')
398 self._daemon_pid = int(r.group(1))
399 if self._connection_addr is None:
400 self._connection_addr = self._connection_addr_from_pidfile()
401 return 'running'
402 else:
403 stderr_text = '\n'.join(stderr_lines)
404 raise ClusterError(
405 f'`pg_ctl status` exited with status {exit_code}:\n'
406 + textwrap.indent(stderr_text, ' ' * 4),
407 )
408
409 async def ensure_initialized(self, **settings: Any) -> bool:
410 cluster_status = await self.get_status()
411
412 if cluster_status == 'not-initialized':
413 logger.info(
414 'Initializing database cluster in %s', self._data_dir)
415
416 instance_params = self.get_runtime_params().instance_params
417 capabilities = instance_params.capabilities
418 have_c_utf8 = (
419 capabilities & BackendCapabilities.C_UTF8_LOCALE)
420 await self.init(
421 username='postgres',
422 locale='C.UTF-8' if have_c_utf8 else 'en_US.UTF-8',
423 lc_collate='C',
424 encoding='UTF8',
425 )
426 self.reset_hba()
427 self.add_hba_entry(
428 type='local',
429 database='all',
430 user='postgres',
431 auth_method='trust'
432 )
433 return True
434 else:
435 return False
436
437 async def init(self, **settings: str) -> None:
438 """Initialize cluster."""
439 if await self.get_status() != 'not-initialized':
440 raise ClusterError(
441 'cluster in {!r} has already been initialized'.format(
442 self._data_dir))
443
444 if settings:
445 settings_args = ['--{}={}'.format(k.replace('_', '-'), v)
446 for k, v in settings.items()]
447 extra_args = ['-o'] + [' '.join(settings_args)]
448 else:
449 extra_args = []
450
451 await _run_logged_subprocess(
452 [self._pg_ctl, 'init', '-D', str(self._data_dir)] + extra_args,
453 logger=initdb_logger,
454 )
455
456 async def start(
457 self,
458 wait: int = 60,
459 *,
460 server_settings: Optional[Mapping[str, str]] = None,
461 **opts: str,
462 ) -> None:
463 """Start the cluster."""
464 status = await self.get_status()
465 if status == 'running':
466 return
467 elif status == 'not-initialized':
468 raise ClusterError(
469 'cluster in {!r} has not been initialized'.format(
470 self._data_dir))
471
472 extra_args = ['--{}={}'.format(k, v) for k, v in opts.items()]
473
474 start_settings = {
475 'listen_addresses': '', # we use Unix sockets
476 'unix_socket_permissions': '0700',
477 'unix_socket_directories': str(self._runstate_dir),
478 # here we are not setting superuser_reserved_connections because
479 # we're using superuser only now (so all connections available),
480 # and we don't support reserving connections for now
481 'max_connections': str(self._instance_params.max_connections),
482 # From Postgres docs:
483 #
484 # You might need to raise this value if you have queries that
485 # touch many different tables in a single transaction, e.g.,
486 # query of a parent table with many children.
487 #
488 # EdgeDB queries might touch _lots_ of tables, especially in deep
489 # inheritance hierarchies. This is especially important in low
490 # `max_connections` scenarios.
491 'max_locks_per_transaction': 256,
492 }
493
494 if os.getenv('EDGEDB_DEBUG_PGSERVER'):
495 start_settings['log_min_messages'] = 'info'
496 start_settings['log_statement'] = 'all'
497 else:
498 log_level_map = {
499 'd': 'INFO',
500 'i': 'NOTICE',
501 'w': 'WARNING',
502 'e': 'ERROR',
503 's': 'PANIC',
504 }
505 start_settings['log_min_messages'] = log_level_map[self._log_level]
506 start_settings['log_statement'] = 'none'
507 start_settings['log_line_prefix'] = ''
508
509 if server_settings:
510 start_settings.update(server_settings)
511
512 ssl_key = start_settings.get('ssl_key_file')
513 if ssl_key:
514 # Make sure server certificate key file has correct permissions.
515 keyfile = os.path.join(self._data_dir, 'srvkey.pem')
516 assert isinstance(ssl_key, str)
517 shutil.copy(ssl_key, keyfile)
518 os.chmod(keyfile, 0o600)
519 start_settings['ssl_key_file'] = keyfile
520
521 for k, v in start_settings.items():
522 extra_args.extend(['-c', '{}={}'.format(k, v)])
523
524 self._daemon_process, *loggers = await _start_logged_subprocess(
525 [self._postgres, '-D', str(self._data_dir), *extra_args],
526 capture_stdout=False,
527 capture_stderr=False,
528 logger=postgres_logger,
529 log_processor=postgres_log_processor,
530 )
531 self._daemon_pid = self._daemon_process.pid
532
533 sup = await supervisor.Supervisor.create(name="postgres loggers")
534 for logger_coro in loggers:
535 sup.create_task(logger_coro)
536 self._daemon_supervisor = sup
537
538 await self._test_connection(timeout=wait)
539
540 async def reload(self) -> None:
541 """Reload server configuration."""
542 status = await self.get_status()
543 if status != 'running':
544 raise ClusterError('cannot reload: cluster is not running')
545
546 await _run_logged_subprocess(
547 [self._pg_ctl, 'reload', '-D', str(self._data_dir)],
548 logger=pg_ctl_logger,
549 )
550
551 async def stop(self, wait: int = 60) -> None:
552 await _run_logged_subprocess(
553 [
554 self._pg_ctl,
555 'stop', '-D', str(self._data_dir),
556 '-t', str(wait), '-m', 'fast'
557 ],
558 logger=pg_ctl_logger,
559 )
560
561 if (
562 self._daemon_process is not None and
563 self._daemon_process.returncode is None
564 ):
565 self._daemon_process.terminate()
566 await asyncio.wait_for(self._daemon_process.wait(), timeout=wait)
567
568 if self._daemon_supervisor is not None:
569 await self._daemon_supervisor.cancel()
570 self._daemon_supervisor = None
571
572 def destroy(self) -> None:
573 shutil.rmtree(self._data_dir)
574
575 def reset_hba(self) -> None:
576 """Remove all records from pg_hba.conf."""
577 pg_hba = os.path.join(self._data_dir, 'pg_hba.conf')
578
579 try:
580 with open(pg_hba, 'w'):
581 pass
582 except IOError as e:
583 raise ClusterError(
584 'cannot modify HBA records: {}'.format(e)) from e
585
586 def add_hba_entry(
587 self,
588 *,
589 type: str = 'host',
590 database: str,
591 user: str,
592 address: Optional[str] = None,
593 auth_method: str,
594 auth_options: Optional[Mapping[str, Any]] = None,
595 ) -> None:
596 """Add a record to pg_hba.conf."""
597 if type not in {'local', 'host', 'hostssl', 'hostnossl'}:
598 raise ValueError('invalid HBA record type: {!r}'.format(type))
599
600 pg_hba = os.path.join(self._data_dir, 'pg_hba.conf')
601
602 record = '{} {} {}'.format(type, database, user)
603
604 if type != 'local':
605 if address is None:
606 raise ValueError(
607 '{!r} entry requires a valid address'.format(type))
608 else:
609 record += ' {}'.format(address)
610
611 record += ' {}'.format(auth_method)
612
613 if auth_options is not None:
614 record += ' ' + ' '.join(
615 '{}={}'.format(k, v) for k, v in auth_options.items())
616
617 try:
618 with open(pg_hba, 'a') as f:
619 print(record, file=f)
620 except IOError as e:
621 raise ClusterError(
622 'cannot modify HBA records: {}'.format(e)) from e
623
624 async def trust_local_connections(self) -> None:
625 self.reset_hba()
626
627 self.add_hba_entry(type='local', database='all',
628 user='all', auth_method='trust')
629 self.add_hba_entry(type='host', address='127.0.0.1/32',
630 database='all', user='all',
631 auth_method='trust')
632 self.add_hba_entry(type='host', address='::1/128',
633 database='all', user='all',
634 auth_method='trust')
635 status = await self.get_status()
636 if status == 'running':
637 await self.reload()
638
639 async def lookup_postgres(self) -> None:
640 await super().lookup_postgres()
641 self._pg_ctl = self._find_pg_binary('pg_ctl')
642 self._postgres = self._find_pg_binary('postgres')
643
644 def _get_connection_addr(self) -> Tuple[str, int]:
645 if self._connection_addr is None:
646 self._connection_addr = self._connection_addr_from_pidfile()
647
648 return self._connection_addr
649
650 def _connection_addr_from_pidfile(self) -> Tuple[str, int]:
651 pidfile = os.path.join(self._data_dir, 'postmaster.pid')
652
653 try:
654 with open(pidfile, 'rt') as f:
655 piddata = f.read()
656 except FileNotFoundError:
657 raise PostgresPidFileNotReadyError
658
659 lines = piddata.splitlines()
660
661 if len(lines) < 6:
662 # A complete postgres pidfile is at least 6 lines
663 raise PostgresPidFileNotReadyError
664
665 pmpid = int(lines[0])
666 if self._daemon_pid and pmpid != self._daemon_pid:
667 # This might be an old pidfile left from previous postgres
668 # daemon run.
669 raise PostgresPidFileNotReadyError
670
671 portnum = int(lines[3])
672 sockdir = lines[4]
673 hostaddr = lines[5]
674
675 if sockdir:
676 if sockdir[0] != '/':
677 # Relative sockdir
678 sockdir = os.path.normpath(
679 os.path.join(self._data_dir, sockdir))
680 host_str = sockdir
681 else:
682 host_str = hostaddr
683
684 if host_str == '*':
685 host_str = 'localhost'
686 elif host_str == '0.0.0.0':
687 host_str = '127.0.0.1'
688 elif host_str == '::':
689 host_str = '::1'
690
691 return (host_str, portnum)
692
693 async def _test_connection(self, timeout: int = 60) -> str:
694 self._connection_addr = None
695 connected = False
696
697 for n in range(timeout + 1):
698 # pg usually comes up pretty quickly, but not so
699 # quickly that we don't hit the wait case. Make our
700 # first sleep pretty short, to shave almost a second
701 # off the happy case.
702 sleep_time = 1 if n else 0.10
703
704 try:
705 conn_addr = self._get_connection_addr()
706 except PostgresPidFileNotReadyError:
707 time.sleep(sleep_time)
708 continue
709
710 try:
711 con = await asyncpg.connect(
712 database='postgres',
713 user='postgres',
714 timeout=5,
715 host=conn_addr[0],
716 port=conn_addr[1],
717 )
718 except (
719 OSError,
720 asyncio.TimeoutError,
721 asyncpg.CannotConnectNowError,
722 asyncpg.PostgresConnectionError,
723 ):
724 time.sleep(sleep_time)
725 continue
726 except asyncpg.PostgresError:
727 # Any other error other than ServerNotReadyError or
728 # ConnectionError is interpreted to indicate the server is
729 # up.
730 break
731 else:
732 connected = True
733 await con.close()
734 break
735
736 if connected:
737 return 'running'
738 else:
739 return 'not-initialized'
740
741
742 class RemoteCluster(BaseCluster):
743 def __init__(
744 self,
745 addr: Tuple[str, int],
746 params: pgconnparams.ConnectionParameters,
747 *,
748 instance_params: Optional[BackendInstanceParams] = None,
749 ha_backend: Optional[ha_base.HABackend] = None,
750 ):
751 super().__init__(instance_params=instance_params)
752 self._connection_addr = addr
753 self._connection_params = params
754 self._ha_backend = ha_backend
755
756 def _get_connection_addr(self) -> Optional[Tuple[str, int]]:
757 if self._ha_backend is not None:
758 return self._ha_backend.get_master_addr()
759 return self._connection_addr
760
761 async def ensure_initialized(self, **settings: Any) -> bool:
762 return False
763
764 def is_managed(self) -> bool:
765 return False
766
767 async def get_status(self) -> str:
768 return 'running'
769
770 def init(self, **settings: str) -> str:
771 pass
772
773 async def start(
774 self,
775 wait: int = 60,
776 *,
777 server_settings: Optional[Mapping[str, str]] = None,
778 **opts: Any,
779 ) -> None:
780 pass
781
782 async def stop(self, wait: int = 60) -> None:
783 pass
784
785 def destroy(self) -> None:
786 pass
787
788 def reset_hba(self) -> None:
789 raise ClusterError('cannot modify HBA records of unmanaged cluster')
790
791 def add_hba_entry(
792 self,
793 *,
794 type: str = 'host',
795 database: str,
796 user: str,
797 address: Optional[str] = None,
798 auth_method: str,
799 auth_options: Optional[Mapping[str, Any]] = None,
800 ) -> None:
801 raise ClusterError('cannot modify HBA records of unmanaged cluster')
802
803 async def start_watching(
804 self, cluster_protocol: Optional[ha_base.ClusterProtocol] = None
805 ) -> None:
806 if self._ha_backend is not None:
807 await self._ha_backend.start_watching(cluster_protocol)
808
809 def stop_watching(self) -> None:
810 if self._ha_backend is not None:
811 self._ha_backend.stop_watching()
812
813
814 async def get_pg_bin_dir() -> pathlib.Path:
815 pg_config_data = await get_pg_config()
816 pg_bin_dir = pg_config_data.get('bindir')
817 if not pg_bin_dir:
818 raise ClusterError(
819 'pg_config output did not provide the BINDIR value')
820 return pathlib.Path(pg_bin_dir)
821
822
823 async def get_pg_config() -> Dict[str, str]:
824 stdout_lines, _, _ = await _run_logged_text_subprocess(
825 [str(buildmeta.get_pg_config_path())],
826 logger=pg_config_logger,
827 )
828
829 config = {}
830 for line in stdout_lines:
831 k, eq, v = line.partition('=')
832 if eq:
833 config[k.strip().lower()] = v.strip()
834
835 return config
836
837
838 async def get_local_pg_cluster(
839 data_dir: pathlib.Path,
840 *,
841 runstate_dir: Optional[pathlib.Path] = None,
842 max_connections: Optional[int] = None,
843 tenant_id: Optional[str] = None,
844 log_level: Optional[str] = None,
845 ) -> Cluster:
846 if log_level is None:
847 log_level = 'i'
848 if tenant_id is None:
849 tenant_id = buildmeta.get_default_tenant_id()
850 instance_params = None
851 if max_connections is not None:
852 instance_params = get_default_runtime_params(
853 max_connections=max_connections,
854 tenant_id=tenant_id,
855 ).instance_params
856 cluster = Cluster(
857 data_dir=data_dir,
858 runstate_dir=runstate_dir,
859 instance_params=instance_params,
860 log_level=log_level,
861 )
862 await cluster.lookup_postgres()
863 return cluster
864
865
866 async def get_remote_pg_cluster(
867 dsn: str,
868 *,
869 tenant_id: Optional[str] = None,
870 ) -> RemoteCluster:
871 parsed = urllib.parse.urlparse(dsn)
872 ha_backend = None
873
874 if parsed.scheme not in {'postgresql', 'postgres'}:
875 ha_backend = ha_base.get_backend(parsed)
876 if ha_backend is None:
877 raise ValueError(
878 'invalid DSN: scheme is expected to be "postgresql", '
879 '"postgres" or one of the supported HA backend, '
880 'got {!r}'.format(parsed.scheme))
881
882 addr = await ha_backend.get_cluster_consensus()
883 dsn = 'postgresql://{}:{}'.format(*addr)
884
885 addrs, params = pgconnparams.parse_dsn(dsn)
886 if len(addrs) > 1:
887 raise ValueError('multiple hosts in Postgres DSN are not supported')
888 if tenant_id is None:
889 t_id = buildmeta.get_default_tenant_id()
890 else:
891 t_id = tenant_id
892 rcluster = RemoteCluster(addrs[0], params)
893
894 async def _get_cluster_type(
895 conn: asyncpg.Connection,
896 ) -> Tuple[Type[RemoteCluster], Optional[str]]:
897 managed_clouds = {
898 'rds_superuser': RemoteCluster, # Amazon RDS
899 'cloudsqlsuperuser': RemoteCluster, # GCP Cloud SQL
900 }
901
902 managed_cloud_super = await conn.fetchval(
903 """
904 SELECT
905 rolname
906 FROM
907 pg_roles
908 WHERE
909 rolname = any($1::text[])
910 LIMIT
911 1
912 """,
913 list(managed_clouds),
914 )
915
916 if managed_cloud_super is not None:
917 return managed_clouds[managed_cloud_super], managed_cloud_super
918 else:
919 return RemoteCluster, None
920
921 async def _detect_capabilities(
922 conn: asyncpg.Connection,
923 ) -> BackendCapabilities:
924 caps = BackendCapabilities.NONE
925
926 try:
927 await conn.execute(f'ALTER SYSTEM SET foo = 10')
928 except asyncpg.InsufficientPrivilegeError:
929 configfile_access = False
930 except asyncpg.UndefinedObjectError:
931 configfile_access = True
932 else:
933 configfile_access = True
934
935 if configfile_access:
936 caps |= BackendCapabilities.CONFIGFILE_ACCESS
937
938 tx = conn.transaction()
939 await tx.start()
940 rname = str(uuidgen.uuid1mc())
941
942 try:
943 await conn.execute(f'CREATE ROLE "{rname}" WITH SUPERUSER')
944 except asyncpg.InsufficientPrivilegeError:
945 can_make_superusers = False
946 else:
947 can_make_superusers = True
948 finally:
949 await tx.rollback()
950
951 if can_make_superusers:
952 caps |= BackendCapabilities.SUPERUSER_ACCESS
953
954 coll = await conn.fetchval('''
955 SELECT collname FROM pg_collation
956 WHERE lower(replace(collname, '-', '')) = 'c.utf8' LIMIT 1;
957 ''')
958
959 if coll is not None:
960 caps |= BackendCapabilities.C_UTF8_LOCALE
961
962 return caps
963
964 async def _get_pg_settings(
965 conn: asyncpg.Connection,
966 name: str,
967 ) -> str:
968 return await conn.fetchval( # type: ignore
969 'SELECT setting FROM pg_settings WHERE name = $1', name
970 )
971
972 async def _get_reserved_connections(
973 conn: asyncpg.Connection,
974 ) -> int:
975 rv = int(
976 await _get_pg_settings(conn, 'superuser_reserved_connections')
977 )
978 for name in [
979 'rds.rds_superuser_reserved_connections',
980 ]:
981 value = await _get_pg_settings(conn, name)
982 if value:
983 rv += int(value)
984 return rv
985
986 conn = await rcluster.connect()
987 try:
988 cluster_type, superuser_name = await _get_cluster_type(conn)
989 max_connections = await _get_pg_settings(conn, 'max_connections')
990 instance_params = BackendInstanceParams(
991 capabilities=await _detect_capabilities(conn),
992 base_superuser=superuser_name,
993 max_connections=int(max_connections),
994 reserved_connections=await _get_reserved_connections(conn),
995 tenant_id=t_id,
996 )
997 finally:
998 await conn.close()
999
1000 return cluster_type(
1001 addrs[0],
1002 params,
1003 instance_params=instance_params,
1004 ha_backend=ha_backend,
1005 )
1006
1007
1008 async def _run_logged_text_subprocess(
1009 args: Sequence[str],
1010 logger: logging.Logger,
1011 level: int = logging.DEBUG,
1012 check: bool = True,
1013 log_stdout: bool = True,
1014 timeout: Optional[float] = None,
1015 **kwargs: Any,
1016 ) -> Tuple[List[str], List[str], int]:
1017 stdout_lines, stderr_lines, exit_code = await _run_logged_subprocess(
1018 args,
1019 logger=logger,
1020 level=level,
1021 check=check,
1022 log_stdout=log_stdout,
1023 timeout=timeout,
1024 **kwargs,
1025 )
1026
1027 return (
1028 [line.decode() for line in stdout_lines],
1029 [line.decode() for line in stderr_lines],
1030 exit_code,
1031 )
1032
1033
1034 async def _run_logged_subprocess(
1035 args: Sequence[str],
1036 logger: logging.Logger,
1037 level: int = logging.DEBUG,
1038 check: bool = True,
1039 log_stdout: bool = True,
1040 log_stderr: bool = True,
1041 capture_stdout: bool = True,
1042 capture_stderr: bool = True,
1043 timeout: Optional[float] = None,
1044 **kwargs: Any,
1045 ) -> Tuple[List[bytes], List[bytes], int]:
1046 process, stdout_reader, stderr_reader = await _start_logged_subprocess(
1047 args,
1048 logger=logger,
1049 level=level,
1050 log_stdout=log_stdout,
1051 log_stderr=log_stderr,
1052 capture_stdout=capture_stdout,
1053 capture_stderr=capture_stderr,
1054 **kwargs,
1055 )
1056
1057 exit_code, stdout_lines, stderr_lines = await asyncio.wait_for(
1058 asyncio.gather(process.wait(), stdout_reader, stderr_reader),
1059 timeout=timeout,
1060 )
1061
1062 if exit_code != 0 and check:
1063 stderr_text = b'\n'.join(stderr_lines).decode()
1064 raise ClusterError(
1065 f'{args[0]} exited with status {exit_code}:\n'
1066 + textwrap.indent(stderr_text, ' ' * 4),
1067 )
1068 else:
1069 return stdout_lines, stderr_lines, exit_code
1070
1071
1072 async def _start_logged_subprocess(
1073 args: Sequence[str],
1074 *,
1075 logger: logging.Logger,
1076 level: int = logging.DEBUG,
1077 log_stdout: bool = True,
1078 log_stderr: bool = True,
1079 capture_stdout: bool = True,
1080 capture_stderr: bool = True,
1081 log_processor: Optional[Callable[[str], Tuple[str, int]]] = None,
1082 **kwargs: Any,
1083 ) -> Tuple[
1084 asyncio.subprocess.Process,
1085 Coroutine[Any, Any, List[bytes]],
1086 Coroutine[Any, Any, List[bytes]],
1087 ]:
1088 logger.log(
1089 level,
1090 f'running `{" ".join(shlex.quote(arg) for arg in args)}`'
1091 )
1092
1093 process = await asyncio.create_subprocess_exec(
1094 *args,
1095 stdout=(
1096 asyncio.subprocess.PIPE if log_stdout or capture_stdout
1097 else asyncio.subprocess.DEVNULL
1098 ),
1099 stderr=(
1100 asyncio.subprocess.PIPE if log_stderr or capture_stderr
1101 else asyncio.subprocess.DEVNULL
1102 ),
1103 **kwargs,
1104 )
1105
1106 assert process.stderr is not None
1107 assert process.stdout is not None
1108
1109 if log_stderr and capture_stderr:
1110 stderr_reader = _capture_and_log_subprocess_output(
1111 process.pid,
1112 process.stderr,
1113 logger,
1114 level,
1115 log_processor,
1116 )
1117 elif capture_stderr:
1118 stderr_reader = _capture_subprocess_output(process.stderr)
1119 elif log_stderr:
1120 stderr_reader = _log_subprocess_output(
1121 process.pid, process.stderr, logger, level, log_processor)
1122 else:
1123 stderr_reader = _dummy()
1124
1125 if log_stdout and capture_stdout:
1126 stdout_reader = _capture_and_log_subprocess_output(
1127 process.pid,
1128 process.stdout,
1129 logger,
1130 level,
1131 log_processor,
1132 )
1133 elif capture_stdout:
1134 stdout_reader = _capture_subprocess_output(process.stdout)
1135 elif log_stdout:
1136 stdout_reader = _log_subprocess_output(
1137 process.pid, process.stdout, logger, level, log_processor)
1138 else:
1139 stdout_reader = _dummy()
1140
1141 return process, stdout_reader, stderr_reader
1142
1143
1144 async def _capture_subprocess_output(
1145 stream: asyncio.StreamReader,
1146 ) -> List[bytes]:
1147 lines = []
1148 while not stream.at_eof():
1149 line = await stream.readline()
1150 if line or not stream.at_eof():
1151 lines.append(line.rstrip(b'\n'))
1152 return lines
1153
1154
1155 async def _capture_and_log_subprocess_output(
1156 pid: int,
1157 stream: asyncio.StreamReader,
1158 logger: logging.Logger,
1159 level: int,
1160 log_processor: Optional[Callable[[str], Tuple[str, int]]] = None,
1161 ) -> List[bytes]:
1162 lines = []
1163 while not stream.at_eof():
1164 line = await stream.readline()
1165 if line or not stream.at_eof():
1166 line = line.rstrip(b'\n')
1167 lines.append(line)
1168 log_line = line.decode()
1169 if log_processor is not None:
1170 log_line, level = log_processor(log_line)
1171 logger.log(level, log_line, extra={"process": pid})
1172 return lines
1173
1174
1175 async def _log_subprocess_output(
1176 pid: int,
1177 stream: asyncio.StreamReader,
1178 logger: logging.Logger,
1179 level: int,
1180 log_processor: Optional[Callable[[str], Tuple[str, int]]] = None,
1181 ) -> List[bytes]:
1182 while not stream.at_eof():
1183 line = await stream.readline()
1184 if line or not stream.at_eof():
1185 log_line = line.rstrip(b'\n').decode()
1186 if log_processor is not None:
1187 log_line, level = log_processor(log_line)
1188 logger.log(level, log_line, extra={"process": pid})
1189 return []
1190
1191
1192 async def _dummy() -> List[bytes]:
1193 return []
1194
1195
1196 postgres_to_python_level_map = {
1197 "DEBUG5": logging.DEBUG,
1198 "DEBUG4": logging.DEBUG,
1199 "DEBUG3": logging.DEBUG,
1200 "DEBUG2": logging.DEBUG,
1201 "DEBUG1": logging.DEBUG,
1202 "INFO": logging.INFO,
1203 "NOTICE": logging.INFO,
1204 "LOG": logging.INFO,
1205 "WARNING": logging.WARNING,
1206 "ERROR": logging.ERROR,
1207 "FATAL": logging.CRITICAL,
1208 "PANIC": logging.CRITICAL,
1209 }
1210
1211 postgres_log_re = re.compile(r'^(\w+):\s*(.*)$')
1212
1213 postgres_specific_msg_level_map = {
1214 "terminating connection due to administrator command": logging.INFO,
1215 "the database system is shutting down": logging.INFO,
1216 }
1217
1218
1219 def postgres_log_processor(msg: str) -> Tuple[str, int]:
1220 if m := postgres_log_re.match(msg):
1221 postgres_level = m.group(1)
1222 msg = m.group(2)
1223 level = postgres_specific_msg_level_map.get(
1224 msg,
1225 postgres_to_python_level_map.get(postgres_level, logging.INFO),
1226 )
1227 else:
1228 level = logging.INFO
1229
1230 return msg, level
1231
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/edb/server/pgcluster.py b/edb/server/pgcluster.py
--- a/edb/server/pgcluster.py
+++ b/edb/server/pgcluster.py
@@ -678,8 +678,10 @@
sockdir = os.path.normpath(
os.path.join(self._data_dir, sockdir))
host_str = sockdir
- else:
+ elif hostaddr:
host_str = hostaddr
+ else:
+ raise PostgresPidFileNotReadyError
if host_str == '*':
host_str = 'localhost'
| {"golden_diff": "diff --git a/edb/server/pgcluster.py b/edb/server/pgcluster.py\n--- a/edb/server/pgcluster.py\n+++ b/edb/server/pgcluster.py\n@@ -678,8 +678,10 @@\n sockdir = os.path.normpath(\n os.path.join(self._data_dir, sockdir))\n host_str = sockdir\n- else:\n+ elif hostaddr:\n host_str = hostaddr\n+ else:\n+ raise PostgresPidFileNotReadyError\n \n if host_str == '*':\n host_str = 'localhost'\n", "issue": "EdgeDB server doesn't always correctly recognize PostgreSQL server startup\n<!-- Please search existing issues to avoid creating duplicates. -->\r\n- EdgeDB Version: 1.0-rc.2+dev6093.d2021102414.gec7d60ef3.cv202110240000\r\n- OS Version: Arch Linux under WSL2\r\n\r\nWhen the EdgeDB server starts up during `edb server` or `edb test` commands and tries to read the `postmaster.pid` file, sometimes it doesn't do it quite correctly, as it seems to me. \r\n\r\nI printed the content of the file and parsed host and port in `_connection_addr_from_pidfile` method and got the following:\r\n```\r\n157186\r\n/home/nik/.local/share/edgedb/_localdev\r\n1635094601\r\n5432\r\n\r\n\r\n 27256 262160\r\n\r\n('', 5432)\r\n```\r\n\r\nThis results in parsing the host to an empty string, trying to connect to it in `pgcon._connect` and and getting a `ConnectionRefusedError` in the process. I suspect that the `postmaster.pid` file itself did not have time to initialize fully because when I ran `edgedb-server --botstrap-only` my computer already had almost 100% CPU/memory load. Also, the problem disappeared when I tried to repeat the steps, but my PC had free resources. But perhaps the `edb.server.pgcluster` module itself should have a check that the `postmaster.pid` file hasn't been fully loaded or something like that anyway.\r\n\r\nSteps to Reproduce:\r\n\r\n1. Load PC resources to the limit (?)\r\n2. Run `edgedb-server --bootstrap-only`\r\n\r\nFull logs of server startup:\r\n```\r\nINFO 159251 2021-10-24T20:04:35.317 edb.server: EdgeDB server (version: 1.0-rc.2+dev6093.d2021102414.gec7d60ef3.cv202110240000) is starting in DEV mode.\r\nINFO 159251 2021-10-24T20:04:36.056 edb.server: Using 127 max backend connections based on total memory.\r\n159273\r\n/home/nik/.local/share/edgedb/_localdev\r\n1635095076\r\n5432\r\n\r\n\r\n 27256 262207\r\n\r\n('', 5432)\r\nINFO 159273 2021-10-24T20:04:36.229 postgres: starting PostgreSQL 13.4 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 11.1.0, 64-bit\r\nINFO 159273 2021-10-24T20:04:37.240 postgres: listening on Unix socket \"/home/nik/.local/share/edgedb/_localdev/.s.PGSQL.5432\"\r\nINFO 159273 2021-10-24T20:04:37.240 postgres: database system was shut down at 2021-10-24 20:04:33 MSK\r\nINFO 159273 2021-10-24T20:04:37.241 postgres: database system is ready to accept connections\r\nINFO 159273 2021-10-24T20:05:37.071 postgres: received fast shutdown request\r\nINFO 159273 2021-10-24T20:05:37.106 postgres: aborting any active transactions\r\nINFO 159273 2021-10-24T20:05:37.107 postgres: background worker \"logical replication launcher\" (PID 159284) exited with exit code 1\r\nINFO 159273 2021-10-24T20:05:37.107 postgres: shutting down\r\nINFO 159273 2021-10-24T20:05:37.239 postgres: database system is shut down\r\n======================================================== Exception occurred: [Errno 111] Connection refused =========================================================\r\n\r\n1. ConnectionRefusedError: [Errno 111] Connection refused\r\n\r\n----------------------------------------------------------------------------- Traceback -----------------------------------------------------------------------------\r\n\r\n /home/nik/.virtualenvs/edgedb/bin/edb, line 33, in <module>\r\n > sys.exit(load_entry_point('edgedb-server', 'console_scripts', 'edb')())\r\n /home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/click/core.py, line 829, in __call__\r\n > return self.main(*args, **kwargs)\r\n /home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/click/core.py, line 782, in main\r\n > rv = self.invoke(ctx)\r\n /home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/click/core.py, line 1259, in invoke\r\n > return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n /home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/click/core.py, line 1066, in invoke\r\n > return ctx.invoke(self.callback, **ctx.params)\r\n /home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/click/core.py, line 610, in invoke\r\n > return callback(*args, **kwargs)\r\n /home/nik/projects/edgedb/edgedb/edb/tools/edb.py, line 55, in server\r\n > srv_main.server_main(**kwargs)\r\n /home/nik/projects/edgedb/edgedb/edb/server/main.py, line 556, in server_main\r\n > asyncio.run(run_server(server_args))\r\n /home/nik/.asdf/installs/python/3.10.0/lib/python3.10/asyncio/runners.py, line 44, in run\r\n > return loop.run_until_complete(main)\r\n uvloop/loop.pyx, line 1501, in uvloop.loop.Loop.run_until_complete\r\n \r\n /home/nik/projects/edgedb/edgedb/edb/server/main.py, line 459, in run_server\r\n > need_cluster_restart = await _init_cluster(cluster, args)\r\n /home/nik/projects/edgedb/edgedb/edb/server/main.py, line 141, in _init_cluster\r\n > need_restart = await bootstrap.ensure_bootstrapped(cluster, args)\r\n /home/nik/projects/edgedb/edgedb/edb/server/bootstrap.py, line 1386, in ensure_bootstrapped\r\n > pgconn = await cluster.connect()\r\n /home/nik/projects/edgedb/edgedb/edb/server/pgcluster.py, line 184, in connect\r\n > conn = await asyncpg.connect(**conn_info)\r\n /home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/asyncpg/connection.py, line 2045, in connect\r\n > return await connect_utils._connect(\r\n /home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/asyncpg/connect_utils.py, line 790, in _connect\r\n > raise last_error\r\n /home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/asyncpg/connect_utils.py, line 776, in _connect\r\n > return await _connect_addr(\r\n /home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/asyncpg/connect_utils.py, line 676, in _connect_addr\r\n > return await __connect_addr(params, timeout, True, *args)\r\n /home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/asyncpg/connect_utils.py, line 720, in __connect_addr\r\n > tr, pr = await compat.wait_for(connector, timeout=timeout)\r\n /home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/asyncpg/compat.py, line 66, in wait_for\r\n > return await asyncio.wait_for(fut, timeout)\r\n /home/nik/.asdf/installs/python/3.10.0/lib/python3.10/asyncio/tasks.py, line 447, in wait_for\r\n > return fut.result()\r\n /home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/asyncpg/connect_utils.py, line 586, in _create_ssl_connection\r\n > tr, pr = await loop.create_connection(\r\n uvloop/loop.pyx, line 2024, in create_connection\r\n \r\n uvloop/loop.pyx, line 2001, in uvloop.loop.Loop.create_connection\r\n \r\n\r\nConnectionRefusedError: [Errno 111] Connection refused\r\n```\n", "before_files": [{"content": "# Copyright (C) 2016-present MagicStack Inc. and the EdgeDB authors.\n# Copyright (C) 2016-present the asyncpg authors and contributors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"PostgreSQL cluster management.\"\"\"\n\nfrom __future__ import annotations\nfrom typing import *\n\nimport asyncio\nimport enum\nimport functools\nimport locale\nimport logging\nimport os\nimport os.path\nimport pathlib\nimport re\nimport shlex\nimport shutil\nimport textwrap\nimport time\nimport urllib.parse\n\nimport asyncpg\n\nfrom edb import buildmeta\nfrom edb.common import supervisor\nfrom edb.common import uuidgen\n\nfrom edb.server import defines\nfrom edb.server.ha import base as ha_base\nfrom edb.pgsql import common as pgcommon\n\nfrom . import pgconnparams\n\n\nlogger = logging.getLogger('edb.pgcluster')\npg_dump_logger = logging.getLogger('pg_dump')\npg_ctl_logger = logging.getLogger('pg_ctl')\npg_config_logger = logging.getLogger('pg_config')\ninitdb_logger = logging.getLogger('initdb')\npostgres_logger = logging.getLogger('postgres')\n\nget_database_backend_name = pgcommon.get_database_backend_name\nget_role_backend_name = pgcommon.get_role_backend_name\n\n\ndef _is_c_utf8_locale_present() -> bool:\n try:\n locale.setlocale(locale.LC_CTYPE, 'C.UTF-8')\n except Exception:\n return False\n else:\n # We specifically don't use locale.getlocale(), because\n # it can lie and return a non-existent locale due to PEP 538.\n locale.setlocale(locale.LC_CTYPE, '')\n return True\n\n\nclass ClusterError(Exception):\n pass\n\n\nclass PostgresPidFileNotReadyError(Exception):\n \"\"\"Raised on an attempt to read non-existent or bad Postgres PID file\"\"\"\n\n\nclass BackendCapabilities(enum.IntFlag):\n\n NONE = 0\n #: Whether CREATE ROLE .. SUPERUSER is allowed\n SUPERUSER_ACCESS = 1 << 0\n #: Whether reading PostgreSQL configuration files\n #: via pg_file_settings is allowed\n CONFIGFILE_ACCESS = 1 << 1\n #: Whether the PostgreSQL server supports the C.UTF-8 locale\n C_UTF8_LOCALE = 1 << 2\n\n\nALL_BACKEND_CAPABILITIES = (\n BackendCapabilities.SUPERUSER_ACCESS\n | BackendCapabilities.CONFIGFILE_ACCESS\n | BackendCapabilities.C_UTF8_LOCALE\n)\n\n\nclass BackendInstanceParams(NamedTuple):\n\n capabilities: BackendCapabilities\n tenant_id: str\n base_superuser: Optional[str] = None\n max_connections: int = 500\n reserved_connections: int = 0\n\n\nclass BackendRuntimeParams(NamedTuple):\n\n instance_params: BackendInstanceParams\n session_authorization_role: Optional[str] = None\n\n\[email protected]_cache\ndef get_default_runtime_params(**instance_params: Any) -> BackendRuntimeParams:\n capabilities = ALL_BACKEND_CAPABILITIES\n if not _is_c_utf8_locale_present():\n capabilities &= ~BackendCapabilities.C_UTF8_LOCALE\n instance_params.setdefault('capabilities', capabilities)\n if 'tenant_id' not in instance_params:\n instance_params = dict(\n tenant_id=buildmeta.get_default_tenant_id(),\n **instance_params,\n )\n\n return BackendRuntimeParams(\n instance_params=BackendInstanceParams(**instance_params),\n )\n\n\nclass BaseCluster:\n\n def __init__(\n self,\n *,\n instance_params: Optional[BackendInstanceParams] = None,\n ) -> None:\n self._connection_addr: Optional[Tuple[str, int]] = None\n self._connection_params: Optional[\n pgconnparams.ConnectionParameters\n ] = None\n self._default_session_auth: Optional[str] = None\n self._pg_config_data: Dict[str, str] = {}\n self._pg_bin_dir: Optional[pathlib.Path] = None\n if instance_params is None:\n self._instance_params = (\n get_default_runtime_params().instance_params)\n else:\n self._instance_params = instance_params\n\n def get_db_name(self, db_name: str) -> str:\n return get_database_backend_name(\n db_name,\n tenant_id=self._instance_params.tenant_id,\n )\n\n def get_role_name(self, role_name: str) -> str:\n return get_database_backend_name(\n role_name,\n tenant_id=self._instance_params.tenant_id,\n )\n\n async def start(\n self,\n wait: int = 60,\n *,\n server_settings: Optional[Mapping[str, str]] = None,\n **opts: Any,\n ) -> None:\n raise NotImplementedError\n\n async def stop(self, wait: int = 60) -> None:\n raise NotImplementedError\n\n def destroy(self) -> None:\n raise NotImplementedError\n\n async def connect(self, **kwargs: Any) -> asyncpg.Connection:\n conn_info = self.get_connection_spec()\n conn_info.update(kwargs)\n if 'sslmode' in conn_info:\n conn_info['ssl'] = conn_info.pop('sslmode').name\n conn = await asyncpg.connect(**conn_info)\n\n if (not kwargs.get('user')\n and self._default_session_auth\n and conn_info.get('user') != self._default_session_auth):\n # No explicit user given, and the default\n # SESSION AUTHORIZATION is different from the user\n # used to connect.\n await conn.execute(\n f'SET ROLE {pgcommon.quote_ident(self._default_session_auth)}'\n )\n\n return conn\n\n async def start_watching(\n self, cluster_protocol: Optional[ha_base.ClusterProtocol] = None\n ) -> None:\n pass\n\n def stop_watching(self) -> None:\n pass\n\n def get_runtime_params(self) -> BackendRuntimeParams:\n params = self.get_connection_params()\n login_role: Optional[str] = params.user\n sup_role = self.get_role_name(defines.EDGEDB_SUPERUSER)\n return BackendRuntimeParams(\n instance_params=self._instance_params,\n session_authorization_role=(\n None if login_role == sup_role else login_role\n ),\n )\n\n def get_connection_addr(self) -> Optional[Tuple[str, int]]:\n return self._get_connection_addr()\n\n def set_default_session_authorization(self, rolename: str) -> None:\n self._default_session_auth = rolename\n\n def set_connection_params(\n self,\n params: pgconnparams.ConnectionParameters,\n ) -> None:\n self._connection_params = params\n\n def get_connection_params(\n self,\n ) -> pgconnparams.ConnectionParameters:\n assert self._connection_params is not None\n return self._connection_params\n\n def get_connection_spec(self) -> Dict[str, Any]:\n conn_dict: Dict[str, Any] = {}\n addr = self.get_connection_addr()\n assert addr is not None\n conn_dict['host'] = addr[0]\n conn_dict['port'] = addr[1]\n params = self.get_connection_params()\n for k in (\n 'user',\n 'password',\n 'database',\n 'ssl',\n 'sslmode',\n 'server_settings',\n ):\n v = getattr(params, k)\n if v is not None:\n conn_dict[k] = v\n\n cluster_settings = conn_dict.get('server_settings', {})\n\n edgedb_settings = {\n 'client_encoding': 'utf-8',\n 'search_path': 'edgedb',\n 'timezone': 'UTC',\n 'intervalstyle': 'iso_8601',\n 'jit': 'off',\n }\n\n conn_dict['server_settings'] = {**cluster_settings, **edgedb_settings}\n\n return conn_dict\n\n def _get_connection_addr(self) -> Optional[Tuple[str, int]]:\n return self._connection_addr\n\n def is_managed(self) -> bool:\n raise NotImplementedError\n\n async def get_status(self) -> str:\n raise NotImplementedError\n\n async def dump_database(\n self,\n dbname: str,\n *,\n exclude_schemas: Iterable[str] = (),\n dump_object_owners: bool = True,\n ) -> bytes:\n status = await self.get_status()\n if status != 'running':\n raise ClusterError('cannot dump: cluster is not running')\n\n if self._pg_bin_dir is None:\n await self.lookup_postgres()\n pg_dump = self._find_pg_binary('pg_dump')\n conn_spec = self.get_connection_spec()\n\n args = [\n pg_dump,\n '--inserts',\n f'--dbname={dbname}',\n f'--host={conn_spec[\"host\"]}',\n f'--port={conn_spec[\"port\"]}',\n f'--username={conn_spec[\"user\"]}',\n ]\n\n if not dump_object_owners:\n args.append('--no-owner')\n\n env = os.environ.copy()\n if conn_spec.get(\"password\"):\n env['PGPASSWORD'] = conn_spec[\"password\"]\n\n if exclude_schemas:\n for exclude_schema in exclude_schemas:\n args.append(f'--exclude-schema={exclude_schema}')\n\n stdout_lines, _, _ = await _run_logged_subprocess(\n args,\n logger=pg_dump_logger,\n log_stdout=False,\n env=env,\n )\n return b'\\n'.join(stdout_lines)\n\n def _find_pg_binary(self, binary: str) -> str:\n assert self._pg_bin_dir is not None\n bpath = self._pg_bin_dir / binary\n if not bpath.is_file():\n raise ClusterError(\n 'could not find {} executable: '.format(binary) +\n '{!r} does not exist or is not a file'.format(bpath))\n\n return str(bpath)\n\n def _subprocess_error(\n self,\n name: str,\n exitcode: int,\n stderr: Optional[bytes],\n ) -> ClusterError:\n if stderr:\n return ClusterError(\n f'{name} exited with status {exitcode}:\\n'\n + textwrap.indent(stderr.decode(), ' ' * 4),\n )\n else:\n return ClusterError(\n f'{name} exited with status {exitcode}',\n )\n\n async def lookup_postgres(self) -> None:\n self._pg_bin_dir = await get_pg_bin_dir()\n\n\nclass Cluster(BaseCluster):\n def __init__(\n self,\n data_dir: pathlib.Path,\n *,\n runstate_dir: Optional[pathlib.Path] = None,\n instance_params: Optional[BackendInstanceParams] = None,\n log_level: str = 'i',\n ):\n super().__init__(instance_params=instance_params)\n self._data_dir = data_dir\n self._runstate_dir = (\n runstate_dir if runstate_dir is not None else data_dir)\n self._daemon_pid: Optional[int] = None\n self._daemon_process: Optional[asyncio.subprocess.Process] = None\n self._daemon_supervisor: Optional[supervisor.Supervisor] = None\n self._log_level = log_level\n\n def is_managed(self) -> bool:\n return True\n\n def get_data_dir(self) -> pathlib.Path:\n return self._data_dir\n\n async def get_status(self) -> str:\n stdout_lines, stderr_lines, exit_code = (\n await _run_logged_text_subprocess(\n [self._pg_ctl, 'status', '-D', str(self._data_dir)],\n logger=pg_ctl_logger,\n check=False,\n )\n )\n\n if (\n exit_code == 4\n or not os.path.exists(self._data_dir)\n or not os.listdir(self._data_dir)\n ):\n return 'not-initialized'\n elif exit_code == 3:\n return 'stopped'\n elif exit_code == 0:\n output = '\\n'.join(stdout_lines)\n r = re.match(r'.*PID\\s?:\\s+(\\d+).*', output)\n if not r:\n raise ClusterError(\n f'could not parse pg_ctl status output: {output}')\n self._daemon_pid = int(r.group(1))\n if self._connection_addr is None:\n self._connection_addr = self._connection_addr_from_pidfile()\n return 'running'\n else:\n stderr_text = '\\n'.join(stderr_lines)\n raise ClusterError(\n f'`pg_ctl status` exited with status {exit_code}:\\n'\n + textwrap.indent(stderr_text, ' ' * 4),\n )\n\n async def ensure_initialized(self, **settings: Any) -> bool:\n cluster_status = await self.get_status()\n\n if cluster_status == 'not-initialized':\n logger.info(\n 'Initializing database cluster in %s', self._data_dir)\n\n instance_params = self.get_runtime_params().instance_params\n capabilities = instance_params.capabilities\n have_c_utf8 = (\n capabilities & BackendCapabilities.C_UTF8_LOCALE)\n await self.init(\n username='postgres',\n locale='C.UTF-8' if have_c_utf8 else 'en_US.UTF-8',\n lc_collate='C',\n encoding='UTF8',\n )\n self.reset_hba()\n self.add_hba_entry(\n type='local',\n database='all',\n user='postgres',\n auth_method='trust'\n )\n return True\n else:\n return False\n\n async def init(self, **settings: str) -> None:\n \"\"\"Initialize cluster.\"\"\"\n if await self.get_status() != 'not-initialized':\n raise ClusterError(\n 'cluster in {!r} has already been initialized'.format(\n self._data_dir))\n\n if settings:\n settings_args = ['--{}={}'.format(k.replace('_', '-'), v)\n for k, v in settings.items()]\n extra_args = ['-o'] + [' '.join(settings_args)]\n else:\n extra_args = []\n\n await _run_logged_subprocess(\n [self._pg_ctl, 'init', '-D', str(self._data_dir)] + extra_args,\n logger=initdb_logger,\n )\n\n async def start(\n self,\n wait: int = 60,\n *,\n server_settings: Optional[Mapping[str, str]] = None,\n **opts: str,\n ) -> None:\n \"\"\"Start the cluster.\"\"\"\n status = await self.get_status()\n if status == 'running':\n return\n elif status == 'not-initialized':\n raise ClusterError(\n 'cluster in {!r} has not been initialized'.format(\n self._data_dir))\n\n extra_args = ['--{}={}'.format(k, v) for k, v in opts.items()]\n\n start_settings = {\n 'listen_addresses': '', # we use Unix sockets\n 'unix_socket_permissions': '0700',\n 'unix_socket_directories': str(self._runstate_dir),\n # here we are not setting superuser_reserved_connections because\n # we're using superuser only now (so all connections available),\n # and we don't support reserving connections for now\n 'max_connections': str(self._instance_params.max_connections),\n # From Postgres docs:\n #\n # You might need to raise this value if you have queries that\n # touch many different tables in a single transaction, e.g.,\n # query of a parent table with many children.\n #\n # EdgeDB queries might touch _lots_ of tables, especially in deep\n # inheritance hierarchies. This is especially important in low\n # `max_connections` scenarios.\n 'max_locks_per_transaction': 256,\n }\n\n if os.getenv('EDGEDB_DEBUG_PGSERVER'):\n start_settings['log_min_messages'] = 'info'\n start_settings['log_statement'] = 'all'\n else:\n log_level_map = {\n 'd': 'INFO',\n 'i': 'NOTICE',\n 'w': 'WARNING',\n 'e': 'ERROR',\n 's': 'PANIC',\n }\n start_settings['log_min_messages'] = log_level_map[self._log_level]\n start_settings['log_statement'] = 'none'\n start_settings['log_line_prefix'] = ''\n\n if server_settings:\n start_settings.update(server_settings)\n\n ssl_key = start_settings.get('ssl_key_file')\n if ssl_key:\n # Make sure server certificate key file has correct permissions.\n keyfile = os.path.join(self._data_dir, 'srvkey.pem')\n assert isinstance(ssl_key, str)\n shutil.copy(ssl_key, keyfile)\n os.chmod(keyfile, 0o600)\n start_settings['ssl_key_file'] = keyfile\n\n for k, v in start_settings.items():\n extra_args.extend(['-c', '{}={}'.format(k, v)])\n\n self._daemon_process, *loggers = await _start_logged_subprocess(\n [self._postgres, '-D', str(self._data_dir), *extra_args],\n capture_stdout=False,\n capture_stderr=False,\n logger=postgres_logger,\n log_processor=postgres_log_processor,\n )\n self._daemon_pid = self._daemon_process.pid\n\n sup = await supervisor.Supervisor.create(name=\"postgres loggers\")\n for logger_coro in loggers:\n sup.create_task(logger_coro)\n self._daemon_supervisor = sup\n\n await self._test_connection(timeout=wait)\n\n async def reload(self) -> None:\n \"\"\"Reload server configuration.\"\"\"\n status = await self.get_status()\n if status != 'running':\n raise ClusterError('cannot reload: cluster is not running')\n\n await _run_logged_subprocess(\n [self._pg_ctl, 'reload', '-D', str(self._data_dir)],\n logger=pg_ctl_logger,\n )\n\n async def stop(self, wait: int = 60) -> None:\n await _run_logged_subprocess(\n [\n self._pg_ctl,\n 'stop', '-D', str(self._data_dir),\n '-t', str(wait), '-m', 'fast'\n ],\n logger=pg_ctl_logger,\n )\n\n if (\n self._daemon_process is not None and\n self._daemon_process.returncode is None\n ):\n self._daemon_process.terminate()\n await asyncio.wait_for(self._daemon_process.wait(), timeout=wait)\n\n if self._daemon_supervisor is not None:\n await self._daemon_supervisor.cancel()\n self._daemon_supervisor = None\n\n def destroy(self) -> None:\n shutil.rmtree(self._data_dir)\n\n def reset_hba(self) -> None:\n \"\"\"Remove all records from pg_hba.conf.\"\"\"\n pg_hba = os.path.join(self._data_dir, 'pg_hba.conf')\n\n try:\n with open(pg_hba, 'w'):\n pass\n except IOError as e:\n raise ClusterError(\n 'cannot modify HBA records: {}'.format(e)) from e\n\n def add_hba_entry(\n self,\n *,\n type: str = 'host',\n database: str,\n user: str,\n address: Optional[str] = None,\n auth_method: str,\n auth_options: Optional[Mapping[str, Any]] = None,\n ) -> None:\n \"\"\"Add a record to pg_hba.conf.\"\"\"\n if type not in {'local', 'host', 'hostssl', 'hostnossl'}:\n raise ValueError('invalid HBA record type: {!r}'.format(type))\n\n pg_hba = os.path.join(self._data_dir, 'pg_hba.conf')\n\n record = '{} {} {}'.format(type, database, user)\n\n if type != 'local':\n if address is None:\n raise ValueError(\n '{!r} entry requires a valid address'.format(type))\n else:\n record += ' {}'.format(address)\n\n record += ' {}'.format(auth_method)\n\n if auth_options is not None:\n record += ' ' + ' '.join(\n '{}={}'.format(k, v) for k, v in auth_options.items())\n\n try:\n with open(pg_hba, 'a') as f:\n print(record, file=f)\n except IOError as e:\n raise ClusterError(\n 'cannot modify HBA records: {}'.format(e)) from e\n\n async def trust_local_connections(self) -> None:\n self.reset_hba()\n\n self.add_hba_entry(type='local', database='all',\n user='all', auth_method='trust')\n self.add_hba_entry(type='host', address='127.0.0.1/32',\n database='all', user='all',\n auth_method='trust')\n self.add_hba_entry(type='host', address='::1/128',\n database='all', user='all',\n auth_method='trust')\n status = await self.get_status()\n if status == 'running':\n await self.reload()\n\n async def lookup_postgres(self) -> None:\n await super().lookup_postgres()\n self._pg_ctl = self._find_pg_binary('pg_ctl')\n self._postgres = self._find_pg_binary('postgres')\n\n def _get_connection_addr(self) -> Tuple[str, int]:\n if self._connection_addr is None:\n self._connection_addr = self._connection_addr_from_pidfile()\n\n return self._connection_addr\n\n def _connection_addr_from_pidfile(self) -> Tuple[str, int]:\n pidfile = os.path.join(self._data_dir, 'postmaster.pid')\n\n try:\n with open(pidfile, 'rt') as f:\n piddata = f.read()\n except FileNotFoundError:\n raise PostgresPidFileNotReadyError\n\n lines = piddata.splitlines()\n\n if len(lines) < 6:\n # A complete postgres pidfile is at least 6 lines\n raise PostgresPidFileNotReadyError\n\n pmpid = int(lines[0])\n if self._daemon_pid and pmpid != self._daemon_pid:\n # This might be an old pidfile left from previous postgres\n # daemon run.\n raise PostgresPidFileNotReadyError\n\n portnum = int(lines[3])\n sockdir = lines[4]\n hostaddr = lines[5]\n\n if sockdir:\n if sockdir[0] != '/':\n # Relative sockdir\n sockdir = os.path.normpath(\n os.path.join(self._data_dir, sockdir))\n host_str = sockdir\n else:\n host_str = hostaddr\n\n if host_str == '*':\n host_str = 'localhost'\n elif host_str == '0.0.0.0':\n host_str = '127.0.0.1'\n elif host_str == '::':\n host_str = '::1'\n\n return (host_str, portnum)\n\n async def _test_connection(self, timeout: int = 60) -> str:\n self._connection_addr = None\n connected = False\n\n for n in range(timeout + 1):\n # pg usually comes up pretty quickly, but not so\n # quickly that we don't hit the wait case. Make our\n # first sleep pretty short, to shave almost a second\n # off the happy case.\n sleep_time = 1 if n else 0.10\n\n try:\n conn_addr = self._get_connection_addr()\n except PostgresPidFileNotReadyError:\n time.sleep(sleep_time)\n continue\n\n try:\n con = await asyncpg.connect(\n database='postgres',\n user='postgres',\n timeout=5,\n host=conn_addr[0],\n port=conn_addr[1],\n )\n except (\n OSError,\n asyncio.TimeoutError,\n asyncpg.CannotConnectNowError,\n asyncpg.PostgresConnectionError,\n ):\n time.sleep(sleep_time)\n continue\n except asyncpg.PostgresError:\n # Any other error other than ServerNotReadyError or\n # ConnectionError is interpreted to indicate the server is\n # up.\n break\n else:\n connected = True\n await con.close()\n break\n\n if connected:\n return 'running'\n else:\n return 'not-initialized'\n\n\nclass RemoteCluster(BaseCluster):\n def __init__(\n self,\n addr: Tuple[str, int],\n params: pgconnparams.ConnectionParameters,\n *,\n instance_params: Optional[BackendInstanceParams] = None,\n ha_backend: Optional[ha_base.HABackend] = None,\n ):\n super().__init__(instance_params=instance_params)\n self._connection_addr = addr\n self._connection_params = params\n self._ha_backend = ha_backend\n\n def _get_connection_addr(self) -> Optional[Tuple[str, int]]:\n if self._ha_backend is not None:\n return self._ha_backend.get_master_addr()\n return self._connection_addr\n\n async def ensure_initialized(self, **settings: Any) -> bool:\n return False\n\n def is_managed(self) -> bool:\n return False\n\n async def get_status(self) -> str:\n return 'running'\n\n def init(self, **settings: str) -> str:\n pass\n\n async def start(\n self,\n wait: int = 60,\n *,\n server_settings: Optional[Mapping[str, str]] = None,\n **opts: Any,\n ) -> None:\n pass\n\n async def stop(self, wait: int = 60) -> None:\n pass\n\n def destroy(self) -> None:\n pass\n\n def reset_hba(self) -> None:\n raise ClusterError('cannot modify HBA records of unmanaged cluster')\n\n def add_hba_entry(\n self,\n *,\n type: str = 'host',\n database: str,\n user: str,\n address: Optional[str] = None,\n auth_method: str,\n auth_options: Optional[Mapping[str, Any]] = None,\n ) -> None:\n raise ClusterError('cannot modify HBA records of unmanaged cluster')\n\n async def start_watching(\n self, cluster_protocol: Optional[ha_base.ClusterProtocol] = None\n ) -> None:\n if self._ha_backend is not None:\n await self._ha_backend.start_watching(cluster_protocol)\n\n def stop_watching(self) -> None:\n if self._ha_backend is not None:\n self._ha_backend.stop_watching()\n\n\nasync def get_pg_bin_dir() -> pathlib.Path:\n pg_config_data = await get_pg_config()\n pg_bin_dir = pg_config_data.get('bindir')\n if not pg_bin_dir:\n raise ClusterError(\n 'pg_config output did not provide the BINDIR value')\n return pathlib.Path(pg_bin_dir)\n\n\nasync def get_pg_config() -> Dict[str, str]:\n stdout_lines, _, _ = await _run_logged_text_subprocess(\n [str(buildmeta.get_pg_config_path())],\n logger=pg_config_logger,\n )\n\n config = {}\n for line in stdout_lines:\n k, eq, v = line.partition('=')\n if eq:\n config[k.strip().lower()] = v.strip()\n\n return config\n\n\nasync def get_local_pg_cluster(\n data_dir: pathlib.Path,\n *,\n runstate_dir: Optional[pathlib.Path] = None,\n max_connections: Optional[int] = None,\n tenant_id: Optional[str] = None,\n log_level: Optional[str] = None,\n) -> Cluster:\n if log_level is None:\n log_level = 'i'\n if tenant_id is None:\n tenant_id = buildmeta.get_default_tenant_id()\n instance_params = None\n if max_connections is not None:\n instance_params = get_default_runtime_params(\n max_connections=max_connections,\n tenant_id=tenant_id,\n ).instance_params\n cluster = Cluster(\n data_dir=data_dir,\n runstate_dir=runstate_dir,\n instance_params=instance_params,\n log_level=log_level,\n )\n await cluster.lookup_postgres()\n return cluster\n\n\nasync def get_remote_pg_cluster(\n dsn: str,\n *,\n tenant_id: Optional[str] = None,\n) -> RemoteCluster:\n parsed = urllib.parse.urlparse(dsn)\n ha_backend = None\n\n if parsed.scheme not in {'postgresql', 'postgres'}:\n ha_backend = ha_base.get_backend(parsed)\n if ha_backend is None:\n raise ValueError(\n 'invalid DSN: scheme is expected to be \"postgresql\", '\n '\"postgres\" or one of the supported HA backend, '\n 'got {!r}'.format(parsed.scheme))\n\n addr = await ha_backend.get_cluster_consensus()\n dsn = 'postgresql://{}:{}'.format(*addr)\n\n addrs, params = pgconnparams.parse_dsn(dsn)\n if len(addrs) > 1:\n raise ValueError('multiple hosts in Postgres DSN are not supported')\n if tenant_id is None:\n t_id = buildmeta.get_default_tenant_id()\n else:\n t_id = tenant_id\n rcluster = RemoteCluster(addrs[0], params)\n\n async def _get_cluster_type(\n conn: asyncpg.Connection,\n ) -> Tuple[Type[RemoteCluster], Optional[str]]:\n managed_clouds = {\n 'rds_superuser': RemoteCluster, # Amazon RDS\n 'cloudsqlsuperuser': RemoteCluster, # GCP Cloud SQL\n }\n\n managed_cloud_super = await conn.fetchval(\n \"\"\"\n SELECT\n rolname\n FROM\n pg_roles\n WHERE\n rolname = any($1::text[])\n LIMIT\n 1\n \"\"\",\n list(managed_clouds),\n )\n\n if managed_cloud_super is not None:\n return managed_clouds[managed_cloud_super], managed_cloud_super\n else:\n return RemoteCluster, None\n\n async def _detect_capabilities(\n conn: asyncpg.Connection,\n ) -> BackendCapabilities:\n caps = BackendCapabilities.NONE\n\n try:\n await conn.execute(f'ALTER SYSTEM SET foo = 10')\n except asyncpg.InsufficientPrivilegeError:\n configfile_access = False\n except asyncpg.UndefinedObjectError:\n configfile_access = True\n else:\n configfile_access = True\n\n if configfile_access:\n caps |= BackendCapabilities.CONFIGFILE_ACCESS\n\n tx = conn.transaction()\n await tx.start()\n rname = str(uuidgen.uuid1mc())\n\n try:\n await conn.execute(f'CREATE ROLE \"{rname}\" WITH SUPERUSER')\n except asyncpg.InsufficientPrivilegeError:\n can_make_superusers = False\n else:\n can_make_superusers = True\n finally:\n await tx.rollback()\n\n if can_make_superusers:\n caps |= BackendCapabilities.SUPERUSER_ACCESS\n\n coll = await conn.fetchval('''\n SELECT collname FROM pg_collation\n WHERE lower(replace(collname, '-', '')) = 'c.utf8' LIMIT 1;\n ''')\n\n if coll is not None:\n caps |= BackendCapabilities.C_UTF8_LOCALE\n\n return caps\n\n async def _get_pg_settings(\n conn: asyncpg.Connection,\n name: str,\n ) -> str:\n return await conn.fetchval( # type: ignore\n 'SELECT setting FROM pg_settings WHERE name = $1', name\n )\n\n async def _get_reserved_connections(\n conn: asyncpg.Connection,\n ) -> int:\n rv = int(\n await _get_pg_settings(conn, 'superuser_reserved_connections')\n )\n for name in [\n 'rds.rds_superuser_reserved_connections',\n ]:\n value = await _get_pg_settings(conn, name)\n if value:\n rv += int(value)\n return rv\n\n conn = await rcluster.connect()\n try:\n cluster_type, superuser_name = await _get_cluster_type(conn)\n max_connections = await _get_pg_settings(conn, 'max_connections')\n instance_params = BackendInstanceParams(\n capabilities=await _detect_capabilities(conn),\n base_superuser=superuser_name,\n max_connections=int(max_connections),\n reserved_connections=await _get_reserved_connections(conn),\n tenant_id=t_id,\n )\n finally:\n await conn.close()\n\n return cluster_type(\n addrs[0],\n params,\n instance_params=instance_params,\n ha_backend=ha_backend,\n )\n\n\nasync def _run_logged_text_subprocess(\n args: Sequence[str],\n logger: logging.Logger,\n level: int = logging.DEBUG,\n check: bool = True,\n log_stdout: bool = True,\n timeout: Optional[float] = None,\n **kwargs: Any,\n) -> Tuple[List[str], List[str], int]:\n stdout_lines, stderr_lines, exit_code = await _run_logged_subprocess(\n args,\n logger=logger,\n level=level,\n check=check,\n log_stdout=log_stdout,\n timeout=timeout,\n **kwargs,\n )\n\n return (\n [line.decode() for line in stdout_lines],\n [line.decode() for line in stderr_lines],\n exit_code,\n )\n\n\nasync def _run_logged_subprocess(\n args: Sequence[str],\n logger: logging.Logger,\n level: int = logging.DEBUG,\n check: bool = True,\n log_stdout: bool = True,\n log_stderr: bool = True,\n capture_stdout: bool = True,\n capture_stderr: bool = True,\n timeout: Optional[float] = None,\n **kwargs: Any,\n) -> Tuple[List[bytes], List[bytes], int]:\n process, stdout_reader, stderr_reader = await _start_logged_subprocess(\n args,\n logger=logger,\n level=level,\n log_stdout=log_stdout,\n log_stderr=log_stderr,\n capture_stdout=capture_stdout,\n capture_stderr=capture_stderr,\n **kwargs,\n )\n\n exit_code, stdout_lines, stderr_lines = await asyncio.wait_for(\n asyncio.gather(process.wait(), stdout_reader, stderr_reader),\n timeout=timeout,\n )\n\n if exit_code != 0 and check:\n stderr_text = b'\\n'.join(stderr_lines).decode()\n raise ClusterError(\n f'{args[0]} exited with status {exit_code}:\\n'\n + textwrap.indent(stderr_text, ' ' * 4),\n )\n else:\n return stdout_lines, stderr_lines, exit_code\n\n\nasync def _start_logged_subprocess(\n args: Sequence[str],\n *,\n logger: logging.Logger,\n level: int = logging.DEBUG,\n log_stdout: bool = True,\n log_stderr: bool = True,\n capture_stdout: bool = True,\n capture_stderr: bool = True,\n log_processor: Optional[Callable[[str], Tuple[str, int]]] = None,\n **kwargs: Any,\n) -> Tuple[\n asyncio.subprocess.Process,\n Coroutine[Any, Any, List[bytes]],\n Coroutine[Any, Any, List[bytes]],\n]:\n logger.log(\n level,\n f'running `{\" \".join(shlex.quote(arg) for arg in args)}`'\n )\n\n process = await asyncio.create_subprocess_exec(\n *args,\n stdout=(\n asyncio.subprocess.PIPE if log_stdout or capture_stdout\n else asyncio.subprocess.DEVNULL\n ),\n stderr=(\n asyncio.subprocess.PIPE if log_stderr or capture_stderr\n else asyncio.subprocess.DEVNULL\n ),\n **kwargs,\n )\n\n assert process.stderr is not None\n assert process.stdout is not None\n\n if log_stderr and capture_stderr:\n stderr_reader = _capture_and_log_subprocess_output(\n process.pid,\n process.stderr,\n logger,\n level,\n log_processor,\n )\n elif capture_stderr:\n stderr_reader = _capture_subprocess_output(process.stderr)\n elif log_stderr:\n stderr_reader = _log_subprocess_output(\n process.pid, process.stderr, logger, level, log_processor)\n else:\n stderr_reader = _dummy()\n\n if log_stdout and capture_stdout:\n stdout_reader = _capture_and_log_subprocess_output(\n process.pid,\n process.stdout,\n logger,\n level,\n log_processor,\n )\n elif capture_stdout:\n stdout_reader = _capture_subprocess_output(process.stdout)\n elif log_stdout:\n stdout_reader = _log_subprocess_output(\n process.pid, process.stdout, logger, level, log_processor)\n else:\n stdout_reader = _dummy()\n\n return process, stdout_reader, stderr_reader\n\n\nasync def _capture_subprocess_output(\n stream: asyncio.StreamReader,\n) -> List[bytes]:\n lines = []\n while not stream.at_eof():\n line = await stream.readline()\n if line or not stream.at_eof():\n lines.append(line.rstrip(b'\\n'))\n return lines\n\n\nasync def _capture_and_log_subprocess_output(\n pid: int,\n stream: asyncio.StreamReader,\n logger: logging.Logger,\n level: int,\n log_processor: Optional[Callable[[str], Tuple[str, int]]] = None,\n) -> List[bytes]:\n lines = []\n while not stream.at_eof():\n line = await stream.readline()\n if line or not stream.at_eof():\n line = line.rstrip(b'\\n')\n lines.append(line)\n log_line = line.decode()\n if log_processor is not None:\n log_line, level = log_processor(log_line)\n logger.log(level, log_line, extra={\"process\": pid})\n return lines\n\n\nasync def _log_subprocess_output(\n pid: int,\n stream: asyncio.StreamReader,\n logger: logging.Logger,\n level: int,\n log_processor: Optional[Callable[[str], Tuple[str, int]]] = None,\n) -> List[bytes]:\n while not stream.at_eof():\n line = await stream.readline()\n if line or not stream.at_eof():\n log_line = line.rstrip(b'\\n').decode()\n if log_processor is not None:\n log_line, level = log_processor(log_line)\n logger.log(level, log_line, extra={\"process\": pid})\n return []\n\n\nasync def _dummy() -> List[bytes]:\n return []\n\n\npostgres_to_python_level_map = {\n \"DEBUG5\": logging.DEBUG,\n \"DEBUG4\": logging.DEBUG,\n \"DEBUG3\": logging.DEBUG,\n \"DEBUG2\": logging.DEBUG,\n \"DEBUG1\": logging.DEBUG,\n \"INFO\": logging.INFO,\n \"NOTICE\": logging.INFO,\n \"LOG\": logging.INFO,\n \"WARNING\": logging.WARNING,\n \"ERROR\": logging.ERROR,\n \"FATAL\": logging.CRITICAL,\n \"PANIC\": logging.CRITICAL,\n}\n\npostgres_log_re = re.compile(r'^(\\w+):\\s*(.*)$')\n\npostgres_specific_msg_level_map = {\n \"terminating connection due to administrator command\": logging.INFO,\n \"the database system is shutting down\": logging.INFO,\n}\n\n\ndef postgres_log_processor(msg: str) -> Tuple[str, int]:\n if m := postgres_log_re.match(msg):\n postgres_level = m.group(1)\n msg = m.group(2)\n level = postgres_specific_msg_level_map.get(\n msg,\n postgres_to_python_level_map.get(postgres_level, logging.INFO),\n )\n else:\n level = logging.INFO\n\n return msg, level\n", "path": "edb/server/pgcluster.py"}], "after_files": [{"content": "# Copyright (C) 2016-present MagicStack Inc. and the EdgeDB authors.\n# Copyright (C) 2016-present the asyncpg authors and contributors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"PostgreSQL cluster management.\"\"\"\n\nfrom __future__ import annotations\nfrom typing import *\n\nimport asyncio\nimport enum\nimport functools\nimport locale\nimport logging\nimport os\nimport os.path\nimport pathlib\nimport re\nimport shlex\nimport shutil\nimport textwrap\nimport time\nimport urllib.parse\n\nimport asyncpg\n\nfrom edb import buildmeta\nfrom edb.common import supervisor\nfrom edb.common import uuidgen\n\nfrom edb.server import defines\nfrom edb.server.ha import base as ha_base\nfrom edb.pgsql import common as pgcommon\n\nfrom . import pgconnparams\n\n\nlogger = logging.getLogger('edb.pgcluster')\npg_dump_logger = logging.getLogger('pg_dump')\npg_ctl_logger = logging.getLogger('pg_ctl')\npg_config_logger = logging.getLogger('pg_config')\ninitdb_logger = logging.getLogger('initdb')\npostgres_logger = logging.getLogger('postgres')\n\nget_database_backend_name = pgcommon.get_database_backend_name\nget_role_backend_name = pgcommon.get_role_backend_name\n\n\ndef _is_c_utf8_locale_present() -> bool:\n try:\n locale.setlocale(locale.LC_CTYPE, 'C.UTF-8')\n except Exception:\n return False\n else:\n # We specifically don't use locale.getlocale(), because\n # it can lie and return a non-existent locale due to PEP 538.\n locale.setlocale(locale.LC_CTYPE, '')\n return True\n\n\nclass ClusterError(Exception):\n pass\n\n\nclass PostgresPidFileNotReadyError(Exception):\n \"\"\"Raised on an attempt to read non-existent or bad Postgres PID file\"\"\"\n\n\nclass BackendCapabilities(enum.IntFlag):\n\n NONE = 0\n #: Whether CREATE ROLE .. SUPERUSER is allowed\n SUPERUSER_ACCESS = 1 << 0\n #: Whether reading PostgreSQL configuration files\n #: via pg_file_settings is allowed\n CONFIGFILE_ACCESS = 1 << 1\n #: Whether the PostgreSQL server supports the C.UTF-8 locale\n C_UTF8_LOCALE = 1 << 2\n\n\nALL_BACKEND_CAPABILITIES = (\n BackendCapabilities.SUPERUSER_ACCESS\n | BackendCapabilities.CONFIGFILE_ACCESS\n | BackendCapabilities.C_UTF8_LOCALE\n)\n\n\nclass BackendInstanceParams(NamedTuple):\n\n capabilities: BackendCapabilities\n tenant_id: str\n base_superuser: Optional[str] = None\n max_connections: int = 500\n reserved_connections: int = 0\n\n\nclass BackendRuntimeParams(NamedTuple):\n\n instance_params: BackendInstanceParams\n session_authorization_role: Optional[str] = None\n\n\[email protected]_cache\ndef get_default_runtime_params(**instance_params: Any) -> BackendRuntimeParams:\n capabilities = ALL_BACKEND_CAPABILITIES\n if not _is_c_utf8_locale_present():\n capabilities &= ~BackendCapabilities.C_UTF8_LOCALE\n instance_params.setdefault('capabilities', capabilities)\n if 'tenant_id' not in instance_params:\n instance_params = dict(\n tenant_id=buildmeta.get_default_tenant_id(),\n **instance_params,\n )\n\n return BackendRuntimeParams(\n instance_params=BackendInstanceParams(**instance_params),\n )\n\n\nclass BaseCluster:\n\n def __init__(\n self,\n *,\n instance_params: Optional[BackendInstanceParams] = None,\n ) -> None:\n self._connection_addr: Optional[Tuple[str, int]] = None\n self._connection_params: Optional[\n pgconnparams.ConnectionParameters\n ] = None\n self._default_session_auth: Optional[str] = None\n self._pg_config_data: Dict[str, str] = {}\n self._pg_bin_dir: Optional[pathlib.Path] = None\n if instance_params is None:\n self._instance_params = (\n get_default_runtime_params().instance_params)\n else:\n self._instance_params = instance_params\n\n def get_db_name(self, db_name: str) -> str:\n return get_database_backend_name(\n db_name,\n tenant_id=self._instance_params.tenant_id,\n )\n\n def get_role_name(self, role_name: str) -> str:\n return get_database_backend_name(\n role_name,\n tenant_id=self._instance_params.tenant_id,\n )\n\n async def start(\n self,\n wait: int = 60,\n *,\n server_settings: Optional[Mapping[str, str]] = None,\n **opts: Any,\n ) -> None:\n raise NotImplementedError\n\n async def stop(self, wait: int = 60) -> None:\n raise NotImplementedError\n\n def destroy(self) -> None:\n raise NotImplementedError\n\n async def connect(self, **kwargs: Any) -> asyncpg.Connection:\n conn_info = self.get_connection_spec()\n conn_info.update(kwargs)\n if 'sslmode' in conn_info:\n conn_info['ssl'] = conn_info.pop('sslmode').name\n conn = await asyncpg.connect(**conn_info)\n\n if (not kwargs.get('user')\n and self._default_session_auth\n and conn_info.get('user') != self._default_session_auth):\n # No explicit user given, and the default\n # SESSION AUTHORIZATION is different from the user\n # used to connect.\n await conn.execute(\n f'SET ROLE {pgcommon.quote_ident(self._default_session_auth)}'\n )\n\n return conn\n\n async def start_watching(\n self, cluster_protocol: Optional[ha_base.ClusterProtocol] = None\n ) -> None:\n pass\n\n def stop_watching(self) -> None:\n pass\n\n def get_runtime_params(self) -> BackendRuntimeParams:\n params = self.get_connection_params()\n login_role: Optional[str] = params.user\n sup_role = self.get_role_name(defines.EDGEDB_SUPERUSER)\n return BackendRuntimeParams(\n instance_params=self._instance_params,\n session_authorization_role=(\n None if login_role == sup_role else login_role\n ),\n )\n\n def get_connection_addr(self) -> Optional[Tuple[str, int]]:\n return self._get_connection_addr()\n\n def set_default_session_authorization(self, rolename: str) -> None:\n self._default_session_auth = rolename\n\n def set_connection_params(\n self,\n params: pgconnparams.ConnectionParameters,\n ) -> None:\n self._connection_params = params\n\n def get_connection_params(\n self,\n ) -> pgconnparams.ConnectionParameters:\n assert self._connection_params is not None\n return self._connection_params\n\n def get_connection_spec(self) -> Dict[str, Any]:\n conn_dict: Dict[str, Any] = {}\n addr = self.get_connection_addr()\n assert addr is not None\n conn_dict['host'] = addr[0]\n conn_dict['port'] = addr[1]\n params = self.get_connection_params()\n for k in (\n 'user',\n 'password',\n 'database',\n 'ssl',\n 'sslmode',\n 'server_settings',\n ):\n v = getattr(params, k)\n if v is not None:\n conn_dict[k] = v\n\n cluster_settings = conn_dict.get('server_settings', {})\n\n edgedb_settings = {\n 'client_encoding': 'utf-8',\n 'search_path': 'edgedb',\n 'timezone': 'UTC',\n 'intervalstyle': 'iso_8601',\n 'jit': 'off',\n }\n\n conn_dict['server_settings'] = {**cluster_settings, **edgedb_settings}\n\n return conn_dict\n\n def _get_connection_addr(self) -> Optional[Tuple[str, int]]:\n return self._connection_addr\n\n def is_managed(self) -> bool:\n raise NotImplementedError\n\n async def get_status(self) -> str:\n raise NotImplementedError\n\n async def dump_database(\n self,\n dbname: str,\n *,\n exclude_schemas: Iterable[str] = (),\n dump_object_owners: bool = True,\n ) -> bytes:\n status = await self.get_status()\n if status != 'running':\n raise ClusterError('cannot dump: cluster is not running')\n\n if self._pg_bin_dir is None:\n await self.lookup_postgres()\n pg_dump = self._find_pg_binary('pg_dump')\n conn_spec = self.get_connection_spec()\n\n args = [\n pg_dump,\n '--inserts',\n f'--dbname={dbname}',\n f'--host={conn_spec[\"host\"]}',\n f'--port={conn_spec[\"port\"]}',\n f'--username={conn_spec[\"user\"]}',\n ]\n\n if not dump_object_owners:\n args.append('--no-owner')\n\n env = os.environ.copy()\n if conn_spec.get(\"password\"):\n env['PGPASSWORD'] = conn_spec[\"password\"]\n\n if exclude_schemas:\n for exclude_schema in exclude_schemas:\n args.append(f'--exclude-schema={exclude_schema}')\n\n stdout_lines, _, _ = await _run_logged_subprocess(\n args,\n logger=pg_dump_logger,\n log_stdout=False,\n env=env,\n )\n return b'\\n'.join(stdout_lines)\n\n def _find_pg_binary(self, binary: str) -> str:\n assert self._pg_bin_dir is not None\n bpath = self._pg_bin_dir / binary\n if not bpath.is_file():\n raise ClusterError(\n 'could not find {} executable: '.format(binary) +\n '{!r} does not exist or is not a file'.format(bpath))\n\n return str(bpath)\n\n def _subprocess_error(\n self,\n name: str,\n exitcode: int,\n stderr: Optional[bytes],\n ) -> ClusterError:\n if stderr:\n return ClusterError(\n f'{name} exited with status {exitcode}:\\n'\n + textwrap.indent(stderr.decode(), ' ' * 4),\n )\n else:\n return ClusterError(\n f'{name} exited with status {exitcode}',\n )\n\n async def lookup_postgres(self) -> None:\n self._pg_bin_dir = await get_pg_bin_dir()\n\n\nclass Cluster(BaseCluster):\n def __init__(\n self,\n data_dir: pathlib.Path,\n *,\n runstate_dir: Optional[pathlib.Path] = None,\n instance_params: Optional[BackendInstanceParams] = None,\n log_level: str = 'i',\n ):\n super().__init__(instance_params=instance_params)\n self._data_dir = data_dir\n self._runstate_dir = (\n runstate_dir if runstate_dir is not None else data_dir)\n self._daemon_pid: Optional[int] = None\n self._daemon_process: Optional[asyncio.subprocess.Process] = None\n self._daemon_supervisor: Optional[supervisor.Supervisor] = None\n self._log_level = log_level\n\n def is_managed(self) -> bool:\n return True\n\n def get_data_dir(self) -> pathlib.Path:\n return self._data_dir\n\n async def get_status(self) -> str:\n stdout_lines, stderr_lines, exit_code = (\n await _run_logged_text_subprocess(\n [self._pg_ctl, 'status', '-D', str(self._data_dir)],\n logger=pg_ctl_logger,\n check=False,\n )\n )\n\n if (\n exit_code == 4\n or not os.path.exists(self._data_dir)\n or not os.listdir(self._data_dir)\n ):\n return 'not-initialized'\n elif exit_code == 3:\n return 'stopped'\n elif exit_code == 0:\n output = '\\n'.join(stdout_lines)\n r = re.match(r'.*PID\\s?:\\s+(\\d+).*', output)\n if not r:\n raise ClusterError(\n f'could not parse pg_ctl status output: {output}')\n self._daemon_pid = int(r.group(1))\n if self._connection_addr is None:\n self._connection_addr = self._connection_addr_from_pidfile()\n return 'running'\n else:\n stderr_text = '\\n'.join(stderr_lines)\n raise ClusterError(\n f'`pg_ctl status` exited with status {exit_code}:\\n'\n + textwrap.indent(stderr_text, ' ' * 4),\n )\n\n async def ensure_initialized(self, **settings: Any) -> bool:\n cluster_status = await self.get_status()\n\n if cluster_status == 'not-initialized':\n logger.info(\n 'Initializing database cluster in %s', self._data_dir)\n\n instance_params = self.get_runtime_params().instance_params\n capabilities = instance_params.capabilities\n have_c_utf8 = (\n capabilities & BackendCapabilities.C_UTF8_LOCALE)\n await self.init(\n username='postgres',\n locale='C.UTF-8' if have_c_utf8 else 'en_US.UTF-8',\n lc_collate='C',\n encoding='UTF8',\n )\n self.reset_hba()\n self.add_hba_entry(\n type='local',\n database='all',\n user='postgres',\n auth_method='trust'\n )\n return True\n else:\n return False\n\n async def init(self, **settings: str) -> None:\n \"\"\"Initialize cluster.\"\"\"\n if await self.get_status() != 'not-initialized':\n raise ClusterError(\n 'cluster in {!r} has already been initialized'.format(\n self._data_dir))\n\n if settings:\n settings_args = ['--{}={}'.format(k.replace('_', '-'), v)\n for k, v in settings.items()]\n extra_args = ['-o'] + [' '.join(settings_args)]\n else:\n extra_args = []\n\n await _run_logged_subprocess(\n [self._pg_ctl, 'init', '-D', str(self._data_dir)] + extra_args,\n logger=initdb_logger,\n )\n\n async def start(\n self,\n wait: int = 60,\n *,\n server_settings: Optional[Mapping[str, str]] = None,\n **opts: str,\n ) -> None:\n \"\"\"Start the cluster.\"\"\"\n status = await self.get_status()\n if status == 'running':\n return\n elif status == 'not-initialized':\n raise ClusterError(\n 'cluster in {!r} has not been initialized'.format(\n self._data_dir))\n\n extra_args = ['--{}={}'.format(k, v) for k, v in opts.items()]\n\n start_settings = {\n 'listen_addresses': '', # we use Unix sockets\n 'unix_socket_permissions': '0700',\n 'unix_socket_directories': str(self._runstate_dir),\n # here we are not setting superuser_reserved_connections because\n # we're using superuser only now (so all connections available),\n # and we don't support reserving connections for now\n 'max_connections': str(self._instance_params.max_connections),\n # From Postgres docs:\n #\n # You might need to raise this value if you have queries that\n # touch many different tables in a single transaction, e.g.,\n # query of a parent table with many children.\n #\n # EdgeDB queries might touch _lots_ of tables, especially in deep\n # inheritance hierarchies. This is especially important in low\n # `max_connections` scenarios.\n 'max_locks_per_transaction': 256,\n }\n\n if os.getenv('EDGEDB_DEBUG_PGSERVER'):\n start_settings['log_min_messages'] = 'info'\n start_settings['log_statement'] = 'all'\n else:\n log_level_map = {\n 'd': 'INFO',\n 'i': 'NOTICE',\n 'w': 'WARNING',\n 'e': 'ERROR',\n 's': 'PANIC',\n }\n start_settings['log_min_messages'] = log_level_map[self._log_level]\n start_settings['log_statement'] = 'none'\n start_settings['log_line_prefix'] = ''\n\n if server_settings:\n start_settings.update(server_settings)\n\n ssl_key = start_settings.get('ssl_key_file')\n if ssl_key:\n # Make sure server certificate key file has correct permissions.\n keyfile = os.path.join(self._data_dir, 'srvkey.pem')\n assert isinstance(ssl_key, str)\n shutil.copy(ssl_key, keyfile)\n os.chmod(keyfile, 0o600)\n start_settings['ssl_key_file'] = keyfile\n\n for k, v in start_settings.items():\n extra_args.extend(['-c', '{}={}'.format(k, v)])\n\n self._daemon_process, *loggers = await _start_logged_subprocess(\n [self._postgres, '-D', str(self._data_dir), *extra_args],\n capture_stdout=False,\n capture_stderr=False,\n logger=postgres_logger,\n log_processor=postgres_log_processor,\n )\n self._daemon_pid = self._daemon_process.pid\n\n sup = await supervisor.Supervisor.create(name=\"postgres loggers\")\n for logger_coro in loggers:\n sup.create_task(logger_coro)\n self._daemon_supervisor = sup\n\n await self._test_connection(timeout=wait)\n\n async def reload(self) -> None:\n \"\"\"Reload server configuration.\"\"\"\n status = await self.get_status()\n if status != 'running':\n raise ClusterError('cannot reload: cluster is not running')\n\n await _run_logged_subprocess(\n [self._pg_ctl, 'reload', '-D', str(self._data_dir)],\n logger=pg_ctl_logger,\n )\n\n async def stop(self, wait: int = 60) -> None:\n await _run_logged_subprocess(\n [\n self._pg_ctl,\n 'stop', '-D', str(self._data_dir),\n '-t', str(wait), '-m', 'fast'\n ],\n logger=pg_ctl_logger,\n )\n\n if (\n self._daemon_process is not None and\n self._daemon_process.returncode is None\n ):\n self._daemon_process.terminate()\n await asyncio.wait_for(self._daemon_process.wait(), timeout=wait)\n\n if self._daemon_supervisor is not None:\n await self._daemon_supervisor.cancel()\n self._daemon_supervisor = None\n\n def destroy(self) -> None:\n shutil.rmtree(self._data_dir)\n\n def reset_hba(self) -> None:\n \"\"\"Remove all records from pg_hba.conf.\"\"\"\n pg_hba = os.path.join(self._data_dir, 'pg_hba.conf')\n\n try:\n with open(pg_hba, 'w'):\n pass\n except IOError as e:\n raise ClusterError(\n 'cannot modify HBA records: {}'.format(e)) from e\n\n def add_hba_entry(\n self,\n *,\n type: str = 'host',\n database: str,\n user: str,\n address: Optional[str] = None,\n auth_method: str,\n auth_options: Optional[Mapping[str, Any]] = None,\n ) -> None:\n \"\"\"Add a record to pg_hba.conf.\"\"\"\n if type not in {'local', 'host', 'hostssl', 'hostnossl'}:\n raise ValueError('invalid HBA record type: {!r}'.format(type))\n\n pg_hba = os.path.join(self._data_dir, 'pg_hba.conf')\n\n record = '{} {} {}'.format(type, database, user)\n\n if type != 'local':\n if address is None:\n raise ValueError(\n '{!r} entry requires a valid address'.format(type))\n else:\n record += ' {}'.format(address)\n\n record += ' {}'.format(auth_method)\n\n if auth_options is not None:\n record += ' ' + ' '.join(\n '{}={}'.format(k, v) for k, v in auth_options.items())\n\n try:\n with open(pg_hba, 'a') as f:\n print(record, file=f)\n except IOError as e:\n raise ClusterError(\n 'cannot modify HBA records: {}'.format(e)) from e\n\n async def trust_local_connections(self) -> None:\n self.reset_hba()\n\n self.add_hba_entry(type='local', database='all',\n user='all', auth_method='trust')\n self.add_hba_entry(type='host', address='127.0.0.1/32',\n database='all', user='all',\n auth_method='trust')\n self.add_hba_entry(type='host', address='::1/128',\n database='all', user='all',\n auth_method='trust')\n status = await self.get_status()\n if status == 'running':\n await self.reload()\n\n async def lookup_postgres(self) -> None:\n await super().lookup_postgres()\n self._pg_ctl = self._find_pg_binary('pg_ctl')\n self._postgres = self._find_pg_binary('postgres')\n\n def _get_connection_addr(self) -> Tuple[str, int]:\n if self._connection_addr is None:\n self._connection_addr = self._connection_addr_from_pidfile()\n\n return self._connection_addr\n\n def _connection_addr_from_pidfile(self) -> Tuple[str, int]:\n pidfile = os.path.join(self._data_dir, 'postmaster.pid')\n\n try:\n with open(pidfile, 'rt') as f:\n piddata = f.read()\n except FileNotFoundError:\n raise PostgresPidFileNotReadyError\n\n lines = piddata.splitlines()\n\n if len(lines) < 6:\n # A complete postgres pidfile is at least 6 lines\n raise PostgresPidFileNotReadyError\n\n pmpid = int(lines[0])\n if self._daemon_pid and pmpid != self._daemon_pid:\n # This might be an old pidfile left from previous postgres\n # daemon run.\n raise PostgresPidFileNotReadyError\n\n portnum = int(lines[3])\n sockdir = lines[4]\n hostaddr = lines[5]\n\n if sockdir:\n if sockdir[0] != '/':\n # Relative sockdir\n sockdir = os.path.normpath(\n os.path.join(self._data_dir, sockdir))\n host_str = sockdir\n elif hostaddr:\n host_str = hostaddr\n else:\n raise PostgresPidFileNotReadyError\n\n if host_str == '*':\n host_str = 'localhost'\n elif host_str == '0.0.0.0':\n host_str = '127.0.0.1'\n elif host_str == '::':\n host_str = '::1'\n\n return (host_str, portnum)\n\n async def _test_connection(self, timeout: int = 60) -> str:\n self._connection_addr = None\n connected = False\n\n for n in range(timeout + 1):\n # pg usually comes up pretty quickly, but not so\n # quickly that we don't hit the wait case. Make our\n # first sleep pretty short, to shave almost a second\n # off the happy case.\n sleep_time = 1 if n else 0.10\n\n try:\n conn_addr = self._get_connection_addr()\n except PostgresPidFileNotReadyError:\n time.sleep(sleep_time)\n continue\n\n try:\n con = await asyncpg.connect(\n database='postgres',\n user='postgres',\n timeout=5,\n host=conn_addr[0],\n port=conn_addr[1],\n )\n except (\n OSError,\n asyncio.TimeoutError,\n asyncpg.CannotConnectNowError,\n asyncpg.PostgresConnectionError,\n ):\n time.sleep(sleep_time)\n continue\n except asyncpg.PostgresError:\n # Any other error other than ServerNotReadyError or\n # ConnectionError is interpreted to indicate the server is\n # up.\n break\n else:\n connected = True\n await con.close()\n break\n\n if connected:\n return 'running'\n else:\n return 'not-initialized'\n\n\nclass RemoteCluster(BaseCluster):\n def __init__(\n self,\n addr: Tuple[str, int],\n params: pgconnparams.ConnectionParameters,\n *,\n instance_params: Optional[BackendInstanceParams] = None,\n ha_backend: Optional[ha_base.HABackend] = None,\n ):\n super().__init__(instance_params=instance_params)\n self._connection_addr = addr\n self._connection_params = params\n self._ha_backend = ha_backend\n\n def _get_connection_addr(self) -> Optional[Tuple[str, int]]:\n if self._ha_backend is not None:\n return self._ha_backend.get_master_addr()\n return self._connection_addr\n\n async def ensure_initialized(self, **settings: Any) -> bool:\n return False\n\n def is_managed(self) -> bool:\n return False\n\n async def get_status(self) -> str:\n return 'running'\n\n def init(self, **settings: str) -> str:\n pass\n\n async def start(\n self,\n wait: int = 60,\n *,\n server_settings: Optional[Mapping[str, str]] = None,\n **opts: Any,\n ) -> None:\n pass\n\n async def stop(self, wait: int = 60) -> None:\n pass\n\n def destroy(self) -> None:\n pass\n\n def reset_hba(self) -> None:\n raise ClusterError('cannot modify HBA records of unmanaged cluster')\n\n def add_hba_entry(\n self,\n *,\n type: str = 'host',\n database: str,\n user: str,\n address: Optional[str] = None,\n auth_method: str,\n auth_options: Optional[Mapping[str, Any]] = None,\n ) -> None:\n raise ClusterError('cannot modify HBA records of unmanaged cluster')\n\n async def start_watching(\n self, cluster_protocol: Optional[ha_base.ClusterProtocol] = None\n ) -> None:\n if self._ha_backend is not None:\n await self._ha_backend.start_watching(cluster_protocol)\n\n def stop_watching(self) -> None:\n if self._ha_backend is not None:\n self._ha_backend.stop_watching()\n\n\nasync def get_pg_bin_dir() -> pathlib.Path:\n pg_config_data = await get_pg_config()\n pg_bin_dir = pg_config_data.get('bindir')\n if not pg_bin_dir:\n raise ClusterError(\n 'pg_config output did not provide the BINDIR value')\n return pathlib.Path(pg_bin_dir)\n\n\nasync def get_pg_config() -> Dict[str, str]:\n stdout_lines, _, _ = await _run_logged_text_subprocess(\n [str(buildmeta.get_pg_config_path())],\n logger=pg_config_logger,\n )\n\n config = {}\n for line in stdout_lines:\n k, eq, v = line.partition('=')\n if eq:\n config[k.strip().lower()] = v.strip()\n\n return config\n\n\nasync def get_local_pg_cluster(\n data_dir: pathlib.Path,\n *,\n runstate_dir: Optional[pathlib.Path] = None,\n max_connections: Optional[int] = None,\n tenant_id: Optional[str] = None,\n log_level: Optional[str] = None,\n) -> Cluster:\n if log_level is None:\n log_level = 'i'\n if tenant_id is None:\n tenant_id = buildmeta.get_default_tenant_id()\n instance_params = None\n if max_connections is not None:\n instance_params = get_default_runtime_params(\n max_connections=max_connections,\n tenant_id=tenant_id,\n ).instance_params\n cluster = Cluster(\n data_dir=data_dir,\n runstate_dir=runstate_dir,\n instance_params=instance_params,\n log_level=log_level,\n )\n await cluster.lookup_postgres()\n return cluster\n\n\nasync def get_remote_pg_cluster(\n dsn: str,\n *,\n tenant_id: Optional[str] = None,\n) -> RemoteCluster:\n parsed = urllib.parse.urlparse(dsn)\n ha_backend = None\n\n if parsed.scheme not in {'postgresql', 'postgres'}:\n ha_backend = ha_base.get_backend(parsed)\n if ha_backend is None:\n raise ValueError(\n 'invalid DSN: scheme is expected to be \"postgresql\", '\n '\"postgres\" or one of the supported HA backend, '\n 'got {!r}'.format(parsed.scheme))\n\n addr = await ha_backend.get_cluster_consensus()\n dsn = 'postgresql://{}:{}'.format(*addr)\n\n addrs, params = pgconnparams.parse_dsn(dsn)\n if len(addrs) > 1:\n raise ValueError('multiple hosts in Postgres DSN are not supported')\n if tenant_id is None:\n t_id = buildmeta.get_default_tenant_id()\n else:\n t_id = tenant_id\n rcluster = RemoteCluster(addrs[0], params)\n\n async def _get_cluster_type(\n conn: asyncpg.Connection,\n ) -> Tuple[Type[RemoteCluster], Optional[str]]:\n managed_clouds = {\n 'rds_superuser': RemoteCluster, # Amazon RDS\n 'cloudsqlsuperuser': RemoteCluster, # GCP Cloud SQL\n }\n\n managed_cloud_super = await conn.fetchval(\n \"\"\"\n SELECT\n rolname\n FROM\n pg_roles\n WHERE\n rolname = any($1::text[])\n LIMIT\n 1\n \"\"\",\n list(managed_clouds),\n )\n\n if managed_cloud_super is not None:\n return managed_clouds[managed_cloud_super], managed_cloud_super\n else:\n return RemoteCluster, None\n\n async def _detect_capabilities(\n conn: asyncpg.Connection,\n ) -> BackendCapabilities:\n caps = BackendCapabilities.NONE\n\n try:\n await conn.execute(f'ALTER SYSTEM SET foo = 10')\n except asyncpg.InsufficientPrivilegeError:\n configfile_access = False\n except asyncpg.UndefinedObjectError:\n configfile_access = True\n else:\n configfile_access = True\n\n if configfile_access:\n caps |= BackendCapabilities.CONFIGFILE_ACCESS\n\n tx = conn.transaction()\n await tx.start()\n rname = str(uuidgen.uuid1mc())\n\n try:\n await conn.execute(f'CREATE ROLE \"{rname}\" WITH SUPERUSER')\n except asyncpg.InsufficientPrivilegeError:\n can_make_superusers = False\n else:\n can_make_superusers = True\n finally:\n await tx.rollback()\n\n if can_make_superusers:\n caps |= BackendCapabilities.SUPERUSER_ACCESS\n\n coll = await conn.fetchval('''\n SELECT collname FROM pg_collation\n WHERE lower(replace(collname, '-', '')) = 'c.utf8' LIMIT 1;\n ''')\n\n if coll is not None:\n caps |= BackendCapabilities.C_UTF8_LOCALE\n\n return caps\n\n async def _get_pg_settings(\n conn: asyncpg.Connection,\n name: str,\n ) -> str:\n return await conn.fetchval( # type: ignore\n 'SELECT setting FROM pg_settings WHERE name = $1', name\n )\n\n async def _get_reserved_connections(\n conn: asyncpg.Connection,\n ) -> int:\n rv = int(\n await _get_pg_settings(conn, 'superuser_reserved_connections')\n )\n for name in [\n 'rds.rds_superuser_reserved_connections',\n ]:\n value = await _get_pg_settings(conn, name)\n if value:\n rv += int(value)\n return rv\n\n conn = await rcluster.connect()\n try:\n cluster_type, superuser_name = await _get_cluster_type(conn)\n max_connections = await _get_pg_settings(conn, 'max_connections')\n instance_params = BackendInstanceParams(\n capabilities=await _detect_capabilities(conn),\n base_superuser=superuser_name,\n max_connections=int(max_connections),\n reserved_connections=await _get_reserved_connections(conn),\n tenant_id=t_id,\n )\n finally:\n await conn.close()\n\n return cluster_type(\n addrs[0],\n params,\n instance_params=instance_params,\n ha_backend=ha_backend,\n )\n\n\nasync def _run_logged_text_subprocess(\n args: Sequence[str],\n logger: logging.Logger,\n level: int = logging.DEBUG,\n check: bool = True,\n log_stdout: bool = True,\n timeout: Optional[float] = None,\n **kwargs: Any,\n) -> Tuple[List[str], List[str], int]:\n stdout_lines, stderr_lines, exit_code = await _run_logged_subprocess(\n args,\n logger=logger,\n level=level,\n check=check,\n log_stdout=log_stdout,\n timeout=timeout,\n **kwargs,\n )\n\n return (\n [line.decode() for line in stdout_lines],\n [line.decode() for line in stderr_lines],\n exit_code,\n )\n\n\nasync def _run_logged_subprocess(\n args: Sequence[str],\n logger: logging.Logger,\n level: int = logging.DEBUG,\n check: bool = True,\n log_stdout: bool = True,\n log_stderr: bool = True,\n capture_stdout: bool = True,\n capture_stderr: bool = True,\n timeout: Optional[float] = None,\n **kwargs: Any,\n) -> Tuple[List[bytes], List[bytes], int]:\n process, stdout_reader, stderr_reader = await _start_logged_subprocess(\n args,\n logger=logger,\n level=level,\n log_stdout=log_stdout,\n log_stderr=log_stderr,\n capture_stdout=capture_stdout,\n capture_stderr=capture_stderr,\n **kwargs,\n )\n\n exit_code, stdout_lines, stderr_lines = await asyncio.wait_for(\n asyncio.gather(process.wait(), stdout_reader, stderr_reader),\n timeout=timeout,\n )\n\n if exit_code != 0 and check:\n stderr_text = b'\\n'.join(stderr_lines).decode()\n raise ClusterError(\n f'{args[0]} exited with status {exit_code}:\\n'\n + textwrap.indent(stderr_text, ' ' * 4),\n )\n else:\n return stdout_lines, stderr_lines, exit_code\n\n\nasync def _start_logged_subprocess(\n args: Sequence[str],\n *,\n logger: logging.Logger,\n level: int = logging.DEBUG,\n log_stdout: bool = True,\n log_stderr: bool = True,\n capture_stdout: bool = True,\n capture_stderr: bool = True,\n log_processor: Optional[Callable[[str], Tuple[str, int]]] = None,\n **kwargs: Any,\n) -> Tuple[\n asyncio.subprocess.Process,\n Coroutine[Any, Any, List[bytes]],\n Coroutine[Any, Any, List[bytes]],\n]:\n logger.log(\n level,\n f'running `{\" \".join(shlex.quote(arg) for arg in args)}`'\n )\n\n process = await asyncio.create_subprocess_exec(\n *args,\n stdout=(\n asyncio.subprocess.PIPE if log_stdout or capture_stdout\n else asyncio.subprocess.DEVNULL\n ),\n stderr=(\n asyncio.subprocess.PIPE if log_stderr or capture_stderr\n else asyncio.subprocess.DEVNULL\n ),\n **kwargs,\n )\n\n assert process.stderr is not None\n assert process.stdout is not None\n\n if log_stderr and capture_stderr:\n stderr_reader = _capture_and_log_subprocess_output(\n process.pid,\n process.stderr,\n logger,\n level,\n log_processor,\n )\n elif capture_stderr:\n stderr_reader = _capture_subprocess_output(process.stderr)\n elif log_stderr:\n stderr_reader = _log_subprocess_output(\n process.pid, process.stderr, logger, level, log_processor)\n else:\n stderr_reader = _dummy()\n\n if log_stdout and capture_stdout:\n stdout_reader = _capture_and_log_subprocess_output(\n process.pid,\n process.stdout,\n logger,\n level,\n log_processor,\n )\n elif capture_stdout:\n stdout_reader = _capture_subprocess_output(process.stdout)\n elif log_stdout:\n stdout_reader = _log_subprocess_output(\n process.pid, process.stdout, logger, level, log_processor)\n else:\n stdout_reader = _dummy()\n\n return process, stdout_reader, stderr_reader\n\n\nasync def _capture_subprocess_output(\n stream: asyncio.StreamReader,\n) -> List[bytes]:\n lines = []\n while not stream.at_eof():\n line = await stream.readline()\n if line or not stream.at_eof():\n lines.append(line.rstrip(b'\\n'))\n return lines\n\n\nasync def _capture_and_log_subprocess_output(\n pid: int,\n stream: asyncio.StreamReader,\n logger: logging.Logger,\n level: int,\n log_processor: Optional[Callable[[str], Tuple[str, int]]] = None,\n) -> List[bytes]:\n lines = []\n while not stream.at_eof():\n line = await stream.readline()\n if line or not stream.at_eof():\n line = line.rstrip(b'\\n')\n lines.append(line)\n log_line = line.decode()\n if log_processor is not None:\n log_line, level = log_processor(log_line)\n logger.log(level, log_line, extra={\"process\": pid})\n return lines\n\n\nasync def _log_subprocess_output(\n pid: int,\n stream: asyncio.StreamReader,\n logger: logging.Logger,\n level: int,\n log_processor: Optional[Callable[[str], Tuple[str, int]]] = None,\n) -> List[bytes]:\n while not stream.at_eof():\n line = await stream.readline()\n if line or not stream.at_eof():\n log_line = line.rstrip(b'\\n').decode()\n if log_processor is not None:\n log_line, level = log_processor(log_line)\n logger.log(level, log_line, extra={\"process\": pid})\n return []\n\n\nasync def _dummy() -> List[bytes]:\n return []\n\n\npostgres_to_python_level_map = {\n \"DEBUG5\": logging.DEBUG,\n \"DEBUG4\": logging.DEBUG,\n \"DEBUG3\": logging.DEBUG,\n \"DEBUG2\": logging.DEBUG,\n \"DEBUG1\": logging.DEBUG,\n \"INFO\": logging.INFO,\n \"NOTICE\": logging.INFO,\n \"LOG\": logging.INFO,\n \"WARNING\": logging.WARNING,\n \"ERROR\": logging.ERROR,\n \"FATAL\": logging.CRITICAL,\n \"PANIC\": logging.CRITICAL,\n}\n\npostgres_log_re = re.compile(r'^(\\w+):\\s*(.*)$')\n\npostgres_specific_msg_level_map = {\n \"terminating connection due to administrator command\": logging.INFO,\n \"the database system is shutting down\": logging.INFO,\n}\n\n\ndef postgres_log_processor(msg: str) -> Tuple[str, int]:\n if m := postgres_log_re.match(msg):\n postgres_level = m.group(1)\n msg = m.group(2)\n level = postgres_specific_msg_level_map.get(\n msg,\n postgres_to_python_level_map.get(postgres_level, logging.INFO),\n )\n else:\n level = logging.INFO\n\n return msg, level\n", "path": "edb/server/pgcluster.py"}]} |
gh_patches_debug_1536 | rasdani/github-patches | git_diff | pydantic__pydantic-391 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Include a PEP 561 marker file
# Feature Request
Hi,
The new version 0.19 has improved typing support which is great, but looks like it doesn't work out of the box. I had similar problems as described in #245 , but after adding the installation to MYPYPATH it works fine.
I think a PEP 561 marker file `py.typed` should be added so that tools like mypy can utilize the inline type information without any configuration. Reading mypy docs looks like there is a downside that `zip_safe` must be disabled for this.
https://mypy.readthedocs.io/en/latest/installed_packages.html
https://www.python.org/dev/peps/pep-0561/
Include a PEP 561 marker file
# Feature Request
Hi,
The new version 0.19 has improved typing support which is great, but looks like it doesn't work out of the box. I had similar problems as described in #245 , but after adding the installation to MYPYPATH it works fine.
I think a PEP 561 marker file `py.typed` should be added so that tools like mypy can utilize the inline type information without any configuration. Reading mypy docs looks like there is a downside that `zip_safe` must be disabled for this.
https://mypy.readthedocs.io/en/latest/installed_packages.html
https://www.python.org/dev/peps/pep-0561/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import re
2 from importlib.machinery import SourceFileLoader
3 from pathlib import Path
4 from setuptools import setup
5
6
7 class ReplaceLinks:
8 def __init__(self):
9 self.links = set()
10
11 def replace_issues(self, m):
12 id = m.group(1)
13 self.links.add(f'.. _#{id}: https://github.com/samuelcolvin/pydantic/issues/{id}')
14 return f'`#{id}`_'
15
16 def replace_users(self, m):
17 name = m.group(2)
18 self.links.add(f'.. _@{name}: https://github.com/{name}')
19 return f'{m.group(1)}`@{name}`_'
20
21 def extra(self):
22 return '\n\n' + '\n'.join(self.links) + '\n'
23
24
25 description = 'Data validation and settings management using python 3.6 type hinting'
26 THIS_DIR = Path(__file__).resolve().parent
27 try:
28 history = THIS_DIR.joinpath('HISTORY.rst').read_text()
29
30 replacer = ReplaceLinks()
31 history = re.sub(r'#(\d+)', replacer.replace_issues, history)
32 history = re.sub(r'( +)@(\w+)', replacer.replace_users, history, flags=re.I)
33 history = re.sub(r'@@', '@', history)
34 history += replacer.extra()
35
36 long_description = '\n\n'.join([THIS_DIR.joinpath('README.rst').read_text(), history])
37 except FileNotFoundError:
38 long_description = description + '.\n\nSee https://pydantic-docs.helpmanual.io/ for documentation.'
39
40 # avoid loading the package before requirements are installed:
41 version = SourceFileLoader('version', 'pydantic/version.py').load_module()
42
43 setup(
44 name='pydantic',
45 version=str(version.VERSION),
46 description=description,
47 long_description=long_description,
48 classifiers=[
49 'Development Status :: 5 - Production/Stable',
50 'Programming Language :: Python',
51 'Programming Language :: Python :: 3',
52 'Programming Language :: Python :: 3 :: Only',
53 'Programming Language :: Python :: 3.6',
54 'Programming Language :: Python :: 3.7',
55 'Intended Audience :: Developers',
56 'Intended Audience :: Information Technology',
57 'Intended Audience :: System Administrators',
58 'License :: OSI Approved :: MIT License',
59 'Operating System :: Unix',
60 'Operating System :: POSIX :: Linux',
61 'Environment :: Console',
62 'Environment :: MacOS X',
63 'Topic :: Software Development :: Libraries :: Python Modules',
64 'Topic :: Internet',
65 ],
66 author='Samuel Colvin',
67 author_email='[email protected]',
68 url='https://github.com/samuelcolvin/pydantic',
69 license='MIT',
70 packages=['pydantic'],
71 python_requires='>=3.6',
72 zip_safe=True,
73 install_requires=[
74 'dataclasses>=0.6;python_version<"3.7"'
75 ],
76 extras_require={
77 'ujson': ['ujson>=1.35'],
78 'email': ['email-validator>=1.0.3'],
79 }
80 )
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -68,8 +68,9 @@
url='https://github.com/samuelcolvin/pydantic',
license='MIT',
packages=['pydantic'],
+ package_data={'pydantic': ['py.typed']},
python_requires='>=3.6',
- zip_safe=True,
+ zip_safe=False, # https://mypy.readthedocs.io/en/latest/installed_packages.html
install_requires=[
'dataclasses>=0.6;python_version<"3.7"'
],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -68,8 +68,9 @@\n url='https://github.com/samuelcolvin/pydantic',\n license='MIT',\n packages=['pydantic'],\n+ package_data={'pydantic': ['py.typed']},\n python_requires='>=3.6',\n- zip_safe=True,\n+ zip_safe=False, # https://mypy.readthedocs.io/en/latest/installed_packages.html\n install_requires=[\n 'dataclasses>=0.6;python_version<\"3.7\"'\n ],\n", "issue": "Include a PEP 561 marker file\n# Feature Request\r\n\r\nHi,\r\n\r\nThe new version 0.19 has improved typing support which is great, but looks like it doesn't work out of the box. I had similar problems as described in #245 , but after adding the installation to MYPYPATH it works fine.\r\n\r\nI think a PEP 561 marker file `py.typed` should be added so that tools like mypy can utilize the inline type information without any configuration. Reading mypy docs looks like there is a downside that `zip_safe` must be disabled for this.\r\n\r\nhttps://mypy.readthedocs.io/en/latest/installed_packages.html\r\nhttps://www.python.org/dev/peps/pep-0561/\nInclude a PEP 561 marker file\n# Feature Request\r\n\r\nHi,\r\n\r\nThe new version 0.19 has improved typing support which is great, but looks like it doesn't work out of the box. I had similar problems as described in #245 , but after adding the installation to MYPYPATH it works fine.\r\n\r\nI think a PEP 561 marker file `py.typed` should be added so that tools like mypy can utilize the inline type information without any configuration. Reading mypy docs looks like there is a downside that `zip_safe` must be disabled for this.\r\n\r\nhttps://mypy.readthedocs.io/en/latest/installed_packages.html\r\nhttps://www.python.org/dev/peps/pep-0561/\n", "before_files": [{"content": "import re\nfrom importlib.machinery import SourceFileLoader\nfrom pathlib import Path\nfrom setuptools import setup\n\n\nclass ReplaceLinks:\n def __init__(self):\n self.links = set()\n\n def replace_issues(self, m):\n id = m.group(1)\n self.links.add(f'.. _#{id}: https://github.com/samuelcolvin/pydantic/issues/{id}')\n return f'`#{id}`_'\n\n def replace_users(self, m):\n name = m.group(2)\n self.links.add(f'.. _@{name}: https://github.com/{name}')\n return f'{m.group(1)}`@{name}`_'\n\n def extra(self):\n return '\\n\\n' + '\\n'.join(self.links) + '\\n'\n\n\ndescription = 'Data validation and settings management using python 3.6 type hinting'\nTHIS_DIR = Path(__file__).resolve().parent\ntry:\n history = THIS_DIR.joinpath('HISTORY.rst').read_text()\n\n replacer = ReplaceLinks()\n history = re.sub(r'#(\\d+)', replacer.replace_issues, history)\n history = re.sub(r'( +)@(\\w+)', replacer.replace_users, history, flags=re.I)\n history = re.sub(r'@@', '@', history)\n history += replacer.extra()\n\n long_description = '\\n\\n'.join([THIS_DIR.joinpath('README.rst').read_text(), history])\nexcept FileNotFoundError:\n long_description = description + '.\\n\\nSee https://pydantic-docs.helpmanual.io/ for documentation.'\n\n# avoid loading the package before requirements are installed:\nversion = SourceFileLoader('version', 'pydantic/version.py').load_module()\n\nsetup(\n name='pydantic',\n version=str(version.VERSION),\n description=description,\n long_description=long_description,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Unix',\n 'Operating System :: POSIX :: Linux',\n 'Environment :: Console',\n 'Environment :: MacOS X',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Internet',\n ],\n author='Samuel Colvin',\n author_email='[email protected]',\n url='https://github.com/samuelcolvin/pydantic',\n license='MIT',\n packages=['pydantic'],\n python_requires='>=3.6',\n zip_safe=True,\n install_requires=[\n 'dataclasses>=0.6;python_version<\"3.7\"'\n ],\n extras_require={\n 'ujson': ['ujson>=1.35'],\n 'email': ['email-validator>=1.0.3'],\n }\n)\n", "path": "setup.py"}], "after_files": [{"content": "import re\nfrom importlib.machinery import SourceFileLoader\nfrom pathlib import Path\nfrom setuptools import setup\n\n\nclass ReplaceLinks:\n def __init__(self):\n self.links = set()\n\n def replace_issues(self, m):\n id = m.group(1)\n self.links.add(f'.. _#{id}: https://github.com/samuelcolvin/pydantic/issues/{id}')\n return f'`#{id}`_'\n\n def replace_users(self, m):\n name = m.group(2)\n self.links.add(f'.. _@{name}: https://github.com/{name}')\n return f'{m.group(1)}`@{name}`_'\n\n def extra(self):\n return '\\n\\n' + '\\n'.join(self.links) + '\\n'\n\n\ndescription = 'Data validation and settings management using python 3.6 type hinting'\nTHIS_DIR = Path(__file__).resolve().parent\ntry:\n history = THIS_DIR.joinpath('HISTORY.rst').read_text()\n\n replacer = ReplaceLinks()\n history = re.sub(r'#(\\d+)', replacer.replace_issues, history)\n history = re.sub(r'( +)@(\\w+)', replacer.replace_users, history, flags=re.I)\n history = re.sub(r'@@', '@', history)\n history += replacer.extra()\n\n long_description = '\\n\\n'.join([THIS_DIR.joinpath('README.rst').read_text(), history])\nexcept FileNotFoundError:\n long_description = description + '.\\n\\nSee https://pydantic-docs.helpmanual.io/ for documentation.'\n\n# avoid loading the package before requirements are installed:\nversion = SourceFileLoader('version', 'pydantic/version.py').load_module()\n\nsetup(\n name='pydantic',\n version=str(version.VERSION),\n description=description,\n long_description=long_description,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Unix',\n 'Operating System :: POSIX :: Linux',\n 'Environment :: Console',\n 'Environment :: MacOS X',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Internet',\n ],\n author='Samuel Colvin',\n author_email='[email protected]',\n url='https://github.com/samuelcolvin/pydantic',\n license='MIT',\n packages=['pydantic'],\n package_data={'pydantic': ['py.typed']},\n python_requires='>=3.6',\n zip_safe=False, # https://mypy.readthedocs.io/en/latest/installed_packages.html\n install_requires=[\n 'dataclasses>=0.6;python_version<\"3.7\"'\n ],\n extras_require={\n 'ujson': ['ujson>=1.35'],\n 'email': ['email-validator>=1.0.3'],\n }\n)\n", "path": "setup.py"}]} |
gh_patches_debug_1537 | rasdani/github-patches | git_diff | e-valuation__EvaP-1241 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Single result publishing fails
Single results can't be published because https://github.com/fsr-itse/EvaP/blob/master/evap/evaluation/models.py#L449 asserts `self._voter_count is None` which it is not for single results.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/evaluation/models.py`
Content:
```
1 from datetime import datetime, date, timedelta
2 import logging
3 import random
4 import uuid
5
6 from django.conf import settings
7 from django.contrib import messages
8 from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, Group, PermissionsMixin
9 from django.core.cache import caches
10 from django.core.exceptions import ValidationError
11 from django.core.mail import EmailMessage
12 from django.db import models, transaction
13 from django.db.models import Count, Q, Manager
14 from django.dispatch import Signal, receiver
15 from django.template import Context, Template
16 from django.template.base import TemplateSyntaxError
17 from django.utils.functional import cached_property
18 from django.utils.translation import ugettext_lazy as _
19 from django_fsm import FSMField, transition
20 from django_fsm.signals import post_transition
21 # see evaluation.meta for the use of Translate in this file
22 from evap.evaluation.meta import LocalizeModelBase, Translate
23 from evap.evaluation.tools import date_to_datetime, get_due_courses_for_user
24
25 logger = logging.getLogger(__name__)
26
27
28 class NotArchiveable(Exception):
29 """An attempt has been made to archive something that is not archiveable."""
30 pass
31
32
33 class Semester(models.Model, metaclass=LocalizeModelBase):
34 """Represents a semester, e.g. the winter term of 2011/2012."""
35
36 name_de = models.CharField(max_length=1024, unique=True, verbose_name=_("name (german)"))
37 name_en = models.CharField(max_length=1024, unique=True, verbose_name=_("name (english)"))
38 name = Translate
39
40 short_name_de = models.CharField(max_length=20, unique=True, verbose_name=_("short name (german)"))
41 short_name_en = models.CharField(max_length=20, unique=True, verbose_name=_("short name (english)"))
42 short_name = Translate
43
44 participations_are_archived = models.BooleanField(default=False, verbose_name=_("participations are archived"))
45 grade_documents_are_deleted = models.BooleanField(default=False, verbose_name=_("grade documents are deleted"))
46 results_are_archived = models.BooleanField(default=False, verbose_name=_("results are archived"))
47
48 created_at = models.DateField(verbose_name=_("created at"), auto_now_add=True)
49
50 class Meta:
51 ordering = ('-created_at', 'name_de')
52 verbose_name = _("semester")
53 verbose_name_plural = _("semesters")
54
55 def __str__(self):
56 return self.name
57
58 @property
59 def can_staff_delete(self):
60 return all(course.can_staff_delete for course in self.course_set.all())
61
62 @property
63 def participations_can_be_archived(self):
64 return not self.participations_are_archived and all(course.participations_can_be_archived for course in self.course_set.all())
65
66 @property
67 def grade_documents_can_be_deleted(self):
68 return not self.grade_documents_are_deleted
69
70 @property
71 def results_can_be_archived(self):
72 return not self.results_are_archived
73
74 @transaction.atomic
75 def archive_participations(self):
76 if not self.participations_can_be_archived:
77 raise NotArchiveable()
78 for course in self.course_set.all():
79 course._archive_participations()
80 self.participations_are_archived = True
81 self.save()
82
83 @transaction.atomic
84 def delete_grade_documents(self):
85 from evap.grades.models import GradeDocument
86
87 if not self.grade_documents_can_be_deleted:
88 raise NotArchiveable()
89 GradeDocument.objects.filter(course__semester=self).delete()
90 self.grade_documents_are_deleted = True
91 self.save()
92
93 def archive_results(self):
94 if not self.results_can_be_archived:
95 raise NotArchiveable()
96 self.results_are_archived = True
97 self.save()
98
99 @classmethod
100 def get_all_with_unarchived_results(cls):
101 return cls.objects.filter(results_are_archived=False).distinct()
102
103 @classmethod
104 def get_all_with_published_unarchived_results(cls):
105 return cls.objects.filter(course__state="published", results_are_archived=False).distinct()
106
107 @classmethod
108 def active_semester(cls):
109 return cls.objects.order_by("created_at").last()
110
111 @property
112 def is_active_semester(self):
113 return self == Semester.active_semester()
114
115
116 class QuestionnaireManager(Manager):
117 def course_questionnaires(self):
118 return super().get_queryset().exclude(type=Questionnaire.CONTRIBUTOR)
119
120 def contributor_questionnaires(self):
121 return super().get_queryset().filter(type=Questionnaire.CONTRIBUTOR)
122
123
124 class Questionnaire(models.Model, metaclass=LocalizeModelBase):
125 """A named collection of questions."""
126
127 TOP = 10
128 CONTRIBUTOR = 20
129 BOTTOM = 30
130 TYPE_CHOICES = (
131 (TOP, _('Top questionnaire')),
132 (CONTRIBUTOR, _('Contributor questionnaire')),
133 (BOTTOM, _('Bottom questionnaire')),
134 )
135 type = models.IntegerField(choices=TYPE_CHOICES, verbose_name=_('type'), default=TOP)
136
137 name_de = models.CharField(max_length=1024, unique=True, verbose_name=_("name (german)"))
138 name_en = models.CharField(max_length=1024, unique=True, verbose_name=_("name (english)"))
139 name = Translate
140
141 description_de = models.TextField(verbose_name=_("description (german)"), blank=True, null=True)
142 description_en = models.TextField(verbose_name=_("description (english)"), blank=True, null=True)
143 description = Translate
144
145 public_name_de = models.CharField(max_length=1024, verbose_name=_("display name (german)"))
146 public_name_en = models.CharField(max_length=1024, verbose_name=_("display name (english)"))
147 public_name = Translate
148
149 teaser_de = models.TextField(verbose_name=_("teaser (german)"), blank=True, null=True)
150 teaser_en = models.TextField(verbose_name=_("teaser (english)"), blank=True, null=True)
151 teaser = Translate
152
153 order = models.IntegerField(verbose_name=_("ordering index"), default=0)
154
155 staff_only = models.BooleanField(verbose_name=_("display for staff only"), default=False)
156 obsolete = models.BooleanField(verbose_name=_("obsolete"), default=False)
157
158 objects = QuestionnaireManager()
159
160 class Meta:
161 ordering = ('type', 'order', 'name_de')
162 verbose_name = _("questionnaire")
163 verbose_name_plural = _("questionnaires")
164
165 def __str__(self):
166 return self.name
167
168 def __lt__(self, other):
169 return (self.type, self.order, self.name_de) < (other.type, other.order, self.name_de)
170
171 def __gt__(self, other):
172 return (self.type, self.order, self.name_de) > (other.type, other.order, self.name_de)
173
174 @property
175 def is_above_contributors(self):
176 return self.type == self.TOP
177
178 @property
179 def is_below_contributors(self):
180 return self.type == self.BOTTOM
181
182 @property
183 def can_staff_edit(self):
184 return not self.contributions.exclude(course__state='new').exists()
185
186 @property
187 def can_staff_delete(self):
188 return not self.contributions.exists()
189
190 @property
191 def text_questions(self):
192 return [question for question in self.question_set.all() if question.is_text_question]
193
194 @property
195 def rating_questions(self):
196 return [question for question in self.question_set.all() if question.is_rating_question]
197
198 SINGLE_RESULT_QUESTIONNAIRE_NAME = "Single result"
199
200 @classmethod
201 def single_result_questionnaire(cls):
202 return cls.objects.get(name_en=cls.SINGLE_RESULT_QUESTIONNAIRE_NAME)
203
204
205 class Degree(models.Model, metaclass=LocalizeModelBase):
206 name_de = models.CharField(max_length=1024, verbose_name=_("name (german)"), unique=True)
207 name_en = models.CharField(max_length=1024, verbose_name=_("name (english)"), unique=True)
208 name = Translate
209
210 order = models.IntegerField(verbose_name=_("degree order"), default=-1)
211
212 class Meta:
213 ordering = ['order', ]
214
215 def __str__(self):
216 return self.name
217
218 def can_staff_delete(self):
219 if self.pk is None:
220 return True
221 return not self.courses.all().exists()
222
223
224 class CourseType(models.Model, metaclass=LocalizeModelBase):
225 """Model for the type of a course, e.g. a lecture"""
226
227 name_de = models.CharField(max_length=1024, verbose_name=_("name (german)"), unique=True)
228 name_en = models.CharField(max_length=1024, verbose_name=_("name (english)"), unique=True)
229 name = Translate
230
231 class Meta:
232 ordering = ['name_de', ]
233
234 def __str__(self):
235 return self.name
236
237 def __lt__(self, other):
238 return self.name_de < other.name_de
239
240 def can_staff_delete(self):
241 if not self.pk:
242 return True
243 return not self.courses.all().exists()
244
245
246 class Course(models.Model, metaclass=LocalizeModelBase):
247 """Models a single course, e.g. the Math 101 course of 2002."""
248
249 state = FSMField(default='new', protected=True)
250
251 semester = models.ForeignKey(Semester, models.PROTECT, verbose_name=_("semester"))
252
253 name_de = models.CharField(max_length=1024, verbose_name=_("name (german)"))
254 name_en = models.CharField(max_length=1024, verbose_name=_("name (english)"))
255 name = Translate
256
257 # type of course: lecture, seminar, project
258 type = models.ForeignKey(CourseType, models.PROTECT, verbose_name=_("course type"), related_name="courses")
259
260 is_single_result = models.BooleanField(verbose_name=_("is single result"), default=False)
261
262 # e.g. Bachelor, Master
263 degrees = models.ManyToManyField(Degree, verbose_name=_("degrees"), related_name="courses")
264
265 # default is True as that's the more restrictive option
266 is_graded = models.BooleanField(verbose_name=_("is graded"), default=True)
267
268 # defines whether results can only be seen by contributors and participants
269 is_private = models.BooleanField(verbose_name=_("is private"), default=False)
270
271 # grade publishers can set this to True, then the course will be handled as if final grades have already been uploaded
272 gets_no_grade_documents = models.BooleanField(verbose_name=_("gets no grade documents"), default=False)
273
274 # whether participants must vote to qualify for reward points
275 is_rewarded = models.BooleanField(verbose_name=_("is rewarded"), default=True)
276
277 # whether the evaluation does take place during the semester, stating that evaluation results will be published while the course is still running
278 is_midterm_evaluation = models.BooleanField(verbose_name=_("is midterm evaluation"), default=False)
279
280 # True, if the course has at least two voters or if the first voter explicitly confirmed that given text answers
281 # can be published even if no other person evaluates the course
282 can_publish_text_results = models.BooleanField(verbose_name=_("can publish text results"), default=False)
283
284 # students that are allowed to vote
285 participants = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name=_("participants"), blank=True, related_name='courses_participating_in')
286 _participant_count = models.IntegerField(verbose_name=_("participant count"), blank=True, null=True, default=None)
287
288 # students that already voted
289 voters = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name=_("voters"), blank=True, related_name='courses_voted_for')
290 _voter_count = models.IntegerField(verbose_name=_("voter count"), blank=True, null=True, default=None)
291
292 # when the evaluation takes place
293 vote_start_datetime = models.DateTimeField(verbose_name=_("start of evaluation"))
294 vote_end_date = models.DateField(verbose_name=_("last day of evaluation"))
295
296 # who last modified this course
297 last_modified_time = models.DateTimeField(auto_now=True)
298 last_modified_user = models.ForeignKey(settings.AUTH_USER_MODEL, models.SET_NULL, null=True, blank=True, related_name="course_last_modified_user+")
299
300 course_evaluated = Signal(providing_args=['request', 'semester'])
301
302 class Meta:
303 ordering = ('name_de',)
304 unique_together = (
305 ('semester', 'name_de'),
306 ('semester', 'name_en'),
307 )
308 verbose_name = _("course")
309 verbose_name_plural = _("courses")
310
311 def __str__(self):
312 return self.name
313
314 def save(self, *args, **kw):
315 first_save = self.pk is None
316 super().save(*args, **kw)
317
318 # make sure there is a general contribution
319 if not self.general_contribution:
320 self.contributions.create(contributor=None)
321 del self.general_contribution # invalidate cached property
322
323 if self.is_single_result:
324 # adding m2ms such as contributions/questionnaires requires saving the course first,
325 # therefore we must allow the single result questionnaire to not exist on first save
326 assert first_save or Questionnaire.objects.get(contributions__course=self).name_en == Questionnaire.SINGLE_RESULT_QUESTIONNAIRE_NAME
327 assert self.vote_end_date == self.vote_start_datetime.date()
328 else:
329 assert self.vote_end_date >= self.vote_start_datetime.date()
330
331 @property
332 def is_fully_reviewed(self):
333 if not self.can_publish_text_results:
334 return True
335 return not self.unreviewed_textanswer_set.exists()
336
337 @property
338 def vote_end_datetime(self):
339 # The evaluation ends at EVALUATION_END_OFFSET_HOURS:00 of the day AFTER self.vote_end_date.
340 return date_to_datetime(self.vote_end_date) + timedelta(hours=24 + settings.EVALUATION_END_OFFSET_HOURS)
341
342 @property
343 def is_in_evaluation_period(self):
344 return self.vote_start_datetime <= datetime.now() <= self.vote_end_datetime
345
346 @property
347 def general_contribution_has_questionnaires(self):
348 return self.general_contribution and (self.is_single_result or self.general_contribution.questionnaires.count() > 0)
349
350 @property
351 def all_contributions_have_questionnaires(self):
352 return self.general_contribution and (self.is_single_result or all(self.contributions.annotate(Count('questionnaires')).values_list("questionnaires__count", flat=True)))
353
354 def can_user_vote(self, user):
355 """Returns whether the user is allowed to vote on this course."""
356 return (self.state == "in_evaluation"
357 and self.is_in_evaluation_period
358 and user in self.participants.all()
359 and user not in self.voters.all())
360
361 def can_user_see_course(self, user):
362 if user.is_staff:
363 return True
364 if user.is_reviewer and not self.semester.results_are_archived:
365 return True
366 if self.is_private or user.is_external:
367 return self.is_user_contributor_or_delegate(user) or self.participants.filter(pk=user.pk).exists()
368 return True
369
370 def can_user_see_results_page(self, user):
371 if self.is_single_result:
372 return False
373 if user.is_staff:
374 return True
375 if user.is_reviewer and not self.semester.results_are_archived:
376 return True
377 if self.state != 'published':
378 return False
379 if not self.can_publish_rating_results or self.semester.results_are_archived or not self.can_user_see_course(user):
380 return self.is_user_contributor_or_delegate(user)
381 return True
382
383 @property
384 def can_staff_edit(self):
385 return not self.participations_are_archived and self.state in ['new', 'prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']
386
387 @property
388 def can_staff_delete(self):
389 return self.can_staff_edit and (self.num_voters == 0 or self.is_single_result)
390
391 @property
392 def can_publish_average_grade(self):
393 if self.is_single_result:
394 return True
395
396 # the average grade is only published if at least the configured percentage of participants voted during the evaluation for significance reasons
397 return self.can_publish_rating_results and self.num_voters / self.num_participants >= settings.VOTER_PERCENTAGE_NEEDED_FOR_PUBLISHING_AVERAGE_GRADE
398
399 @property
400 def can_publish_rating_results(self):
401 if self.is_single_result:
402 return True
403
404 # the rating results are only published if at least the configured number of participants voted during the evaluation for anonymity reasons
405 return self.num_voters >= settings.VOTER_COUNT_NEEDED_FOR_PUBLISHING_RATING_RESULTS
406
407 @transition(field=state, source=['new', 'editor_approved'], target='prepared')
408 def ready_for_editors(self):
409 pass
410
411 @transition(field=state, source='prepared', target='editor_approved')
412 def editor_approve(self):
413 pass
414
415 @transition(field=state, source=['new', 'prepared', 'editor_approved'], target='approved', conditions=[lambda self: self.general_contribution_has_questionnaires])
416 def staff_approve(self):
417 pass
418
419 @transition(field=state, source=['prepared', 'editor_approved', 'approved'], target='new')
420 def revert_to_new(self):
421 pass
422
423 @transition(field=state, source='approved', target='in_evaluation', conditions=[lambda self: self.is_in_evaluation_period])
424 def evaluation_begin(self):
425 pass
426
427 @transition(field=state, source=['evaluated', 'reviewed'], target='in_evaluation', conditions=[lambda self: self.is_in_evaluation_period])
428 def reopen_evaluation(self):
429 pass
430
431 @transition(field=state, source='in_evaluation', target='evaluated')
432 def evaluation_end(self):
433 pass
434
435 @transition(field=state, source='evaluated', target='reviewed', conditions=[lambda self: self.is_fully_reviewed])
436 def review_finished(self):
437 pass
438
439 @transition(field=state, source=['new', 'reviewed'], target='reviewed', conditions=[lambda self: self.is_single_result])
440 def single_result_created(self):
441 pass
442
443 @transition(field=state, source='reviewed', target='evaluated', conditions=[lambda self: not self.is_fully_reviewed])
444 def reopen_review(self):
445 pass
446
447 @transition(field=state, source='reviewed', target='published')
448 def publish(self):
449 assert self._voter_count is None and self._participant_count is None
450 self._voter_count = self.num_voters
451 self._participant_count = self.num_participants
452
453 if not self.can_publish_text_results:
454 self.textanswer_set.delete()
455 else:
456 self.textanswer_set.filter(state=TextAnswer.HIDDEN).delete()
457 self.textanswer_set.update(original_answer=None)
458
459 @transition(field=state, source='published', target='reviewed')
460 def unpublish(self):
461 assert self.is_single_result or self._voter_count == self.voters.count() and self._participant_count == self.participants.count()
462 self._voter_count = None
463 self._participant_count = None
464
465 @cached_property
466 def general_contribution(self):
467 try:
468 return self.contributions.get(contributor=None)
469 except Contribution.DoesNotExist:
470 return None
471
472 @cached_property
473 def num_participants(self):
474 if self._participant_count is not None:
475 return self._participant_count
476 return self.participants.count()
477
478 @cached_property
479 def num_voters(self):
480 if self._voter_count is not None:
481 return self._voter_count
482 return self.voters.count()
483
484 @property
485 def due_participants(self):
486 return self.participants.exclude(pk__in=self.voters.all())
487
488 @cached_property
489 def responsible_contributors(self):
490 return UserProfile.objects.filter(contributions__course=self, contributions__responsible=True).order_by('contributions__order')
491
492 @cached_property
493 def num_contributors(self):
494 return UserProfile.objects.filter(contributions__course=self).count()
495
496 @property
497 def days_left_for_evaluation(self):
498 return (self.vote_end_date - date.today()).days
499
500 @property
501 def time_left_for_evaluation(self):
502 return self.vote_end_datetime - datetime.now()
503
504 def evaluation_ends_soon(self):
505 return 0 < self.time_left_for_evaluation.total_seconds() < settings.EVALUATION_END_WARNING_PERIOD * 3600
506
507 @property
508 def days_until_evaluation(self):
509 days_left = (self.vote_start_datetime.date() - date.today()).days
510 if self.vote_start_datetime < datetime.now():
511 days_left -= 1
512 return days_left
513
514 def is_user_editor_or_delegate(self, user):
515 return self.contributions.filter(Q(contributor=user) | Q(contributor__in=user.represented_users.all()), can_edit=True).exists()
516
517 def is_user_contributor_or_delegate(self, user):
518 # early out that saves database hits since is_contributor_or_delegate is a cached_property
519 if not user.is_contributor_or_delegate:
520 return False
521 return self.contributions.filter(Q(contributor=user) | Q(contributor__in=user.represented_users.all())).exists()
522
523 @property
524 def textanswer_set(self):
525 return TextAnswer.objects.filter(contribution__course=self)
526
527 @cached_property
528 def num_textanswers(self):
529 if not self.can_publish_text_results:
530 return 0
531 return self.textanswer_set.count()
532
533 @property
534 def unreviewed_textanswer_set(self):
535 return self.textanswer_set.filter(state=TextAnswer.NOT_REVIEWED)
536
537 @property
538 def reviewed_textanswer_set(self):
539 return self.textanswer_set.exclude(state=TextAnswer.NOT_REVIEWED)
540
541 @cached_property
542 def num_reviewed_textanswers(self):
543 return self.reviewed_textanswer_set.count()
544
545 @property
546 def ratinganswer_counters(self):
547 return RatingAnswerCounter.objects.filter(contribution__course=self)
548
549 def _archive_participations(self):
550 """Should be called only via Semester.archive_participations"""
551 if not self.participations_can_be_archived:
552 raise NotArchiveable()
553 if self._participant_count is not None:
554 assert self._voter_count is not None
555 assert self.is_single_result or self._voter_count == self.voters.count() and self._participant_count == self.participants.count()
556 return
557 assert self._participant_count is None and self._voter_count is None
558 self._participant_count = self.num_participants
559 self._voter_count = self.num_voters
560 self.save()
561
562 @property
563 def participations_are_archived(self):
564 semester_participations_are_archived = self.semester.participations_are_archived
565 if semester_participations_are_archived:
566 assert self._participant_count is not None and self._voter_count is not None
567 return semester_participations_are_archived
568
569 @property
570 def participations_can_be_archived(self):
571 return not self.semester.participations_are_archived and self.state in ["new", "published"]
572
573 @property
574 def final_grade_documents(self):
575 from evap.grades.models import GradeDocument
576 return self.grade_documents.filter(type=GradeDocument.FINAL_GRADES)
577
578 @property
579 def midterm_grade_documents(self):
580 from evap.grades.models import GradeDocument
581 return self.grade_documents.filter(type=GradeDocument.MIDTERM_GRADES)
582
583 @classmethod
584 def update_courses(cls):
585 logger.info("update_courses called. Processing courses now.")
586 from evap.evaluation.tools import send_publish_notifications
587
588 courses_new_in_evaluation = []
589 evaluation_results_courses = []
590
591 for course in cls.objects.all():
592 try:
593 if course.state == "approved" and course.vote_start_datetime <= datetime.now():
594 course.evaluation_begin()
595 course.last_modified_user = UserProfile.objects.cronjob_user()
596 course.save()
597 courses_new_in_evaluation.append(course)
598 elif course.state == "in_evaluation" and datetime.now() >= course.vote_end_datetime:
599 course.evaluation_end()
600 if course.is_fully_reviewed:
601 course.review_finished()
602 if not course.is_graded or course.final_grade_documents.exists() or course.gets_no_grade_documents:
603 course.publish()
604 evaluation_results_courses.append(course)
605 course.last_modified_user = UserProfile.objects.cronjob_user()
606 course.save()
607 except Exception:
608 logger.exception('An error occured when updating the state of course "{}" (id {}).'.format(course, course.id))
609
610 template = EmailTemplate.objects.get(name=EmailTemplate.EVALUATION_STARTED)
611 EmailTemplate.send_to_users_in_courses(template, courses_new_in_evaluation, [EmailTemplate.ALL_PARTICIPANTS], use_cc=False, request=None)
612 send_publish_notifications(evaluation_results_courses)
613 logger.info("update_courses finished.")
614
615
616 @receiver(post_transition, sender=Course)
617 def warmup_cache_on_publish(instance, target, **_kwargs):
618 if target == 'published':
619 from evap.results.tools import collect_results
620 from evap.results.views import warm_up_template_cache
621 collect_results(instance)
622 warm_up_template_cache([instance])
623
624
625 @receiver(post_transition, sender=Course)
626 def delete_cache_on_unpublish(instance, source, **_kwargs):
627 if source == 'published':
628 from evap.results.tools import get_collect_results_cache_key
629 from evap.results.views import delete_template_cache
630 caches['results'].delete(get_collect_results_cache_key(instance))
631 delete_template_cache(instance)
632
633
634 @receiver(post_transition, sender=Course)
635 def log_state_transition(instance, name, source, target, **_kwargs):
636 logger.info('Course "{}" (id {}) moved from state "{}" to state "{}", caused by transition "{}".'.format(instance, instance.pk, source, target, name))
637
638
639 class Contribution(models.Model):
640 """A contributor who is assigned to a course and his questionnaires."""
641
642 OWN_COMMENTS = 'OWN'
643 COURSE_COMMENTS = 'COURSE'
644 ALL_COMMENTS = 'ALL'
645 COMMENT_VISIBILITY_CHOICES = (
646 (OWN_COMMENTS, _('Own')),
647 (COURSE_COMMENTS, _('Course')),
648 (ALL_COMMENTS, _('All')),
649 )
650 IS_CONTRIBUTOR = 'CONTRIBUTOR'
651 IS_EDITOR = 'EDITOR'
652 IS_RESPONSIBLE = 'RESPONSIBLE'
653 RESPONSIBILITY_CHOICES = (
654 (IS_CONTRIBUTOR, _('Contributor')),
655 (IS_EDITOR, _('Editor')),
656 (IS_RESPONSIBLE, _('Responsible')),
657 )
658
659 course = models.ForeignKey(Course, models.CASCADE, verbose_name=_("course"), related_name='contributions')
660 contributor = models.ForeignKey(settings.AUTH_USER_MODEL, models.PROTECT, verbose_name=_("contributor"), blank=True, null=True, related_name='contributions')
661 questionnaires = models.ManyToManyField(Questionnaire, verbose_name=_("questionnaires"), blank=True, related_name="contributions")
662 responsible = models.BooleanField(verbose_name=_("responsible"), default=False)
663 can_edit = models.BooleanField(verbose_name=_("can edit"), default=False)
664 comment_visibility = models.CharField(max_length=10, choices=COMMENT_VISIBILITY_CHOICES, verbose_name=_('comment visibility'), default=OWN_COMMENTS)
665 label = models.CharField(max_length=255, blank=True, null=True, verbose_name=_("label"))
666
667 order = models.IntegerField(verbose_name=_("contribution order"), default=-1)
668
669 class Meta:
670 unique_together = (
671 ('course', 'contributor'),
672 )
673 ordering = ['order', ]
674
675 def save(self, *args, **kw):
676 super().save(*args, **kw)
677 if self.responsible and not self.course.is_single_result:
678 assert self.can_edit and self.comment_visibility == self.ALL_COMMENTS
679
680 @property
681 def is_general(self):
682 return self.contributor_id is None
683
684
685 class Question(models.Model, metaclass=LocalizeModelBase):
686 """A question including a type."""
687
688 QUESTION_TYPES = (
689 ("T", _("Text Question")),
690 ("L", _("Likert Question")),
691 ("G", _("Grade Question")),
692 ("P", _("Positive Yes-No Question")),
693 ("N", _("Negative Yes-No Question")),
694 ("H", _("Heading")),
695 )
696
697 order = models.IntegerField(verbose_name=_("question order"), default=-1)
698 questionnaire = models.ForeignKey(Questionnaire, models.CASCADE)
699 text_de = models.CharField(max_length=1024, verbose_name=_("question text (german)"))
700 text_en = models.CharField(max_length=1024, verbose_name=_("question text (english)"))
701 type = models.CharField(max_length=1, choices=QUESTION_TYPES, verbose_name=_("question type"))
702
703 text = Translate
704
705 class Meta:
706 ordering = ['order', ]
707 verbose_name = _("question")
708 verbose_name_plural = _("questions")
709
710 @property
711 def answer_class(self):
712 if self.is_text_question:
713 return TextAnswer
714 elif self.is_rating_question:
715 return RatingAnswerCounter
716 else:
717 raise Exception("Unknown answer type: %r" % self.type)
718
719 @property
720 def is_likert_question(self):
721 return self.type == "L"
722
723 @property
724 def is_text_question(self):
725 return self.type == "T"
726
727 @property
728 def is_grade_question(self):
729 return self.type == "G"
730
731 @property
732 def is_positive_yes_no_question(self):
733 return self.type == "P"
734
735 @property
736 def is_negative_yes_no_question(self):
737 return self.type == "N"
738
739 @property
740 def is_yes_no_question(self):
741 return self.is_positive_yes_no_question or self.is_negative_yes_no_question
742
743 @property
744 def is_rating_question(self):
745 return self.is_grade_question or self.is_likert_question or self.is_yes_no_question
746
747 @property
748 def is_non_grade_rating_question(self):
749 return self.is_rating_question and not self.is_grade_question
750
751 @property
752 def is_heading_question(self):
753 return self.type == "H"
754
755
756 class Answer(models.Model):
757 """An abstract answer to a question. For anonymity purposes, the answering
758 user ist not stored in the object. Concrete subclasses are `RatingAnswerCounter`,
759 and `TextAnswer`."""
760
761 question = models.ForeignKey(Question, models.PROTECT)
762 contribution = models.ForeignKey(Contribution, models.PROTECT, related_name="%(class)s_set")
763
764 class Meta:
765 abstract = True
766 verbose_name = _("answer")
767 verbose_name_plural = _("answers")
768
769
770 class RatingAnswerCounter(Answer):
771 """A rating answer counter to a question. A lower answer is better or indicates more agreement."""
772
773 answer = models.IntegerField(verbose_name=_("answer"))
774 count = models.IntegerField(verbose_name=_("count"), default=0)
775
776 class Meta:
777 unique_together = (
778 ('question', 'contribution', 'answer'),
779 )
780 verbose_name = _("rating answer")
781 verbose_name_plural = _("rating answers")
782
783
784 class TextAnswer(Answer):
785 """A free-form text answer to a question (usually a comment about a course
786 or a contributor)."""
787
788 id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
789
790 answer = models.TextField(verbose_name=_("answer"))
791 original_answer = models.TextField(verbose_name=_("original answer"), blank=True, null=True)
792
793 HIDDEN = 'HI'
794 PUBLISHED = 'PU'
795 PRIVATE = 'PR'
796 NOT_REVIEWED = 'NR'
797 TEXT_ANSWER_STATES = (
798 (HIDDEN, _('hidden')),
799 (PUBLISHED, _('published')),
800 (PRIVATE, _('private')),
801 (NOT_REVIEWED, _('not reviewed')),
802 )
803 state = models.CharField(max_length=2, choices=TEXT_ANSWER_STATES, verbose_name=_('state of answer'), default=NOT_REVIEWED)
804
805 class Meta:
806 # Prevent ordering by date for privacy reasons
807 ordering = ['id', ]
808 verbose_name = _("text answer")
809 verbose_name_plural = _("text answers")
810
811 @property
812 def is_hidden(self):
813 return self.state == self.HIDDEN
814
815 @property
816 def is_private(self):
817 return self.state == self.PRIVATE
818
819 @property
820 def is_published(self):
821 return self.state == self.PUBLISHED
822
823 def save(self, *args, **kwargs):
824 super().save(*args, **kwargs)
825 assert self.answer != self.original_answer
826
827 def publish(self):
828 self.state = self.PUBLISHED
829
830 def hide(self):
831 self.state = self.HIDDEN
832
833 def make_private(self):
834 self.state = self.PRIVATE
835
836 def unreview(self):
837 self.state = self.NOT_REVIEWED
838
839
840 class FaqSection(models.Model, metaclass=LocalizeModelBase):
841 """Section in the frequently asked questions"""
842
843 order = models.IntegerField(verbose_name=_("section order"), default=-1)
844
845 title_de = models.CharField(max_length=255, verbose_name=_("section title (german)"))
846 title_en = models.CharField(max_length=255, verbose_name=_("section title (english)"))
847 title = Translate
848
849 class Meta:
850 ordering = ['order', ]
851 verbose_name = _("section")
852 verbose_name_plural = _("sections")
853
854
855 class FaqQuestion(models.Model, metaclass=LocalizeModelBase):
856 """Question and answer in the frequently asked questions"""
857
858 section = models.ForeignKey(FaqSection, models.CASCADE, related_name="questions")
859
860 order = models.IntegerField(verbose_name=_("question order"), default=-1)
861
862 question_de = models.CharField(max_length=1024, verbose_name=_("question (german)"))
863 question_en = models.CharField(max_length=1024, verbose_name=_("question (english)"))
864 question = Translate
865
866 answer_de = models.TextField(verbose_name=_("answer (german)"))
867 answer_en = models.TextField(verbose_name=_("answer (german)"))
868 answer = Translate
869
870 class Meta:
871 ordering = ['order', ]
872 verbose_name = _("question")
873 verbose_name_plural = _("questions")
874
875
876 class UserProfileManager(BaseUserManager):
877 def get_queryset(self):
878 return super().get_queryset().exclude(username=UserProfile.CRONJOB_USER_USERNAME)
879
880 def cronjob_user(self):
881 return super().get_queryset().get(username=UserProfile.CRONJOB_USER_USERNAME)
882
883 def exclude_inactive_users(self):
884 return self.get_queryset().exclude(is_active=False)
885
886 def create_user(self, username, password=None, email=None, first_name=None, last_name=None):
887 if not username:
888 raise ValueError(_('Users must have a username'))
889
890 user = self.model(
891 username=username,
892 email=self.normalize_email(email),
893 first_name=first_name,
894 last_name=last_name
895 )
896 user.set_password(password)
897 user.save()
898 return user
899
900 def create_superuser(self, username, password, email=None, first_name=None, last_name=None):
901 user = self.create_user(
902 username=username,
903 password=password,
904 email=email,
905 first_name=first_name,
906 last_name=last_name
907 )
908 user.is_superuser = True
909 user.save()
910 user.groups.add(Group.objects.get(name="Staff"))
911 return user
912
913
914 class UserProfile(AbstractBaseUser, PermissionsMixin):
915 username = models.CharField(max_length=255, unique=True, verbose_name=_('username'))
916
917 # null=True because users created through kerberos logins and certain external users don't have an address.
918 email = models.EmailField(max_length=255, unique=True, blank=True, null=True, verbose_name=_('email address'))
919
920 title = models.CharField(max_length=255, blank=True, null=True, verbose_name=_("Title"))
921 first_name = models.CharField(max_length=255, blank=True, null=True, verbose_name=_("first name"))
922 last_name = models.CharField(max_length=255, blank=True, null=True, verbose_name=_("last name"))
923
924 language = models.CharField(max_length=8, blank=True, null=True, verbose_name=_("language"))
925
926 # delegates of the user, which can also manage their courses
927 delegates = models.ManyToManyField("UserProfile", verbose_name=_("Delegates"), related_name="represented_users", blank=True)
928
929 # users to which all emails should be sent in cc without giving them delegate rights
930 cc_users = models.ManyToManyField("UserProfile", verbose_name=_("CC Users"), related_name="ccing_users", blank=True)
931
932 # key for url based login of this user
933 MAX_LOGIN_KEY = 2**31 - 1
934
935 login_key = models.IntegerField(verbose_name=_("Login Key"), unique=True, blank=True, null=True)
936 login_key_valid_until = models.DateField(verbose_name=_("Login Key Validity"), blank=True, null=True)
937
938 is_active = models.BooleanField(default=True, verbose_name=_("active"))
939
940 class Meta:
941 ordering = ('last_name', 'first_name', 'username')
942 verbose_name = _('user')
943 verbose_name_plural = _('users')
944
945 USERNAME_FIELD = 'username'
946 REQUIRED_FIELDS = []
947
948 objects = UserProfileManager()
949
950 @property
951 def full_name(self):
952 if self.last_name:
953 name = self.last_name
954 if self.first_name:
955 name = self.first_name + " " + name
956 if self.title:
957 name = self.title + " " + name
958 return name
959 else:
960 return self.username
961
962 @property
963 def full_name_with_username(self):
964 name = self.full_name
965 if self.username not in name:
966 name += " (" + self.username + ")"
967 return name
968
969 def __str__(self):
970 return self.full_name
971
972 @cached_property
973 def is_staff(self):
974 return self.groups.filter(name='Staff').exists()
975
976 @cached_property
977 def is_reviewer(self):
978 return self.is_staff or self.groups.filter(name='Reviewer').exists()
979
980 @cached_property
981 def is_grade_publisher(self):
982 return self.groups.filter(name='Grade publisher').exists()
983
984 CRONJOB_USER_USERNAME = "cronjob"
985
986 @property
987 def can_staff_mark_inactive(self):
988 if self.is_reviewer or self.is_grade_publisher or self.is_superuser:
989 return False
990 if any(not course.participations_are_archived for course in self.courses_participating_in.all()):
991 return False
992 if any(not contribution.course.participations_are_archived for contribution in self.contributions.all()):
993 return False
994 return True
995
996 @property
997 def can_staff_delete(self):
998 if self.is_contributor or self.is_reviewer or self.is_grade_publisher or self.is_superuser:
999 return False
1000 if any(not course.participations_are_archived for course in self.courses_participating_in.all()):
1001 return False
1002 if any(not user.can_staff_delete for user in self.represented_users.all()):
1003 return False
1004 if any(not user.can_staff_delete for user in self.ccing_users.all()):
1005 return False
1006 return True
1007
1008 @property
1009 def is_participant(self):
1010 return self.courses_participating_in.exists()
1011
1012 @property
1013 def is_student(self):
1014 """
1015 A UserProfile is not considered to be a student anymore if the
1016 newest contribution is newer than the newest participation.
1017 """
1018 if not self.is_participant:
1019 return False
1020
1021 if not self.is_contributor:
1022 return True
1023
1024 last_semester_participated = Semester.objects.filter(course__participants=self).order_by("-created_at").first()
1025 last_semester_contributed = Semester.objects.filter(course__contributions__contributor=self).order_by("-created_at").first()
1026
1027 return last_semester_participated.created_at >= last_semester_contributed.created_at
1028
1029 @property
1030 def is_contributor(self):
1031 return self.contributions.exists()
1032
1033 @property
1034 def is_editor(self):
1035 return self.contributions.filter(can_edit=True).exists()
1036
1037 @property
1038 def is_responsible(self):
1039 # in the user list, self.user.contributions is prefetched, therefore use it directly and don't filter it
1040 return any(contribution.responsible for contribution in self.contributions.all())
1041
1042 @property
1043 def is_delegate(self):
1044 return self.represented_users.exists()
1045
1046 @property
1047 def is_editor_or_delegate(self):
1048 return self.is_editor or self.is_delegate
1049
1050 @cached_property
1051 def is_contributor_or_delegate(self):
1052 return self.is_contributor or self.is_delegate
1053
1054 @property
1055 def is_external(self):
1056 # do the import here to prevent a circular import
1057 from evap.evaluation.tools import is_external_email
1058 if not self.email:
1059 return True
1060 return is_external_email(self.email)
1061
1062 @property
1063 def can_download_grades(self):
1064 return not self.is_external
1065
1066 @classmethod
1067 def email_needs_login_key(cls, email):
1068 # do the import here to prevent a circular import
1069 from evap.evaluation.tools import is_external_email
1070 return is_external_email(email)
1071
1072 @property
1073 def needs_login_key(self):
1074 return UserProfile.email_needs_login_key(self.email)
1075
1076 def ensure_valid_login_key(self):
1077 if self.login_key and self.login_key_valid_until > date.today():
1078 return
1079
1080 while True:
1081 key = random.randrange(0, UserProfile.MAX_LOGIN_KEY)
1082 if not UserProfile.objects.filter(login_key=key).exists():
1083 # key not yet used
1084 self.login_key = key
1085 break
1086 self.refresh_login_key()
1087
1088 def refresh_login_key(self):
1089 self.login_key_valid_until = date.today() + timedelta(settings.LOGIN_KEY_VALIDITY)
1090 self.save()
1091
1092 @property
1093 def login_url(self):
1094 if not self.needs_login_key:
1095 return ""
1096 return settings.PAGE_URL + "?loginkey=" + str(self.login_key)
1097
1098 def get_sorted_contributions(self):
1099 return self.contributions.order_by('course__semester__created_at', 'course__name_de')
1100
1101 def get_sorted_courses_participating_in(self):
1102 return self.courses_participating_in.order_by('semester__created_at', 'name_de')
1103
1104 def get_sorted_courses_voted_for(self):
1105 return self.courses_voted_for.order_by('semester__created_at', 'name_de')
1106
1107
1108 def validate_template(value):
1109 """Field validator which ensures that the value can be compiled into a
1110 Django Template."""
1111 try:
1112 Template(value)
1113 except TemplateSyntaxError as e:
1114 raise ValidationError(str(e))
1115
1116
1117 class EmailTemplate(models.Model):
1118 name = models.CharField(max_length=1024, unique=True, verbose_name=_("Name"))
1119
1120 subject = models.CharField(max_length=1024, verbose_name=_("Subject"), validators=[validate_template])
1121 body = models.TextField(verbose_name=_("Body"), validators=[validate_template])
1122
1123 EDITOR_REVIEW_NOTICE = "Editor Review Notice"
1124 EDITOR_REVIEW_REMINDER = "Editor Review Reminder"
1125 STUDENT_REMINDER = "Student Reminder"
1126 PUBLISHING_NOTICE = "Publishing Notice"
1127 LOGIN_KEY_CREATED = "Login Key Created"
1128 EVALUATION_STARTED = "Evaluation Started"
1129
1130 ALL_PARTICIPANTS = 'all_participants'
1131 DUE_PARTICIPANTS = 'due_participants'
1132 RESPONSIBLE = 'responsible'
1133 EDITORS = 'editors'
1134 CONTRIBUTORS = 'contributors'
1135
1136 EMAIL_RECIPIENTS = (
1137 (ALL_PARTICIPANTS, _('all participants')),
1138 (DUE_PARTICIPANTS, _('due participants')),
1139 (RESPONSIBLE, _('responsible person')),
1140 (EDITORS, _('all editors')),
1141 (CONTRIBUTORS, _('all contributors'))
1142 )
1143
1144 @classmethod
1145 def recipient_list_for_course(cls, course, recipient_groups, filter_users_in_cc):
1146 recipients = []
1147
1148 if cls.CONTRIBUTORS in recipient_groups:
1149 recipients += UserProfile.objects.filter(contributions__course=course)
1150 elif cls.EDITORS in recipient_groups:
1151 recipients += UserProfile.objects.filter(contributions__course=course, contributions__can_edit=True)
1152 elif cls.RESPONSIBLE in recipient_groups:
1153 recipients += course.responsible_contributors
1154
1155 if cls.ALL_PARTICIPANTS in recipient_groups:
1156 recipients += course.participants.all()
1157 elif cls.DUE_PARTICIPANTS in recipient_groups:
1158 recipients += course.due_participants
1159
1160 if filter_users_in_cc:
1161 # remove delegates and CC users of recipients from the recipient list
1162 # so they won't get the exact same email twice
1163 users_excluded = UserProfile.objects.filter(Q(represented_users__in=recipients) | Q(ccing_users__in=recipients))
1164 # but do so only if they have no delegates/cc_users, because otherwise
1165 # those won't get the email at all. consequently, some "edge case users"
1166 # will get the email twice, but there is no satisfying way around that.
1167 users_excluded = users_excluded.filter(delegates=None, cc_users=None)
1168
1169 recipients = list(set(recipients) - set(users_excluded))
1170
1171 return recipients
1172
1173 @classmethod
1174 def render_string(cls, text, dictionary):
1175 return Template(text).render(Context(dictionary, autoescape=False))
1176
1177 @classmethod
1178 def send_to_users_in_courses(cls, template, courses, recipient_groups, use_cc, request):
1179 user_course_map = {}
1180 for course in courses:
1181 recipients = cls.recipient_list_for_course(course, recipient_groups, filter_users_in_cc=use_cc)
1182 for user in recipients:
1183 user_course_map.setdefault(user, []).append(course)
1184
1185 for user, courses in user_course_map.items():
1186 subject_params = {}
1187 body_params = {'user': user, 'courses': courses, 'due_courses': get_due_courses_for_user(user)}
1188 cls.send_to_user(user, template, subject_params, body_params, use_cc=use_cc, request=request)
1189
1190 @classmethod
1191 def send_to_user(cls, user, template, subject_params, body_params, use_cc, request=None):
1192 if not user.email:
1193 warning_message = "{} has no email address defined. Could not send email.".format(user.username)
1194 # If this method is triggered by a cronjob changing course states, the request is None.
1195 # In this case warnings should be sent to the admins via email (configured in the settings for logger.error).
1196 # If a request exists, the page is displayed in the browser and the message can be shown on the page (messages.warning).
1197 if request is not None:
1198 logger.warning(warning_message)
1199 messages.warning(request, _(warning_message))
1200 else:
1201 logger.error(warning_message)
1202 return
1203
1204 if use_cc:
1205 cc_users = set(user.delegates.all() | user.cc_users.all())
1206 cc_addresses = [p.email for p in cc_users if p.email]
1207 else:
1208 cc_addresses = []
1209
1210 send_separate_login_url = False
1211 body_params['login_url'] = ""
1212 if user.needs_login_key:
1213 user.ensure_valid_login_key()
1214 if not cc_addresses:
1215 body_params['login_url'] = user.login_url
1216 else:
1217 send_separate_login_url = True
1218
1219 subject = cls.render_string(template.subject, subject_params)
1220 body = cls.render_string(template.body, body_params)
1221
1222 mail = EmailMessage(
1223 subject=subject,
1224 body=body,
1225 to=[user.email],
1226 cc=cc_addresses,
1227 bcc=[a[1] for a in settings.MANAGERS],
1228 headers={'Reply-To': settings.REPLY_TO_EMAIL})
1229
1230 try:
1231 mail.send(False)
1232 logger.info(('Sent email "{}" to {}.').format(subject, user.username))
1233 if send_separate_login_url:
1234 cls.send_login_url_to_user(user)
1235 except Exception:
1236 logger.exception('An exception occurred when sending the following email to user "{}":\n{}\n'.format(user.username, mail.message()))
1237
1238 @classmethod
1239 def send_reminder_to_user(cls, user, first_due_in_days, due_courses):
1240 template = cls.objects.get(name=cls.STUDENT_REMINDER)
1241 subject_params = {'user': user, 'first_due_in_days': first_due_in_days}
1242 body_params = {'user': user, 'first_due_in_days': first_due_in_days, 'due_courses': due_courses}
1243
1244 cls.send_to_user(user, template, subject_params, body_params, use_cc=False)
1245
1246 @classmethod
1247 def send_login_url_to_user(cls, user):
1248 template = cls.objects.get(name=cls.LOGIN_KEY_CREATED)
1249 subject_params = {}
1250 body_params = {'user': user, 'login_url': user.login_url}
1251
1252 cls.send_to_user(user, template, subject_params, body_params, use_cc=False)
1253 logger.info(('Sent login url to {}.').format(user.username))
1254
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py
--- a/evap/evaluation/models.py
+++ b/evap/evaluation/models.py
@@ -446,7 +446,7 @@
@transition(field=state, source='reviewed', target='published')
def publish(self):
- assert self._voter_count is None and self._participant_count is None
+ assert self.is_single_result or self._voter_count is None and self._participant_count is None
self._voter_count = self.num_voters
self._participant_count = self.num_participants
| {"golden_diff": "diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py\n--- a/evap/evaluation/models.py\n+++ b/evap/evaluation/models.py\n@@ -446,7 +446,7 @@\n \n @transition(field=state, source='reviewed', target='published')\n def publish(self):\n- assert self._voter_count is None and self._participant_count is None\n+ assert self.is_single_result or self._voter_count is None and self._participant_count is None\n self._voter_count = self.num_voters\n self._participant_count = self.num_participants\n", "issue": "Single result publishing fails\nSingle results can't be published because https://github.com/fsr-itse/EvaP/blob/master/evap/evaluation/models.py#L449 asserts `self._voter_count is None` which it is not for single results.\n", "before_files": [{"content": "from datetime import datetime, date, timedelta\nimport logging\nimport random\nimport uuid\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.models import AbstractBaseUser, BaseUserManager, Group, PermissionsMixin\nfrom django.core.cache import caches\nfrom django.core.exceptions import ValidationError\nfrom django.core.mail import EmailMessage\nfrom django.db import models, transaction\nfrom django.db.models import Count, Q, Manager\nfrom django.dispatch import Signal, receiver\nfrom django.template import Context, Template\nfrom django.template.base import TemplateSyntaxError\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\nfrom django_fsm import FSMField, transition\nfrom django_fsm.signals import post_transition\n# see evaluation.meta for the use of Translate in this file\nfrom evap.evaluation.meta import LocalizeModelBase, Translate\nfrom evap.evaluation.tools import date_to_datetime, get_due_courses_for_user\n\nlogger = logging.getLogger(__name__)\n\n\nclass NotArchiveable(Exception):\n \"\"\"An attempt has been made to archive something that is not archiveable.\"\"\"\n pass\n\n\nclass Semester(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Represents a semester, e.g. the winter term of 2011/2012.\"\"\"\n\n name_de = models.CharField(max_length=1024, unique=True, verbose_name=_(\"name (german)\"))\n name_en = models.CharField(max_length=1024, unique=True, verbose_name=_(\"name (english)\"))\n name = Translate\n\n short_name_de = models.CharField(max_length=20, unique=True, verbose_name=_(\"short name (german)\"))\n short_name_en = models.CharField(max_length=20, unique=True, verbose_name=_(\"short name (english)\"))\n short_name = Translate\n\n participations_are_archived = models.BooleanField(default=False, verbose_name=_(\"participations are archived\"))\n grade_documents_are_deleted = models.BooleanField(default=False, verbose_name=_(\"grade documents are deleted\"))\n results_are_archived = models.BooleanField(default=False, verbose_name=_(\"results are archived\"))\n\n created_at = models.DateField(verbose_name=_(\"created at\"), auto_now_add=True)\n\n class Meta:\n ordering = ('-created_at', 'name_de')\n verbose_name = _(\"semester\")\n verbose_name_plural = _(\"semesters\")\n\n def __str__(self):\n return self.name\n\n @property\n def can_staff_delete(self):\n return all(course.can_staff_delete for course in self.course_set.all())\n\n @property\n def participations_can_be_archived(self):\n return not self.participations_are_archived and all(course.participations_can_be_archived for course in self.course_set.all())\n\n @property\n def grade_documents_can_be_deleted(self):\n return not self.grade_documents_are_deleted\n\n @property\n def results_can_be_archived(self):\n return not self.results_are_archived\n\n @transaction.atomic\n def archive_participations(self):\n if not self.participations_can_be_archived:\n raise NotArchiveable()\n for course in self.course_set.all():\n course._archive_participations()\n self.participations_are_archived = True\n self.save()\n\n @transaction.atomic\n def delete_grade_documents(self):\n from evap.grades.models import GradeDocument\n\n if not self.grade_documents_can_be_deleted:\n raise NotArchiveable()\n GradeDocument.objects.filter(course__semester=self).delete()\n self.grade_documents_are_deleted = True\n self.save()\n\n def archive_results(self):\n if not self.results_can_be_archived:\n raise NotArchiveable()\n self.results_are_archived = True\n self.save()\n\n @classmethod\n def get_all_with_unarchived_results(cls):\n return cls.objects.filter(results_are_archived=False).distinct()\n\n @classmethod\n def get_all_with_published_unarchived_results(cls):\n return cls.objects.filter(course__state=\"published\", results_are_archived=False).distinct()\n\n @classmethod\n def active_semester(cls):\n return cls.objects.order_by(\"created_at\").last()\n\n @property\n def is_active_semester(self):\n return self == Semester.active_semester()\n\n\nclass QuestionnaireManager(Manager):\n def course_questionnaires(self):\n return super().get_queryset().exclude(type=Questionnaire.CONTRIBUTOR)\n\n def contributor_questionnaires(self):\n return super().get_queryset().filter(type=Questionnaire.CONTRIBUTOR)\n\n\nclass Questionnaire(models.Model, metaclass=LocalizeModelBase):\n \"\"\"A named collection of questions.\"\"\"\n\n TOP = 10\n CONTRIBUTOR = 20\n BOTTOM = 30\n TYPE_CHOICES = (\n (TOP, _('Top questionnaire')),\n (CONTRIBUTOR, _('Contributor questionnaire')),\n (BOTTOM, _('Bottom questionnaire')),\n )\n type = models.IntegerField(choices=TYPE_CHOICES, verbose_name=_('type'), default=TOP)\n\n name_de = models.CharField(max_length=1024, unique=True, verbose_name=_(\"name (german)\"))\n name_en = models.CharField(max_length=1024, unique=True, verbose_name=_(\"name (english)\"))\n name = Translate\n\n description_de = models.TextField(verbose_name=_(\"description (german)\"), blank=True, null=True)\n description_en = models.TextField(verbose_name=_(\"description (english)\"), blank=True, null=True)\n description = Translate\n\n public_name_de = models.CharField(max_length=1024, verbose_name=_(\"display name (german)\"))\n public_name_en = models.CharField(max_length=1024, verbose_name=_(\"display name (english)\"))\n public_name = Translate\n\n teaser_de = models.TextField(verbose_name=_(\"teaser (german)\"), blank=True, null=True)\n teaser_en = models.TextField(verbose_name=_(\"teaser (english)\"), blank=True, null=True)\n teaser = Translate\n\n order = models.IntegerField(verbose_name=_(\"ordering index\"), default=0)\n\n staff_only = models.BooleanField(verbose_name=_(\"display for staff only\"), default=False)\n obsolete = models.BooleanField(verbose_name=_(\"obsolete\"), default=False)\n\n objects = QuestionnaireManager()\n\n class Meta:\n ordering = ('type', 'order', 'name_de')\n verbose_name = _(\"questionnaire\")\n verbose_name_plural = _(\"questionnaires\")\n\n def __str__(self):\n return self.name\n\n def __lt__(self, other):\n return (self.type, self.order, self.name_de) < (other.type, other.order, self.name_de)\n\n def __gt__(self, other):\n return (self.type, self.order, self.name_de) > (other.type, other.order, self.name_de)\n\n @property\n def is_above_contributors(self):\n return self.type == self.TOP\n\n @property\n def is_below_contributors(self):\n return self.type == self.BOTTOM\n\n @property\n def can_staff_edit(self):\n return not self.contributions.exclude(course__state='new').exists()\n\n @property\n def can_staff_delete(self):\n return not self.contributions.exists()\n\n @property\n def text_questions(self):\n return [question for question in self.question_set.all() if question.is_text_question]\n\n @property\n def rating_questions(self):\n return [question for question in self.question_set.all() if question.is_rating_question]\n\n SINGLE_RESULT_QUESTIONNAIRE_NAME = \"Single result\"\n\n @classmethod\n def single_result_questionnaire(cls):\n return cls.objects.get(name_en=cls.SINGLE_RESULT_QUESTIONNAIRE_NAME)\n\n\nclass Degree(models.Model, metaclass=LocalizeModelBase):\n name_de = models.CharField(max_length=1024, verbose_name=_(\"name (german)\"), unique=True)\n name_en = models.CharField(max_length=1024, verbose_name=_(\"name (english)\"), unique=True)\n name = Translate\n\n order = models.IntegerField(verbose_name=_(\"degree order\"), default=-1)\n\n class Meta:\n ordering = ['order', ]\n\n def __str__(self):\n return self.name\n\n def can_staff_delete(self):\n if self.pk is None:\n return True\n return not self.courses.all().exists()\n\n\nclass CourseType(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Model for the type of a course, e.g. a lecture\"\"\"\n\n name_de = models.CharField(max_length=1024, verbose_name=_(\"name (german)\"), unique=True)\n name_en = models.CharField(max_length=1024, verbose_name=_(\"name (english)\"), unique=True)\n name = Translate\n\n class Meta:\n ordering = ['name_de', ]\n\n def __str__(self):\n return self.name\n\n def __lt__(self, other):\n return self.name_de < other.name_de\n\n def can_staff_delete(self):\n if not self.pk:\n return True\n return not self.courses.all().exists()\n\n\nclass Course(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Models a single course, e.g. the Math 101 course of 2002.\"\"\"\n\n state = FSMField(default='new', protected=True)\n\n semester = models.ForeignKey(Semester, models.PROTECT, verbose_name=_(\"semester\"))\n\n name_de = models.CharField(max_length=1024, verbose_name=_(\"name (german)\"))\n name_en = models.CharField(max_length=1024, verbose_name=_(\"name (english)\"))\n name = Translate\n\n # type of course: lecture, seminar, project\n type = models.ForeignKey(CourseType, models.PROTECT, verbose_name=_(\"course type\"), related_name=\"courses\")\n\n is_single_result = models.BooleanField(verbose_name=_(\"is single result\"), default=False)\n\n # e.g. Bachelor, Master\n degrees = models.ManyToManyField(Degree, verbose_name=_(\"degrees\"), related_name=\"courses\")\n\n # default is True as that's the more restrictive option\n is_graded = models.BooleanField(verbose_name=_(\"is graded\"), default=True)\n\n # defines whether results can only be seen by contributors and participants\n is_private = models.BooleanField(verbose_name=_(\"is private\"), default=False)\n\n # grade publishers can set this to True, then the course will be handled as if final grades have already been uploaded\n gets_no_grade_documents = models.BooleanField(verbose_name=_(\"gets no grade documents\"), default=False)\n\n # whether participants must vote to qualify for reward points\n is_rewarded = models.BooleanField(verbose_name=_(\"is rewarded\"), default=True)\n\n # whether the evaluation does take place during the semester, stating that evaluation results will be published while the course is still running\n is_midterm_evaluation = models.BooleanField(verbose_name=_(\"is midterm evaluation\"), default=False)\n\n # True, if the course has at least two voters or if the first voter explicitly confirmed that given text answers\n # can be published even if no other person evaluates the course\n can_publish_text_results = models.BooleanField(verbose_name=_(\"can publish text results\"), default=False)\n\n # students that are allowed to vote\n participants = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name=_(\"participants\"), blank=True, related_name='courses_participating_in')\n _participant_count = models.IntegerField(verbose_name=_(\"participant count\"), blank=True, null=True, default=None)\n\n # students that already voted\n voters = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name=_(\"voters\"), blank=True, related_name='courses_voted_for')\n _voter_count = models.IntegerField(verbose_name=_(\"voter count\"), blank=True, null=True, default=None)\n\n # when the evaluation takes place\n vote_start_datetime = models.DateTimeField(verbose_name=_(\"start of evaluation\"))\n vote_end_date = models.DateField(verbose_name=_(\"last day of evaluation\"))\n\n # who last modified this course\n last_modified_time = models.DateTimeField(auto_now=True)\n last_modified_user = models.ForeignKey(settings.AUTH_USER_MODEL, models.SET_NULL, null=True, blank=True, related_name=\"course_last_modified_user+\")\n\n course_evaluated = Signal(providing_args=['request', 'semester'])\n\n class Meta:\n ordering = ('name_de',)\n unique_together = (\n ('semester', 'name_de'),\n ('semester', 'name_en'),\n )\n verbose_name = _(\"course\")\n verbose_name_plural = _(\"courses\")\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kw):\n first_save = self.pk is None\n super().save(*args, **kw)\n\n # make sure there is a general contribution\n if not self.general_contribution:\n self.contributions.create(contributor=None)\n del self.general_contribution # invalidate cached property\n\n if self.is_single_result:\n # adding m2ms such as contributions/questionnaires requires saving the course first,\n # therefore we must allow the single result questionnaire to not exist on first save\n assert first_save or Questionnaire.objects.get(contributions__course=self).name_en == Questionnaire.SINGLE_RESULT_QUESTIONNAIRE_NAME\n assert self.vote_end_date == self.vote_start_datetime.date()\n else:\n assert self.vote_end_date >= self.vote_start_datetime.date()\n\n @property\n def is_fully_reviewed(self):\n if not self.can_publish_text_results:\n return True\n return not self.unreviewed_textanswer_set.exists()\n\n @property\n def vote_end_datetime(self):\n # The evaluation ends at EVALUATION_END_OFFSET_HOURS:00 of the day AFTER self.vote_end_date.\n return date_to_datetime(self.vote_end_date) + timedelta(hours=24 + settings.EVALUATION_END_OFFSET_HOURS)\n\n @property\n def is_in_evaluation_period(self):\n return self.vote_start_datetime <= datetime.now() <= self.vote_end_datetime\n\n @property\n def general_contribution_has_questionnaires(self):\n return self.general_contribution and (self.is_single_result or self.general_contribution.questionnaires.count() > 0)\n\n @property\n def all_contributions_have_questionnaires(self):\n return self.general_contribution and (self.is_single_result or all(self.contributions.annotate(Count('questionnaires')).values_list(\"questionnaires__count\", flat=True)))\n\n def can_user_vote(self, user):\n \"\"\"Returns whether the user is allowed to vote on this course.\"\"\"\n return (self.state == \"in_evaluation\"\n and self.is_in_evaluation_period\n and user in self.participants.all()\n and user not in self.voters.all())\n\n def can_user_see_course(self, user):\n if user.is_staff:\n return True\n if user.is_reviewer and not self.semester.results_are_archived:\n return True\n if self.is_private or user.is_external:\n return self.is_user_contributor_or_delegate(user) or self.participants.filter(pk=user.pk).exists()\n return True\n\n def can_user_see_results_page(self, user):\n if self.is_single_result:\n return False\n if user.is_staff:\n return True\n if user.is_reviewer and not self.semester.results_are_archived:\n return True\n if self.state != 'published':\n return False\n if not self.can_publish_rating_results or self.semester.results_are_archived or not self.can_user_see_course(user):\n return self.is_user_contributor_or_delegate(user)\n return True\n\n @property\n def can_staff_edit(self):\n return not self.participations_are_archived and self.state in ['new', 'prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']\n\n @property\n def can_staff_delete(self):\n return self.can_staff_edit and (self.num_voters == 0 or self.is_single_result)\n\n @property\n def can_publish_average_grade(self):\n if self.is_single_result:\n return True\n\n # the average grade is only published if at least the configured percentage of participants voted during the evaluation for significance reasons\n return self.can_publish_rating_results and self.num_voters / self.num_participants >= settings.VOTER_PERCENTAGE_NEEDED_FOR_PUBLISHING_AVERAGE_GRADE\n\n @property\n def can_publish_rating_results(self):\n if self.is_single_result:\n return True\n\n # the rating results are only published if at least the configured number of participants voted during the evaluation for anonymity reasons\n return self.num_voters >= settings.VOTER_COUNT_NEEDED_FOR_PUBLISHING_RATING_RESULTS\n\n @transition(field=state, source=['new', 'editor_approved'], target='prepared')\n def ready_for_editors(self):\n pass\n\n @transition(field=state, source='prepared', target='editor_approved')\n def editor_approve(self):\n pass\n\n @transition(field=state, source=['new', 'prepared', 'editor_approved'], target='approved', conditions=[lambda self: self.general_contribution_has_questionnaires])\n def staff_approve(self):\n pass\n\n @transition(field=state, source=['prepared', 'editor_approved', 'approved'], target='new')\n def revert_to_new(self):\n pass\n\n @transition(field=state, source='approved', target='in_evaluation', conditions=[lambda self: self.is_in_evaluation_period])\n def evaluation_begin(self):\n pass\n\n @transition(field=state, source=['evaluated', 'reviewed'], target='in_evaluation', conditions=[lambda self: self.is_in_evaluation_period])\n def reopen_evaluation(self):\n pass\n\n @transition(field=state, source='in_evaluation', target='evaluated')\n def evaluation_end(self):\n pass\n\n @transition(field=state, source='evaluated', target='reviewed', conditions=[lambda self: self.is_fully_reviewed])\n def review_finished(self):\n pass\n\n @transition(field=state, source=['new', 'reviewed'], target='reviewed', conditions=[lambda self: self.is_single_result])\n def single_result_created(self):\n pass\n\n @transition(field=state, source='reviewed', target='evaluated', conditions=[lambda self: not self.is_fully_reviewed])\n def reopen_review(self):\n pass\n\n @transition(field=state, source='reviewed', target='published')\n def publish(self):\n assert self._voter_count is None and self._participant_count is None\n self._voter_count = self.num_voters\n self._participant_count = self.num_participants\n\n if not self.can_publish_text_results:\n self.textanswer_set.delete()\n else:\n self.textanswer_set.filter(state=TextAnswer.HIDDEN).delete()\n self.textanswer_set.update(original_answer=None)\n\n @transition(field=state, source='published', target='reviewed')\n def unpublish(self):\n assert self.is_single_result or self._voter_count == self.voters.count() and self._participant_count == self.participants.count()\n self._voter_count = None\n self._participant_count = None\n\n @cached_property\n def general_contribution(self):\n try:\n return self.contributions.get(contributor=None)\n except Contribution.DoesNotExist:\n return None\n\n @cached_property\n def num_participants(self):\n if self._participant_count is not None:\n return self._participant_count\n return self.participants.count()\n\n @cached_property\n def num_voters(self):\n if self._voter_count is not None:\n return self._voter_count\n return self.voters.count()\n\n @property\n def due_participants(self):\n return self.participants.exclude(pk__in=self.voters.all())\n\n @cached_property\n def responsible_contributors(self):\n return UserProfile.objects.filter(contributions__course=self, contributions__responsible=True).order_by('contributions__order')\n\n @cached_property\n def num_contributors(self):\n return UserProfile.objects.filter(contributions__course=self).count()\n\n @property\n def days_left_for_evaluation(self):\n return (self.vote_end_date - date.today()).days\n\n @property\n def time_left_for_evaluation(self):\n return self.vote_end_datetime - datetime.now()\n\n def evaluation_ends_soon(self):\n return 0 < self.time_left_for_evaluation.total_seconds() < settings.EVALUATION_END_WARNING_PERIOD * 3600\n\n @property\n def days_until_evaluation(self):\n days_left = (self.vote_start_datetime.date() - date.today()).days\n if self.vote_start_datetime < datetime.now():\n days_left -= 1\n return days_left\n\n def is_user_editor_or_delegate(self, user):\n return self.contributions.filter(Q(contributor=user) | Q(contributor__in=user.represented_users.all()), can_edit=True).exists()\n\n def is_user_contributor_or_delegate(self, user):\n # early out that saves database hits since is_contributor_or_delegate is a cached_property\n if not user.is_contributor_or_delegate:\n return False\n return self.contributions.filter(Q(contributor=user) | Q(contributor__in=user.represented_users.all())).exists()\n\n @property\n def textanswer_set(self):\n return TextAnswer.objects.filter(contribution__course=self)\n\n @cached_property\n def num_textanswers(self):\n if not self.can_publish_text_results:\n return 0\n return self.textanswer_set.count()\n\n @property\n def unreviewed_textanswer_set(self):\n return self.textanswer_set.filter(state=TextAnswer.NOT_REVIEWED)\n\n @property\n def reviewed_textanswer_set(self):\n return self.textanswer_set.exclude(state=TextAnswer.NOT_REVIEWED)\n\n @cached_property\n def num_reviewed_textanswers(self):\n return self.reviewed_textanswer_set.count()\n\n @property\n def ratinganswer_counters(self):\n return RatingAnswerCounter.objects.filter(contribution__course=self)\n\n def _archive_participations(self):\n \"\"\"Should be called only via Semester.archive_participations\"\"\"\n if not self.participations_can_be_archived:\n raise NotArchiveable()\n if self._participant_count is not None:\n assert self._voter_count is not None\n assert self.is_single_result or self._voter_count == self.voters.count() and self._participant_count == self.participants.count()\n return\n assert self._participant_count is None and self._voter_count is None\n self._participant_count = self.num_participants\n self._voter_count = self.num_voters\n self.save()\n\n @property\n def participations_are_archived(self):\n semester_participations_are_archived = self.semester.participations_are_archived\n if semester_participations_are_archived:\n assert self._participant_count is not None and self._voter_count is not None\n return semester_participations_are_archived\n\n @property\n def participations_can_be_archived(self):\n return not self.semester.participations_are_archived and self.state in [\"new\", \"published\"]\n\n @property\n def final_grade_documents(self):\n from evap.grades.models import GradeDocument\n return self.grade_documents.filter(type=GradeDocument.FINAL_GRADES)\n\n @property\n def midterm_grade_documents(self):\n from evap.grades.models import GradeDocument\n return self.grade_documents.filter(type=GradeDocument.MIDTERM_GRADES)\n\n @classmethod\n def update_courses(cls):\n logger.info(\"update_courses called. Processing courses now.\")\n from evap.evaluation.tools import send_publish_notifications\n\n courses_new_in_evaluation = []\n evaluation_results_courses = []\n\n for course in cls.objects.all():\n try:\n if course.state == \"approved\" and course.vote_start_datetime <= datetime.now():\n course.evaluation_begin()\n course.last_modified_user = UserProfile.objects.cronjob_user()\n course.save()\n courses_new_in_evaluation.append(course)\n elif course.state == \"in_evaluation\" and datetime.now() >= course.vote_end_datetime:\n course.evaluation_end()\n if course.is_fully_reviewed:\n course.review_finished()\n if not course.is_graded or course.final_grade_documents.exists() or course.gets_no_grade_documents:\n course.publish()\n evaluation_results_courses.append(course)\n course.last_modified_user = UserProfile.objects.cronjob_user()\n course.save()\n except Exception:\n logger.exception('An error occured when updating the state of course \"{}\" (id {}).'.format(course, course.id))\n\n template = EmailTemplate.objects.get(name=EmailTemplate.EVALUATION_STARTED)\n EmailTemplate.send_to_users_in_courses(template, courses_new_in_evaluation, [EmailTemplate.ALL_PARTICIPANTS], use_cc=False, request=None)\n send_publish_notifications(evaluation_results_courses)\n logger.info(\"update_courses finished.\")\n\n\n@receiver(post_transition, sender=Course)\ndef warmup_cache_on_publish(instance, target, **_kwargs):\n if target == 'published':\n from evap.results.tools import collect_results\n from evap.results.views import warm_up_template_cache\n collect_results(instance)\n warm_up_template_cache([instance])\n\n\n@receiver(post_transition, sender=Course)\ndef delete_cache_on_unpublish(instance, source, **_kwargs):\n if source == 'published':\n from evap.results.tools import get_collect_results_cache_key\n from evap.results.views import delete_template_cache\n caches['results'].delete(get_collect_results_cache_key(instance))\n delete_template_cache(instance)\n\n\n@receiver(post_transition, sender=Course)\ndef log_state_transition(instance, name, source, target, **_kwargs):\n logger.info('Course \"{}\" (id {}) moved from state \"{}\" to state \"{}\", caused by transition \"{}\".'.format(instance, instance.pk, source, target, name))\n\n\nclass Contribution(models.Model):\n \"\"\"A contributor who is assigned to a course and his questionnaires.\"\"\"\n\n OWN_COMMENTS = 'OWN'\n COURSE_COMMENTS = 'COURSE'\n ALL_COMMENTS = 'ALL'\n COMMENT_VISIBILITY_CHOICES = (\n (OWN_COMMENTS, _('Own')),\n (COURSE_COMMENTS, _('Course')),\n (ALL_COMMENTS, _('All')),\n )\n IS_CONTRIBUTOR = 'CONTRIBUTOR'\n IS_EDITOR = 'EDITOR'\n IS_RESPONSIBLE = 'RESPONSIBLE'\n RESPONSIBILITY_CHOICES = (\n (IS_CONTRIBUTOR, _('Contributor')),\n (IS_EDITOR, _('Editor')),\n (IS_RESPONSIBLE, _('Responsible')),\n )\n\n course = models.ForeignKey(Course, models.CASCADE, verbose_name=_(\"course\"), related_name='contributions')\n contributor = models.ForeignKey(settings.AUTH_USER_MODEL, models.PROTECT, verbose_name=_(\"contributor\"), blank=True, null=True, related_name='contributions')\n questionnaires = models.ManyToManyField(Questionnaire, verbose_name=_(\"questionnaires\"), blank=True, related_name=\"contributions\")\n responsible = models.BooleanField(verbose_name=_(\"responsible\"), default=False)\n can_edit = models.BooleanField(verbose_name=_(\"can edit\"), default=False)\n comment_visibility = models.CharField(max_length=10, choices=COMMENT_VISIBILITY_CHOICES, verbose_name=_('comment visibility'), default=OWN_COMMENTS)\n label = models.CharField(max_length=255, blank=True, null=True, verbose_name=_(\"label\"))\n\n order = models.IntegerField(verbose_name=_(\"contribution order\"), default=-1)\n\n class Meta:\n unique_together = (\n ('course', 'contributor'),\n )\n ordering = ['order', ]\n\n def save(self, *args, **kw):\n super().save(*args, **kw)\n if self.responsible and not self.course.is_single_result:\n assert self.can_edit and self.comment_visibility == self.ALL_COMMENTS\n\n @property\n def is_general(self):\n return self.contributor_id is None\n\n\nclass Question(models.Model, metaclass=LocalizeModelBase):\n \"\"\"A question including a type.\"\"\"\n\n QUESTION_TYPES = (\n (\"T\", _(\"Text Question\")),\n (\"L\", _(\"Likert Question\")),\n (\"G\", _(\"Grade Question\")),\n (\"P\", _(\"Positive Yes-No Question\")),\n (\"N\", _(\"Negative Yes-No Question\")),\n (\"H\", _(\"Heading\")),\n )\n\n order = models.IntegerField(verbose_name=_(\"question order\"), default=-1)\n questionnaire = models.ForeignKey(Questionnaire, models.CASCADE)\n text_de = models.CharField(max_length=1024, verbose_name=_(\"question text (german)\"))\n text_en = models.CharField(max_length=1024, verbose_name=_(\"question text (english)\"))\n type = models.CharField(max_length=1, choices=QUESTION_TYPES, verbose_name=_(\"question type\"))\n\n text = Translate\n\n class Meta:\n ordering = ['order', ]\n verbose_name = _(\"question\")\n verbose_name_plural = _(\"questions\")\n\n @property\n def answer_class(self):\n if self.is_text_question:\n return TextAnswer\n elif self.is_rating_question:\n return RatingAnswerCounter\n else:\n raise Exception(\"Unknown answer type: %r\" % self.type)\n\n @property\n def is_likert_question(self):\n return self.type == \"L\"\n\n @property\n def is_text_question(self):\n return self.type == \"T\"\n\n @property\n def is_grade_question(self):\n return self.type == \"G\"\n\n @property\n def is_positive_yes_no_question(self):\n return self.type == \"P\"\n\n @property\n def is_negative_yes_no_question(self):\n return self.type == \"N\"\n\n @property\n def is_yes_no_question(self):\n return self.is_positive_yes_no_question or self.is_negative_yes_no_question\n\n @property\n def is_rating_question(self):\n return self.is_grade_question or self.is_likert_question or self.is_yes_no_question\n\n @property\n def is_non_grade_rating_question(self):\n return self.is_rating_question and not self.is_grade_question\n\n @property\n def is_heading_question(self):\n return self.type == \"H\"\n\n\nclass Answer(models.Model):\n \"\"\"An abstract answer to a question. For anonymity purposes, the answering\n user ist not stored in the object. Concrete subclasses are `RatingAnswerCounter`,\n and `TextAnswer`.\"\"\"\n\n question = models.ForeignKey(Question, models.PROTECT)\n contribution = models.ForeignKey(Contribution, models.PROTECT, related_name=\"%(class)s_set\")\n\n class Meta:\n abstract = True\n verbose_name = _(\"answer\")\n verbose_name_plural = _(\"answers\")\n\n\nclass RatingAnswerCounter(Answer):\n \"\"\"A rating answer counter to a question. A lower answer is better or indicates more agreement.\"\"\"\n\n answer = models.IntegerField(verbose_name=_(\"answer\"))\n count = models.IntegerField(verbose_name=_(\"count\"), default=0)\n\n class Meta:\n unique_together = (\n ('question', 'contribution', 'answer'),\n )\n verbose_name = _(\"rating answer\")\n verbose_name_plural = _(\"rating answers\")\n\n\nclass TextAnswer(Answer):\n \"\"\"A free-form text answer to a question (usually a comment about a course\n or a contributor).\"\"\"\n\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n\n answer = models.TextField(verbose_name=_(\"answer\"))\n original_answer = models.TextField(verbose_name=_(\"original answer\"), blank=True, null=True)\n\n HIDDEN = 'HI'\n PUBLISHED = 'PU'\n PRIVATE = 'PR'\n NOT_REVIEWED = 'NR'\n TEXT_ANSWER_STATES = (\n (HIDDEN, _('hidden')),\n (PUBLISHED, _('published')),\n (PRIVATE, _('private')),\n (NOT_REVIEWED, _('not reviewed')),\n )\n state = models.CharField(max_length=2, choices=TEXT_ANSWER_STATES, verbose_name=_('state of answer'), default=NOT_REVIEWED)\n\n class Meta:\n # Prevent ordering by date for privacy reasons\n ordering = ['id', ]\n verbose_name = _(\"text answer\")\n verbose_name_plural = _(\"text answers\")\n\n @property\n def is_hidden(self):\n return self.state == self.HIDDEN\n\n @property\n def is_private(self):\n return self.state == self.PRIVATE\n\n @property\n def is_published(self):\n return self.state == self.PUBLISHED\n\n def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n assert self.answer != self.original_answer\n\n def publish(self):\n self.state = self.PUBLISHED\n\n def hide(self):\n self.state = self.HIDDEN\n\n def make_private(self):\n self.state = self.PRIVATE\n\n def unreview(self):\n self.state = self.NOT_REVIEWED\n\n\nclass FaqSection(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Section in the frequently asked questions\"\"\"\n\n order = models.IntegerField(verbose_name=_(\"section order\"), default=-1)\n\n title_de = models.CharField(max_length=255, verbose_name=_(\"section title (german)\"))\n title_en = models.CharField(max_length=255, verbose_name=_(\"section title (english)\"))\n title = Translate\n\n class Meta:\n ordering = ['order', ]\n verbose_name = _(\"section\")\n verbose_name_plural = _(\"sections\")\n\n\nclass FaqQuestion(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Question and answer in the frequently asked questions\"\"\"\n\n section = models.ForeignKey(FaqSection, models.CASCADE, related_name=\"questions\")\n\n order = models.IntegerField(verbose_name=_(\"question order\"), default=-1)\n\n question_de = models.CharField(max_length=1024, verbose_name=_(\"question (german)\"))\n question_en = models.CharField(max_length=1024, verbose_name=_(\"question (english)\"))\n question = Translate\n\n answer_de = models.TextField(verbose_name=_(\"answer (german)\"))\n answer_en = models.TextField(verbose_name=_(\"answer (german)\"))\n answer = Translate\n\n class Meta:\n ordering = ['order', ]\n verbose_name = _(\"question\")\n verbose_name_plural = _(\"questions\")\n\n\nclass UserProfileManager(BaseUserManager):\n def get_queryset(self):\n return super().get_queryset().exclude(username=UserProfile.CRONJOB_USER_USERNAME)\n\n def cronjob_user(self):\n return super().get_queryset().get(username=UserProfile.CRONJOB_USER_USERNAME)\n\n def exclude_inactive_users(self):\n return self.get_queryset().exclude(is_active=False)\n\n def create_user(self, username, password=None, email=None, first_name=None, last_name=None):\n if not username:\n raise ValueError(_('Users must have a username'))\n\n user = self.model(\n username=username,\n email=self.normalize_email(email),\n first_name=first_name,\n last_name=last_name\n )\n user.set_password(password)\n user.save()\n return user\n\n def create_superuser(self, username, password, email=None, first_name=None, last_name=None):\n user = self.create_user(\n username=username,\n password=password,\n email=email,\n first_name=first_name,\n last_name=last_name\n )\n user.is_superuser = True\n user.save()\n user.groups.add(Group.objects.get(name=\"Staff\"))\n return user\n\n\nclass UserProfile(AbstractBaseUser, PermissionsMixin):\n username = models.CharField(max_length=255, unique=True, verbose_name=_('username'))\n\n # null=True because users created through kerberos logins and certain external users don't have an address.\n email = models.EmailField(max_length=255, unique=True, blank=True, null=True, verbose_name=_('email address'))\n\n title = models.CharField(max_length=255, blank=True, null=True, verbose_name=_(\"Title\"))\n first_name = models.CharField(max_length=255, blank=True, null=True, verbose_name=_(\"first name\"))\n last_name = models.CharField(max_length=255, blank=True, null=True, verbose_name=_(\"last name\"))\n\n language = models.CharField(max_length=8, blank=True, null=True, verbose_name=_(\"language\"))\n\n # delegates of the user, which can also manage their courses\n delegates = models.ManyToManyField(\"UserProfile\", verbose_name=_(\"Delegates\"), related_name=\"represented_users\", blank=True)\n\n # users to which all emails should be sent in cc without giving them delegate rights\n cc_users = models.ManyToManyField(\"UserProfile\", verbose_name=_(\"CC Users\"), related_name=\"ccing_users\", blank=True)\n\n # key for url based login of this user\n MAX_LOGIN_KEY = 2**31 - 1\n\n login_key = models.IntegerField(verbose_name=_(\"Login Key\"), unique=True, blank=True, null=True)\n login_key_valid_until = models.DateField(verbose_name=_(\"Login Key Validity\"), blank=True, null=True)\n\n is_active = models.BooleanField(default=True, verbose_name=_(\"active\"))\n\n class Meta:\n ordering = ('last_name', 'first_name', 'username')\n verbose_name = _('user')\n verbose_name_plural = _('users')\n\n USERNAME_FIELD = 'username'\n REQUIRED_FIELDS = []\n\n objects = UserProfileManager()\n\n @property\n def full_name(self):\n if self.last_name:\n name = self.last_name\n if self.first_name:\n name = self.first_name + \" \" + name\n if self.title:\n name = self.title + \" \" + name\n return name\n else:\n return self.username\n\n @property\n def full_name_with_username(self):\n name = self.full_name\n if self.username not in name:\n name += \" (\" + self.username + \")\"\n return name\n\n def __str__(self):\n return self.full_name\n\n @cached_property\n def is_staff(self):\n return self.groups.filter(name='Staff').exists()\n\n @cached_property\n def is_reviewer(self):\n return self.is_staff or self.groups.filter(name='Reviewer').exists()\n\n @cached_property\n def is_grade_publisher(self):\n return self.groups.filter(name='Grade publisher').exists()\n\n CRONJOB_USER_USERNAME = \"cronjob\"\n\n @property\n def can_staff_mark_inactive(self):\n if self.is_reviewer or self.is_grade_publisher or self.is_superuser:\n return False\n if any(not course.participations_are_archived for course in self.courses_participating_in.all()):\n return False\n if any(not contribution.course.participations_are_archived for contribution in self.contributions.all()):\n return False\n return True\n\n @property\n def can_staff_delete(self):\n if self.is_contributor or self.is_reviewer or self.is_grade_publisher or self.is_superuser:\n return False\n if any(not course.participations_are_archived for course in self.courses_participating_in.all()):\n return False\n if any(not user.can_staff_delete for user in self.represented_users.all()):\n return False\n if any(not user.can_staff_delete for user in self.ccing_users.all()):\n return False\n return True\n\n @property\n def is_participant(self):\n return self.courses_participating_in.exists()\n\n @property\n def is_student(self):\n \"\"\"\n A UserProfile is not considered to be a student anymore if the\n newest contribution is newer than the newest participation.\n \"\"\"\n if not self.is_participant:\n return False\n\n if not self.is_contributor:\n return True\n\n last_semester_participated = Semester.objects.filter(course__participants=self).order_by(\"-created_at\").first()\n last_semester_contributed = Semester.objects.filter(course__contributions__contributor=self).order_by(\"-created_at\").first()\n\n return last_semester_participated.created_at >= last_semester_contributed.created_at\n\n @property\n def is_contributor(self):\n return self.contributions.exists()\n\n @property\n def is_editor(self):\n return self.contributions.filter(can_edit=True).exists()\n\n @property\n def is_responsible(self):\n # in the user list, self.user.contributions is prefetched, therefore use it directly and don't filter it\n return any(contribution.responsible for contribution in self.contributions.all())\n\n @property\n def is_delegate(self):\n return self.represented_users.exists()\n\n @property\n def is_editor_or_delegate(self):\n return self.is_editor or self.is_delegate\n\n @cached_property\n def is_contributor_or_delegate(self):\n return self.is_contributor or self.is_delegate\n\n @property\n def is_external(self):\n # do the import here to prevent a circular import\n from evap.evaluation.tools import is_external_email\n if not self.email:\n return True\n return is_external_email(self.email)\n\n @property\n def can_download_grades(self):\n return not self.is_external\n\n @classmethod\n def email_needs_login_key(cls, email):\n # do the import here to prevent a circular import\n from evap.evaluation.tools import is_external_email\n return is_external_email(email)\n\n @property\n def needs_login_key(self):\n return UserProfile.email_needs_login_key(self.email)\n\n def ensure_valid_login_key(self):\n if self.login_key and self.login_key_valid_until > date.today():\n return\n\n while True:\n key = random.randrange(0, UserProfile.MAX_LOGIN_KEY)\n if not UserProfile.objects.filter(login_key=key).exists():\n # key not yet used\n self.login_key = key\n break\n self.refresh_login_key()\n\n def refresh_login_key(self):\n self.login_key_valid_until = date.today() + timedelta(settings.LOGIN_KEY_VALIDITY)\n self.save()\n\n @property\n def login_url(self):\n if not self.needs_login_key:\n return \"\"\n return settings.PAGE_URL + \"?loginkey=\" + str(self.login_key)\n\n def get_sorted_contributions(self):\n return self.contributions.order_by('course__semester__created_at', 'course__name_de')\n\n def get_sorted_courses_participating_in(self):\n return self.courses_participating_in.order_by('semester__created_at', 'name_de')\n\n def get_sorted_courses_voted_for(self):\n return self.courses_voted_for.order_by('semester__created_at', 'name_de')\n\n\ndef validate_template(value):\n \"\"\"Field validator which ensures that the value can be compiled into a\n Django Template.\"\"\"\n try:\n Template(value)\n except TemplateSyntaxError as e:\n raise ValidationError(str(e))\n\n\nclass EmailTemplate(models.Model):\n name = models.CharField(max_length=1024, unique=True, verbose_name=_(\"Name\"))\n\n subject = models.CharField(max_length=1024, verbose_name=_(\"Subject\"), validators=[validate_template])\n body = models.TextField(verbose_name=_(\"Body\"), validators=[validate_template])\n\n EDITOR_REVIEW_NOTICE = \"Editor Review Notice\"\n EDITOR_REVIEW_REMINDER = \"Editor Review Reminder\"\n STUDENT_REMINDER = \"Student Reminder\"\n PUBLISHING_NOTICE = \"Publishing Notice\"\n LOGIN_KEY_CREATED = \"Login Key Created\"\n EVALUATION_STARTED = \"Evaluation Started\"\n\n ALL_PARTICIPANTS = 'all_participants'\n DUE_PARTICIPANTS = 'due_participants'\n RESPONSIBLE = 'responsible'\n EDITORS = 'editors'\n CONTRIBUTORS = 'contributors'\n\n EMAIL_RECIPIENTS = (\n (ALL_PARTICIPANTS, _('all participants')),\n (DUE_PARTICIPANTS, _('due participants')),\n (RESPONSIBLE, _('responsible person')),\n (EDITORS, _('all editors')),\n (CONTRIBUTORS, _('all contributors'))\n )\n\n @classmethod\n def recipient_list_for_course(cls, course, recipient_groups, filter_users_in_cc):\n recipients = []\n\n if cls.CONTRIBUTORS in recipient_groups:\n recipients += UserProfile.objects.filter(contributions__course=course)\n elif cls.EDITORS in recipient_groups:\n recipients += UserProfile.objects.filter(contributions__course=course, contributions__can_edit=True)\n elif cls.RESPONSIBLE in recipient_groups:\n recipients += course.responsible_contributors\n\n if cls.ALL_PARTICIPANTS in recipient_groups:\n recipients += course.participants.all()\n elif cls.DUE_PARTICIPANTS in recipient_groups:\n recipients += course.due_participants\n\n if filter_users_in_cc:\n # remove delegates and CC users of recipients from the recipient list\n # so they won't get the exact same email twice\n users_excluded = UserProfile.objects.filter(Q(represented_users__in=recipients) | Q(ccing_users__in=recipients))\n # but do so only if they have no delegates/cc_users, because otherwise\n # those won't get the email at all. consequently, some \"edge case users\"\n # will get the email twice, but there is no satisfying way around that.\n users_excluded = users_excluded.filter(delegates=None, cc_users=None)\n\n recipients = list(set(recipients) - set(users_excluded))\n\n return recipients\n\n @classmethod\n def render_string(cls, text, dictionary):\n return Template(text).render(Context(dictionary, autoescape=False))\n\n @classmethod\n def send_to_users_in_courses(cls, template, courses, recipient_groups, use_cc, request):\n user_course_map = {}\n for course in courses:\n recipients = cls.recipient_list_for_course(course, recipient_groups, filter_users_in_cc=use_cc)\n for user in recipients:\n user_course_map.setdefault(user, []).append(course)\n\n for user, courses in user_course_map.items():\n subject_params = {}\n body_params = {'user': user, 'courses': courses, 'due_courses': get_due_courses_for_user(user)}\n cls.send_to_user(user, template, subject_params, body_params, use_cc=use_cc, request=request)\n\n @classmethod\n def send_to_user(cls, user, template, subject_params, body_params, use_cc, request=None):\n if not user.email:\n warning_message = \"{} has no email address defined. Could not send email.\".format(user.username)\n # If this method is triggered by a cronjob changing course states, the request is None.\n # In this case warnings should be sent to the admins via email (configured in the settings for logger.error).\n # If a request exists, the page is displayed in the browser and the message can be shown on the page (messages.warning).\n if request is not None:\n logger.warning(warning_message)\n messages.warning(request, _(warning_message))\n else:\n logger.error(warning_message)\n return\n\n if use_cc:\n cc_users = set(user.delegates.all() | user.cc_users.all())\n cc_addresses = [p.email for p in cc_users if p.email]\n else:\n cc_addresses = []\n\n send_separate_login_url = False\n body_params['login_url'] = \"\"\n if user.needs_login_key:\n user.ensure_valid_login_key()\n if not cc_addresses:\n body_params['login_url'] = user.login_url\n else:\n send_separate_login_url = True\n\n subject = cls.render_string(template.subject, subject_params)\n body = cls.render_string(template.body, body_params)\n\n mail = EmailMessage(\n subject=subject,\n body=body,\n to=[user.email],\n cc=cc_addresses,\n bcc=[a[1] for a in settings.MANAGERS],\n headers={'Reply-To': settings.REPLY_TO_EMAIL})\n\n try:\n mail.send(False)\n logger.info(('Sent email \"{}\" to {}.').format(subject, user.username))\n if send_separate_login_url:\n cls.send_login_url_to_user(user)\n except Exception:\n logger.exception('An exception occurred when sending the following email to user \"{}\":\\n{}\\n'.format(user.username, mail.message()))\n\n @classmethod\n def send_reminder_to_user(cls, user, first_due_in_days, due_courses):\n template = cls.objects.get(name=cls.STUDENT_REMINDER)\n subject_params = {'user': user, 'first_due_in_days': first_due_in_days}\n body_params = {'user': user, 'first_due_in_days': first_due_in_days, 'due_courses': due_courses}\n\n cls.send_to_user(user, template, subject_params, body_params, use_cc=False)\n\n @classmethod\n def send_login_url_to_user(cls, user):\n template = cls.objects.get(name=cls.LOGIN_KEY_CREATED)\n subject_params = {}\n body_params = {'user': user, 'login_url': user.login_url}\n\n cls.send_to_user(user, template, subject_params, body_params, use_cc=False)\n logger.info(('Sent login url to {}.').format(user.username))\n", "path": "evap/evaluation/models.py"}], "after_files": [{"content": "from datetime import datetime, date, timedelta\nimport logging\nimport random\nimport uuid\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.models import AbstractBaseUser, BaseUserManager, Group, PermissionsMixin\nfrom django.core.cache import caches\nfrom django.core.exceptions import ValidationError\nfrom django.core.mail import EmailMessage\nfrom django.db import models, transaction\nfrom django.db.models import Count, Q, Manager\nfrom django.dispatch import Signal, receiver\nfrom django.template import Context, Template\nfrom django.template.base import TemplateSyntaxError\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\nfrom django_fsm import FSMField, transition\nfrom django_fsm.signals import post_transition\n# see evaluation.meta for the use of Translate in this file\nfrom evap.evaluation.meta import LocalizeModelBase, Translate\nfrom evap.evaluation.tools import date_to_datetime, get_due_courses_for_user\n\nlogger = logging.getLogger(__name__)\n\n\nclass NotArchiveable(Exception):\n \"\"\"An attempt has been made to archive something that is not archiveable.\"\"\"\n pass\n\n\nclass Semester(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Represents a semester, e.g. the winter term of 2011/2012.\"\"\"\n\n name_de = models.CharField(max_length=1024, unique=True, verbose_name=_(\"name (german)\"))\n name_en = models.CharField(max_length=1024, unique=True, verbose_name=_(\"name (english)\"))\n name = Translate\n\n short_name_de = models.CharField(max_length=20, unique=True, verbose_name=_(\"short name (german)\"))\n short_name_en = models.CharField(max_length=20, unique=True, verbose_name=_(\"short name (english)\"))\n short_name = Translate\n\n participations_are_archived = models.BooleanField(default=False, verbose_name=_(\"participations are archived\"))\n grade_documents_are_deleted = models.BooleanField(default=False, verbose_name=_(\"grade documents are deleted\"))\n results_are_archived = models.BooleanField(default=False, verbose_name=_(\"results are archived\"))\n\n created_at = models.DateField(verbose_name=_(\"created at\"), auto_now_add=True)\n\n class Meta:\n ordering = ('-created_at', 'name_de')\n verbose_name = _(\"semester\")\n verbose_name_plural = _(\"semesters\")\n\n def __str__(self):\n return self.name\n\n @property\n def can_staff_delete(self):\n return all(course.can_staff_delete for course in self.course_set.all())\n\n @property\n def participations_can_be_archived(self):\n return not self.participations_are_archived and all(course.participations_can_be_archived for course in self.course_set.all())\n\n @property\n def grade_documents_can_be_deleted(self):\n return not self.grade_documents_are_deleted\n\n @property\n def results_can_be_archived(self):\n return not self.results_are_archived\n\n @transaction.atomic\n def archive_participations(self):\n if not self.participations_can_be_archived:\n raise NotArchiveable()\n for course in self.course_set.all():\n course._archive_participations()\n self.participations_are_archived = True\n self.save()\n\n @transaction.atomic\n def delete_grade_documents(self):\n from evap.grades.models import GradeDocument\n\n if not self.grade_documents_can_be_deleted:\n raise NotArchiveable()\n GradeDocument.objects.filter(course__semester=self).delete()\n self.grade_documents_are_deleted = True\n self.save()\n\n def archive_results(self):\n if not self.results_can_be_archived:\n raise NotArchiveable()\n self.results_are_archived = True\n self.save()\n\n @classmethod\n def get_all_with_unarchived_results(cls):\n return cls.objects.filter(results_are_archived=False).distinct()\n\n @classmethod\n def get_all_with_published_unarchived_results(cls):\n return cls.objects.filter(course__state=\"published\", results_are_archived=False).distinct()\n\n @classmethod\n def active_semester(cls):\n return cls.objects.order_by(\"created_at\").last()\n\n @property\n def is_active_semester(self):\n return self == Semester.active_semester()\n\n\nclass QuestionnaireManager(Manager):\n def course_questionnaires(self):\n return super().get_queryset().exclude(type=Questionnaire.CONTRIBUTOR)\n\n def contributor_questionnaires(self):\n return super().get_queryset().filter(type=Questionnaire.CONTRIBUTOR)\n\n\nclass Questionnaire(models.Model, metaclass=LocalizeModelBase):\n \"\"\"A named collection of questions.\"\"\"\n\n TOP = 10\n CONTRIBUTOR = 20\n BOTTOM = 30\n TYPE_CHOICES = (\n (TOP, _('Top questionnaire')),\n (CONTRIBUTOR, _('Contributor questionnaire')),\n (BOTTOM, _('Bottom questionnaire')),\n )\n type = models.IntegerField(choices=TYPE_CHOICES, verbose_name=_('type'), default=TOP)\n\n name_de = models.CharField(max_length=1024, unique=True, verbose_name=_(\"name (german)\"))\n name_en = models.CharField(max_length=1024, unique=True, verbose_name=_(\"name (english)\"))\n name = Translate\n\n description_de = models.TextField(verbose_name=_(\"description (german)\"), blank=True, null=True)\n description_en = models.TextField(verbose_name=_(\"description (english)\"), blank=True, null=True)\n description = Translate\n\n public_name_de = models.CharField(max_length=1024, verbose_name=_(\"display name (german)\"))\n public_name_en = models.CharField(max_length=1024, verbose_name=_(\"display name (english)\"))\n public_name = Translate\n\n teaser_de = models.TextField(verbose_name=_(\"teaser (german)\"), blank=True, null=True)\n teaser_en = models.TextField(verbose_name=_(\"teaser (english)\"), blank=True, null=True)\n teaser = Translate\n\n order = models.IntegerField(verbose_name=_(\"ordering index\"), default=0)\n\n staff_only = models.BooleanField(verbose_name=_(\"display for staff only\"), default=False)\n obsolete = models.BooleanField(verbose_name=_(\"obsolete\"), default=False)\n\n objects = QuestionnaireManager()\n\n class Meta:\n ordering = ('type', 'order', 'name_de')\n verbose_name = _(\"questionnaire\")\n verbose_name_plural = _(\"questionnaires\")\n\n def __str__(self):\n return self.name\n\n def __lt__(self, other):\n return (self.type, self.order, self.name_de) < (other.type, other.order, self.name_de)\n\n def __gt__(self, other):\n return (self.type, self.order, self.name_de) > (other.type, other.order, self.name_de)\n\n @property\n def is_above_contributors(self):\n return self.type == self.TOP\n\n @property\n def is_below_contributors(self):\n return self.type == self.BOTTOM\n\n @property\n def can_staff_edit(self):\n return not self.contributions.exclude(course__state='new').exists()\n\n @property\n def can_staff_delete(self):\n return not self.contributions.exists()\n\n @property\n def text_questions(self):\n return [question for question in self.question_set.all() if question.is_text_question]\n\n @property\n def rating_questions(self):\n return [question for question in self.question_set.all() if question.is_rating_question]\n\n SINGLE_RESULT_QUESTIONNAIRE_NAME = \"Single result\"\n\n @classmethod\n def single_result_questionnaire(cls):\n return cls.objects.get(name_en=cls.SINGLE_RESULT_QUESTIONNAIRE_NAME)\n\n\nclass Degree(models.Model, metaclass=LocalizeModelBase):\n name_de = models.CharField(max_length=1024, verbose_name=_(\"name (german)\"), unique=True)\n name_en = models.CharField(max_length=1024, verbose_name=_(\"name (english)\"), unique=True)\n name = Translate\n\n order = models.IntegerField(verbose_name=_(\"degree order\"), default=-1)\n\n class Meta:\n ordering = ['order', ]\n\n def __str__(self):\n return self.name\n\n def can_staff_delete(self):\n if self.pk is None:\n return True\n return not self.courses.all().exists()\n\n\nclass CourseType(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Model for the type of a course, e.g. a lecture\"\"\"\n\n name_de = models.CharField(max_length=1024, verbose_name=_(\"name (german)\"), unique=True)\n name_en = models.CharField(max_length=1024, verbose_name=_(\"name (english)\"), unique=True)\n name = Translate\n\n class Meta:\n ordering = ['name_de', ]\n\n def __str__(self):\n return self.name\n\n def __lt__(self, other):\n return self.name_de < other.name_de\n\n def can_staff_delete(self):\n if not self.pk:\n return True\n return not self.courses.all().exists()\n\n\nclass Course(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Models a single course, e.g. the Math 101 course of 2002.\"\"\"\n\n state = FSMField(default='new', protected=True)\n\n semester = models.ForeignKey(Semester, models.PROTECT, verbose_name=_(\"semester\"))\n\n name_de = models.CharField(max_length=1024, verbose_name=_(\"name (german)\"))\n name_en = models.CharField(max_length=1024, verbose_name=_(\"name (english)\"))\n name = Translate\n\n # type of course: lecture, seminar, project\n type = models.ForeignKey(CourseType, models.PROTECT, verbose_name=_(\"course type\"), related_name=\"courses\")\n\n is_single_result = models.BooleanField(verbose_name=_(\"is single result\"), default=False)\n\n # e.g. Bachelor, Master\n degrees = models.ManyToManyField(Degree, verbose_name=_(\"degrees\"), related_name=\"courses\")\n\n # default is True as that's the more restrictive option\n is_graded = models.BooleanField(verbose_name=_(\"is graded\"), default=True)\n\n # defines whether results can only be seen by contributors and participants\n is_private = models.BooleanField(verbose_name=_(\"is private\"), default=False)\n\n # grade publishers can set this to True, then the course will be handled as if final grades have already been uploaded\n gets_no_grade_documents = models.BooleanField(verbose_name=_(\"gets no grade documents\"), default=False)\n\n # whether participants must vote to qualify for reward points\n is_rewarded = models.BooleanField(verbose_name=_(\"is rewarded\"), default=True)\n\n # whether the evaluation does take place during the semester, stating that evaluation results will be published while the course is still running\n is_midterm_evaluation = models.BooleanField(verbose_name=_(\"is midterm evaluation\"), default=False)\n\n # True, if the course has at least two voters or if the first voter explicitly confirmed that given text answers\n # can be published even if no other person evaluates the course\n can_publish_text_results = models.BooleanField(verbose_name=_(\"can publish text results\"), default=False)\n\n # students that are allowed to vote\n participants = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name=_(\"participants\"), blank=True, related_name='courses_participating_in')\n _participant_count = models.IntegerField(verbose_name=_(\"participant count\"), blank=True, null=True, default=None)\n\n # students that already voted\n voters = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name=_(\"voters\"), blank=True, related_name='courses_voted_for')\n _voter_count = models.IntegerField(verbose_name=_(\"voter count\"), blank=True, null=True, default=None)\n\n # when the evaluation takes place\n vote_start_datetime = models.DateTimeField(verbose_name=_(\"start of evaluation\"))\n vote_end_date = models.DateField(verbose_name=_(\"last day of evaluation\"))\n\n # who last modified this course\n last_modified_time = models.DateTimeField(auto_now=True)\n last_modified_user = models.ForeignKey(settings.AUTH_USER_MODEL, models.SET_NULL, null=True, blank=True, related_name=\"course_last_modified_user+\")\n\n course_evaluated = Signal(providing_args=['request', 'semester'])\n\n class Meta:\n ordering = ('name_de',)\n unique_together = (\n ('semester', 'name_de'),\n ('semester', 'name_en'),\n )\n verbose_name = _(\"course\")\n verbose_name_plural = _(\"courses\")\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kw):\n first_save = self.pk is None\n super().save(*args, **kw)\n\n # make sure there is a general contribution\n if not self.general_contribution:\n self.contributions.create(contributor=None)\n del self.general_contribution # invalidate cached property\n\n if self.is_single_result:\n # adding m2ms such as contributions/questionnaires requires saving the course first,\n # therefore we must allow the single result questionnaire to not exist on first save\n assert first_save or Questionnaire.objects.get(contributions__course=self).name_en == Questionnaire.SINGLE_RESULT_QUESTIONNAIRE_NAME\n assert self.vote_end_date == self.vote_start_datetime.date()\n else:\n assert self.vote_end_date >= self.vote_start_datetime.date()\n\n @property\n def is_fully_reviewed(self):\n if not self.can_publish_text_results:\n return True\n return not self.unreviewed_textanswer_set.exists()\n\n @property\n def vote_end_datetime(self):\n # The evaluation ends at EVALUATION_END_OFFSET_HOURS:00 of the day AFTER self.vote_end_date.\n return date_to_datetime(self.vote_end_date) + timedelta(hours=24 + settings.EVALUATION_END_OFFSET_HOURS)\n\n @property\n def is_in_evaluation_period(self):\n return self.vote_start_datetime <= datetime.now() <= self.vote_end_datetime\n\n @property\n def general_contribution_has_questionnaires(self):\n return self.general_contribution and (self.is_single_result or self.general_contribution.questionnaires.count() > 0)\n\n @property\n def all_contributions_have_questionnaires(self):\n return self.general_contribution and (self.is_single_result or all(self.contributions.annotate(Count('questionnaires')).values_list(\"questionnaires__count\", flat=True)))\n\n def can_user_vote(self, user):\n \"\"\"Returns whether the user is allowed to vote on this course.\"\"\"\n return (self.state == \"in_evaluation\"\n and self.is_in_evaluation_period\n and user in self.participants.all()\n and user not in self.voters.all())\n\n def can_user_see_course(self, user):\n if user.is_staff:\n return True\n if user.is_reviewer and not self.semester.results_are_archived:\n return True\n if self.is_private or user.is_external:\n return self.is_user_contributor_or_delegate(user) or self.participants.filter(pk=user.pk).exists()\n return True\n\n def can_user_see_results_page(self, user):\n if self.is_single_result:\n return False\n if user.is_staff:\n return True\n if user.is_reviewer and not self.semester.results_are_archived:\n return True\n if self.state != 'published':\n return False\n if not self.can_publish_rating_results or self.semester.results_are_archived or not self.can_user_see_course(user):\n return self.is_user_contributor_or_delegate(user)\n return True\n\n @property\n def can_staff_edit(self):\n return not self.participations_are_archived and self.state in ['new', 'prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']\n\n @property\n def can_staff_delete(self):\n return self.can_staff_edit and (self.num_voters == 0 or self.is_single_result)\n\n @property\n def can_publish_average_grade(self):\n if self.is_single_result:\n return True\n\n # the average grade is only published if at least the configured percentage of participants voted during the evaluation for significance reasons\n return self.can_publish_rating_results and self.num_voters / self.num_participants >= settings.VOTER_PERCENTAGE_NEEDED_FOR_PUBLISHING_AVERAGE_GRADE\n\n @property\n def can_publish_rating_results(self):\n if self.is_single_result:\n return True\n\n # the rating results are only published if at least the configured number of participants voted during the evaluation for anonymity reasons\n return self.num_voters >= settings.VOTER_COUNT_NEEDED_FOR_PUBLISHING_RATING_RESULTS\n\n @transition(field=state, source=['new', 'editor_approved'], target='prepared')\n def ready_for_editors(self):\n pass\n\n @transition(field=state, source='prepared', target='editor_approved')\n def editor_approve(self):\n pass\n\n @transition(field=state, source=['new', 'prepared', 'editor_approved'], target='approved', conditions=[lambda self: self.general_contribution_has_questionnaires])\n def staff_approve(self):\n pass\n\n @transition(field=state, source=['prepared', 'editor_approved', 'approved'], target='new')\n def revert_to_new(self):\n pass\n\n @transition(field=state, source='approved', target='in_evaluation', conditions=[lambda self: self.is_in_evaluation_period])\n def evaluation_begin(self):\n pass\n\n @transition(field=state, source=['evaluated', 'reviewed'], target='in_evaluation', conditions=[lambda self: self.is_in_evaluation_period])\n def reopen_evaluation(self):\n pass\n\n @transition(field=state, source='in_evaluation', target='evaluated')\n def evaluation_end(self):\n pass\n\n @transition(field=state, source='evaluated', target='reviewed', conditions=[lambda self: self.is_fully_reviewed])\n def review_finished(self):\n pass\n\n @transition(field=state, source=['new', 'reviewed'], target='reviewed', conditions=[lambda self: self.is_single_result])\n def single_result_created(self):\n pass\n\n @transition(field=state, source='reviewed', target='evaluated', conditions=[lambda self: not self.is_fully_reviewed])\n def reopen_review(self):\n pass\n\n @transition(field=state, source='reviewed', target='published')\n def publish(self):\n assert self.is_single_result or self._voter_count is None and self._participant_count is None\n self._voter_count = self.num_voters\n self._participant_count = self.num_participants\n\n if not self.can_publish_text_results:\n self.textanswer_set.delete()\n else:\n self.textanswer_set.filter(state=TextAnswer.HIDDEN).delete()\n self.textanswer_set.update(original_answer=None)\n\n @transition(field=state, source='published', target='reviewed')\n def unpublish(self):\n assert self.is_single_result or self._voter_count == self.voters.count() and self._participant_count == self.participants.count()\n self._voter_count = None\n self._participant_count = None\n\n @cached_property\n def general_contribution(self):\n try:\n return self.contributions.get(contributor=None)\n except Contribution.DoesNotExist:\n return None\n\n @cached_property\n def num_participants(self):\n if self._participant_count is not None:\n return self._participant_count\n return self.participants.count()\n\n @cached_property\n def num_voters(self):\n if self._voter_count is not None:\n return self._voter_count\n return self.voters.count()\n\n @property\n def due_participants(self):\n return self.participants.exclude(pk__in=self.voters.all())\n\n @cached_property\n def responsible_contributors(self):\n return UserProfile.objects.filter(contributions__course=self, contributions__responsible=True).order_by('contributions__order')\n\n @cached_property\n def num_contributors(self):\n return UserProfile.objects.filter(contributions__course=self).count()\n\n @property\n def days_left_for_evaluation(self):\n return (self.vote_end_date - date.today()).days\n\n @property\n def time_left_for_evaluation(self):\n return self.vote_end_datetime - datetime.now()\n\n def evaluation_ends_soon(self):\n return 0 < self.time_left_for_evaluation.total_seconds() < settings.EVALUATION_END_WARNING_PERIOD * 3600\n\n @property\n def days_until_evaluation(self):\n days_left = (self.vote_start_datetime.date() - date.today()).days\n if self.vote_start_datetime < datetime.now():\n days_left -= 1\n return days_left\n\n def is_user_editor_or_delegate(self, user):\n return self.contributions.filter(Q(contributor=user) | Q(contributor__in=user.represented_users.all()), can_edit=True).exists()\n\n def is_user_contributor_or_delegate(self, user):\n # early out that saves database hits since is_contributor_or_delegate is a cached_property\n if not user.is_contributor_or_delegate:\n return False\n return self.contributions.filter(Q(contributor=user) | Q(contributor__in=user.represented_users.all())).exists()\n\n @property\n def textanswer_set(self):\n return TextAnswer.objects.filter(contribution__course=self)\n\n @cached_property\n def num_textanswers(self):\n if not self.can_publish_text_results:\n return 0\n return self.textanswer_set.count()\n\n @property\n def unreviewed_textanswer_set(self):\n return self.textanswer_set.filter(state=TextAnswer.NOT_REVIEWED)\n\n @property\n def reviewed_textanswer_set(self):\n return self.textanswer_set.exclude(state=TextAnswer.NOT_REVIEWED)\n\n @cached_property\n def num_reviewed_textanswers(self):\n return self.reviewed_textanswer_set.count()\n\n @property\n def ratinganswer_counters(self):\n return RatingAnswerCounter.objects.filter(contribution__course=self)\n\n def _archive_participations(self):\n \"\"\"Should be called only via Semester.archive_participations\"\"\"\n if not self.participations_can_be_archived:\n raise NotArchiveable()\n if self._participant_count is not None:\n assert self._voter_count is not None\n assert self.is_single_result or self._voter_count == self.voters.count() and self._participant_count == self.participants.count()\n return\n assert self._participant_count is None and self._voter_count is None\n self._participant_count = self.num_participants\n self._voter_count = self.num_voters\n self.save()\n\n @property\n def participations_are_archived(self):\n semester_participations_are_archived = self.semester.participations_are_archived\n if semester_participations_are_archived:\n assert self._participant_count is not None and self._voter_count is not None\n return semester_participations_are_archived\n\n @property\n def participations_can_be_archived(self):\n return not self.semester.participations_are_archived and self.state in [\"new\", \"published\"]\n\n @property\n def final_grade_documents(self):\n from evap.grades.models import GradeDocument\n return self.grade_documents.filter(type=GradeDocument.FINAL_GRADES)\n\n @property\n def midterm_grade_documents(self):\n from evap.grades.models import GradeDocument\n return self.grade_documents.filter(type=GradeDocument.MIDTERM_GRADES)\n\n @classmethod\n def update_courses(cls):\n logger.info(\"update_courses called. Processing courses now.\")\n from evap.evaluation.tools import send_publish_notifications\n\n courses_new_in_evaluation = []\n evaluation_results_courses = []\n\n for course in cls.objects.all():\n try:\n if course.state == \"approved\" and course.vote_start_datetime <= datetime.now():\n course.evaluation_begin()\n course.last_modified_user = UserProfile.objects.cronjob_user()\n course.save()\n courses_new_in_evaluation.append(course)\n elif course.state == \"in_evaluation\" and datetime.now() >= course.vote_end_datetime:\n course.evaluation_end()\n if course.is_fully_reviewed:\n course.review_finished()\n if not course.is_graded or course.final_grade_documents.exists() or course.gets_no_grade_documents:\n course.publish()\n evaluation_results_courses.append(course)\n course.last_modified_user = UserProfile.objects.cronjob_user()\n course.save()\n except Exception:\n logger.exception('An error occured when updating the state of course \"{}\" (id {}).'.format(course, course.id))\n\n template = EmailTemplate.objects.get(name=EmailTemplate.EVALUATION_STARTED)\n EmailTemplate.send_to_users_in_courses(template, courses_new_in_evaluation, [EmailTemplate.ALL_PARTICIPANTS], use_cc=False, request=None)\n send_publish_notifications(evaluation_results_courses)\n logger.info(\"update_courses finished.\")\n\n\n@receiver(post_transition, sender=Course)\ndef warmup_cache_on_publish(instance, target, **_kwargs):\n if target == 'published':\n from evap.results.tools import collect_results\n from evap.results.views import warm_up_template_cache\n collect_results(instance)\n warm_up_template_cache([instance])\n\n\n@receiver(post_transition, sender=Course)\ndef delete_cache_on_unpublish(instance, source, **_kwargs):\n if source == 'published':\n from evap.results.tools import get_collect_results_cache_key\n from evap.results.views import delete_template_cache\n caches['results'].delete(get_collect_results_cache_key(instance))\n delete_template_cache(instance)\n\n\n@receiver(post_transition, sender=Course)\ndef log_state_transition(instance, name, source, target, **_kwargs):\n logger.info('Course \"{}\" (id {}) moved from state \"{}\" to state \"{}\", caused by transition \"{}\".'.format(instance, instance.pk, source, target, name))\n\n\nclass Contribution(models.Model):\n \"\"\"A contributor who is assigned to a course and his questionnaires.\"\"\"\n\n OWN_COMMENTS = 'OWN'\n COURSE_COMMENTS = 'COURSE'\n ALL_COMMENTS = 'ALL'\n COMMENT_VISIBILITY_CHOICES = (\n (OWN_COMMENTS, _('Own')),\n (COURSE_COMMENTS, _('Course')),\n (ALL_COMMENTS, _('All')),\n )\n IS_CONTRIBUTOR = 'CONTRIBUTOR'\n IS_EDITOR = 'EDITOR'\n IS_RESPONSIBLE = 'RESPONSIBLE'\n RESPONSIBILITY_CHOICES = (\n (IS_CONTRIBUTOR, _('Contributor')),\n (IS_EDITOR, _('Editor')),\n (IS_RESPONSIBLE, _('Responsible')),\n )\n\n course = models.ForeignKey(Course, models.CASCADE, verbose_name=_(\"course\"), related_name='contributions')\n contributor = models.ForeignKey(settings.AUTH_USER_MODEL, models.PROTECT, verbose_name=_(\"contributor\"), blank=True, null=True, related_name='contributions')\n questionnaires = models.ManyToManyField(Questionnaire, verbose_name=_(\"questionnaires\"), blank=True, related_name=\"contributions\")\n responsible = models.BooleanField(verbose_name=_(\"responsible\"), default=False)\n can_edit = models.BooleanField(verbose_name=_(\"can edit\"), default=False)\n comment_visibility = models.CharField(max_length=10, choices=COMMENT_VISIBILITY_CHOICES, verbose_name=_('comment visibility'), default=OWN_COMMENTS)\n label = models.CharField(max_length=255, blank=True, null=True, verbose_name=_(\"label\"))\n\n order = models.IntegerField(verbose_name=_(\"contribution order\"), default=-1)\n\n class Meta:\n unique_together = (\n ('course', 'contributor'),\n )\n ordering = ['order', ]\n\n def save(self, *args, **kw):\n super().save(*args, **kw)\n if self.responsible and not self.course.is_single_result:\n assert self.can_edit and self.comment_visibility == self.ALL_COMMENTS\n\n @property\n def is_general(self):\n return self.contributor_id is None\n\n\nclass Question(models.Model, metaclass=LocalizeModelBase):\n \"\"\"A question including a type.\"\"\"\n\n QUESTION_TYPES = (\n (\"T\", _(\"Text Question\")),\n (\"L\", _(\"Likert Question\")),\n (\"G\", _(\"Grade Question\")),\n (\"P\", _(\"Positive Yes-No Question\")),\n (\"N\", _(\"Negative Yes-No Question\")),\n (\"H\", _(\"Heading\")),\n )\n\n order = models.IntegerField(verbose_name=_(\"question order\"), default=-1)\n questionnaire = models.ForeignKey(Questionnaire, models.CASCADE)\n text_de = models.CharField(max_length=1024, verbose_name=_(\"question text (german)\"))\n text_en = models.CharField(max_length=1024, verbose_name=_(\"question text (english)\"))\n type = models.CharField(max_length=1, choices=QUESTION_TYPES, verbose_name=_(\"question type\"))\n\n text = Translate\n\n class Meta:\n ordering = ['order', ]\n verbose_name = _(\"question\")\n verbose_name_plural = _(\"questions\")\n\n @property\n def answer_class(self):\n if self.is_text_question:\n return TextAnswer\n elif self.is_rating_question:\n return RatingAnswerCounter\n else:\n raise Exception(\"Unknown answer type: %r\" % self.type)\n\n @property\n def is_likert_question(self):\n return self.type == \"L\"\n\n @property\n def is_text_question(self):\n return self.type == \"T\"\n\n @property\n def is_grade_question(self):\n return self.type == \"G\"\n\n @property\n def is_positive_yes_no_question(self):\n return self.type == \"P\"\n\n @property\n def is_negative_yes_no_question(self):\n return self.type == \"N\"\n\n @property\n def is_yes_no_question(self):\n return self.is_positive_yes_no_question or self.is_negative_yes_no_question\n\n @property\n def is_rating_question(self):\n return self.is_grade_question or self.is_likert_question or self.is_yes_no_question\n\n @property\n def is_non_grade_rating_question(self):\n return self.is_rating_question and not self.is_grade_question\n\n @property\n def is_heading_question(self):\n return self.type == \"H\"\n\n\nclass Answer(models.Model):\n \"\"\"An abstract answer to a question. For anonymity purposes, the answering\n user ist not stored in the object. Concrete subclasses are `RatingAnswerCounter`,\n and `TextAnswer`.\"\"\"\n\n question = models.ForeignKey(Question, models.PROTECT)\n contribution = models.ForeignKey(Contribution, models.PROTECT, related_name=\"%(class)s_set\")\n\n class Meta:\n abstract = True\n verbose_name = _(\"answer\")\n verbose_name_plural = _(\"answers\")\n\n\nclass RatingAnswerCounter(Answer):\n \"\"\"A rating answer counter to a question. A lower answer is better or indicates more agreement.\"\"\"\n\n answer = models.IntegerField(verbose_name=_(\"answer\"))\n count = models.IntegerField(verbose_name=_(\"count\"), default=0)\n\n class Meta:\n unique_together = (\n ('question', 'contribution', 'answer'),\n )\n verbose_name = _(\"rating answer\")\n verbose_name_plural = _(\"rating answers\")\n\n\nclass TextAnswer(Answer):\n \"\"\"A free-form text answer to a question (usually a comment about a course\n or a contributor).\"\"\"\n\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n\n answer = models.TextField(verbose_name=_(\"answer\"))\n original_answer = models.TextField(verbose_name=_(\"original answer\"), blank=True, null=True)\n\n HIDDEN = 'HI'\n PUBLISHED = 'PU'\n PRIVATE = 'PR'\n NOT_REVIEWED = 'NR'\n TEXT_ANSWER_STATES = (\n (HIDDEN, _('hidden')),\n (PUBLISHED, _('published')),\n (PRIVATE, _('private')),\n (NOT_REVIEWED, _('not reviewed')),\n )\n state = models.CharField(max_length=2, choices=TEXT_ANSWER_STATES, verbose_name=_('state of answer'), default=NOT_REVIEWED)\n\n class Meta:\n # Prevent ordering by date for privacy reasons\n ordering = ['id', ]\n verbose_name = _(\"text answer\")\n verbose_name_plural = _(\"text answers\")\n\n @property\n def is_hidden(self):\n return self.state == self.HIDDEN\n\n @property\n def is_private(self):\n return self.state == self.PRIVATE\n\n @property\n def is_published(self):\n return self.state == self.PUBLISHED\n\n def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n assert self.answer != self.original_answer\n\n def publish(self):\n self.state = self.PUBLISHED\n\n def hide(self):\n self.state = self.HIDDEN\n\n def make_private(self):\n self.state = self.PRIVATE\n\n def unreview(self):\n self.state = self.NOT_REVIEWED\n\n\nclass FaqSection(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Section in the frequently asked questions\"\"\"\n\n order = models.IntegerField(verbose_name=_(\"section order\"), default=-1)\n\n title_de = models.CharField(max_length=255, verbose_name=_(\"section title (german)\"))\n title_en = models.CharField(max_length=255, verbose_name=_(\"section title (english)\"))\n title = Translate\n\n class Meta:\n ordering = ['order', ]\n verbose_name = _(\"section\")\n verbose_name_plural = _(\"sections\")\n\n\nclass FaqQuestion(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Question and answer in the frequently asked questions\"\"\"\n\n section = models.ForeignKey(FaqSection, models.CASCADE, related_name=\"questions\")\n\n order = models.IntegerField(verbose_name=_(\"question order\"), default=-1)\n\n question_de = models.CharField(max_length=1024, verbose_name=_(\"question (german)\"))\n question_en = models.CharField(max_length=1024, verbose_name=_(\"question (english)\"))\n question = Translate\n\n answer_de = models.TextField(verbose_name=_(\"answer (german)\"))\n answer_en = models.TextField(verbose_name=_(\"answer (german)\"))\n answer = Translate\n\n class Meta:\n ordering = ['order', ]\n verbose_name = _(\"question\")\n verbose_name_plural = _(\"questions\")\n\n\nclass UserProfileManager(BaseUserManager):\n def get_queryset(self):\n return super().get_queryset().exclude(username=UserProfile.CRONJOB_USER_USERNAME)\n\n def cronjob_user(self):\n return super().get_queryset().get(username=UserProfile.CRONJOB_USER_USERNAME)\n\n def exclude_inactive_users(self):\n return self.get_queryset().exclude(is_active=False)\n\n def create_user(self, username, password=None, email=None, first_name=None, last_name=None):\n if not username:\n raise ValueError(_('Users must have a username'))\n\n user = self.model(\n username=username,\n email=self.normalize_email(email),\n first_name=first_name,\n last_name=last_name\n )\n user.set_password(password)\n user.save()\n return user\n\n def create_superuser(self, username, password, email=None, first_name=None, last_name=None):\n user = self.create_user(\n username=username,\n password=password,\n email=email,\n first_name=first_name,\n last_name=last_name\n )\n user.is_superuser = True\n user.save()\n user.groups.add(Group.objects.get(name=\"Staff\"))\n return user\n\n\nclass UserProfile(AbstractBaseUser, PermissionsMixin):\n username = models.CharField(max_length=255, unique=True, verbose_name=_('username'))\n\n # null=True because users created through kerberos logins and certain external users don't have an address.\n email = models.EmailField(max_length=255, unique=True, blank=True, null=True, verbose_name=_('email address'))\n\n title = models.CharField(max_length=255, blank=True, null=True, verbose_name=_(\"Title\"))\n first_name = models.CharField(max_length=255, blank=True, null=True, verbose_name=_(\"first name\"))\n last_name = models.CharField(max_length=255, blank=True, null=True, verbose_name=_(\"last name\"))\n\n language = models.CharField(max_length=8, blank=True, null=True, verbose_name=_(\"language\"))\n\n # delegates of the user, which can also manage their courses\n delegates = models.ManyToManyField(\"UserProfile\", verbose_name=_(\"Delegates\"), related_name=\"represented_users\", blank=True)\n\n # users to which all emails should be sent in cc without giving them delegate rights\n cc_users = models.ManyToManyField(\"UserProfile\", verbose_name=_(\"CC Users\"), related_name=\"ccing_users\", blank=True)\n\n # key for url based login of this user\n MAX_LOGIN_KEY = 2**31 - 1\n\n login_key = models.IntegerField(verbose_name=_(\"Login Key\"), unique=True, blank=True, null=True)\n login_key_valid_until = models.DateField(verbose_name=_(\"Login Key Validity\"), blank=True, null=True)\n\n is_active = models.BooleanField(default=True, verbose_name=_(\"active\"))\n\n class Meta:\n ordering = ('last_name', 'first_name', 'username')\n verbose_name = _('user')\n verbose_name_plural = _('users')\n\n USERNAME_FIELD = 'username'\n REQUIRED_FIELDS = []\n\n objects = UserProfileManager()\n\n @property\n def full_name(self):\n if self.last_name:\n name = self.last_name\n if self.first_name:\n name = self.first_name + \" \" + name\n if self.title:\n name = self.title + \" \" + name\n return name\n else:\n return self.username\n\n @property\n def full_name_with_username(self):\n name = self.full_name\n if self.username not in name:\n name += \" (\" + self.username + \")\"\n return name\n\n def __str__(self):\n return self.full_name\n\n @cached_property\n def is_staff(self):\n return self.groups.filter(name='Staff').exists()\n\n @cached_property\n def is_reviewer(self):\n return self.is_staff or self.groups.filter(name='Reviewer').exists()\n\n @cached_property\n def is_grade_publisher(self):\n return self.groups.filter(name='Grade publisher').exists()\n\n CRONJOB_USER_USERNAME = \"cronjob\"\n\n @property\n def can_staff_mark_inactive(self):\n if self.is_reviewer or self.is_grade_publisher or self.is_superuser:\n return False\n if any(not course.participations_are_archived for course in self.courses_participating_in.all()):\n return False\n if any(not contribution.course.participations_are_archived for contribution in self.contributions.all()):\n return False\n return True\n\n @property\n def can_staff_delete(self):\n if self.is_contributor or self.is_reviewer or self.is_grade_publisher or self.is_superuser:\n return False\n if any(not course.participations_are_archived for course in self.courses_participating_in.all()):\n return False\n if any(not user.can_staff_delete for user in self.represented_users.all()):\n return False\n if any(not user.can_staff_delete for user in self.ccing_users.all()):\n return False\n return True\n\n @property\n def is_participant(self):\n return self.courses_participating_in.exists()\n\n @property\n def is_student(self):\n \"\"\"\n A UserProfile is not considered to be a student anymore if the\n newest contribution is newer than the newest participation.\n \"\"\"\n if not self.is_participant:\n return False\n\n if not self.is_contributor:\n return True\n\n last_semester_participated = Semester.objects.filter(course__participants=self).order_by(\"-created_at\").first()\n last_semester_contributed = Semester.objects.filter(course__contributions__contributor=self).order_by(\"-created_at\").first()\n\n return last_semester_participated.created_at >= last_semester_contributed.created_at\n\n @property\n def is_contributor(self):\n return self.contributions.exists()\n\n @property\n def is_editor(self):\n return self.contributions.filter(can_edit=True).exists()\n\n @property\n def is_responsible(self):\n # in the user list, self.user.contributions is prefetched, therefore use it directly and don't filter it\n return any(contribution.responsible for contribution in self.contributions.all())\n\n @property\n def is_delegate(self):\n return self.represented_users.exists()\n\n @property\n def is_editor_or_delegate(self):\n return self.is_editor or self.is_delegate\n\n @cached_property\n def is_contributor_or_delegate(self):\n return self.is_contributor or self.is_delegate\n\n @property\n def is_external(self):\n # do the import here to prevent a circular import\n from evap.evaluation.tools import is_external_email\n if not self.email:\n return True\n return is_external_email(self.email)\n\n @property\n def can_download_grades(self):\n return not self.is_external\n\n @classmethod\n def email_needs_login_key(cls, email):\n # do the import here to prevent a circular import\n from evap.evaluation.tools import is_external_email\n return is_external_email(email)\n\n @property\n def needs_login_key(self):\n return UserProfile.email_needs_login_key(self.email)\n\n def ensure_valid_login_key(self):\n if self.login_key and self.login_key_valid_until > date.today():\n return\n\n while True:\n key = random.randrange(0, UserProfile.MAX_LOGIN_KEY)\n if not UserProfile.objects.filter(login_key=key).exists():\n # key not yet used\n self.login_key = key\n break\n self.refresh_login_key()\n\n def refresh_login_key(self):\n self.login_key_valid_until = date.today() + timedelta(settings.LOGIN_KEY_VALIDITY)\n self.save()\n\n @property\n def login_url(self):\n if not self.needs_login_key:\n return \"\"\n return settings.PAGE_URL + \"?loginkey=\" + str(self.login_key)\n\n def get_sorted_contributions(self):\n return self.contributions.order_by('course__semester__created_at', 'course__name_de')\n\n def get_sorted_courses_participating_in(self):\n return self.courses_participating_in.order_by('semester__created_at', 'name_de')\n\n def get_sorted_courses_voted_for(self):\n return self.courses_voted_for.order_by('semester__created_at', 'name_de')\n\n\ndef validate_template(value):\n \"\"\"Field validator which ensures that the value can be compiled into a\n Django Template.\"\"\"\n try:\n Template(value)\n except TemplateSyntaxError as e:\n raise ValidationError(str(e))\n\n\nclass EmailTemplate(models.Model):\n name = models.CharField(max_length=1024, unique=True, verbose_name=_(\"Name\"))\n\n subject = models.CharField(max_length=1024, verbose_name=_(\"Subject\"), validators=[validate_template])\n body = models.TextField(verbose_name=_(\"Body\"), validators=[validate_template])\n\n EDITOR_REVIEW_NOTICE = \"Editor Review Notice\"\n EDITOR_REVIEW_REMINDER = \"Editor Review Reminder\"\n STUDENT_REMINDER = \"Student Reminder\"\n PUBLISHING_NOTICE = \"Publishing Notice\"\n LOGIN_KEY_CREATED = \"Login Key Created\"\n EVALUATION_STARTED = \"Evaluation Started\"\n\n ALL_PARTICIPANTS = 'all_participants'\n DUE_PARTICIPANTS = 'due_participants'\n RESPONSIBLE = 'responsible'\n EDITORS = 'editors'\n CONTRIBUTORS = 'contributors'\n\n EMAIL_RECIPIENTS = (\n (ALL_PARTICIPANTS, _('all participants')),\n (DUE_PARTICIPANTS, _('due participants')),\n (RESPONSIBLE, _('responsible person')),\n (EDITORS, _('all editors')),\n (CONTRIBUTORS, _('all contributors'))\n )\n\n @classmethod\n def recipient_list_for_course(cls, course, recipient_groups, filter_users_in_cc):\n recipients = []\n\n if cls.CONTRIBUTORS in recipient_groups:\n recipients += UserProfile.objects.filter(contributions__course=course)\n elif cls.EDITORS in recipient_groups:\n recipients += UserProfile.objects.filter(contributions__course=course, contributions__can_edit=True)\n elif cls.RESPONSIBLE in recipient_groups:\n recipients += course.responsible_contributors\n\n if cls.ALL_PARTICIPANTS in recipient_groups:\n recipients += course.participants.all()\n elif cls.DUE_PARTICIPANTS in recipient_groups:\n recipients += course.due_participants\n\n if filter_users_in_cc:\n # remove delegates and CC users of recipients from the recipient list\n # so they won't get the exact same email twice\n users_excluded = UserProfile.objects.filter(Q(represented_users__in=recipients) | Q(ccing_users__in=recipients))\n # but do so only if they have no delegates/cc_users, because otherwise\n # those won't get the email at all. consequently, some \"edge case users\"\n # will get the email twice, but there is no satisfying way around that.\n users_excluded = users_excluded.filter(delegates=None, cc_users=None)\n\n recipients = list(set(recipients) - set(users_excluded))\n\n return recipients\n\n @classmethod\n def render_string(cls, text, dictionary):\n return Template(text).render(Context(dictionary, autoescape=False))\n\n @classmethod\n def send_to_users_in_courses(cls, template, courses, recipient_groups, use_cc, request):\n user_course_map = {}\n for course in courses:\n recipients = cls.recipient_list_for_course(course, recipient_groups, filter_users_in_cc=use_cc)\n for user in recipients:\n user_course_map.setdefault(user, []).append(course)\n\n for user, courses in user_course_map.items():\n subject_params = {}\n body_params = {'user': user, 'courses': courses, 'due_courses': get_due_courses_for_user(user)}\n cls.send_to_user(user, template, subject_params, body_params, use_cc=use_cc, request=request)\n\n @classmethod\n def send_to_user(cls, user, template, subject_params, body_params, use_cc, request=None):\n if not user.email:\n warning_message = \"{} has no email address defined. Could not send email.\".format(user.username)\n # If this method is triggered by a cronjob changing course states, the request is None.\n # In this case warnings should be sent to the admins via email (configured in the settings for logger.error).\n # If a request exists, the page is displayed in the browser and the message can be shown on the page (messages.warning).\n if request is not None:\n logger.warning(warning_message)\n messages.warning(request, _(warning_message))\n else:\n logger.error(warning_message)\n return\n\n if use_cc:\n cc_users = set(user.delegates.all() | user.cc_users.all())\n cc_addresses = [p.email for p in cc_users if p.email]\n else:\n cc_addresses = []\n\n send_separate_login_url = False\n body_params['login_url'] = \"\"\n if user.needs_login_key:\n user.ensure_valid_login_key()\n if not cc_addresses:\n body_params['login_url'] = user.login_url\n else:\n send_separate_login_url = True\n\n subject = cls.render_string(template.subject, subject_params)\n body = cls.render_string(template.body, body_params)\n\n mail = EmailMessage(\n subject=subject,\n body=body,\n to=[user.email],\n cc=cc_addresses,\n bcc=[a[1] for a in settings.MANAGERS],\n headers={'Reply-To': settings.REPLY_TO_EMAIL})\n\n try:\n mail.send(False)\n logger.info(('Sent email \"{}\" to {}.').format(subject, user.username))\n if send_separate_login_url:\n cls.send_login_url_to_user(user)\n except Exception:\n logger.exception('An exception occurred when sending the following email to user \"{}\":\\n{}\\n'.format(user.username, mail.message()))\n\n @classmethod\n def send_reminder_to_user(cls, user, first_due_in_days, due_courses):\n template = cls.objects.get(name=cls.STUDENT_REMINDER)\n subject_params = {'user': user, 'first_due_in_days': first_due_in_days}\n body_params = {'user': user, 'first_due_in_days': first_due_in_days, 'due_courses': due_courses}\n\n cls.send_to_user(user, template, subject_params, body_params, use_cc=False)\n\n @classmethod\n def send_login_url_to_user(cls, user):\n template = cls.objects.get(name=cls.LOGIN_KEY_CREATED)\n subject_params = {}\n body_params = {'user': user, 'login_url': user.login_url}\n\n cls.send_to_user(user, template, subject_params, body_params, use_cc=False)\n logger.info(('Sent login url to {}.').format(user.username))\n", "path": "evap/evaluation/models.py"}]} |
gh_patches_debug_1538 | rasdani/github-patches | git_diff | analysiscenter__batchflow-593 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typo
https://github.com/analysiscenter/batchflow/blob/cd9062150811665b5a4e51a1080da5855f1c4dcb/batchflow/decorators.py#L337
`random_stat` -> `random_state`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `batchflow/decorators.py`
Content:
```
1 """ Pipeline decorators """
2 import os
3 import traceback
4 import threading
5 import concurrent.futures as cf
6 import asyncio
7 import functools
8 import logging
9 import inspect
10
11 try:
12 from numba import jit
13 except ImportError:
14 jit = None
15
16 from .named_expr import P
17 from .utils_random import make_seed_sequence, spawn_seed_sequence
18
19
20 def make_function(method, is_global=False):
21 """ Makes a function from a method
22
23 Parameters
24 ----------
25 method
26 a callable
27
28 is_global : bool
29 whether to create a function in a global namespace
30
31 Notes
32 -----
33 A method should not be decorated with any other decorator.
34 """
35 source = inspect.getsource(method).split('\n')
36 indent = len(source[0]) - len(source[0].lstrip())
37
38 # strip indent spaces
39 source = [s[indent:] for s in source if len(s) > indent]
40 # skip all decorator and comment lines before 'def' or 'async def'
41 start = 0
42 for i, s in enumerate(source):
43 if s[:3] in ['def', 'asy']:
44 start = i
45 break
46 source = '\n'.join(source[start:])
47
48 globs = globals() if is_global else method.__globals__.copy()
49 exec(source, globs) # pylint:disable=exec-used
50
51 # Method with the same name might exist in various classes or modules
52 # so a global function should have a unique name
53 function_name = method.__module__ + "_" + method.__qualname__
54 function_name = function_name.replace('.', '_')
55 globs[function_name] = globs[method.__name__]
56 return globs[function_name]
57
58
59 def _workers_count():
60 cpu_count = 0
61 try:
62 cpu_count = len(os.sched_getaffinity(0))
63 except AttributeError:
64 cpu_count = os.cpu_count()
65 return cpu_count * 4
66
67
68 def _make_action_wrapper_with_args(use_lock=None, no_eval=None): # pylint: disable=redefined-outer-name
69 return functools.partial(_make_action_wrapper, use_lock=use_lock, no_eval=no_eval)
70
71 def _make_action_wrapper(action_method, use_lock=None, no_eval=None):
72 @functools.wraps(action_method)
73 def _action_wrapper(action_self, *args, **kwargs):
74 """ Call the action method """
75 if use_lock is not None:
76 if action_self.pipeline is not None:
77 if isinstance(use_lock, bool):
78 _lock_name = '#_lock_' + action_method.__name__
79 else:
80 _lock_name = use_lock
81 if not action_self.pipeline.has_variable(_lock_name):
82 action_self.pipeline.init_variable(_lock_name, threading.Lock())
83 action_self.pipeline.get_variable(_lock_name).acquire()
84
85 _res = action_method(action_self, *args, **kwargs)
86
87 if use_lock is not None:
88 if action_self.pipeline is not None:
89 action_self.pipeline.get_variable(_lock_name).release()
90
91 return _res
92
93 if isinstance(no_eval, str):
94 no_eval = [no_eval]
95 _action_wrapper.action = dict(method=action_method, use_lock=use_lock, no_eval=no_eval)
96 return _action_wrapper
97
98 def action(*args, **kwargs):
99 """ Decorator for action methods in :class:`~.Batch` classes
100
101 Parameters
102 ----------
103 use_lock : bool or str
104 whether to lock an action when a pipeline is executed. It can be bool or a lock name.
105 A pipeline variable with a lock is created in the pipeline during the execution.
106
107 no_eval : str or a sequence of str
108 parameters to skip from named expression evaluation.
109 A parameter should be passed as a named argument only.
110
111 Examples
112 --------
113
114 .. code-block:: python
115
116 @action
117 def some_action(self, arg1, arg2):
118 ...
119
120 @action(no_eval='dst')
121 def calc_offset(self, src, dst=None):
122 ...
123
124 @action(use_lock=True)
125 def critical_section(self, some_arg, another_arg):
126 ...
127
128 @action(use_lock='lock_name')
129 def another_critical_section(self, some_arg, another_arg):
130 ...
131 """
132 if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
133 # action without arguments
134 return _make_action_wrapper(action_method=args[0])
135 # action with arguments
136 return _make_action_wrapper_with_args(*args, **kwargs)
137
138
139 def apply_parallel(*args, **kwargs):
140 """ Mark class method for transform in its metaclass.
141
142 Decorator writes `kwargs` to the method attribute `apply_kwargs`,
143 so they can be extracted and used in metaclass.
144
145 Parameters
146 ----------
147 args, kwargs
148 other parameters passed to `apply_parallel` method of the class
149 where this decorator is being used
150
151 Notes
152 -----
153 Redefine the attribute `apply_defaults <.Batch.apply_defaults>` in
154 the batch class. This is proposed solely for the purposes of brevity — in
155 order to avoid repeated heavily loaded class methods decoration, e.g.
156 `@apply_parallel(src='images', target='for')` which in most cases is
157 actually equivalent to simple `@apply_parallel` assuming
158 that the defaults are redefined for the class whose methods are being
159 transformed.
160
161 Note, that if no defaults redefined those from the nearest
162 parent class will be used in :class:`~.batch.MethodsTransformingMeta`.
163 """
164 def mark(method):
165 method.apply_kwargs = kwargs
166 return method
167
168 if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
169 return mark(args[0])
170 if len(args) != 0:
171 raise ValueError("This decorator accepts only named arguments")
172
173 return mark
174
175
176 def any_action_failed(results):
177 """ Return `True` if some parallelized invocations threw exceptions """
178 return any(isinstance(res, Exception) for res in results)
179
180 def call_method(method, use_self, args, kwargs, seed=None):
181 """ Call a method with given args """
182 if use_self and hasattr(args[0], 'random_seed') and seed is not None:
183 # set batch.random_seed to create RNG
184 args[0].random_seed = seed
185 return method(*args, **kwargs)
186
187 def inbatch_parallel(init, post=None, target='threads', _use_self=None, **dec_kwargs):
188 """ Decorator for parallel methods in :class:`~.Batch` classes
189
190 Parameters
191 ----------
192 init
193 a method name or a callable that returns an iterable for parallelization
194 (e.g. a list of indices or items to be passed to a parallelized method)
195 post
196 a method name or a callable to call after parallel invocations
197 (e.g. to assemble the batch)
198 target : 'threads', 'mpc', 'async', 'for'
199 a parallelization engine
200 _use_self : bool
201 whether to pass `self` (i.e. whether a decorated callable is a method or a function)
202
203 Notes
204 -----
205 `mpc` can be used with a method that is decorated only by `inbatch_parallel`.
206 All other decorators will be ignored.
207 """
208 if target not in ['nogil', 'threads', 'mpc', 'async', 'for', 't', 'm', 'a', 'f']:
209 raise ValueError("target should be one of 'threads', 'mpc', 'async', 'for'")
210
211 def inbatch_parallel_decorator(method):
212 """ Return a decorator which run a method in parallel """
213 use_self = '.' in method.__qualname__ if _use_self is None else _use_self
214 mpc_method = method
215 if use_self:
216 try:
217 mpc_method = make_function(method, is_global=True)
218 except Exception: # pylint:disable=broad-except
219 mpc_method = None
220
221 def _check_functions(self):
222 """ Check decorator's `init` and `post` parameters """
223 if init is None:
224 raise ValueError("init cannot be None")
225
226 if isinstance(init, str):
227 try:
228 init_fn = getattr(self, init)
229 except AttributeError as e:
230 raise ValueError("init should refer to a method or property of the class", type(self).__name__,
231 "returning the list of arguments") from e
232 elif callable(init):
233 init_fn = init
234 else:
235 init_fn = init
236
237 if isinstance(post, str):
238 try:
239 post_fn = getattr(self, post)
240 except AttributeError as e:
241 raise ValueError("post should refer to a method of the class", type(self).__name__) from e
242 elif callable(post):
243 post_fn = post
244 else:
245 post_fn = post
246
247 return init_fn, post_fn
248
249 def _call_init_fn(init_fn, args, kwargs):
250 if callable(init_fn):
251 return init_fn(*args, **kwargs)
252 return init_fn
253
254 def _call_post_fn(self, post_fn, futures, args, kwargs):
255 all_results = []
256 for future in futures:
257 try:
258 if isinstance(future, (cf.Future, asyncio.Task)):
259 result = future.result()
260 else:
261 result = future
262 except Exception as exce: # pylint: disable=broad-except
263 result = exce
264 finally:
265 all_results += [result]
266
267 if post_fn is None:
268 if any_action_failed(all_results):
269 all_errors = [error for error in all_results if isinstance(error, Exception)]
270 logging.error("Parallel action failed %s", all_errors)
271 traceback.print_tb(all_errors[0].__traceback__)
272 raise RuntimeError("Parallel action failed")
273 return self
274 return post_fn(all_results, *args, **kwargs)
275
276 def _prepare_args(self, args, kwargs):
277 params = list()
278
279 def _get_value(value, pos=None, name=None):
280 if isinstance(value, P):
281 if pos is not None:
282 params.append(pos)
283 elif name is not None:
284 params.append(name)
285 v = value.get(batch=self, parallel=True)
286 return v
287 return value
288
289 _args = []
290 for i, v in enumerate(args):
291 _args.append(_get_value(v, pos=i))
292 _kwargs = {}
293 for k, v in kwargs.items():
294 _kwargs.update({k: _get_value(v, name=k)})
295
296 return _args, _kwargs, params
297
298 def _make_args(self, iteration, init_args, args, kwargs, params=None):
299 """ Make args, kwargs tuple """
300 if isinstance(init_args, tuple) and len(init_args) == 2 and \
301 isinstance(init_args[0], tuple) and isinstance(init_args[1], dict):
302 margs, mkwargs = init_args
303 elif isinstance(init_args, dict):
304 margs = list()
305 mkwargs = init_args
306 else:
307 margs = init_args
308 mkwargs = dict()
309
310 margs = margs if isinstance(margs, (list, tuple)) else [margs]
311
312 if params:
313 _args = list(args)
314 _kwargs = {**kwargs}
315 for k in params:
316 if isinstance(k, str):
317 _kwargs[k] = _kwargs[k][iteration]
318 else:
319 _args[k] = _args[k][iteration]
320 else:
321 _args = args
322 _kwargs = kwargs
323
324 if len(args) > 0:
325 margs = list(margs) + list(_args)
326 if len(kwargs) > 0:
327 mkwargs.update(_kwargs)
328
329 if use_self:
330 margs = [self] + list(margs)
331
332 return margs, mkwargs
333
334 def make_random_seed(self):
335 if getattr(self, 'random_state', None) is None:
336 return make_seed_sequence()
337 return self.random_stat
338
339 def wrap_with_threads(self, args, kwargs):
340 """ Run a method in parallel threads """
341 init_fn, post_fn = _check_functions(self)
342
343 n_workers = kwargs.pop('n_workers', _workers_count())
344 with cf.ThreadPoolExecutor(max_workers=n_workers) as executor:
345 futures = []
346 args, kwargs, params = _prepare_args(self, args, kwargs)
347 full_kwargs = {**dec_kwargs, **kwargs}
348 for iteration, arg in enumerate(_call_init_fn(init_fn, args, full_kwargs)):
349 margs, mkwargs = _make_args(self, iteration, arg, args, kwargs, params)
350 seed = None if getattr(self, 'random_state', None) is None else spawn_seed_sequence(self)
351 one_ft = executor.submit(call_method, method, use_self, margs, mkwargs, seed=seed)
352 futures.append(one_ft)
353
354 timeout = kwargs.get('timeout', None)
355 cf.wait(futures, timeout=timeout, return_when=cf.ALL_COMPLETED)
356
357 return _call_post_fn(self, post_fn, futures, args, full_kwargs)
358
359 def wrap_with_mpc(self, args, kwargs):
360 """ Run a method in parallel processes """
361 init_fn, post_fn = _check_functions(self)
362
363 n_workers = kwargs.pop('n_workers', _workers_count())
364 with cf.ProcessPoolExecutor(max_workers=n_workers) as executor:
365 futures = []
366 args, kwargs, params = _prepare_args(self, args, kwargs)
367 full_kwargs = {**dec_kwargs, **kwargs}
368 for iteration, arg in enumerate(_call_init_fn(init_fn, args, full_kwargs)):
369 margs, mkwargs = _make_args(self, iteration, arg, args, kwargs, params)
370 seed = None if getattr(self, 'random_state', None) is None else spawn_seed_sequence(self)
371 one_ft = executor.submit(call_method, mpc_method, use_self, margs, mkwargs, seed=seed)
372 futures.append(one_ft)
373
374 timeout = kwargs.pop('timeout', None)
375 cf.wait(futures, timeout=timeout, return_when=cf.ALL_COMPLETED)
376
377 return _call_post_fn(self, post_fn, futures, args, full_kwargs)
378
379 def wrap_with_async(self, args, kwargs):
380 """ Run a method in parallel with async / await """
381 try:
382 loop = asyncio.get_event_loop()
383 except RuntimeError:
384 # this is a new thread where there is no loop
385 loop = asyncio.new_event_loop()
386 asyncio.set_event_loop(loop)
387 else:
388 # allow to specify a loop as an action parameter
389 loop = kwargs.get('loop', loop)
390
391 if loop.is_running():
392 raise RuntimeError('Cannot parallel async methods with a running event loop (e.g. in IPython).')
393
394 init_fn, post_fn = _check_functions(self)
395
396 futures = []
397 args, kwargs, params = _prepare_args(self, args, kwargs)
398 full_kwargs = {**dec_kwargs, **kwargs}
399 # save an initial seed to generate child seeds from
400 random_seed = make_random_seed(self)
401 for iteration, arg in enumerate(_call_init_fn(init_fn, args, full_kwargs)):
402 margs, mkwargs = _make_args(self, iteration, arg, args, kwargs, params)
403 seed = spawn_seed_sequence(random_seed)
404 futures.append(loop.create_task(call_method(method, use_self, margs, mkwargs, seed=seed)))
405
406 loop.run_until_complete(asyncio.gather(*futures, loop=loop, return_exceptions=True))
407
408 return _call_post_fn(self, post_fn, futures, args, full_kwargs)
409
410 def wrap_with_for(self, args, kwargs):
411 """ Run a method sequentially (without parallelism) """
412 init_fn, post_fn = _check_functions(self)
413 _ = kwargs.pop('n_workers', _workers_count())
414 futures = []
415 args, kwargs, params = _prepare_args(self, args, kwargs)
416 full_kwargs = {**dec_kwargs, **kwargs}
417 # save an initial seed to generate child seeds from
418 random_seed = make_random_seed(self)
419 for iteration, arg in enumerate(_call_init_fn(init_fn, args, full_kwargs)):
420 margs, mkwargs = _make_args(self, iteration, arg, args, kwargs, params)
421
422 seed = spawn_seed_sequence(random_seed)
423 try:
424 one_ft = call_method(method, use_self, margs, mkwargs, seed=seed)
425 except Exception as e: # pylint: disable=broad-except
426 one_ft = e
427 futures.append(one_ft)
428
429 return _call_post_fn(self, post_fn, futures, args, full_kwargs)
430
431 @functools.wraps(method)
432 def wrapped_method(*args, **kwargs):
433 """ Wrap a method with a required parallel engine """
434 if use_self:
435 # the first arg is self, not an ordinary arg
436 self = args[0]
437 args = args[1:]
438 else:
439 # still need self to preserve the signatures of other functions
440 self = None
441
442 _target = kwargs.pop('target', target)
443
444 if asyncio.iscoroutinefunction(method) or _target in ['async', 'a']:
445 x = wrap_with_async(self, args, kwargs)
446 elif _target in ['threads', 't']:
447 x = wrap_with_threads(self, args, kwargs)
448 elif _target in ['mpc', 'm']:
449 if mpc_method is not None:
450 x = wrap_with_mpc(self, args, kwargs)
451 else:
452 raise ValueError('Cannot use MPC with this method', method)
453 elif _target in ['for', 'f']:
454 x = wrap_with_for(self, args, kwargs)
455 else:
456 raise ValueError('Wrong parallelization target:', _target)
457 return x
458 return wrapped_method
459
460 return inbatch_parallel_decorator
461
462
463
464 def parallel(*args, use_self=None, **kwargs):
465 """ Decorator for a parallel execution of a function """
466 return inbatch_parallel(*args, _use_self=use_self, **kwargs)
467
468
469 def njit(nogil=True, parallel=True): # pylint: disable=redefined-outer-name
470 """ Fake njit decorator to use when numba is not installed """
471 _, _ = nogil, parallel
472 def njit_fake_decorator(method):
473 """ Return a decorator """
474 @functools.wraps(method)
475 def wrapped_method(*args, **kwargs):
476 """ Log warning that numba is not installed which causes preformance degradation """
477 logging.warning('numba is not installed. This causes a severe performance degradation for method %s',
478 method.__name__)
479 return method(*args, **kwargs)
480 return wrapped_method
481 return njit_fake_decorator
482
483
484 def mjit(*args, nopython=True, nogil=True, **kwargs):
485 """ jit decorator for methods
486
487 Notes
488 -----
489 This decorator should be applied directly to a method, not another decorator.
490 """
491 def _jit(method):
492 if jit is not None:
493 func = make_function(method)
494 func = jit(*args, nopython=nopython, nogil=nogil, **kwargs)(func)
495 else:
496 func = method
497 logging.warning('numba is not installed. This causes a severe performance degradation for method %s',
498 method.__name__)
499
500 @functools.wraps(method)
501 def _wrapped_method(self, *args, **kwargs):
502 _ = self
503 return func(None, *args, **kwargs)
504 return _wrapped_method
505
506 if len(args) == 1 and (callable(args[0])) and len(kwargs) == 0:
507 method = args[0]
508 args = tuple()
509 return _jit(method)
510 return _jit
511
512
513 def deprecated(msg):
514 """ Decorator for deprecated functions and methods """
515 def decorator(func):
516 @functools.wraps(func)
517 def _call(*args, **kwargs):
518 logging.warning(msg)
519 return func(*args, **kwargs)
520 return _call
521 return decorator
522
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/batchflow/decorators.py b/batchflow/decorators.py
--- a/batchflow/decorators.py
+++ b/batchflow/decorators.py
@@ -334,7 +334,7 @@
def make_random_seed(self):
if getattr(self, 'random_state', None) is None:
return make_seed_sequence()
- return self.random_stat
+ return self.random_state
def wrap_with_threads(self, args, kwargs):
""" Run a method in parallel threads """
| {"golden_diff": "diff --git a/batchflow/decorators.py b/batchflow/decorators.py\n--- a/batchflow/decorators.py\n+++ b/batchflow/decorators.py\n@@ -334,7 +334,7 @@\n def make_random_seed(self):\n if getattr(self, 'random_state', None) is None:\n return make_seed_sequence()\n- return self.random_stat\n+ return self.random_state\n \n def wrap_with_threads(self, args, kwargs):\n \"\"\" Run a method in parallel threads \"\"\"\n", "issue": "Typo\nhttps://github.com/analysiscenter/batchflow/blob/cd9062150811665b5a4e51a1080da5855f1c4dcb/batchflow/decorators.py#L337\r\n\r\n`random_stat` -> `random_state`\n", "before_files": [{"content": "\"\"\" Pipeline decorators \"\"\"\nimport os\nimport traceback\nimport threading\nimport concurrent.futures as cf\nimport asyncio\nimport functools\nimport logging\nimport inspect\n\ntry:\n from numba import jit\nexcept ImportError:\n jit = None\n\nfrom .named_expr import P\nfrom .utils_random import make_seed_sequence, spawn_seed_sequence\n\n\ndef make_function(method, is_global=False):\n \"\"\" Makes a function from a method\n\n Parameters\n ----------\n method\n a callable\n\n is_global : bool\n whether to create a function in a global namespace\n\n Notes\n -----\n A method should not be decorated with any other decorator.\n \"\"\"\n source = inspect.getsource(method).split('\\n')\n indent = len(source[0]) - len(source[0].lstrip())\n\n # strip indent spaces\n source = [s[indent:] for s in source if len(s) > indent]\n # skip all decorator and comment lines before 'def' or 'async def'\n start = 0\n for i, s in enumerate(source):\n if s[:3] in ['def', 'asy']:\n start = i\n break\n source = '\\n'.join(source[start:])\n\n globs = globals() if is_global else method.__globals__.copy()\n exec(source, globs) # pylint:disable=exec-used\n\n # Method with the same name might exist in various classes or modules\n # so a global function should have a unique name\n function_name = method.__module__ + \"_\" + method.__qualname__\n function_name = function_name.replace('.', '_')\n globs[function_name] = globs[method.__name__]\n return globs[function_name]\n\n\ndef _workers_count():\n cpu_count = 0\n try:\n cpu_count = len(os.sched_getaffinity(0))\n except AttributeError:\n cpu_count = os.cpu_count()\n return cpu_count * 4\n\n\ndef _make_action_wrapper_with_args(use_lock=None, no_eval=None): # pylint: disable=redefined-outer-name\n return functools.partial(_make_action_wrapper, use_lock=use_lock, no_eval=no_eval)\n\ndef _make_action_wrapper(action_method, use_lock=None, no_eval=None):\n @functools.wraps(action_method)\n def _action_wrapper(action_self, *args, **kwargs):\n \"\"\" Call the action method \"\"\"\n if use_lock is not None:\n if action_self.pipeline is not None:\n if isinstance(use_lock, bool):\n _lock_name = '#_lock_' + action_method.__name__\n else:\n _lock_name = use_lock\n if not action_self.pipeline.has_variable(_lock_name):\n action_self.pipeline.init_variable(_lock_name, threading.Lock())\n action_self.pipeline.get_variable(_lock_name).acquire()\n\n _res = action_method(action_self, *args, **kwargs)\n\n if use_lock is not None:\n if action_self.pipeline is not None:\n action_self.pipeline.get_variable(_lock_name).release()\n\n return _res\n\n if isinstance(no_eval, str):\n no_eval = [no_eval]\n _action_wrapper.action = dict(method=action_method, use_lock=use_lock, no_eval=no_eval)\n return _action_wrapper\n\ndef action(*args, **kwargs):\n \"\"\" Decorator for action methods in :class:`~.Batch` classes\n\n Parameters\n ----------\n use_lock : bool or str\n whether to lock an action when a pipeline is executed. It can be bool or a lock name.\n A pipeline variable with a lock is created in the pipeline during the execution.\n\n no_eval : str or a sequence of str\n parameters to skip from named expression evaluation.\n A parameter should be passed as a named argument only.\n\n Examples\n --------\n\n .. code-block:: python\n\n @action\n def some_action(self, arg1, arg2):\n ...\n\n @action(no_eval='dst')\n def calc_offset(self, src, dst=None):\n ...\n\n @action(use_lock=True)\n def critical_section(self, some_arg, another_arg):\n ...\n\n @action(use_lock='lock_name')\n def another_critical_section(self, some_arg, another_arg):\n ...\n \"\"\"\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n # action without arguments\n return _make_action_wrapper(action_method=args[0])\n # action with arguments\n return _make_action_wrapper_with_args(*args, **kwargs)\n\n\ndef apply_parallel(*args, **kwargs):\n \"\"\" Mark class method for transform in its metaclass.\n\n Decorator writes `kwargs` to the method attribute `apply_kwargs`,\n so they can be extracted and used in metaclass.\n\n Parameters\n ----------\n args, kwargs\n other parameters passed to `apply_parallel` method of the class\n where this decorator is being used\n\n Notes\n -----\n Redefine the attribute `apply_defaults <.Batch.apply_defaults>` in\n the batch class. This is proposed solely for the purposes of brevity \u2014 in\n order to avoid repeated heavily loaded class methods decoration, e.g.\n `@apply_parallel(src='images', target='for')` which in most cases is\n actually equivalent to simple `@apply_parallel` assuming\n that the defaults are redefined for the class whose methods are being\n transformed.\n\n Note, that if no defaults redefined those from the nearest\n parent class will be used in :class:`~.batch.MethodsTransformingMeta`.\n \"\"\"\n def mark(method):\n method.apply_kwargs = kwargs\n return method\n\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n return mark(args[0])\n if len(args) != 0:\n raise ValueError(\"This decorator accepts only named arguments\")\n\n return mark\n\n\ndef any_action_failed(results):\n \"\"\" Return `True` if some parallelized invocations threw exceptions \"\"\"\n return any(isinstance(res, Exception) for res in results)\n\ndef call_method(method, use_self, args, kwargs, seed=None):\n \"\"\" Call a method with given args \"\"\"\n if use_self and hasattr(args[0], 'random_seed') and seed is not None:\n # set batch.random_seed to create RNG\n args[0].random_seed = seed\n return method(*args, **kwargs)\n\ndef inbatch_parallel(init, post=None, target='threads', _use_self=None, **dec_kwargs):\n \"\"\" Decorator for parallel methods in :class:`~.Batch` classes\n\n Parameters\n ----------\n init\n a method name or a callable that returns an iterable for parallelization\n (e.g. a list of indices or items to be passed to a parallelized method)\n post\n a method name or a callable to call after parallel invocations\n (e.g. to assemble the batch)\n target : 'threads', 'mpc', 'async', 'for'\n a parallelization engine\n _use_self : bool\n whether to pass `self` (i.e. whether a decorated callable is a method or a function)\n\n Notes\n -----\n `mpc` can be used with a method that is decorated only by `inbatch_parallel`.\n All other decorators will be ignored.\n \"\"\"\n if target not in ['nogil', 'threads', 'mpc', 'async', 'for', 't', 'm', 'a', 'f']:\n raise ValueError(\"target should be one of 'threads', 'mpc', 'async', 'for'\")\n\n def inbatch_parallel_decorator(method):\n \"\"\" Return a decorator which run a method in parallel \"\"\"\n use_self = '.' in method.__qualname__ if _use_self is None else _use_self\n mpc_method = method\n if use_self:\n try:\n mpc_method = make_function(method, is_global=True)\n except Exception: # pylint:disable=broad-except\n mpc_method = None\n\n def _check_functions(self):\n \"\"\" Check decorator's `init` and `post` parameters \"\"\"\n if init is None:\n raise ValueError(\"init cannot be None\")\n\n if isinstance(init, str):\n try:\n init_fn = getattr(self, init)\n except AttributeError as e:\n raise ValueError(\"init should refer to a method or property of the class\", type(self).__name__,\n \"returning the list of arguments\") from e\n elif callable(init):\n init_fn = init\n else:\n init_fn = init\n\n if isinstance(post, str):\n try:\n post_fn = getattr(self, post)\n except AttributeError as e:\n raise ValueError(\"post should refer to a method of the class\", type(self).__name__) from e\n elif callable(post):\n post_fn = post\n else:\n post_fn = post\n\n return init_fn, post_fn\n\n def _call_init_fn(init_fn, args, kwargs):\n if callable(init_fn):\n return init_fn(*args, **kwargs)\n return init_fn\n\n def _call_post_fn(self, post_fn, futures, args, kwargs):\n all_results = []\n for future in futures:\n try:\n if isinstance(future, (cf.Future, asyncio.Task)):\n result = future.result()\n else:\n result = future\n except Exception as exce: # pylint: disable=broad-except\n result = exce\n finally:\n all_results += [result]\n\n if post_fn is None:\n if any_action_failed(all_results):\n all_errors = [error for error in all_results if isinstance(error, Exception)]\n logging.error(\"Parallel action failed %s\", all_errors)\n traceback.print_tb(all_errors[0].__traceback__)\n raise RuntimeError(\"Parallel action failed\")\n return self\n return post_fn(all_results, *args, **kwargs)\n\n def _prepare_args(self, args, kwargs):\n params = list()\n\n def _get_value(value, pos=None, name=None):\n if isinstance(value, P):\n if pos is not None:\n params.append(pos)\n elif name is not None:\n params.append(name)\n v = value.get(batch=self, parallel=True)\n return v\n return value\n\n _args = []\n for i, v in enumerate(args):\n _args.append(_get_value(v, pos=i))\n _kwargs = {}\n for k, v in kwargs.items():\n _kwargs.update({k: _get_value(v, name=k)})\n\n return _args, _kwargs, params\n\n def _make_args(self, iteration, init_args, args, kwargs, params=None):\n \"\"\" Make args, kwargs tuple \"\"\"\n if isinstance(init_args, tuple) and len(init_args) == 2 and \\\n isinstance(init_args[0], tuple) and isinstance(init_args[1], dict):\n margs, mkwargs = init_args\n elif isinstance(init_args, dict):\n margs = list()\n mkwargs = init_args\n else:\n margs = init_args\n mkwargs = dict()\n\n margs = margs if isinstance(margs, (list, tuple)) else [margs]\n\n if params:\n _args = list(args)\n _kwargs = {**kwargs}\n for k in params:\n if isinstance(k, str):\n _kwargs[k] = _kwargs[k][iteration]\n else:\n _args[k] = _args[k][iteration]\n else:\n _args = args\n _kwargs = kwargs\n\n if len(args) > 0:\n margs = list(margs) + list(_args)\n if len(kwargs) > 0:\n mkwargs.update(_kwargs)\n\n if use_self:\n margs = [self] + list(margs)\n\n return margs, mkwargs\n\n def make_random_seed(self):\n if getattr(self, 'random_state', None) is None:\n return make_seed_sequence()\n return self.random_stat\n\n def wrap_with_threads(self, args, kwargs):\n \"\"\" Run a method in parallel threads \"\"\"\n init_fn, post_fn = _check_functions(self)\n\n n_workers = kwargs.pop('n_workers', _workers_count())\n with cf.ThreadPoolExecutor(max_workers=n_workers) as executor:\n futures = []\n args, kwargs, params = _prepare_args(self, args, kwargs)\n full_kwargs = {**dec_kwargs, **kwargs}\n for iteration, arg in enumerate(_call_init_fn(init_fn, args, full_kwargs)):\n margs, mkwargs = _make_args(self, iteration, arg, args, kwargs, params)\n seed = None if getattr(self, 'random_state', None) is None else spawn_seed_sequence(self)\n one_ft = executor.submit(call_method, method, use_self, margs, mkwargs, seed=seed)\n futures.append(one_ft)\n\n timeout = kwargs.get('timeout', None)\n cf.wait(futures, timeout=timeout, return_when=cf.ALL_COMPLETED)\n\n return _call_post_fn(self, post_fn, futures, args, full_kwargs)\n\n def wrap_with_mpc(self, args, kwargs):\n \"\"\" Run a method in parallel processes \"\"\"\n init_fn, post_fn = _check_functions(self)\n\n n_workers = kwargs.pop('n_workers', _workers_count())\n with cf.ProcessPoolExecutor(max_workers=n_workers) as executor:\n futures = []\n args, kwargs, params = _prepare_args(self, args, kwargs)\n full_kwargs = {**dec_kwargs, **kwargs}\n for iteration, arg in enumerate(_call_init_fn(init_fn, args, full_kwargs)):\n margs, mkwargs = _make_args(self, iteration, arg, args, kwargs, params)\n seed = None if getattr(self, 'random_state', None) is None else spawn_seed_sequence(self)\n one_ft = executor.submit(call_method, mpc_method, use_self, margs, mkwargs, seed=seed)\n futures.append(one_ft)\n\n timeout = kwargs.pop('timeout', None)\n cf.wait(futures, timeout=timeout, return_when=cf.ALL_COMPLETED)\n\n return _call_post_fn(self, post_fn, futures, args, full_kwargs)\n\n def wrap_with_async(self, args, kwargs):\n \"\"\" Run a method in parallel with async / await \"\"\"\n try:\n loop = asyncio.get_event_loop()\n except RuntimeError:\n # this is a new thread where there is no loop\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n else:\n # allow to specify a loop as an action parameter\n loop = kwargs.get('loop', loop)\n\n if loop.is_running():\n raise RuntimeError('Cannot parallel async methods with a running event loop (e.g. in IPython).')\n\n init_fn, post_fn = _check_functions(self)\n\n futures = []\n args, kwargs, params = _prepare_args(self, args, kwargs)\n full_kwargs = {**dec_kwargs, **kwargs}\n # save an initial seed to generate child seeds from\n random_seed = make_random_seed(self)\n for iteration, arg in enumerate(_call_init_fn(init_fn, args, full_kwargs)):\n margs, mkwargs = _make_args(self, iteration, arg, args, kwargs, params)\n seed = spawn_seed_sequence(random_seed)\n futures.append(loop.create_task(call_method(method, use_self, margs, mkwargs, seed=seed)))\n\n loop.run_until_complete(asyncio.gather(*futures, loop=loop, return_exceptions=True))\n\n return _call_post_fn(self, post_fn, futures, args, full_kwargs)\n\n def wrap_with_for(self, args, kwargs):\n \"\"\" Run a method sequentially (without parallelism) \"\"\"\n init_fn, post_fn = _check_functions(self)\n _ = kwargs.pop('n_workers', _workers_count())\n futures = []\n args, kwargs, params = _prepare_args(self, args, kwargs)\n full_kwargs = {**dec_kwargs, **kwargs}\n # save an initial seed to generate child seeds from\n random_seed = make_random_seed(self)\n for iteration, arg in enumerate(_call_init_fn(init_fn, args, full_kwargs)):\n margs, mkwargs = _make_args(self, iteration, arg, args, kwargs, params)\n\n seed = spawn_seed_sequence(random_seed)\n try:\n one_ft = call_method(method, use_self, margs, mkwargs, seed=seed)\n except Exception as e: # pylint: disable=broad-except\n one_ft = e\n futures.append(one_ft)\n\n return _call_post_fn(self, post_fn, futures, args, full_kwargs)\n\n @functools.wraps(method)\n def wrapped_method(*args, **kwargs):\n \"\"\" Wrap a method with a required parallel engine \"\"\"\n if use_self:\n # the first arg is self, not an ordinary arg\n self = args[0]\n args = args[1:]\n else:\n # still need self to preserve the signatures of other functions\n self = None\n\n _target = kwargs.pop('target', target)\n\n if asyncio.iscoroutinefunction(method) or _target in ['async', 'a']:\n x = wrap_with_async(self, args, kwargs)\n elif _target in ['threads', 't']:\n x = wrap_with_threads(self, args, kwargs)\n elif _target in ['mpc', 'm']:\n if mpc_method is not None:\n x = wrap_with_mpc(self, args, kwargs)\n else:\n raise ValueError('Cannot use MPC with this method', method)\n elif _target in ['for', 'f']:\n x = wrap_with_for(self, args, kwargs)\n else:\n raise ValueError('Wrong parallelization target:', _target)\n return x\n return wrapped_method\n\n return inbatch_parallel_decorator\n\n\n\ndef parallel(*args, use_self=None, **kwargs):\n \"\"\" Decorator for a parallel execution of a function \"\"\"\n return inbatch_parallel(*args, _use_self=use_self, **kwargs)\n\n\ndef njit(nogil=True, parallel=True): # pylint: disable=redefined-outer-name\n \"\"\" Fake njit decorator to use when numba is not installed \"\"\"\n _, _ = nogil, parallel\n def njit_fake_decorator(method):\n \"\"\" Return a decorator \"\"\"\n @functools.wraps(method)\n def wrapped_method(*args, **kwargs):\n \"\"\" Log warning that numba is not installed which causes preformance degradation \"\"\"\n logging.warning('numba is not installed. This causes a severe performance degradation for method %s',\n method.__name__)\n return method(*args, **kwargs)\n return wrapped_method\n return njit_fake_decorator\n\n\ndef mjit(*args, nopython=True, nogil=True, **kwargs):\n \"\"\" jit decorator for methods\n\n Notes\n -----\n This decorator should be applied directly to a method, not another decorator.\n \"\"\"\n def _jit(method):\n if jit is not None:\n func = make_function(method)\n func = jit(*args, nopython=nopython, nogil=nogil, **kwargs)(func)\n else:\n func = method\n logging.warning('numba is not installed. This causes a severe performance degradation for method %s',\n method.__name__)\n\n @functools.wraps(method)\n def _wrapped_method(self, *args, **kwargs):\n _ = self\n return func(None, *args, **kwargs)\n return _wrapped_method\n\n if len(args) == 1 and (callable(args[0])) and len(kwargs) == 0:\n method = args[0]\n args = tuple()\n return _jit(method)\n return _jit\n\n\ndef deprecated(msg):\n \"\"\" Decorator for deprecated functions and methods \"\"\"\n def decorator(func):\n @functools.wraps(func)\n def _call(*args, **kwargs):\n logging.warning(msg)\n return func(*args, **kwargs)\n return _call\n return decorator\n", "path": "batchflow/decorators.py"}], "after_files": [{"content": "\"\"\" Pipeline decorators \"\"\"\nimport os\nimport traceback\nimport threading\nimport concurrent.futures as cf\nimport asyncio\nimport functools\nimport logging\nimport inspect\n\ntry:\n from numba import jit\nexcept ImportError:\n jit = None\n\nfrom .named_expr import P\nfrom .utils_random import make_seed_sequence, spawn_seed_sequence\n\n\ndef make_function(method, is_global=False):\n \"\"\" Makes a function from a method\n\n Parameters\n ----------\n method\n a callable\n\n is_global : bool\n whether to create a function in a global namespace\n\n Notes\n -----\n A method should not be decorated with any other decorator.\n \"\"\"\n source = inspect.getsource(method).split('\\n')\n indent = len(source[0]) - len(source[0].lstrip())\n\n # strip indent spaces\n source = [s[indent:] for s in source if len(s) > indent]\n # skip all decorator and comment lines before 'def' or 'async def'\n start = 0\n for i, s in enumerate(source):\n if s[:3] in ['def', 'asy']:\n start = i\n break\n source = '\\n'.join(source[start:])\n\n globs = globals() if is_global else method.__globals__.copy()\n exec(source, globs) # pylint:disable=exec-used\n\n # Method with the same name might exist in various classes or modules\n # so a global function should have a unique name\n function_name = method.__module__ + \"_\" + method.__qualname__\n function_name = function_name.replace('.', '_')\n globs[function_name] = globs[method.__name__]\n return globs[function_name]\n\n\ndef _workers_count():\n cpu_count = 0\n try:\n cpu_count = len(os.sched_getaffinity(0))\n except AttributeError:\n cpu_count = os.cpu_count()\n return cpu_count * 4\n\n\ndef _make_action_wrapper_with_args(use_lock=None, no_eval=None): # pylint: disable=redefined-outer-name\n return functools.partial(_make_action_wrapper, use_lock=use_lock, no_eval=no_eval)\n\ndef _make_action_wrapper(action_method, use_lock=None, no_eval=None):\n @functools.wraps(action_method)\n def _action_wrapper(action_self, *args, **kwargs):\n \"\"\" Call the action method \"\"\"\n if use_lock is not None:\n if action_self.pipeline is not None:\n if isinstance(use_lock, bool):\n _lock_name = '#_lock_' + action_method.__name__\n else:\n _lock_name = use_lock\n if not action_self.pipeline.has_variable(_lock_name):\n action_self.pipeline.init_variable(_lock_name, threading.Lock())\n action_self.pipeline.get_variable(_lock_name).acquire()\n\n _res = action_method(action_self, *args, **kwargs)\n\n if use_lock is not None:\n if action_self.pipeline is not None:\n action_self.pipeline.get_variable(_lock_name).release()\n\n return _res\n\n if isinstance(no_eval, str):\n no_eval = [no_eval]\n _action_wrapper.action = dict(method=action_method, use_lock=use_lock, no_eval=no_eval)\n return _action_wrapper\n\ndef action(*args, **kwargs):\n \"\"\" Decorator for action methods in :class:`~.Batch` classes\n\n Parameters\n ----------\n use_lock : bool or str\n whether to lock an action when a pipeline is executed. It can be bool or a lock name.\n A pipeline variable with a lock is created in the pipeline during the execution.\n\n no_eval : str or a sequence of str\n parameters to skip from named expression evaluation.\n A parameter should be passed as a named argument only.\n\n Examples\n --------\n\n .. code-block:: python\n\n @action\n def some_action(self, arg1, arg2):\n ...\n\n @action(no_eval='dst')\n def calc_offset(self, src, dst=None):\n ...\n\n @action(use_lock=True)\n def critical_section(self, some_arg, another_arg):\n ...\n\n @action(use_lock='lock_name')\n def another_critical_section(self, some_arg, another_arg):\n ...\n \"\"\"\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n # action without arguments\n return _make_action_wrapper(action_method=args[0])\n # action with arguments\n return _make_action_wrapper_with_args(*args, **kwargs)\n\n\ndef apply_parallel(*args, **kwargs):\n \"\"\" Mark class method for transform in its metaclass.\n\n Decorator writes `kwargs` to the method attribute `apply_kwargs`,\n so they can be extracted and used in metaclass.\n\n Parameters\n ----------\n args, kwargs\n other parameters passed to `apply_parallel` method of the class\n where this decorator is being used\n\n Notes\n -----\n Redefine the attribute `apply_defaults <.Batch.apply_defaults>` in\n the batch class. This is proposed solely for the purposes of brevity \u2014 in\n order to avoid repeated heavily loaded class methods decoration, e.g.\n `@apply_parallel(src='images', target='for')` which in most cases is\n actually equivalent to simple `@apply_parallel` assuming\n that the defaults are redefined for the class whose methods are being\n transformed.\n\n Note, that if no defaults redefined those from the nearest\n parent class will be used in :class:`~.batch.MethodsTransformingMeta`.\n \"\"\"\n def mark(method):\n method.apply_kwargs = kwargs\n return method\n\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n return mark(args[0])\n if len(args) != 0:\n raise ValueError(\"This decorator accepts only named arguments\")\n\n return mark\n\n\ndef any_action_failed(results):\n \"\"\" Return `True` if some parallelized invocations threw exceptions \"\"\"\n return any(isinstance(res, Exception) for res in results)\n\ndef call_method(method, use_self, args, kwargs, seed=None):\n \"\"\" Call a method with given args \"\"\"\n if use_self and hasattr(args[0], 'random_seed') and seed is not None:\n # set batch.random_seed to create RNG\n args[0].random_seed = seed\n return method(*args, **kwargs)\n\ndef inbatch_parallel(init, post=None, target='threads', _use_self=None, **dec_kwargs):\n \"\"\" Decorator for parallel methods in :class:`~.Batch` classes\n\n Parameters\n ----------\n init\n a method name or a callable that returns an iterable for parallelization\n (e.g. a list of indices or items to be passed to a parallelized method)\n post\n a method name or a callable to call after parallel invocations\n (e.g. to assemble the batch)\n target : 'threads', 'mpc', 'async', 'for'\n a parallelization engine\n _use_self : bool\n whether to pass `self` (i.e. whether a decorated callable is a method or a function)\n\n Notes\n -----\n `mpc` can be used with a method that is decorated only by `inbatch_parallel`.\n All other decorators will be ignored.\n \"\"\"\n if target not in ['nogil', 'threads', 'mpc', 'async', 'for', 't', 'm', 'a', 'f']:\n raise ValueError(\"target should be one of 'threads', 'mpc', 'async', 'for'\")\n\n def inbatch_parallel_decorator(method):\n \"\"\" Return a decorator which run a method in parallel \"\"\"\n use_self = '.' in method.__qualname__ if _use_self is None else _use_self\n mpc_method = method\n if use_self:\n try:\n mpc_method = make_function(method, is_global=True)\n except Exception: # pylint:disable=broad-except\n mpc_method = None\n\n def _check_functions(self):\n \"\"\" Check decorator's `init` and `post` parameters \"\"\"\n if init is None:\n raise ValueError(\"init cannot be None\")\n\n if isinstance(init, str):\n try:\n init_fn = getattr(self, init)\n except AttributeError as e:\n raise ValueError(\"init should refer to a method or property of the class\", type(self).__name__,\n \"returning the list of arguments\") from e\n elif callable(init):\n init_fn = init\n else:\n init_fn = init\n\n if isinstance(post, str):\n try:\n post_fn = getattr(self, post)\n except AttributeError as e:\n raise ValueError(\"post should refer to a method of the class\", type(self).__name__) from e\n elif callable(post):\n post_fn = post\n else:\n post_fn = post\n\n return init_fn, post_fn\n\n def _call_init_fn(init_fn, args, kwargs):\n if callable(init_fn):\n return init_fn(*args, **kwargs)\n return init_fn\n\n def _call_post_fn(self, post_fn, futures, args, kwargs):\n all_results = []\n for future in futures:\n try:\n if isinstance(future, (cf.Future, asyncio.Task)):\n result = future.result()\n else:\n result = future\n except Exception as exce: # pylint: disable=broad-except\n result = exce\n finally:\n all_results += [result]\n\n if post_fn is None:\n if any_action_failed(all_results):\n all_errors = [error for error in all_results if isinstance(error, Exception)]\n logging.error(\"Parallel action failed %s\", all_errors)\n traceback.print_tb(all_errors[0].__traceback__)\n raise RuntimeError(\"Parallel action failed\")\n return self\n return post_fn(all_results, *args, **kwargs)\n\n def _prepare_args(self, args, kwargs):\n params = list()\n\n def _get_value(value, pos=None, name=None):\n if isinstance(value, P):\n if pos is not None:\n params.append(pos)\n elif name is not None:\n params.append(name)\n v = value.get(batch=self, parallel=True)\n return v\n return value\n\n _args = []\n for i, v in enumerate(args):\n _args.append(_get_value(v, pos=i))\n _kwargs = {}\n for k, v in kwargs.items():\n _kwargs.update({k: _get_value(v, name=k)})\n\n return _args, _kwargs, params\n\n def _make_args(self, iteration, init_args, args, kwargs, params=None):\n \"\"\" Make args, kwargs tuple \"\"\"\n if isinstance(init_args, tuple) and len(init_args) == 2 and \\\n isinstance(init_args[0], tuple) and isinstance(init_args[1], dict):\n margs, mkwargs = init_args\n elif isinstance(init_args, dict):\n margs = list()\n mkwargs = init_args\n else:\n margs = init_args\n mkwargs = dict()\n\n margs = margs if isinstance(margs, (list, tuple)) else [margs]\n\n if params:\n _args = list(args)\n _kwargs = {**kwargs}\n for k in params:\n if isinstance(k, str):\n _kwargs[k] = _kwargs[k][iteration]\n else:\n _args[k] = _args[k][iteration]\n else:\n _args = args\n _kwargs = kwargs\n\n if len(args) > 0:\n margs = list(margs) + list(_args)\n if len(kwargs) > 0:\n mkwargs.update(_kwargs)\n\n if use_self:\n margs = [self] + list(margs)\n\n return margs, mkwargs\n\n def make_random_seed(self):\n if getattr(self, 'random_state', None) is None:\n return make_seed_sequence()\n return self.random_state\n\n def wrap_with_threads(self, args, kwargs):\n \"\"\" Run a method in parallel threads \"\"\"\n init_fn, post_fn = _check_functions(self)\n\n n_workers = kwargs.pop('n_workers', _workers_count())\n with cf.ThreadPoolExecutor(max_workers=n_workers) as executor:\n futures = []\n args, kwargs, params = _prepare_args(self, args, kwargs)\n full_kwargs = {**dec_kwargs, **kwargs}\n for iteration, arg in enumerate(_call_init_fn(init_fn, args, full_kwargs)):\n margs, mkwargs = _make_args(self, iteration, arg, args, kwargs, params)\n seed = None if getattr(self, 'random_state', None) is None else spawn_seed_sequence(self)\n one_ft = executor.submit(call_method, method, use_self, margs, mkwargs, seed=seed)\n futures.append(one_ft)\n\n timeout = kwargs.get('timeout', None)\n cf.wait(futures, timeout=timeout, return_when=cf.ALL_COMPLETED)\n\n return _call_post_fn(self, post_fn, futures, args, full_kwargs)\n\n def wrap_with_mpc(self, args, kwargs):\n \"\"\" Run a method in parallel processes \"\"\"\n init_fn, post_fn = _check_functions(self)\n\n n_workers = kwargs.pop('n_workers', _workers_count())\n with cf.ProcessPoolExecutor(max_workers=n_workers) as executor:\n futures = []\n args, kwargs, params = _prepare_args(self, args, kwargs)\n full_kwargs = {**dec_kwargs, **kwargs}\n for iteration, arg in enumerate(_call_init_fn(init_fn, args, full_kwargs)):\n margs, mkwargs = _make_args(self, iteration, arg, args, kwargs, params)\n seed = None if getattr(self, 'random_state', None) is None else spawn_seed_sequence(self)\n one_ft = executor.submit(call_method, mpc_method, use_self, margs, mkwargs, seed=seed)\n futures.append(one_ft)\n\n timeout = kwargs.pop('timeout', None)\n cf.wait(futures, timeout=timeout, return_when=cf.ALL_COMPLETED)\n\n return _call_post_fn(self, post_fn, futures, args, full_kwargs)\n\n def wrap_with_async(self, args, kwargs):\n \"\"\" Run a method in parallel with async / await \"\"\"\n try:\n loop = asyncio.get_event_loop()\n except RuntimeError:\n # this is a new thread where there is no loop\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n else:\n # allow to specify a loop as an action parameter\n loop = kwargs.get('loop', loop)\n\n if loop.is_running():\n raise RuntimeError('Cannot parallel async methods with a running event loop (e.g. in IPython).')\n\n init_fn, post_fn = _check_functions(self)\n\n futures = []\n args, kwargs, params = _prepare_args(self, args, kwargs)\n full_kwargs = {**dec_kwargs, **kwargs}\n # save an initial seed to generate child seeds from\n random_seed = make_random_seed(self)\n for iteration, arg in enumerate(_call_init_fn(init_fn, args, full_kwargs)):\n margs, mkwargs = _make_args(self, iteration, arg, args, kwargs, params)\n seed = spawn_seed_sequence(random_seed)\n futures.append(loop.create_task(call_method(method, use_self, margs, mkwargs, seed=seed)))\n\n loop.run_until_complete(asyncio.gather(*futures, loop=loop, return_exceptions=True))\n\n return _call_post_fn(self, post_fn, futures, args, full_kwargs)\n\n def wrap_with_for(self, args, kwargs):\n \"\"\" Run a method sequentially (without parallelism) \"\"\"\n init_fn, post_fn = _check_functions(self)\n _ = kwargs.pop('n_workers', _workers_count())\n futures = []\n args, kwargs, params = _prepare_args(self, args, kwargs)\n full_kwargs = {**dec_kwargs, **kwargs}\n # save an initial seed to generate child seeds from\n random_seed = make_random_seed(self)\n for iteration, arg in enumerate(_call_init_fn(init_fn, args, full_kwargs)):\n margs, mkwargs = _make_args(self, iteration, arg, args, kwargs, params)\n\n seed = spawn_seed_sequence(random_seed)\n try:\n one_ft = call_method(method, use_self, margs, mkwargs, seed=seed)\n except Exception as e: # pylint: disable=broad-except\n one_ft = e\n futures.append(one_ft)\n\n return _call_post_fn(self, post_fn, futures, args, full_kwargs)\n\n @functools.wraps(method)\n def wrapped_method(*args, **kwargs):\n \"\"\" Wrap a method with a required parallel engine \"\"\"\n if use_self:\n # the first arg is self, not an ordinary arg\n self = args[0]\n args = args[1:]\n else:\n # still need self to preserve the signatures of other functions\n self = None\n\n _target = kwargs.pop('target', target)\n\n if asyncio.iscoroutinefunction(method) or _target in ['async', 'a']:\n x = wrap_with_async(self, args, kwargs)\n elif _target in ['threads', 't']:\n x = wrap_with_threads(self, args, kwargs)\n elif _target in ['mpc', 'm']:\n if mpc_method is not None:\n x = wrap_with_mpc(self, args, kwargs)\n else:\n raise ValueError('Cannot use MPC with this method', method)\n elif _target in ['for', 'f']:\n x = wrap_with_for(self, args, kwargs)\n else:\n raise ValueError('Wrong parallelization target:', _target)\n return x\n return wrapped_method\n\n return inbatch_parallel_decorator\n\n\n\ndef parallel(*args, use_self=None, **kwargs):\n \"\"\" Decorator for a parallel execution of a function \"\"\"\n return inbatch_parallel(*args, _use_self=use_self, **kwargs)\n\n\ndef njit(nogil=True, parallel=True): # pylint: disable=redefined-outer-name\n \"\"\" Fake njit decorator to use when numba is not installed \"\"\"\n _, _ = nogil, parallel\n def njit_fake_decorator(method):\n \"\"\" Return a decorator \"\"\"\n @functools.wraps(method)\n def wrapped_method(*args, **kwargs):\n \"\"\" Log warning that numba is not installed which causes preformance degradation \"\"\"\n logging.warning('numba is not installed. This causes a severe performance degradation for method %s',\n method.__name__)\n return method(*args, **kwargs)\n return wrapped_method\n return njit_fake_decorator\n\n\ndef mjit(*args, nopython=True, nogil=True, **kwargs):\n \"\"\" jit decorator for methods\n\n Notes\n -----\n This decorator should be applied directly to a method, not another decorator.\n \"\"\"\n def _jit(method):\n if jit is not None:\n func = make_function(method)\n func = jit(*args, nopython=nopython, nogil=nogil, **kwargs)(func)\n else:\n func = method\n logging.warning('numba is not installed. This causes a severe performance degradation for method %s',\n method.__name__)\n\n @functools.wraps(method)\n def _wrapped_method(self, *args, **kwargs):\n _ = self\n return func(None, *args, **kwargs)\n return _wrapped_method\n\n if len(args) == 1 and (callable(args[0])) and len(kwargs) == 0:\n method = args[0]\n args = tuple()\n return _jit(method)\n return _jit\n\n\ndef deprecated(msg):\n \"\"\" Decorator for deprecated functions and methods \"\"\"\n def decorator(func):\n @functools.wraps(func)\n def _call(*args, **kwargs):\n logging.warning(msg)\n return func(*args, **kwargs)\n return _call\n return decorator\n", "path": "batchflow/decorators.py"}]} |
gh_patches_debug_1539 | rasdani/github-patches | git_diff | avocado-framework__avocado-2276 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
why this is "<" not ">" ?
https://github.com/avocado-framework/avocado/blob/d1503a1dcfe684a1b6ab03fd79546eb0f2bfb511/optional_plugins/runner_remote/avocado_runner_remote/__init__.py#L107
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `optional_plugins/runner_remote/avocado_runner_remote/__init__.py`
Content:
```
1 # This program is free software; you can redistribute it and/or modify
2 # it under the terms of the GNU General Public License as published by
3 # the Free Software Foundation; either version 2 of the License, or
4 # (at your option) any later version.
5 #
6 # This program is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
9 #
10 # See LICENSE for more details.
11 #
12 # Copyright: Red Hat Inc. 2014-2017
13 # Authors: Ruda Moura <[email protected]>
14 # Cleber Rosa <[email protected]>
15
16 import getpass
17 import json
18 import logging
19 import os
20 import re
21 import sys
22 import time
23
24 import fabric.api
25 import fabric.network
26 import fabric.operations
27 import fabric.tasks
28 from fabric.context_managers import shell_env
29 from fabric.exceptions import CommandTimeout
30
31 from avocado.core import exceptions
32 from avocado.core import exit_codes
33 from avocado.core import loader
34 from avocado.core import output
35 from avocado.core import status
36 from avocado.core.output import LOG_JOB, LOG_UI
37 from avocado.core.plugin_interfaces import CLI
38 from avocado.core.runner import TestRunner
39 from avocado.core.settings import settings
40 from avocado.core.test import TestID
41 from avocado.utils import archive
42 from avocado.utils import astring
43 from avocado.utils import process
44 from avocado.utils import stacktrace
45
46
47 class RemoterError(Exception):
48 pass
49
50
51 class ConnectionError(RemoterError):
52 pass
53
54
55 def _get_env_vars(env_vars):
56 """
57 Gets environment variables.
58
59 :param variables: A list of variables to get.
60 :return: A dictionary with variables names and values.
61 """
62 env_vars_map = {}
63 for var in env_vars:
64 value = os.environ.get(var)
65 if value is not None:
66 env_vars_map[var] = value
67 return env_vars_map
68
69
70 def run(command, ignore_status=False, quiet=True, timeout=60):
71 """
72 Executes a command on the defined fabric hosts.
73
74 This is basically a wrapper to fabric.operations.run, encapsulating
75 the result on an avocado process.CmdResult object. This also assumes
76 the fabric environment was previously (and properly) initialized.
77
78 :param command: the command string to execute.
79 :param ignore_status: Whether to not raise exceptions in case the
80 command's return code is different than zero.
81 :param timeout: Maximum time allowed for the command to return.
82 :param quiet: Whether to not log command stdout/err. Default: True.
83
84 :return: the result of the remote program's execution.
85 :rtype: :class:`avocado.utils.process.CmdResult`.
86 :raise fabric.exceptions.CommandTimeout: When timeout exhausted.
87 """
88
89 result = process.CmdResult()
90 start_time = time.time()
91 end_time = time.time() + (timeout or 0) # Support timeout=None
92 # Fabric sometimes returns NetworkError even when timeout not reached
93 fabric_result = None
94 fabric_exception = None
95 while True:
96 try:
97 fabric_result = fabric.operations.run(command=command,
98 quiet=quiet,
99 warn_only=True,
100 timeout=timeout,
101 pty=False,
102 combine_stderr=False)
103 break
104 except fabric.network.NetworkError as details:
105 fabric_exception = details
106 timeout = end_time - time.time()
107 if time.time() < end_time:
108 break
109 if fabric_result is None:
110 if fabric_exception is not None:
111 raise fabric_exception # it's not None pylint: disable=E0702
112 else:
113 raise fabric.network.NetworkError("Remote execution of '%s'"
114 "failed without any "
115 "exception. This should not "
116 "happen." % command)
117 end_time = time.time()
118 duration = end_time - start_time
119 result.command = command
120 result.stdout = str(fabric_result.stdout)
121 result.stderr = str(fabric_result.stderr)
122 result.duration = duration
123 result.exit_status = fabric_result.return_code
124 result.failed = fabric_result.failed
125 result.succeeded = fabric_result.succeeded
126 if not ignore_status:
127 if result.failed:
128 raise process.CmdError(command=command, result=result)
129 return result
130
131
132 def send_files(local_path, remote_path):
133 """
134 Send files to the defined fabric host.
135
136 This assumes the fabric environment was previously (and properly)
137 initialized.
138
139 :param local_path: the local path.
140 :param remote_path: the remote path.
141 """
142 try:
143 fabric.operations.put(local_path, remote_path,
144 mirror_local_mode=True)
145 except ValueError:
146 return False
147 return True
148
149
150 def receive_files(local_path, remote_path):
151 """
152 Receive files from the defined fabric host.
153
154 This assumes the fabric environment was previously (and properly)
155 initialized.
156
157 :param local_path: the local path.
158 :param remote_path: the remote path.
159 """
160 try:
161 fabric.operations.get(remote_path,
162 local_path)
163 except ValueError:
164 return False
165 return True
166
167
168 def _update_fabric_env(method):
169 """
170 Update fabric env with the appropriate parameters.
171
172 :param method: Remote method to wrap.
173 :return: Wrapped method.
174 """
175 def wrapper(*args, **kwargs):
176 fabric.api.env.update(host_string=args[0].hostname,
177 user=args[0].username,
178 key_filename=args[0].key_filename,
179 password=args[0].password,
180 port=args[0].port)
181 return method(*args, **kwargs)
182 return wrapper
183
184
185 class Remote(object):
186
187 """
188 Performs remote operations.
189 """
190
191 def __init__(self, hostname, username=None, password=None,
192 key_filename=None, port=22, timeout=60, attempts=10,
193 env_keep=None):
194 """
195 Creates an instance of :class:`Remote`.
196
197 :param hostname: the hostname.
198 :param username: the username. Default: autodetect.
199 :param password: the password. Default: try to use public key.
200 :param key_filename: path to an identity file (Example: .pem files
201 from Amazon EC2).
202 :param timeout: remote command timeout, in seconds. Default: 60.
203 :param attempts: number of attempts to connect. Default: 10.
204 """
205 self.hostname = hostname
206 if username is None:
207 username = getpass.getuser()
208 self.username = username
209 self.key_filename = key_filename
210 # None = use public key
211 self.password = password
212 self.port = port
213 reject_unknown_hosts = settings.get_value('remoter.behavior',
214 'reject_unknown_hosts',
215 key_type=bool,
216 default=False)
217 disable_known_hosts = settings.get_value('remoter.behavior',
218 'disable_known_hosts',
219 key_type=bool,
220 default=False)
221 if env_keep is None:
222 self.env_vars = {}
223 else:
224 self.env_vars = _get_env_vars(env_keep)
225 fabric.api.env.update(host_string=hostname,
226 user=username,
227 password=password,
228 key_filename=key_filename,
229 port=port,
230 timeout=timeout / attempts,
231 connection_attempts=attempts,
232 linewise=True,
233 abort_on_prompts=True,
234 abort_exception=ConnectionError,
235 reject_unknown_hosts=reject_unknown_hosts,
236 disable_known_hosts=disable_known_hosts)
237
238 @_update_fabric_env
239 def run(self, command, ignore_status=False, quiet=True, timeout=60):
240 """
241 Run a command on the remote host.
242
243 :param command: the command string to execute.
244 :param ignore_status: Whether to not raise exceptions in case the
245 command's return code is different than zero.
246 :param timeout: Maximum time allowed for the command to return.
247 :param quiet: Whether to not log command stdout/err. Default: True.
248
249 :return: the result of the remote program's execution.
250 :rtype: :class:`avocado.utils.process.CmdResult`.
251 :raise fabric.exceptions.CommandTimeout: When timeout exhausted.
252 """
253
254 with shell_env(**self.env_vars): # pylint: disable=E1129
255 return_dict = fabric.tasks.execute(run, command, ignore_status,
256 quiet, timeout,
257 hosts=[self.hostname])
258 return return_dict[self.hostname]
259
260 def uptime(self):
261 """
262 Performs uptime (good to check connection).
263
264 :return: the uptime string or empty string if fails.
265 """
266 res = self.run('uptime', ignore_status=True)
267 if res.exit_status == 0:
268 return res
269 else:
270 return ''
271
272 def makedir(self, remote_path):
273 """
274 Create a directory.
275
276 :param remote_path: the remote path to create.
277 """
278 self.run('mkdir -p %s' % remote_path)
279
280 @_update_fabric_env
281 def send_files(self, local_path, remote_path):
282 """
283 Send files to remote host.
284
285 :param local_path: the local path.
286 :param remote_path: the remote path.
287 """
288 result_dict = fabric.tasks.execute(send_files, local_path,
289 remote_path, hosts=[self.hostname])
290 return result_dict[self.hostname]
291
292 @_update_fabric_env
293 def receive_files(self, local_path, remote_path):
294 """
295 Receive files from the remote host.
296
297 :param local_path: the local path.
298 :param remote_path: the remote path.
299 """
300 result_dict = fabric.tasks.execute(receive_files, local_path,
301 remote_path, hosts=[self.hostname])
302 return result_dict[self.hostname]
303
304
305 class RemoteTestRunner(TestRunner):
306
307 """ Tooled TestRunner to run on remote machine using ssh """
308
309 # Let's use re.MULTILINE because sometimes servers might have MOTD
310 # that will introduce a line break on output.
311 remote_version_re = re.compile(r'^Avocado (\d+)\.(\d+)\r?$',
312 re.MULTILINE)
313
314 def __init__(self, job, result):
315 super(RemoteTestRunner, self).__init__(job, result)
316 #: remoter connection to the remote machine
317 self.remote = None
318
319 def setup(self):
320 """ Setup remote environment """
321 stdout_claimed_by = getattr(self.job.args, 'stdout_claimed_by', None)
322 if not stdout_claimed_by:
323 self.job.log.info("LOGIN : %s@%s:%d (TIMEOUT: %s seconds)",
324 self.job.args.remote_username,
325 self.job.args.remote_hostname,
326 self.job.args.remote_port,
327 self.job.args.remote_timeout)
328 self.remote = Remote(hostname=self.job.args.remote_hostname,
329 username=self.job.args.remote_username,
330 password=self.job.args.remote_password,
331 key_filename=self.job.args.remote_key_file,
332 port=self.job.args.remote_port,
333 timeout=self.job.args.remote_timeout,
334 env_keep=self.job.args.env_keep)
335
336 def check_remote_avocado(self):
337 """
338 Checks if the remote system appears to have avocado installed
339
340 The "appears to have" description is justified by the fact that the
341 check is rather simplistic, it attempts to run an `avocado -v` command
342 and checks if the output looks like what avocado would print out.
343
344 :rtype: tuple with (bool, tuple)
345 :returns: (True, (x, y, z)) if avocado appears to be installed and
346 (False, None) otherwise.
347 """
348 # This will be useful as extra debugging info in case avocado
349 # doesn't seem to be available in the remote system.
350 self.remote.run('env', ignore_status=True, timeout=60)
351
352 result = self.remote.run('avocado -v',
353 ignore_status=True,
354 timeout=60)
355 if result.exit_status == 127:
356 return (False, None)
357
358 match = self.remote_version_re.findall(result.stderr)
359 if match is None:
360 return (False, None)
361
362 try:
363 return (True, tuple(int(_) for _ in match[0]))
364 except IndexError:
365 return (False, None)
366
367 @staticmethod
368 def _parse_json_response(json_output):
369 """
370 Try to parse JSON response from the remote output.
371
372 It tries to find start of the json dictionary and then grabs
373 everything till the end of the dictionary. It supports single-
374 line as well as multi-line pretty json output.
375 """
376 _result = iter(json_output.splitlines())
377 json_result = ""
378 response = None
379 for line in _result: # Find the beginning
380 if line.startswith('{'):
381 json_result += line
382 break
383 else:
384 raise ValueError("Could not find the beginning of the remote JSON"
385 " output:\n%s" % output)
386 if json_result.endswith('}'): # probably single-line
387 try:
388 response = json.loads(json_result)
389 except ValueError:
390 pass
391 if not response:
392 # Json was incomplete, try to find another end
393 for line in _result:
394 json_result += line
395 if line.startswith('}'):
396 try:
397 response = json.loads(json_result)
398 break
399 except ValueError:
400 pass
401 if not response:
402 raise ValueError("Could not find the end of the remote JSON "
403 "output:\n%s" % output)
404 return response
405
406 def run_test(self, references, timeout):
407 """
408 Run tests.
409
410 :param references: a string with test references.
411 :return: a dictionary with test results.
412 """
413 extra_params = []
414 mux_files = getattr(self.job.args, 'mux_yaml') or []
415 if mux_files:
416 extra_params.append("-m %s" % " ".join(mux_files))
417
418 if getattr(self.job.args, "dry_run", False):
419 extra_params.append("--dry-run")
420 references_str = " ".join(references)
421
422 avocado_cmd = ('avocado run --force-job-id %s --json - '
423 '--archive %s %s' % (self.job.unique_id,
424 references_str, " ".join(extra_params)))
425 try:
426 result = self.remote.run(avocado_cmd, ignore_status=True,
427 timeout=timeout)
428 if result.exit_status & exit_codes.AVOCADO_JOB_FAIL:
429 raise exceptions.JobError("Remote execution failed with: %s" % result.stderr)
430
431 except CommandTimeout:
432 raise exceptions.JobError("Remote execution took longer than "
433 "specified timeout (%s). Interrupting."
434 % (timeout))
435
436 try:
437 json_result = self._parse_json_response(result.stdout)
438 except:
439 stacktrace.log_exc_info(sys.exc_info(), logger='avocado.debug')
440 raise exceptions.JobError(result.stdout)
441
442 for t_dict in json_result['tests']:
443 logdir = os.path.join(self.job.logdir, 'test-results')
444 relative_path = astring.string_to_safe_path(str(t_dict['test']))
445 logdir = os.path.join(logdir, relative_path)
446 t_dict['logdir'] = logdir
447 t_dict['logfile'] = os.path.join(logdir, 'debug.log')
448
449 return json_result
450
451 def run_suite(self, test_suite, variants, timeout=0, replay_map=None,
452 suite_order="variants-per-test"):
453 """
454 Run one or more tests and report with test result.
455
456 :param params_list: a list of param dicts.
457 :param variants: A varianter iterator (unused here)
458
459 :return: a set with types of test failures.
460 """
461 del test_suite # using self.job.references instead
462 del variants # we're not using multiplexation here
463 if suite_order != "variants-per-test" and suite_order is not None:
464 raise exceptions.JobError("execution-order %s is not supported "
465 "for remote execution." % suite_order)
466 del suite_order # suite_order is ignored for now
467 if not timeout: # avoid timeout = 0
468 timeout = None
469 summary = set()
470
471 stdout_backup = sys.stdout
472 stderr_backup = sys.stderr
473 fabric_debugfile = os.path.join(self.job.logdir, 'remote.log')
474 paramiko_logger = logging.getLogger('paramiko')
475 fabric_logger = logging.getLogger('avocado.fabric')
476 remote_logger = logging.getLogger('avocado.remote')
477 app_logger = logging.getLogger('avocado.debug')
478 fmt = ('%(asctime)s %(module)-10.10s L%(lineno)-.4d %('
479 'levelname)-5.5s| %(message)s')
480 formatter = logging.Formatter(fmt=fmt, datefmt='%H:%M:%S')
481 file_handler = logging.FileHandler(filename=fabric_debugfile)
482 file_handler.setFormatter(formatter)
483 fabric_logger.addHandler(file_handler)
484 paramiko_logger.addHandler(file_handler)
485 remote_logger.addHandler(file_handler)
486 if self.job.args.show_job_log:
487 output.add_log_handler(paramiko_logger.name)
488 logger_list = [output.LOG_JOB]
489 sys.stdout = output.LoggingFile(loggers=logger_list)
490 sys.stderr = output.LoggingFile(loggers=logger_list)
491 try:
492 try:
493 self.setup()
494 avocado_installed, _ = self.check_remote_avocado()
495 if not avocado_installed:
496 raise exceptions.JobError('Remote machine does not seem to'
497 ' have avocado installed')
498 except Exception as details:
499 stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
500 raise exceptions.JobError(details)
501 results = self.run_test(self.job.references, timeout)
502 remote_log_dir = os.path.dirname(results['debuglog'])
503 self.result.tests_total = results['total']
504 local_log_dir = self.job.logdir
505 for tst in results['tests']:
506 name = tst['test'].split('-', 1)
507 name = [name[0]] + name[1].split(';')
508 if len(name) == 3:
509 name[2] = {"variant_id": name[2]}
510 name = TestID(*name, no_digits=-1)
511 state = dict(name=name,
512 time_elapsed=tst['time'],
513 time_start=tst['start'],
514 time_end=tst['end'],
515 status=tst['status'],
516 logdir=tst['logdir'],
517 logfile=tst['logfile'],
518 fail_reason=tst['fail_reason'],
519 job_logdir=local_log_dir,
520 job_unique_id='')
521 self.result.start_test(state)
522 self.job._result_events_dispatcher.map_method('start_test',
523 self.result,
524 state)
525 self.result.check_test(state)
526 self.job._result_events_dispatcher.map_method('end_test',
527 self.result,
528 state)
529 if state['status'] == "INTERRUPTED":
530 summary.add("INTERRUPTED")
531 elif not status.mapping[state['status']]:
532 summary.add("FAIL")
533 zip_filename = remote_log_dir + '.zip'
534 zip_path_filename = os.path.join(local_log_dir,
535 os.path.basename(zip_filename))
536 self.remote.receive_files(local_log_dir, zip_filename)
537 archive.uncompress(zip_path_filename, local_log_dir)
538 os.remove(zip_path_filename)
539 self.result.end_tests()
540 self.job._result_events_dispatcher.map_method('post_tests',
541 self.job)
542 finally:
543 try:
544 self.tear_down()
545 except Exception as details:
546 stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
547 raise exceptions.JobError(details)
548 sys.stdout = stdout_backup
549 sys.stderr = stderr_backup
550 return summary
551
552 def tear_down(self):
553 """
554 This method is only called when `run_suite` gets to the point of to be
555 executing `setup` method and is called at the end of the execution.
556
557 :warning: It might be called on `setup` exceptions, so things
558 initialized during `setup` might not yet be initialized.
559 """
560 pass
561
562
563 class RemoteCLI(CLI):
564
565 """
566 Run tests on a remote machine
567 """
568
569 name = 'remote'
570 description = "Remote machine options for 'run' subcommand"
571
572 def configure(self, parser):
573 run_subcommand_parser = parser.subcommands.choices.get('run', None)
574 if run_subcommand_parser is None:
575 return
576
577 msg = 'test execution on a remote machine'
578 remote_parser = run_subcommand_parser.add_argument_group(msg)
579 remote_parser.add_argument('--remote-hostname',
580 dest='remote_hostname', default=None,
581 help=('Specify the hostname to login on'
582 ' remote machine'))
583 remote_parser.add_argument('--remote-port', dest='remote_port',
584 default=22, type=int,
585 help=('Specify the port number to login on '
586 'remote machine. Default: %(default)s'))
587 remote_parser.add_argument('--remote-username',
588 dest='remote_username',
589 default=getpass.getuser(),
590 help=('Specify the username to login on'
591 ' remote machine. Default: '
592 '%(default)s'))
593 remote_parser.add_argument('--remote-password',
594 dest='remote_password', default=None,
595 help=('Specify the password to login on'
596 ' remote machine'))
597 remote_parser.add_argument('--remote-key-file',
598 dest='remote_key_file', default=None,
599 help=('Specify an identity file with a '
600 'private key instead of a password '
601 '(Example: .pem files from Amazon EC2)'))
602 remote_parser.add_argument('--remote-timeout', metavar='SECONDS',
603 default=60, type=int,
604 help=("Amount of time (in seconds) to "
605 "wait for a successful connection"
606 " to the remote machine. Defaults"
607 " to %(default)s seconds."))
608
609 @staticmethod
610 def _check_required_args(args, enable_arg, required_args):
611 """
612 :return: True when enable_arg enabled and all required args are set
613 :raise sys.exit: When missing required argument.
614 """
615 if (not hasattr(args, enable_arg) or
616 not getattr(args, enable_arg)):
617 return False
618 missing = []
619 for arg in required_args:
620 if not getattr(args, arg):
621 missing.append(arg)
622 if missing:
623 LOG_UI.error("Use of %s requires %s arguments to be set. Please "
624 "set %s.", enable_arg, ', '.join(required_args),
625 ', '.join(missing))
626
627 return sys.exit(exit_codes.AVOCADO_FAIL)
628 return True
629
630 def run(self, args):
631 if self._check_required_args(args, 'remote_hostname',
632 ('remote_hostname',)):
633 loader.loader.clear_plugins()
634 loader.loader.register_plugin(loader.DummyLoader)
635 args.test_runner = RemoteTestRunner
636
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/optional_plugins/runner_remote/avocado_runner_remote/__init__.py b/optional_plugins/runner_remote/avocado_runner_remote/__init__.py
--- a/optional_plugins/runner_remote/avocado_runner_remote/__init__.py
+++ b/optional_plugins/runner_remote/avocado_runner_remote/__init__.py
@@ -104,7 +104,7 @@
except fabric.network.NetworkError as details:
fabric_exception = details
timeout = end_time - time.time()
- if time.time() < end_time:
+ if time.time() > end_time:
break
if fabric_result is None:
if fabric_exception is not None:
| {"golden_diff": "diff --git a/optional_plugins/runner_remote/avocado_runner_remote/__init__.py b/optional_plugins/runner_remote/avocado_runner_remote/__init__.py\n--- a/optional_plugins/runner_remote/avocado_runner_remote/__init__.py\n+++ b/optional_plugins/runner_remote/avocado_runner_remote/__init__.py\n@@ -104,7 +104,7 @@\n except fabric.network.NetworkError as details:\n fabric_exception = details\n timeout = end_time - time.time()\n- if time.time() < end_time:\n+ if time.time() > end_time:\n break\n if fabric_result is None:\n if fabric_exception is not None:\n", "issue": "why this is \"<\" not \">\" ?\nhttps://github.com/avocado-framework/avocado/blob/d1503a1dcfe684a1b6ab03fd79546eb0f2bfb511/optional_plugins/runner_remote/avocado_runner_remote/__init__.py#L107\n", "before_files": [{"content": "# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n#\n# See LICENSE for more details.\n#\n# Copyright: Red Hat Inc. 2014-2017\n# Authors: Ruda Moura <[email protected]>\n# Cleber Rosa <[email protected]>\n\nimport getpass\nimport json\nimport logging\nimport os\nimport re\nimport sys\nimport time\n\nimport fabric.api\nimport fabric.network\nimport fabric.operations\nimport fabric.tasks\nfrom fabric.context_managers import shell_env\nfrom fabric.exceptions import CommandTimeout\n\nfrom avocado.core import exceptions\nfrom avocado.core import exit_codes\nfrom avocado.core import loader\nfrom avocado.core import output\nfrom avocado.core import status\nfrom avocado.core.output import LOG_JOB, LOG_UI\nfrom avocado.core.plugin_interfaces import CLI\nfrom avocado.core.runner import TestRunner\nfrom avocado.core.settings import settings\nfrom avocado.core.test import TestID\nfrom avocado.utils import archive\nfrom avocado.utils import astring\nfrom avocado.utils import process\nfrom avocado.utils import stacktrace\n\n\nclass RemoterError(Exception):\n pass\n\n\nclass ConnectionError(RemoterError):\n pass\n\n\ndef _get_env_vars(env_vars):\n \"\"\"\n Gets environment variables.\n\n :param variables: A list of variables to get.\n :return: A dictionary with variables names and values.\n \"\"\"\n env_vars_map = {}\n for var in env_vars:\n value = os.environ.get(var)\n if value is not None:\n env_vars_map[var] = value\n return env_vars_map\n\n\ndef run(command, ignore_status=False, quiet=True, timeout=60):\n \"\"\"\n Executes a command on the defined fabric hosts.\n\n This is basically a wrapper to fabric.operations.run, encapsulating\n the result on an avocado process.CmdResult object. This also assumes\n the fabric environment was previously (and properly) initialized.\n\n :param command: the command string to execute.\n :param ignore_status: Whether to not raise exceptions in case the\n command's return code is different than zero.\n :param timeout: Maximum time allowed for the command to return.\n :param quiet: Whether to not log command stdout/err. Default: True.\n\n :return: the result of the remote program's execution.\n :rtype: :class:`avocado.utils.process.CmdResult`.\n :raise fabric.exceptions.CommandTimeout: When timeout exhausted.\n \"\"\"\n\n result = process.CmdResult()\n start_time = time.time()\n end_time = time.time() + (timeout or 0) # Support timeout=None\n # Fabric sometimes returns NetworkError even when timeout not reached\n fabric_result = None\n fabric_exception = None\n while True:\n try:\n fabric_result = fabric.operations.run(command=command,\n quiet=quiet,\n warn_only=True,\n timeout=timeout,\n pty=False,\n combine_stderr=False)\n break\n except fabric.network.NetworkError as details:\n fabric_exception = details\n timeout = end_time - time.time()\n if time.time() < end_time:\n break\n if fabric_result is None:\n if fabric_exception is not None:\n raise fabric_exception # it's not None pylint: disable=E0702\n else:\n raise fabric.network.NetworkError(\"Remote execution of '%s'\"\n \"failed without any \"\n \"exception. This should not \"\n \"happen.\" % command)\n end_time = time.time()\n duration = end_time - start_time\n result.command = command\n result.stdout = str(fabric_result.stdout)\n result.stderr = str(fabric_result.stderr)\n result.duration = duration\n result.exit_status = fabric_result.return_code\n result.failed = fabric_result.failed\n result.succeeded = fabric_result.succeeded\n if not ignore_status:\n if result.failed:\n raise process.CmdError(command=command, result=result)\n return result\n\n\ndef send_files(local_path, remote_path):\n \"\"\"\n Send files to the defined fabric host.\n\n This assumes the fabric environment was previously (and properly)\n initialized.\n\n :param local_path: the local path.\n :param remote_path: the remote path.\n \"\"\"\n try:\n fabric.operations.put(local_path, remote_path,\n mirror_local_mode=True)\n except ValueError:\n return False\n return True\n\n\ndef receive_files(local_path, remote_path):\n \"\"\"\n Receive files from the defined fabric host.\n\n This assumes the fabric environment was previously (and properly)\n initialized.\n\n :param local_path: the local path.\n :param remote_path: the remote path.\n \"\"\"\n try:\n fabric.operations.get(remote_path,\n local_path)\n except ValueError:\n return False\n return True\n\n\ndef _update_fabric_env(method):\n \"\"\"\n Update fabric env with the appropriate parameters.\n\n :param method: Remote method to wrap.\n :return: Wrapped method.\n \"\"\"\n def wrapper(*args, **kwargs):\n fabric.api.env.update(host_string=args[0].hostname,\n user=args[0].username,\n key_filename=args[0].key_filename,\n password=args[0].password,\n port=args[0].port)\n return method(*args, **kwargs)\n return wrapper\n\n\nclass Remote(object):\n\n \"\"\"\n Performs remote operations.\n \"\"\"\n\n def __init__(self, hostname, username=None, password=None,\n key_filename=None, port=22, timeout=60, attempts=10,\n env_keep=None):\n \"\"\"\n Creates an instance of :class:`Remote`.\n\n :param hostname: the hostname.\n :param username: the username. Default: autodetect.\n :param password: the password. Default: try to use public key.\n :param key_filename: path to an identity file (Example: .pem files\n from Amazon EC2).\n :param timeout: remote command timeout, in seconds. Default: 60.\n :param attempts: number of attempts to connect. Default: 10.\n \"\"\"\n self.hostname = hostname\n if username is None:\n username = getpass.getuser()\n self.username = username\n self.key_filename = key_filename\n # None = use public key\n self.password = password\n self.port = port\n reject_unknown_hosts = settings.get_value('remoter.behavior',\n 'reject_unknown_hosts',\n key_type=bool,\n default=False)\n disable_known_hosts = settings.get_value('remoter.behavior',\n 'disable_known_hosts',\n key_type=bool,\n default=False)\n if env_keep is None:\n self.env_vars = {}\n else:\n self.env_vars = _get_env_vars(env_keep)\n fabric.api.env.update(host_string=hostname,\n user=username,\n password=password,\n key_filename=key_filename,\n port=port,\n timeout=timeout / attempts,\n connection_attempts=attempts,\n linewise=True,\n abort_on_prompts=True,\n abort_exception=ConnectionError,\n reject_unknown_hosts=reject_unknown_hosts,\n disable_known_hosts=disable_known_hosts)\n\n @_update_fabric_env\n def run(self, command, ignore_status=False, quiet=True, timeout=60):\n \"\"\"\n Run a command on the remote host.\n\n :param command: the command string to execute.\n :param ignore_status: Whether to not raise exceptions in case the\n command's return code is different than zero.\n :param timeout: Maximum time allowed for the command to return.\n :param quiet: Whether to not log command stdout/err. Default: True.\n\n :return: the result of the remote program's execution.\n :rtype: :class:`avocado.utils.process.CmdResult`.\n :raise fabric.exceptions.CommandTimeout: When timeout exhausted.\n \"\"\"\n\n with shell_env(**self.env_vars): # pylint: disable=E1129\n return_dict = fabric.tasks.execute(run, command, ignore_status,\n quiet, timeout,\n hosts=[self.hostname])\n return return_dict[self.hostname]\n\n def uptime(self):\n \"\"\"\n Performs uptime (good to check connection).\n\n :return: the uptime string or empty string if fails.\n \"\"\"\n res = self.run('uptime', ignore_status=True)\n if res.exit_status == 0:\n return res\n else:\n return ''\n\n def makedir(self, remote_path):\n \"\"\"\n Create a directory.\n\n :param remote_path: the remote path to create.\n \"\"\"\n self.run('mkdir -p %s' % remote_path)\n\n @_update_fabric_env\n def send_files(self, local_path, remote_path):\n \"\"\"\n Send files to remote host.\n\n :param local_path: the local path.\n :param remote_path: the remote path.\n \"\"\"\n result_dict = fabric.tasks.execute(send_files, local_path,\n remote_path, hosts=[self.hostname])\n return result_dict[self.hostname]\n\n @_update_fabric_env\n def receive_files(self, local_path, remote_path):\n \"\"\"\n Receive files from the remote host.\n\n :param local_path: the local path.\n :param remote_path: the remote path.\n \"\"\"\n result_dict = fabric.tasks.execute(receive_files, local_path,\n remote_path, hosts=[self.hostname])\n return result_dict[self.hostname]\n\n\nclass RemoteTestRunner(TestRunner):\n\n \"\"\" Tooled TestRunner to run on remote machine using ssh \"\"\"\n\n # Let's use re.MULTILINE because sometimes servers might have MOTD\n # that will introduce a line break on output.\n remote_version_re = re.compile(r'^Avocado (\\d+)\\.(\\d+)\\r?$',\n re.MULTILINE)\n\n def __init__(self, job, result):\n super(RemoteTestRunner, self).__init__(job, result)\n #: remoter connection to the remote machine\n self.remote = None\n\n def setup(self):\n \"\"\" Setup remote environment \"\"\"\n stdout_claimed_by = getattr(self.job.args, 'stdout_claimed_by', None)\n if not stdout_claimed_by:\n self.job.log.info(\"LOGIN : %s@%s:%d (TIMEOUT: %s seconds)\",\n self.job.args.remote_username,\n self.job.args.remote_hostname,\n self.job.args.remote_port,\n self.job.args.remote_timeout)\n self.remote = Remote(hostname=self.job.args.remote_hostname,\n username=self.job.args.remote_username,\n password=self.job.args.remote_password,\n key_filename=self.job.args.remote_key_file,\n port=self.job.args.remote_port,\n timeout=self.job.args.remote_timeout,\n env_keep=self.job.args.env_keep)\n\n def check_remote_avocado(self):\n \"\"\"\n Checks if the remote system appears to have avocado installed\n\n The \"appears to have\" description is justified by the fact that the\n check is rather simplistic, it attempts to run an `avocado -v` command\n and checks if the output looks like what avocado would print out.\n\n :rtype: tuple with (bool, tuple)\n :returns: (True, (x, y, z)) if avocado appears to be installed and\n (False, None) otherwise.\n \"\"\"\n # This will be useful as extra debugging info in case avocado\n # doesn't seem to be available in the remote system.\n self.remote.run('env', ignore_status=True, timeout=60)\n\n result = self.remote.run('avocado -v',\n ignore_status=True,\n timeout=60)\n if result.exit_status == 127:\n return (False, None)\n\n match = self.remote_version_re.findall(result.stderr)\n if match is None:\n return (False, None)\n\n try:\n return (True, tuple(int(_) for _ in match[0]))\n except IndexError:\n return (False, None)\n\n @staticmethod\n def _parse_json_response(json_output):\n \"\"\"\n Try to parse JSON response from the remote output.\n\n It tries to find start of the json dictionary and then grabs\n everything till the end of the dictionary. It supports single-\n line as well as multi-line pretty json output.\n \"\"\"\n _result = iter(json_output.splitlines())\n json_result = \"\"\n response = None\n for line in _result: # Find the beginning\n if line.startswith('{'):\n json_result += line\n break\n else:\n raise ValueError(\"Could not find the beginning of the remote JSON\"\n \" output:\\n%s\" % output)\n if json_result.endswith('}'): # probably single-line\n try:\n response = json.loads(json_result)\n except ValueError:\n pass\n if not response:\n # Json was incomplete, try to find another end\n for line in _result:\n json_result += line\n if line.startswith('}'):\n try:\n response = json.loads(json_result)\n break\n except ValueError:\n pass\n if not response:\n raise ValueError(\"Could not find the end of the remote JSON \"\n \"output:\\n%s\" % output)\n return response\n\n def run_test(self, references, timeout):\n \"\"\"\n Run tests.\n\n :param references: a string with test references.\n :return: a dictionary with test results.\n \"\"\"\n extra_params = []\n mux_files = getattr(self.job.args, 'mux_yaml') or []\n if mux_files:\n extra_params.append(\"-m %s\" % \" \".join(mux_files))\n\n if getattr(self.job.args, \"dry_run\", False):\n extra_params.append(\"--dry-run\")\n references_str = \" \".join(references)\n\n avocado_cmd = ('avocado run --force-job-id %s --json - '\n '--archive %s %s' % (self.job.unique_id,\n references_str, \" \".join(extra_params)))\n try:\n result = self.remote.run(avocado_cmd, ignore_status=True,\n timeout=timeout)\n if result.exit_status & exit_codes.AVOCADO_JOB_FAIL:\n raise exceptions.JobError(\"Remote execution failed with: %s\" % result.stderr)\n\n except CommandTimeout:\n raise exceptions.JobError(\"Remote execution took longer than \"\n \"specified timeout (%s). Interrupting.\"\n % (timeout))\n\n try:\n json_result = self._parse_json_response(result.stdout)\n except:\n stacktrace.log_exc_info(sys.exc_info(), logger='avocado.debug')\n raise exceptions.JobError(result.stdout)\n\n for t_dict in json_result['tests']:\n logdir = os.path.join(self.job.logdir, 'test-results')\n relative_path = astring.string_to_safe_path(str(t_dict['test']))\n logdir = os.path.join(logdir, relative_path)\n t_dict['logdir'] = logdir\n t_dict['logfile'] = os.path.join(logdir, 'debug.log')\n\n return json_result\n\n def run_suite(self, test_suite, variants, timeout=0, replay_map=None,\n suite_order=\"variants-per-test\"):\n \"\"\"\n Run one or more tests and report with test result.\n\n :param params_list: a list of param dicts.\n :param variants: A varianter iterator (unused here)\n\n :return: a set with types of test failures.\n \"\"\"\n del test_suite # using self.job.references instead\n del variants # we're not using multiplexation here\n if suite_order != \"variants-per-test\" and suite_order is not None:\n raise exceptions.JobError(\"execution-order %s is not supported \"\n \"for remote execution.\" % suite_order)\n del suite_order # suite_order is ignored for now\n if not timeout: # avoid timeout = 0\n timeout = None\n summary = set()\n\n stdout_backup = sys.stdout\n stderr_backup = sys.stderr\n fabric_debugfile = os.path.join(self.job.logdir, 'remote.log')\n paramiko_logger = logging.getLogger('paramiko')\n fabric_logger = logging.getLogger('avocado.fabric')\n remote_logger = logging.getLogger('avocado.remote')\n app_logger = logging.getLogger('avocado.debug')\n fmt = ('%(asctime)s %(module)-10.10s L%(lineno)-.4d %('\n 'levelname)-5.5s| %(message)s')\n formatter = logging.Formatter(fmt=fmt, datefmt='%H:%M:%S')\n file_handler = logging.FileHandler(filename=fabric_debugfile)\n file_handler.setFormatter(formatter)\n fabric_logger.addHandler(file_handler)\n paramiko_logger.addHandler(file_handler)\n remote_logger.addHandler(file_handler)\n if self.job.args.show_job_log:\n output.add_log_handler(paramiko_logger.name)\n logger_list = [output.LOG_JOB]\n sys.stdout = output.LoggingFile(loggers=logger_list)\n sys.stderr = output.LoggingFile(loggers=logger_list)\n try:\n try:\n self.setup()\n avocado_installed, _ = self.check_remote_avocado()\n if not avocado_installed:\n raise exceptions.JobError('Remote machine does not seem to'\n ' have avocado installed')\n except Exception as details:\n stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)\n raise exceptions.JobError(details)\n results = self.run_test(self.job.references, timeout)\n remote_log_dir = os.path.dirname(results['debuglog'])\n self.result.tests_total = results['total']\n local_log_dir = self.job.logdir\n for tst in results['tests']:\n name = tst['test'].split('-', 1)\n name = [name[0]] + name[1].split(';')\n if len(name) == 3:\n name[2] = {\"variant_id\": name[2]}\n name = TestID(*name, no_digits=-1)\n state = dict(name=name,\n time_elapsed=tst['time'],\n time_start=tst['start'],\n time_end=tst['end'],\n status=tst['status'],\n logdir=tst['logdir'],\n logfile=tst['logfile'],\n fail_reason=tst['fail_reason'],\n job_logdir=local_log_dir,\n job_unique_id='')\n self.result.start_test(state)\n self.job._result_events_dispatcher.map_method('start_test',\n self.result,\n state)\n self.result.check_test(state)\n self.job._result_events_dispatcher.map_method('end_test',\n self.result,\n state)\n if state['status'] == \"INTERRUPTED\":\n summary.add(\"INTERRUPTED\")\n elif not status.mapping[state['status']]:\n summary.add(\"FAIL\")\n zip_filename = remote_log_dir + '.zip'\n zip_path_filename = os.path.join(local_log_dir,\n os.path.basename(zip_filename))\n self.remote.receive_files(local_log_dir, zip_filename)\n archive.uncompress(zip_path_filename, local_log_dir)\n os.remove(zip_path_filename)\n self.result.end_tests()\n self.job._result_events_dispatcher.map_method('post_tests',\n self.job)\n finally:\n try:\n self.tear_down()\n except Exception as details:\n stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)\n raise exceptions.JobError(details)\n sys.stdout = stdout_backup\n sys.stderr = stderr_backup\n return summary\n\n def tear_down(self):\n \"\"\"\n This method is only called when `run_suite` gets to the point of to be\n executing `setup` method and is called at the end of the execution.\n\n :warning: It might be called on `setup` exceptions, so things\n initialized during `setup` might not yet be initialized.\n \"\"\"\n pass\n\n\nclass RemoteCLI(CLI):\n\n \"\"\"\n Run tests on a remote machine\n \"\"\"\n\n name = 'remote'\n description = \"Remote machine options for 'run' subcommand\"\n\n def configure(self, parser):\n run_subcommand_parser = parser.subcommands.choices.get('run', None)\n if run_subcommand_parser is None:\n return\n\n msg = 'test execution on a remote machine'\n remote_parser = run_subcommand_parser.add_argument_group(msg)\n remote_parser.add_argument('--remote-hostname',\n dest='remote_hostname', default=None,\n help=('Specify the hostname to login on'\n ' remote machine'))\n remote_parser.add_argument('--remote-port', dest='remote_port',\n default=22, type=int,\n help=('Specify the port number to login on '\n 'remote machine. Default: %(default)s'))\n remote_parser.add_argument('--remote-username',\n dest='remote_username',\n default=getpass.getuser(),\n help=('Specify the username to login on'\n ' remote machine. Default: '\n '%(default)s'))\n remote_parser.add_argument('--remote-password',\n dest='remote_password', default=None,\n help=('Specify the password to login on'\n ' remote machine'))\n remote_parser.add_argument('--remote-key-file',\n dest='remote_key_file', default=None,\n help=('Specify an identity file with a '\n 'private key instead of a password '\n '(Example: .pem files from Amazon EC2)'))\n remote_parser.add_argument('--remote-timeout', metavar='SECONDS',\n default=60, type=int,\n help=(\"Amount of time (in seconds) to \"\n \"wait for a successful connection\"\n \" to the remote machine. Defaults\"\n \" to %(default)s seconds.\"))\n\n @staticmethod\n def _check_required_args(args, enable_arg, required_args):\n \"\"\"\n :return: True when enable_arg enabled and all required args are set\n :raise sys.exit: When missing required argument.\n \"\"\"\n if (not hasattr(args, enable_arg) or\n not getattr(args, enable_arg)):\n return False\n missing = []\n for arg in required_args:\n if not getattr(args, arg):\n missing.append(arg)\n if missing:\n LOG_UI.error(\"Use of %s requires %s arguments to be set. Please \"\n \"set %s.\", enable_arg, ', '.join(required_args),\n ', '.join(missing))\n\n return sys.exit(exit_codes.AVOCADO_FAIL)\n return True\n\n def run(self, args):\n if self._check_required_args(args, 'remote_hostname',\n ('remote_hostname',)):\n loader.loader.clear_plugins()\n loader.loader.register_plugin(loader.DummyLoader)\n args.test_runner = RemoteTestRunner\n", "path": "optional_plugins/runner_remote/avocado_runner_remote/__init__.py"}], "after_files": [{"content": "# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n#\n# See LICENSE for more details.\n#\n# Copyright: Red Hat Inc. 2014-2017\n# Authors: Ruda Moura <[email protected]>\n# Cleber Rosa <[email protected]>\n\nimport getpass\nimport json\nimport logging\nimport os\nimport re\nimport sys\nimport time\n\nimport fabric.api\nimport fabric.network\nimport fabric.operations\nimport fabric.tasks\nfrom fabric.context_managers import shell_env\nfrom fabric.exceptions import CommandTimeout\n\nfrom avocado.core import exceptions\nfrom avocado.core import exit_codes\nfrom avocado.core import loader\nfrom avocado.core import output\nfrom avocado.core import status\nfrom avocado.core.output import LOG_JOB, LOG_UI\nfrom avocado.core.plugin_interfaces import CLI\nfrom avocado.core.runner import TestRunner\nfrom avocado.core.settings import settings\nfrom avocado.core.test import TestID\nfrom avocado.utils import archive\nfrom avocado.utils import astring\nfrom avocado.utils import process\nfrom avocado.utils import stacktrace\n\n\nclass RemoterError(Exception):\n pass\n\n\nclass ConnectionError(RemoterError):\n pass\n\n\ndef _get_env_vars(env_vars):\n \"\"\"\n Gets environment variables.\n\n :param variables: A list of variables to get.\n :return: A dictionary with variables names and values.\n \"\"\"\n env_vars_map = {}\n for var in env_vars:\n value = os.environ.get(var)\n if value is not None:\n env_vars_map[var] = value\n return env_vars_map\n\n\ndef run(command, ignore_status=False, quiet=True, timeout=60):\n \"\"\"\n Executes a command on the defined fabric hosts.\n\n This is basically a wrapper to fabric.operations.run, encapsulating\n the result on an avocado process.CmdResult object. This also assumes\n the fabric environment was previously (and properly) initialized.\n\n :param command: the command string to execute.\n :param ignore_status: Whether to not raise exceptions in case the\n command's return code is different than zero.\n :param timeout: Maximum time allowed for the command to return.\n :param quiet: Whether to not log command stdout/err. Default: True.\n\n :return: the result of the remote program's execution.\n :rtype: :class:`avocado.utils.process.CmdResult`.\n :raise fabric.exceptions.CommandTimeout: When timeout exhausted.\n \"\"\"\n\n result = process.CmdResult()\n start_time = time.time()\n end_time = time.time() + (timeout or 0) # Support timeout=None\n # Fabric sometimes returns NetworkError even when timeout not reached\n fabric_result = None\n fabric_exception = None\n while True:\n try:\n fabric_result = fabric.operations.run(command=command,\n quiet=quiet,\n warn_only=True,\n timeout=timeout,\n pty=False,\n combine_stderr=False)\n break\n except fabric.network.NetworkError as details:\n fabric_exception = details\n timeout = end_time - time.time()\n if time.time() > end_time:\n break\n if fabric_result is None:\n if fabric_exception is not None:\n raise fabric_exception # it's not None pylint: disable=E0702\n else:\n raise fabric.network.NetworkError(\"Remote execution of '%s'\"\n \"failed without any \"\n \"exception. This should not \"\n \"happen.\" % command)\n end_time = time.time()\n duration = end_time - start_time\n result.command = command\n result.stdout = str(fabric_result.stdout)\n result.stderr = str(fabric_result.stderr)\n result.duration = duration\n result.exit_status = fabric_result.return_code\n result.failed = fabric_result.failed\n result.succeeded = fabric_result.succeeded\n if not ignore_status:\n if result.failed:\n raise process.CmdError(command=command, result=result)\n return result\n\n\ndef send_files(local_path, remote_path):\n \"\"\"\n Send files to the defined fabric host.\n\n This assumes the fabric environment was previously (and properly)\n initialized.\n\n :param local_path: the local path.\n :param remote_path: the remote path.\n \"\"\"\n try:\n fabric.operations.put(local_path, remote_path,\n mirror_local_mode=True)\n except ValueError:\n return False\n return True\n\n\ndef receive_files(local_path, remote_path):\n \"\"\"\n Receive files from the defined fabric host.\n\n This assumes the fabric environment was previously (and properly)\n initialized.\n\n :param local_path: the local path.\n :param remote_path: the remote path.\n \"\"\"\n try:\n fabric.operations.get(remote_path,\n local_path)\n except ValueError:\n return False\n return True\n\n\ndef _update_fabric_env(method):\n \"\"\"\n Update fabric env with the appropriate parameters.\n\n :param method: Remote method to wrap.\n :return: Wrapped method.\n \"\"\"\n def wrapper(*args, **kwargs):\n fabric.api.env.update(host_string=args[0].hostname,\n user=args[0].username,\n key_filename=args[0].key_filename,\n password=args[0].password,\n port=args[0].port)\n return method(*args, **kwargs)\n return wrapper\n\n\nclass Remote(object):\n\n \"\"\"\n Performs remote operations.\n \"\"\"\n\n def __init__(self, hostname, username=None, password=None,\n key_filename=None, port=22, timeout=60, attempts=10,\n env_keep=None):\n \"\"\"\n Creates an instance of :class:`Remote`.\n\n :param hostname: the hostname.\n :param username: the username. Default: autodetect.\n :param password: the password. Default: try to use public key.\n :param key_filename: path to an identity file (Example: .pem files\n from Amazon EC2).\n :param timeout: remote command timeout, in seconds. Default: 60.\n :param attempts: number of attempts to connect. Default: 10.\n \"\"\"\n self.hostname = hostname\n if username is None:\n username = getpass.getuser()\n self.username = username\n self.key_filename = key_filename\n # None = use public key\n self.password = password\n self.port = port\n reject_unknown_hosts = settings.get_value('remoter.behavior',\n 'reject_unknown_hosts',\n key_type=bool,\n default=False)\n disable_known_hosts = settings.get_value('remoter.behavior',\n 'disable_known_hosts',\n key_type=bool,\n default=False)\n if env_keep is None:\n self.env_vars = {}\n else:\n self.env_vars = _get_env_vars(env_keep)\n fabric.api.env.update(host_string=hostname,\n user=username,\n password=password,\n key_filename=key_filename,\n port=port,\n timeout=timeout / attempts,\n connection_attempts=attempts,\n linewise=True,\n abort_on_prompts=True,\n abort_exception=ConnectionError,\n reject_unknown_hosts=reject_unknown_hosts,\n disable_known_hosts=disable_known_hosts)\n\n @_update_fabric_env\n def run(self, command, ignore_status=False, quiet=True, timeout=60):\n \"\"\"\n Run a command on the remote host.\n\n :param command: the command string to execute.\n :param ignore_status: Whether to not raise exceptions in case the\n command's return code is different than zero.\n :param timeout: Maximum time allowed for the command to return.\n :param quiet: Whether to not log command stdout/err. Default: True.\n\n :return: the result of the remote program's execution.\n :rtype: :class:`avocado.utils.process.CmdResult`.\n :raise fabric.exceptions.CommandTimeout: When timeout exhausted.\n \"\"\"\n\n with shell_env(**self.env_vars): # pylint: disable=E1129\n return_dict = fabric.tasks.execute(run, command, ignore_status,\n quiet, timeout,\n hosts=[self.hostname])\n return return_dict[self.hostname]\n\n def uptime(self):\n \"\"\"\n Performs uptime (good to check connection).\n\n :return: the uptime string or empty string if fails.\n \"\"\"\n res = self.run('uptime', ignore_status=True)\n if res.exit_status == 0:\n return res\n else:\n return ''\n\n def makedir(self, remote_path):\n \"\"\"\n Create a directory.\n\n :param remote_path: the remote path to create.\n \"\"\"\n self.run('mkdir -p %s' % remote_path)\n\n @_update_fabric_env\n def send_files(self, local_path, remote_path):\n \"\"\"\n Send files to remote host.\n\n :param local_path: the local path.\n :param remote_path: the remote path.\n \"\"\"\n result_dict = fabric.tasks.execute(send_files, local_path,\n remote_path, hosts=[self.hostname])\n return result_dict[self.hostname]\n\n @_update_fabric_env\n def receive_files(self, local_path, remote_path):\n \"\"\"\n Receive files from the remote host.\n\n :param local_path: the local path.\n :param remote_path: the remote path.\n \"\"\"\n result_dict = fabric.tasks.execute(receive_files, local_path,\n remote_path, hosts=[self.hostname])\n return result_dict[self.hostname]\n\n\nclass RemoteTestRunner(TestRunner):\n\n \"\"\" Tooled TestRunner to run on remote machine using ssh \"\"\"\n\n # Let's use re.MULTILINE because sometimes servers might have MOTD\n # that will introduce a line break on output.\n remote_version_re = re.compile(r'^Avocado (\\d+)\\.(\\d+)\\r?$',\n re.MULTILINE)\n\n def __init__(self, job, result):\n super(RemoteTestRunner, self).__init__(job, result)\n #: remoter connection to the remote machine\n self.remote = None\n\n def setup(self):\n \"\"\" Setup remote environment \"\"\"\n stdout_claimed_by = getattr(self.job.args, 'stdout_claimed_by', None)\n if not stdout_claimed_by:\n self.job.log.info(\"LOGIN : %s@%s:%d (TIMEOUT: %s seconds)\",\n self.job.args.remote_username,\n self.job.args.remote_hostname,\n self.job.args.remote_port,\n self.job.args.remote_timeout)\n self.remote = Remote(hostname=self.job.args.remote_hostname,\n username=self.job.args.remote_username,\n password=self.job.args.remote_password,\n key_filename=self.job.args.remote_key_file,\n port=self.job.args.remote_port,\n timeout=self.job.args.remote_timeout,\n env_keep=self.job.args.env_keep)\n\n def check_remote_avocado(self):\n \"\"\"\n Checks if the remote system appears to have avocado installed\n\n The \"appears to have\" description is justified by the fact that the\n check is rather simplistic, it attempts to run an `avocado -v` command\n and checks if the output looks like what avocado would print out.\n\n :rtype: tuple with (bool, tuple)\n :returns: (True, (x, y, z)) if avocado appears to be installed and\n (False, None) otherwise.\n \"\"\"\n # This will be useful as extra debugging info in case avocado\n # doesn't seem to be available in the remote system.\n self.remote.run('env', ignore_status=True, timeout=60)\n\n result = self.remote.run('avocado -v',\n ignore_status=True,\n timeout=60)\n if result.exit_status == 127:\n return (False, None)\n\n match = self.remote_version_re.findall(result.stderr)\n if match is None:\n return (False, None)\n\n try:\n return (True, tuple(int(_) for _ in match[0]))\n except IndexError:\n return (False, None)\n\n @staticmethod\n def _parse_json_response(json_output):\n \"\"\"\n Try to parse JSON response from the remote output.\n\n It tries to find start of the json dictionary and then grabs\n everything till the end of the dictionary. It supports single-\n line as well as multi-line pretty json output.\n \"\"\"\n _result = iter(json_output.splitlines())\n json_result = \"\"\n response = None\n for line in _result: # Find the beginning\n if line.startswith('{'):\n json_result += line\n break\n else:\n raise ValueError(\"Could not find the beginning of the remote JSON\"\n \" output:\\n%s\" % output)\n if json_result.endswith('}'): # probably single-line\n try:\n response = json.loads(json_result)\n except ValueError:\n pass\n if not response:\n # Json was incomplete, try to find another end\n for line in _result:\n json_result += line\n if line.startswith('}'):\n try:\n response = json.loads(json_result)\n break\n except ValueError:\n pass\n if not response:\n raise ValueError(\"Could not find the end of the remote JSON \"\n \"output:\\n%s\" % output)\n return response\n\n def run_test(self, references, timeout):\n \"\"\"\n Run tests.\n\n :param references: a string with test references.\n :return: a dictionary with test results.\n \"\"\"\n extra_params = []\n mux_files = getattr(self.job.args, 'mux_yaml') or []\n if mux_files:\n extra_params.append(\"-m %s\" % \" \".join(mux_files))\n\n if getattr(self.job.args, \"dry_run\", False):\n extra_params.append(\"--dry-run\")\n references_str = \" \".join(references)\n\n avocado_cmd = ('avocado run --force-job-id %s --json - '\n '--archive %s %s' % (self.job.unique_id,\n references_str, \" \".join(extra_params)))\n try:\n result = self.remote.run(avocado_cmd, ignore_status=True,\n timeout=timeout)\n if result.exit_status & exit_codes.AVOCADO_JOB_FAIL:\n raise exceptions.JobError(\"Remote execution failed with: %s\" % result.stderr)\n\n except CommandTimeout:\n raise exceptions.JobError(\"Remote execution took longer than \"\n \"specified timeout (%s). Interrupting.\"\n % (timeout))\n\n try:\n json_result = self._parse_json_response(result.stdout)\n except:\n stacktrace.log_exc_info(sys.exc_info(), logger='avocado.debug')\n raise exceptions.JobError(result.stdout)\n\n for t_dict in json_result['tests']:\n logdir = os.path.join(self.job.logdir, 'test-results')\n relative_path = astring.string_to_safe_path(str(t_dict['test']))\n logdir = os.path.join(logdir, relative_path)\n t_dict['logdir'] = logdir\n t_dict['logfile'] = os.path.join(logdir, 'debug.log')\n\n return json_result\n\n def run_suite(self, test_suite, variants, timeout=0, replay_map=None,\n suite_order=\"variants-per-test\"):\n \"\"\"\n Run one or more tests and report with test result.\n\n :param params_list: a list of param dicts.\n :param variants: A varianter iterator (unused here)\n\n :return: a set with types of test failures.\n \"\"\"\n del test_suite # using self.job.references instead\n del variants # we're not using multiplexation here\n if suite_order != \"variants-per-test\" and suite_order is not None:\n raise exceptions.JobError(\"execution-order %s is not supported \"\n \"for remote execution.\" % suite_order)\n del suite_order # suite_order is ignored for now\n if not timeout: # avoid timeout = 0\n timeout = None\n summary = set()\n\n stdout_backup = sys.stdout\n stderr_backup = sys.stderr\n fabric_debugfile = os.path.join(self.job.logdir, 'remote.log')\n paramiko_logger = logging.getLogger('paramiko')\n fabric_logger = logging.getLogger('avocado.fabric')\n remote_logger = logging.getLogger('avocado.remote')\n app_logger = logging.getLogger('avocado.debug')\n fmt = ('%(asctime)s %(module)-10.10s L%(lineno)-.4d %('\n 'levelname)-5.5s| %(message)s')\n formatter = logging.Formatter(fmt=fmt, datefmt='%H:%M:%S')\n file_handler = logging.FileHandler(filename=fabric_debugfile)\n file_handler.setFormatter(formatter)\n fabric_logger.addHandler(file_handler)\n paramiko_logger.addHandler(file_handler)\n remote_logger.addHandler(file_handler)\n if self.job.args.show_job_log:\n output.add_log_handler(paramiko_logger.name)\n logger_list = [output.LOG_JOB]\n sys.stdout = output.LoggingFile(loggers=logger_list)\n sys.stderr = output.LoggingFile(loggers=logger_list)\n try:\n try:\n self.setup()\n avocado_installed, _ = self.check_remote_avocado()\n if not avocado_installed:\n raise exceptions.JobError('Remote machine does not seem to'\n ' have avocado installed')\n except Exception as details:\n stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)\n raise exceptions.JobError(details)\n results = self.run_test(self.job.references, timeout)\n remote_log_dir = os.path.dirname(results['debuglog'])\n self.result.tests_total = results['total']\n local_log_dir = self.job.logdir\n for tst in results['tests']:\n name = tst['test'].split('-', 1)\n name = [name[0]] + name[1].split(';')\n if len(name) == 3:\n name[2] = {\"variant_id\": name[2]}\n name = TestID(*name, no_digits=-1)\n state = dict(name=name,\n time_elapsed=tst['time'],\n time_start=tst['start'],\n time_end=tst['end'],\n status=tst['status'],\n logdir=tst['logdir'],\n logfile=tst['logfile'],\n fail_reason=tst['fail_reason'],\n job_logdir=local_log_dir,\n job_unique_id='')\n self.result.start_test(state)\n self.job._result_events_dispatcher.map_method('start_test',\n self.result,\n state)\n self.result.check_test(state)\n self.job._result_events_dispatcher.map_method('end_test',\n self.result,\n state)\n if state['status'] == \"INTERRUPTED\":\n summary.add(\"INTERRUPTED\")\n elif not status.mapping[state['status']]:\n summary.add(\"FAIL\")\n zip_filename = remote_log_dir + '.zip'\n zip_path_filename = os.path.join(local_log_dir,\n os.path.basename(zip_filename))\n self.remote.receive_files(local_log_dir, zip_filename)\n archive.uncompress(zip_path_filename, local_log_dir)\n os.remove(zip_path_filename)\n self.result.end_tests()\n self.job._result_events_dispatcher.map_method('post_tests',\n self.job)\n finally:\n try:\n self.tear_down()\n except Exception as details:\n stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)\n raise exceptions.JobError(details)\n sys.stdout = stdout_backup\n sys.stderr = stderr_backup\n return summary\n\n def tear_down(self):\n \"\"\"\n This method is only called when `run_suite` gets to the point of to be\n executing `setup` method and is called at the end of the execution.\n\n :warning: It might be called on `setup` exceptions, so things\n initialized during `setup` might not yet be initialized.\n \"\"\"\n pass\n\n\nclass RemoteCLI(CLI):\n\n \"\"\"\n Run tests on a remote machine\n \"\"\"\n\n name = 'remote'\n description = \"Remote machine options for 'run' subcommand\"\n\n def configure(self, parser):\n run_subcommand_parser = parser.subcommands.choices.get('run', None)\n if run_subcommand_parser is None:\n return\n\n msg = 'test execution on a remote machine'\n remote_parser = run_subcommand_parser.add_argument_group(msg)\n remote_parser.add_argument('--remote-hostname',\n dest='remote_hostname', default=None,\n help=('Specify the hostname to login on'\n ' remote machine'))\n remote_parser.add_argument('--remote-port', dest='remote_port',\n default=22, type=int,\n help=('Specify the port number to login on '\n 'remote machine. Default: %(default)s'))\n remote_parser.add_argument('--remote-username',\n dest='remote_username',\n default=getpass.getuser(),\n help=('Specify the username to login on'\n ' remote machine. Default: '\n '%(default)s'))\n remote_parser.add_argument('--remote-password',\n dest='remote_password', default=None,\n help=('Specify the password to login on'\n ' remote machine'))\n remote_parser.add_argument('--remote-key-file',\n dest='remote_key_file', default=None,\n help=('Specify an identity file with a '\n 'private key instead of a password '\n '(Example: .pem files from Amazon EC2)'))\n remote_parser.add_argument('--remote-timeout', metavar='SECONDS',\n default=60, type=int,\n help=(\"Amount of time (in seconds) to \"\n \"wait for a successful connection\"\n \" to the remote machine. Defaults\"\n \" to %(default)s seconds.\"))\n\n @staticmethod\n def _check_required_args(args, enable_arg, required_args):\n \"\"\"\n :return: True when enable_arg enabled and all required args are set\n :raise sys.exit: When missing required argument.\n \"\"\"\n if (not hasattr(args, enable_arg) or\n not getattr(args, enable_arg)):\n return False\n missing = []\n for arg in required_args:\n if not getattr(args, arg):\n missing.append(arg)\n if missing:\n LOG_UI.error(\"Use of %s requires %s arguments to be set. Please \"\n \"set %s.\", enable_arg, ', '.join(required_args),\n ', '.join(missing))\n\n return sys.exit(exit_codes.AVOCADO_FAIL)\n return True\n\n def run(self, args):\n if self._check_required_args(args, 'remote_hostname',\n ('remote_hostname',)):\n loader.loader.clear_plugins()\n loader.loader.register_plugin(loader.DummyLoader)\n args.test_runner = RemoteTestRunner\n", "path": "optional_plugins/runner_remote/avocado_runner_remote/__init__.py"}]} |
gh_patches_debug_1540 | rasdani/github-patches | git_diff | chanzuckerberg__single-cell-curation-428 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Feat(curation notebooks): display the x-request-id in the error response
When an error occurs especially 500 errors, the only piece of debug information the user has is the x-request-id. Making this value visible to the users will help developer trouble shoot errors seen by the users in the future.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `notebooks/curation_api/python/src/utils/logger.py`
Content:
```
1 import logging
2 import os
3 import sys
4
5
6 def set_log_level(log_level: str):
7 """
8 :param log_level: the logging level ("NOTSET", "DEBUG", "INFO", "WARN", "ERROR", "FATAL")
9 """
10 all_levels = logging._nameToLevel.keys()
11 if log_level not in all_levels:
12 raise Exception(f"The log_level arg must be one of {list(all_levels)}")
13 os.environ["LOG_LEVEL"] = log_level
14 logger = logging.getLogger()
15 logger.setLevel(log_level)
16 for h in logger.handlers:
17 h.setLevel(log_level)
18 print(f"Set logging level to {log_level}")
19
20
21 def get_custom_logger() -> logging.Logger:
22 """
23 Get a custom logger that will still print to stdout in notebooks.
24 :return: the logger object
25 """
26 log_level = os.getenv("LOG_LEVEL", "INFO")
27 logging.basicConfig(level=log_level)
28 logger = logging.getLogger()
29 logger.removeHandler(logger.handlers[0])
30 ch = logging.StreamHandler(stream=sys.stdout)
31 ch.setLevel(level=log_level)
32 level_printout = f"{'%(levelname)s:' if logger.level in ('WARN', 'ERROR') else ''}"
33 formatter = logging.Formatter(f"{level_printout}%(message)s")
34 ch.setFormatter(formatter)
35 logger.addHandler(ch)
36 return logger
37
38
39 def failure(logger: logging.Logger, e, *messages):
40 logger.error("\n\033[1m\033[38;5;9mFAILED\033[0m\n") # 'FAILED' in bold red
41 logger.error(e.response.reason + "\n")
42 logger.error(e.response.text + "\n")
43 if messages:
44 [logger.error(m) for m in messages]
45
46
47 def success(logger: logging.Logger, *messages):
48 logger.info("\n\033[1m\033[38;5;10mSUCCESS\033[0m\n") # 'SUCCESS' in bold green
49 if messages:
50 [logger.info(m) for m in messages]
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/notebooks/curation_api/python/src/utils/logger.py b/notebooks/curation_api/python/src/utils/logger.py
--- a/notebooks/curation_api/python/src/utils/logger.py
+++ b/notebooks/curation_api/python/src/utils/logger.py
@@ -40,6 +40,8 @@
logger.error("\n\033[1m\033[38;5;9mFAILED\033[0m\n") # 'FAILED' in bold red
logger.error(e.response.reason + "\n")
logger.error(e.response.text + "\n")
+ logger.error(f"x-request-id: {e.response.headers.get('x-request-id')}")
+
if messages:
[logger.error(m) for m in messages]
| {"golden_diff": "diff --git a/notebooks/curation_api/python/src/utils/logger.py b/notebooks/curation_api/python/src/utils/logger.py\n--- a/notebooks/curation_api/python/src/utils/logger.py\n+++ b/notebooks/curation_api/python/src/utils/logger.py\n@@ -40,6 +40,8 @@\n logger.error(\"\\n\\033[1m\\033[38;5;9mFAILED\\033[0m\\n\") # 'FAILED' in bold red\n logger.error(e.response.reason + \"\\n\")\n logger.error(e.response.text + \"\\n\")\n+ logger.error(f\"x-request-id: {e.response.headers.get('x-request-id')}\")\n+\n if messages:\n [logger.error(m) for m in messages]\n", "issue": "Feat(curation notebooks): display the x-request-id in the error response\nWhen an error occurs especially 500 errors, the only piece of debug information the user has is the x-request-id. Making this value visible to the users will help developer trouble shoot errors seen by the users in the future.\n", "before_files": [{"content": "import logging\nimport os\nimport sys\n\n\ndef set_log_level(log_level: str):\n \"\"\"\n :param log_level: the logging level (\"NOTSET\", \"DEBUG\", \"INFO\", \"WARN\", \"ERROR\", \"FATAL\")\n \"\"\"\n all_levels = logging._nameToLevel.keys()\n if log_level not in all_levels:\n raise Exception(f\"The log_level arg must be one of {list(all_levels)}\")\n os.environ[\"LOG_LEVEL\"] = log_level\n logger = logging.getLogger()\n logger.setLevel(log_level)\n for h in logger.handlers:\n h.setLevel(log_level)\n print(f\"Set logging level to {log_level}\")\n\n\ndef get_custom_logger() -> logging.Logger:\n \"\"\"\n Get a custom logger that will still print to stdout in notebooks.\n :return: the logger object\n \"\"\"\n log_level = os.getenv(\"LOG_LEVEL\", \"INFO\")\n logging.basicConfig(level=log_level)\n logger = logging.getLogger()\n logger.removeHandler(logger.handlers[0])\n ch = logging.StreamHandler(stream=sys.stdout)\n ch.setLevel(level=log_level)\n level_printout = f\"{'%(levelname)s:' if logger.level in ('WARN', 'ERROR') else ''}\"\n formatter = logging.Formatter(f\"{level_printout}%(message)s\")\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger\n\n\ndef failure(logger: logging.Logger, e, *messages):\n logger.error(\"\\n\\033[1m\\033[38;5;9mFAILED\\033[0m\\n\") # 'FAILED' in bold red\n logger.error(e.response.reason + \"\\n\")\n logger.error(e.response.text + \"\\n\")\n if messages:\n [logger.error(m) for m in messages]\n\n\ndef success(logger: logging.Logger, *messages):\n logger.info(\"\\n\\033[1m\\033[38;5;10mSUCCESS\\033[0m\\n\") # 'SUCCESS' in bold green\n if messages:\n [logger.info(m) for m in messages]\n", "path": "notebooks/curation_api/python/src/utils/logger.py"}], "after_files": [{"content": "import logging\nimport os\nimport sys\n\n\ndef set_log_level(log_level: str):\n \"\"\"\n :param log_level: the logging level (\"NOTSET\", \"DEBUG\", \"INFO\", \"WARN\", \"ERROR\", \"FATAL\")\n \"\"\"\n all_levels = logging._nameToLevel.keys()\n if log_level not in all_levels:\n raise Exception(f\"The log_level arg must be one of {list(all_levels)}\")\n os.environ[\"LOG_LEVEL\"] = log_level\n logger = logging.getLogger()\n logger.setLevel(log_level)\n for h in logger.handlers:\n h.setLevel(log_level)\n print(f\"Set logging level to {log_level}\")\n\n\ndef get_custom_logger() -> logging.Logger:\n \"\"\"\n Get a custom logger that will still print to stdout in notebooks.\n :return: the logger object\n \"\"\"\n log_level = os.getenv(\"LOG_LEVEL\", \"INFO\")\n logging.basicConfig(level=log_level)\n logger = logging.getLogger()\n logger.removeHandler(logger.handlers[0])\n ch = logging.StreamHandler(stream=sys.stdout)\n ch.setLevel(level=log_level)\n level_printout = f\"{'%(levelname)s:' if logger.level in ('WARN', 'ERROR') else ''}\"\n formatter = logging.Formatter(f\"{level_printout}%(message)s\")\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger\n\n\ndef failure(logger: logging.Logger, e, *messages):\n logger.error(\"\\n\\033[1m\\033[38;5;9mFAILED\\033[0m\\n\") # 'FAILED' in bold red\n logger.error(e.response.reason + \"\\n\")\n logger.error(e.response.text + \"\\n\")\n logger.error(f\"x-request-id: {e.response.headers.get('x-request-id')}\")\n\n if messages:\n [logger.error(m) for m in messages]\n\n\ndef success(logger: logging.Logger, *messages):\n logger.info(\"\\n\\033[1m\\033[38;5;10mSUCCESS\\033[0m\\n\") # 'SUCCESS' in bold green\n if messages:\n [logger.info(m) for m in messages]\n", "path": "notebooks/curation_api/python/src/utils/logger.py"}]} |
gh_patches_debug_1541 | rasdani/github-patches | git_diff | statsmodels__statsmodels-6518 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix simple typo: variuos -> various
# Issue Type
[x] Bug (Typo)
# Steps to Replicate
1. Examine statsmodels/sandbox/multilinear.py.
2. Search for `variuos`.
# Expected Behaviour
1. Should read `various`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `statsmodels/sandbox/multilinear.py`
Content:
```
1 """Analyze a set of multiple variables with a linear models
2
3 multiOLS:
4 take a model and test it on a series of variables defined over a
5 pandas dataset, returning a summary for each variable
6
7 multigroup:
8 take a boolean vector and the definition of several groups of variables
9 and test if the group has a fraction of true values higher than the
10 rest. It allows to test if the variables in the group are significantly
11 more significant than outside the group.
12 """
13
14 from statsmodels.compat.python import iteritems
15 from patsy import dmatrix
16 import pandas as pd
17 from statsmodels.api import OLS
18 from statsmodels.api import stats
19 import numpy as np
20 import logging
21
22 def _model2dataframe(model_endog, model_exog, model_type=OLS, **kwargs):
23 """return a series containing the summary of a linear model
24
25 All the exceding parameters will be redirected to the linear model
26 """
27 # create the linear model and perform the fit
28 model_result = model_type(model_endog, model_exog, **kwargs).fit()
29 # keeps track of some global statistics
30 statistics = pd.Series({'r2': model_result.rsquared,
31 'adj_r2': model_result.rsquared_adj})
32 # put them togher with the result for each term
33 result_df = pd.DataFrame({'params': model_result.params,
34 'pvals': model_result.pvalues,
35 'std': model_result.bse,
36 'statistics': statistics})
37 # add the complexive results for f-value and the total p-value
38 fisher_df = pd.DataFrame({'params': {'_f_test': model_result.fvalue},
39 'pvals': {'_f_test': model_result.f_pvalue}})
40 # merge them and unstack to obtain a hierarchically indexed series
41 res_series = pd.concat([result_df, fisher_df]).unstack()
42 return res_series.dropna()
43
44
45 def multiOLS(model, dataframe, column_list=None, method='fdr_bh',
46 alpha=0.05, subset=None, model_type=OLS, **kwargs):
47 """apply a linear model to several endogenous variables on a dataframe
48
49 Take a linear model definition via formula and a dataframe that will be
50 the environment of the model, and apply the linear model to a subset
51 (or all) of the columns of the dataframe. It will return a dataframe
52 with part of the information from the linear model summary.
53
54 Parameters
55 ----------
56 model : str
57 formula description of the model
58 dataframe : pandas.dataframe
59 dataframe where the model will be evaluated
60 column_list : list[str], optional
61 Names of the columns to analyze with the model.
62 If None (Default) it will perform the function on all the
63 eligible columns (numerical type and not in the model definition)
64 model_type : model class, optional
65 The type of model to be used. The default is the linear model.
66 Can be any linear model (OLS, WLS, GLS, etc..)
67 method: str, optional
68 the method used to perform the pvalue correction for multiple testing.
69 default is the Benjamini/Hochberg, other available methods are:
70
71 `bonferroni` : one-step correction
72 `sidak` : on-step correction
73 `holm-sidak` :
74 `holm` :
75 `simes-hochberg` :
76 `hommel` :
77 `fdr_bh` : Benjamini/Hochberg
78 `fdr_by` : Benjamini/Yekutieli
79
80 alpha: float, optional
81 the significance level used for the pvalue correction (default 0.05)
82 subset: bool array
83 the selected rows to be used in the regression
84
85 all the other parameters will be directed to the model creation.
86
87 Returns
88 -------
89 summary : pandas.DataFrame
90 a dataframe containing an extract from the summary of the model
91 obtained for each columns. It will give the model complexive f test
92 result and p-value, and the regression value and standard deviarion
93 for each of the regressors. The DataFrame has a hierachical column
94 structure, divided as:
95
96 - params: contains the parameters resulting from the models. Has
97 an additional column named _f_test containing the result of the
98 F test.
99 - pval: the pvalue results of the models. Has the _f_test column
100 for the significativity of the whole test.
101 - adj_pval: the corrected pvalues via the multitest function.
102 - std: uncertainties of the model parameters
103 - statistics: contains the r squared statistics and the adjusted
104 r squared.
105
106 Notes
107 -----
108 The main application of this function is on system biology to perform
109 a linear model testing of a lot of different parameters, like the
110 different genetic expression of several genes.
111
112 See Also
113 --------
114 statsmodels.stats.multitest
115 contains several functions to perform the multiple p-value correction
116
117 Examples
118 --------
119 Using the longley data as dataframe example
120
121 >>> import statsmodels.api as sm
122 >>> data = sm.datasets.longley.load_pandas()
123 >>> df = data.exog
124 >>> df['TOTEMP'] = data.endog
125
126 This will perform the specified linear model on all the
127 other columns of the dataframe
128 >>> multiOLS('GNP + 1', df)
129
130 This select only a certain subset of the columns
131 >>> multiOLS('GNP + 0', df, ['GNPDEFL', 'TOTEMP', 'POP'])
132
133 It is possible to specify a trasformation also on the target column,
134 conforming to the patsy formula specification
135 >>> multiOLS('GNP + 0', df, ['I(GNPDEFL**2)', 'center(TOTEMP)'])
136
137 It is possible to specify the subset of the dataframe
138 on which perform the analysis
139 >> multiOLS('GNP + 1', df, subset=df.GNPDEFL > 90)
140
141 Even a single column name can be given without enclosing it in a list
142 >>> multiOLS('GNP + 0', df, 'GNPDEFL')
143 """
144 # data normalization
145 # if None take all the numerical columns that are not present in the model
146 # it's not waterproof but is a good enough criterion for everyday use
147 if column_list is None:
148 column_list = [name for name in dataframe.columns
149 if dataframe[name].dtype != object and name not in model]
150 # if it's a single string transform it in a single element list
151 if isinstance(column_list, str):
152 column_list = [column_list]
153 if subset is not None:
154 dataframe = dataframe.loc[subset]
155 # perform each model and retrieve the statistics
156 col_results = {}
157 # as the model will use always the same endogenous variables
158 # we can create them once and reuse
159 model_exog = dmatrix(model, data=dataframe, return_type="dataframe")
160 for col_name in column_list:
161 # it will try to interpret the column name as a valid dataframe
162 # index as it can be several times faster. If it fails it
163 # interpret it as a patsy formula (for example for centering)
164 try:
165 model_endog = dataframe[col_name]
166 except KeyError:
167 model_endog = dmatrix(col_name + ' + 0', data=dataframe)
168 # retrieve the result and store them
169 res = _model2dataframe(model_endog, model_exog, model_type, **kwargs)
170 col_results[col_name] = res
171 # mangle them togheter and sort by complexive p-value
172 summary = pd.DataFrame(col_results)
173 # order by the p-value: the most useful model first!
174 summary = summary.T.sort_values([('pvals', '_f_test')])
175 summary.index.name = 'endogenous vars'
176 # implementing the pvalue correction method
177 smt = stats.multipletests
178 for (key1, key2) in summary:
179 if key1 != 'pvals':
180 continue
181 p_values = summary[key1, key2]
182 corrected = smt(p_values, method=method, alpha=alpha)[1]
183 # extend the dataframe of results with the column
184 # of the corrected p_values
185 summary['adj_' + key1, key2] = corrected
186 return summary
187
188
189 def _test_group(pvalues, group_name, group, exact=True):
190 """test if the objects in the group are different from the general set.
191
192 The test is performed on the pvalues set (ad a pandas series) over
193 the group specified via a fisher exact test.
194 """
195 from scipy.stats import fisher_exact, chi2_contingency
196
197 totals = 1.0 * len(pvalues)
198 total_significant = 1.0 * np.sum(pvalues)
199 cross_index = [c for c in group if c in pvalues.index]
200 missing = [c for c in group if c not in pvalues.index]
201 if missing:
202 s = ('the test is not well defined if the group '
203 'has elements not presents in the significativity '
204 'array. group name: {}, missing elements: {}')
205 logging.warning(s.format(group_name, missing))
206 # how many are significant and not in the group
207 group_total = 1.0 * len(cross_index)
208 group_sign = 1.0 * len([c for c in cross_index if pvalues[c]])
209 group_nonsign = 1.0 * (group_total - group_sign)
210 # how many are significant and not outside the group
211 extern_sign = 1.0 * (total_significant - group_sign)
212 extern_nonsign = 1.0 * (totals - total_significant - group_nonsign)
213 # make the fisher test or the chi squared
214 test = fisher_exact if exact else chi2_contingency
215 table = [[extern_nonsign, extern_sign], [group_nonsign, group_sign]]
216 pvalue = test(np.array(table))[1]
217 # is the group more represented or less?
218 part = group_sign, group_nonsign, extern_sign, extern_nonsign
219 #increase = (group_sign / group_total) > (total_significant / totals)
220 increase = np.log((totals * group_sign)
221 / (total_significant * group_total))
222 return pvalue, increase, part
223
224
225 def multigroup(pvals, groups, exact=True, keep_all=True, alpha=0.05):
226 """Test if the given groups are different from the total partition.
227
228 Given a boolean array test if each group has a proportion of positives
229 different than the complexive proportion.
230 The test can be done as an exact Fisher test or approximated as a
231 Chi squared test for more speed.
232
233 Parameters
234 ----------
235 pvals: pandas series of boolean
236 the significativity of the variables under analysis
237 groups: dict of list
238 the name of each category of variables under exam.
239 each one is a list of the variables included
240 exact: bool, optional
241 If True (default) use the fisher exact test, otherwise
242 use the chi squared test for contingencies tables.
243 For high number of elements in the array the fisher test can
244 be significantly slower than the chi squared.
245 keep_all: bool, optional
246 if False it will drop those groups where the fraction
247 of positive is below the expected result. If True (default)
248 it will keep all the significant results.
249 alpha: float, optional
250 the significativity level for the pvalue correction
251 on the whole set of groups (not inside the groups themselves).
252
253 Returns
254 -------
255 result_df: pandas dataframe
256 for each group returns:
257
258 pvals - the fisher p value of the test
259 adj_pvals - the adjusted pvals
260 increase - the log of the odd ratio between the
261 internal significant ratio versus the external one
262 _in_sign - significative elements inside the group
263 _in_non - non significative elements inside the group
264 _out_sign - significative elements outside the group
265 _out_non - non significative elements outside the group
266
267 Notes
268 -----
269 This test allow to see if a category of variables is generally better
270 suited to be described for the model. For example to see if a predictor
271 gives more information on demographic or economical parameters,
272 by creating two groups containing the endogenous variables of each
273 category.
274
275 This function is conceived for medical dataset with a lot of variables
276 that can be easily grouped into functional groups. This is because
277 The significativity of a group require a rather large number of
278 composing elements.
279
280 Examples
281 --------
282 A toy example on a real dataset, the Guerry dataset from R
283 >>> url = "https://raw.githubusercontent.com/vincentarelbundock/"
284 >>> url = url + "Rdatasets/csv/HistData/Guerry.csv"
285 >>> df = pd.read_csv(url, index_col='dept')
286
287 evaluate the relationship between the variuos paramenters whith the Wealth
288 >>> pvals = multiOLS('Wealth', df)['adj_pvals', '_f_test']
289
290 define the groups
291 >>> groups = {}
292 >>> groups['crime'] = ['Crime_prop', 'Infanticide',
293 ... 'Crime_parents', 'Desertion', 'Crime_pers']
294 >>> groups['religion'] = ['Donation_clergy', 'Clergy', 'Donations']
295 >>> groups['wealth'] = ['Commerce', 'Lottery', 'Instruction', 'Literacy']
296
297 do the analysis of the significativity
298 >>> multigroup(pvals < 0.05, groups)
299 """
300 pvals = pd.Series(pvals)
301 if not (set(pvals.unique()) <= set([False, True])):
302 raise ValueError("the series should be binary")
303 if hasattr(pvals.index, 'is_unique') and not pvals.index.is_unique:
304 raise ValueError("series with duplicated index is not accepted")
305 results = {'pvals': {},
306 'increase': {},
307 '_in_sign': {},
308 '_in_non': {},
309 '_out_sign': {},
310 '_out_non': {}}
311 for group_name, group_list in iteritems(groups):
312 res = _test_group(pvals, group_name, group_list, exact)
313 results['pvals'][group_name] = res[0]
314 results['increase'][group_name] = res[1]
315 results['_in_sign'][group_name] = res[2][0]
316 results['_in_non'][group_name] = res[2][1]
317 results['_out_sign'][group_name] = res[2][2]
318 results['_out_non'][group_name] = res[2][3]
319 result_df = pd.DataFrame(results).sort_values('pvals')
320 if not keep_all:
321 result_df = result_df[result_df.increase]
322 smt = stats.multipletests
323 corrected = smt(result_df['pvals'], method='fdr_bh', alpha=alpha)[1]
324 result_df['adj_pvals'] = corrected
325 return result_df
326
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/statsmodels/sandbox/multilinear.py b/statsmodels/sandbox/multilinear.py
--- a/statsmodels/sandbox/multilinear.py
+++ b/statsmodels/sandbox/multilinear.py
@@ -284,7 +284,7 @@
>>> url = url + "Rdatasets/csv/HistData/Guerry.csv"
>>> df = pd.read_csv(url, index_col='dept')
- evaluate the relationship between the variuos paramenters whith the Wealth
+ evaluate the relationship between the various paramenters whith the Wealth
>>> pvals = multiOLS('Wealth', df)['adj_pvals', '_f_test']
define the groups
| {"golden_diff": "diff --git a/statsmodels/sandbox/multilinear.py b/statsmodels/sandbox/multilinear.py\n--- a/statsmodels/sandbox/multilinear.py\n+++ b/statsmodels/sandbox/multilinear.py\n@@ -284,7 +284,7 @@\n >>> url = url + \"Rdatasets/csv/HistData/Guerry.csv\"\n >>> df = pd.read_csv(url, index_col='dept')\n \n- evaluate the relationship between the variuos paramenters whith the Wealth\n+ evaluate the relationship between the various paramenters whith the Wealth\n >>> pvals = multiOLS('Wealth', df)['adj_pvals', '_f_test']\n \n define the groups\n", "issue": "Fix simple typo: variuos -> various\n# Issue Type\n\n[x] Bug (Typo)\n\n# Steps to Replicate\n\n1. Examine statsmodels/sandbox/multilinear.py.\n2. Search for `variuos`.\n\n# Expected Behaviour\n\n1. Should read `various`.\n\n\n", "before_files": [{"content": "\"\"\"Analyze a set of multiple variables with a linear models\n\nmultiOLS:\n take a model and test it on a series of variables defined over a\n pandas dataset, returning a summary for each variable\n\nmultigroup:\n take a boolean vector and the definition of several groups of variables\n and test if the group has a fraction of true values higher than the\n rest. It allows to test if the variables in the group are significantly\n more significant than outside the group.\n\"\"\"\n\nfrom statsmodels.compat.python import iteritems\nfrom patsy import dmatrix\nimport pandas as pd\nfrom statsmodels.api import OLS\nfrom statsmodels.api import stats\nimport numpy as np\nimport logging\n\ndef _model2dataframe(model_endog, model_exog, model_type=OLS, **kwargs):\n \"\"\"return a series containing the summary of a linear model\n\n All the exceding parameters will be redirected to the linear model\n \"\"\"\n # create the linear model and perform the fit\n model_result = model_type(model_endog, model_exog, **kwargs).fit()\n # keeps track of some global statistics\n statistics = pd.Series({'r2': model_result.rsquared,\n 'adj_r2': model_result.rsquared_adj})\n # put them togher with the result for each term\n result_df = pd.DataFrame({'params': model_result.params,\n 'pvals': model_result.pvalues,\n 'std': model_result.bse,\n 'statistics': statistics})\n # add the complexive results for f-value and the total p-value\n fisher_df = pd.DataFrame({'params': {'_f_test': model_result.fvalue},\n 'pvals': {'_f_test': model_result.f_pvalue}})\n # merge them and unstack to obtain a hierarchically indexed series\n res_series = pd.concat([result_df, fisher_df]).unstack()\n return res_series.dropna()\n\n\ndef multiOLS(model, dataframe, column_list=None, method='fdr_bh',\n alpha=0.05, subset=None, model_type=OLS, **kwargs):\n \"\"\"apply a linear model to several endogenous variables on a dataframe\n\n Take a linear model definition via formula and a dataframe that will be\n the environment of the model, and apply the linear model to a subset\n (or all) of the columns of the dataframe. It will return a dataframe\n with part of the information from the linear model summary.\n\n Parameters\n ----------\n model : str\n formula description of the model\n dataframe : pandas.dataframe\n dataframe where the model will be evaluated\n column_list : list[str], optional\n Names of the columns to analyze with the model.\n If None (Default) it will perform the function on all the\n eligible columns (numerical type and not in the model definition)\n model_type : model class, optional\n The type of model to be used. The default is the linear model.\n Can be any linear model (OLS, WLS, GLS, etc..)\n method: str, optional\n the method used to perform the pvalue correction for multiple testing.\n default is the Benjamini/Hochberg, other available methods are:\n\n `bonferroni` : one-step correction\n `sidak` : on-step correction\n `holm-sidak` :\n `holm` :\n `simes-hochberg` :\n `hommel` :\n `fdr_bh` : Benjamini/Hochberg\n `fdr_by` : Benjamini/Yekutieli\n\n alpha: float, optional\n the significance level used for the pvalue correction (default 0.05)\n subset: bool array\n the selected rows to be used in the regression\n\n all the other parameters will be directed to the model creation.\n\n Returns\n -------\n summary : pandas.DataFrame\n a dataframe containing an extract from the summary of the model\n obtained for each columns. It will give the model complexive f test\n result and p-value, and the regression value and standard deviarion\n for each of the regressors. The DataFrame has a hierachical column\n structure, divided as:\n\n - params: contains the parameters resulting from the models. Has\n an additional column named _f_test containing the result of the\n F test.\n - pval: the pvalue results of the models. Has the _f_test column\n for the significativity of the whole test.\n - adj_pval: the corrected pvalues via the multitest function.\n - std: uncertainties of the model parameters\n - statistics: contains the r squared statistics and the adjusted\n r squared.\n\n Notes\n -----\n The main application of this function is on system biology to perform\n a linear model testing of a lot of different parameters, like the\n different genetic expression of several genes.\n\n See Also\n --------\n statsmodels.stats.multitest\n contains several functions to perform the multiple p-value correction\n\n Examples\n --------\n Using the longley data as dataframe example\n\n >>> import statsmodels.api as sm\n >>> data = sm.datasets.longley.load_pandas()\n >>> df = data.exog\n >>> df['TOTEMP'] = data.endog\n\n This will perform the specified linear model on all the\n other columns of the dataframe\n >>> multiOLS('GNP + 1', df)\n\n This select only a certain subset of the columns\n >>> multiOLS('GNP + 0', df, ['GNPDEFL', 'TOTEMP', 'POP'])\n\n It is possible to specify a trasformation also on the target column,\n conforming to the patsy formula specification\n >>> multiOLS('GNP + 0', df, ['I(GNPDEFL**2)', 'center(TOTEMP)'])\n\n It is possible to specify the subset of the dataframe\n on which perform the analysis\n >> multiOLS('GNP + 1', df, subset=df.GNPDEFL > 90)\n\n Even a single column name can be given without enclosing it in a list\n >>> multiOLS('GNP + 0', df, 'GNPDEFL')\n \"\"\"\n # data normalization\n # if None take all the numerical columns that are not present in the model\n # it's not waterproof but is a good enough criterion for everyday use\n if column_list is None:\n column_list = [name for name in dataframe.columns\n if dataframe[name].dtype != object and name not in model]\n # if it's a single string transform it in a single element list\n if isinstance(column_list, str):\n column_list = [column_list]\n if subset is not None:\n dataframe = dataframe.loc[subset]\n # perform each model and retrieve the statistics\n col_results = {}\n # as the model will use always the same endogenous variables\n # we can create them once and reuse\n model_exog = dmatrix(model, data=dataframe, return_type=\"dataframe\")\n for col_name in column_list:\n # it will try to interpret the column name as a valid dataframe\n # index as it can be several times faster. If it fails it\n # interpret it as a patsy formula (for example for centering)\n try:\n model_endog = dataframe[col_name]\n except KeyError:\n model_endog = dmatrix(col_name + ' + 0', data=dataframe)\n # retrieve the result and store them\n res = _model2dataframe(model_endog, model_exog, model_type, **kwargs)\n col_results[col_name] = res\n # mangle them togheter and sort by complexive p-value\n summary = pd.DataFrame(col_results)\n # order by the p-value: the most useful model first!\n summary = summary.T.sort_values([('pvals', '_f_test')])\n summary.index.name = 'endogenous vars'\n # implementing the pvalue correction method\n smt = stats.multipletests\n for (key1, key2) in summary:\n if key1 != 'pvals':\n continue\n p_values = summary[key1, key2]\n corrected = smt(p_values, method=method, alpha=alpha)[1]\n # extend the dataframe of results with the column\n # of the corrected p_values\n summary['adj_' + key1, key2] = corrected\n return summary\n\n\ndef _test_group(pvalues, group_name, group, exact=True):\n \"\"\"test if the objects in the group are different from the general set.\n\n The test is performed on the pvalues set (ad a pandas series) over\n the group specified via a fisher exact test.\n \"\"\"\n from scipy.stats import fisher_exact, chi2_contingency\n\n totals = 1.0 * len(pvalues)\n total_significant = 1.0 * np.sum(pvalues)\n cross_index = [c for c in group if c in pvalues.index]\n missing = [c for c in group if c not in pvalues.index]\n if missing:\n s = ('the test is not well defined if the group '\n 'has elements not presents in the significativity '\n 'array. group name: {}, missing elements: {}')\n logging.warning(s.format(group_name, missing))\n # how many are significant and not in the group\n group_total = 1.0 * len(cross_index)\n group_sign = 1.0 * len([c for c in cross_index if pvalues[c]])\n group_nonsign = 1.0 * (group_total - group_sign)\n # how many are significant and not outside the group\n extern_sign = 1.0 * (total_significant - group_sign)\n extern_nonsign = 1.0 * (totals - total_significant - group_nonsign)\n # make the fisher test or the chi squared\n test = fisher_exact if exact else chi2_contingency\n table = [[extern_nonsign, extern_sign], [group_nonsign, group_sign]]\n pvalue = test(np.array(table))[1]\n # is the group more represented or less?\n part = group_sign, group_nonsign, extern_sign, extern_nonsign\n #increase = (group_sign / group_total) > (total_significant / totals)\n increase = np.log((totals * group_sign)\n / (total_significant * group_total))\n return pvalue, increase, part\n\n\ndef multigroup(pvals, groups, exact=True, keep_all=True, alpha=0.05):\n \"\"\"Test if the given groups are different from the total partition.\n\n Given a boolean array test if each group has a proportion of positives\n different than the complexive proportion.\n The test can be done as an exact Fisher test or approximated as a\n Chi squared test for more speed.\n\n Parameters\n ----------\n pvals: pandas series of boolean\n the significativity of the variables under analysis\n groups: dict of list\n the name of each category of variables under exam.\n each one is a list of the variables included\n exact: bool, optional\n If True (default) use the fisher exact test, otherwise\n use the chi squared test for contingencies tables.\n For high number of elements in the array the fisher test can\n be significantly slower than the chi squared.\n keep_all: bool, optional\n if False it will drop those groups where the fraction\n of positive is below the expected result. If True (default)\n it will keep all the significant results.\n alpha: float, optional\n the significativity level for the pvalue correction\n on the whole set of groups (not inside the groups themselves).\n\n Returns\n -------\n result_df: pandas dataframe\n for each group returns:\n\n pvals - the fisher p value of the test\n adj_pvals - the adjusted pvals\n increase - the log of the odd ratio between the\n internal significant ratio versus the external one\n _in_sign - significative elements inside the group\n _in_non - non significative elements inside the group\n _out_sign - significative elements outside the group\n _out_non - non significative elements outside the group\n\n Notes\n -----\n This test allow to see if a category of variables is generally better\n suited to be described for the model. For example to see if a predictor\n gives more information on demographic or economical parameters,\n by creating two groups containing the endogenous variables of each\n category.\n\n This function is conceived for medical dataset with a lot of variables\n that can be easily grouped into functional groups. This is because\n The significativity of a group require a rather large number of\n composing elements.\n\n Examples\n --------\n A toy example on a real dataset, the Guerry dataset from R\n >>> url = \"https://raw.githubusercontent.com/vincentarelbundock/\"\n >>> url = url + \"Rdatasets/csv/HistData/Guerry.csv\"\n >>> df = pd.read_csv(url, index_col='dept')\n\n evaluate the relationship between the variuos paramenters whith the Wealth\n >>> pvals = multiOLS('Wealth', df)['adj_pvals', '_f_test']\n\n define the groups\n >>> groups = {}\n >>> groups['crime'] = ['Crime_prop', 'Infanticide',\n ... 'Crime_parents', 'Desertion', 'Crime_pers']\n >>> groups['religion'] = ['Donation_clergy', 'Clergy', 'Donations']\n >>> groups['wealth'] = ['Commerce', 'Lottery', 'Instruction', 'Literacy']\n\n do the analysis of the significativity\n >>> multigroup(pvals < 0.05, groups)\n \"\"\"\n pvals = pd.Series(pvals)\n if not (set(pvals.unique()) <= set([False, True])):\n raise ValueError(\"the series should be binary\")\n if hasattr(pvals.index, 'is_unique') and not pvals.index.is_unique:\n raise ValueError(\"series with duplicated index is not accepted\")\n results = {'pvals': {},\n 'increase': {},\n '_in_sign': {},\n '_in_non': {},\n '_out_sign': {},\n '_out_non': {}}\n for group_name, group_list in iteritems(groups):\n res = _test_group(pvals, group_name, group_list, exact)\n results['pvals'][group_name] = res[0]\n results['increase'][group_name] = res[1]\n results['_in_sign'][group_name] = res[2][0]\n results['_in_non'][group_name] = res[2][1]\n results['_out_sign'][group_name] = res[2][2]\n results['_out_non'][group_name] = res[2][3]\n result_df = pd.DataFrame(results).sort_values('pvals')\n if not keep_all:\n result_df = result_df[result_df.increase]\n smt = stats.multipletests\n corrected = smt(result_df['pvals'], method='fdr_bh', alpha=alpha)[1]\n result_df['adj_pvals'] = corrected\n return result_df\n", "path": "statsmodels/sandbox/multilinear.py"}], "after_files": [{"content": "\"\"\"Analyze a set of multiple variables with a linear models\n\nmultiOLS:\n take a model and test it on a series of variables defined over a\n pandas dataset, returning a summary for each variable\n\nmultigroup:\n take a boolean vector and the definition of several groups of variables\n and test if the group has a fraction of true values higher than the\n rest. It allows to test if the variables in the group are significantly\n more significant than outside the group.\n\"\"\"\n\nfrom statsmodels.compat.python import iteritems\nfrom patsy import dmatrix\nimport pandas as pd\nfrom statsmodels.api import OLS\nfrom statsmodels.api import stats\nimport numpy as np\nimport logging\n\ndef _model2dataframe(model_endog, model_exog, model_type=OLS, **kwargs):\n \"\"\"return a series containing the summary of a linear model\n\n All the exceding parameters will be redirected to the linear model\n \"\"\"\n # create the linear model and perform the fit\n model_result = model_type(model_endog, model_exog, **kwargs).fit()\n # keeps track of some global statistics\n statistics = pd.Series({'r2': model_result.rsquared,\n 'adj_r2': model_result.rsquared_adj})\n # put them togher with the result for each term\n result_df = pd.DataFrame({'params': model_result.params,\n 'pvals': model_result.pvalues,\n 'std': model_result.bse,\n 'statistics': statistics})\n # add the complexive results for f-value and the total p-value\n fisher_df = pd.DataFrame({'params': {'_f_test': model_result.fvalue},\n 'pvals': {'_f_test': model_result.f_pvalue}})\n # merge them and unstack to obtain a hierarchically indexed series\n res_series = pd.concat([result_df, fisher_df]).unstack()\n return res_series.dropna()\n\n\ndef multiOLS(model, dataframe, column_list=None, method='fdr_bh',\n alpha=0.05, subset=None, model_type=OLS, **kwargs):\n \"\"\"apply a linear model to several endogenous variables on a dataframe\n\n Take a linear model definition via formula and a dataframe that will be\n the environment of the model, and apply the linear model to a subset\n (or all) of the columns of the dataframe. It will return a dataframe\n with part of the information from the linear model summary.\n\n Parameters\n ----------\n model : str\n formula description of the model\n dataframe : pandas.dataframe\n dataframe where the model will be evaluated\n column_list : list[str], optional\n Names of the columns to analyze with the model.\n If None (Default) it will perform the function on all the\n eligible columns (numerical type and not in the model definition)\n model_type : model class, optional\n The type of model to be used. The default is the linear model.\n Can be any linear model (OLS, WLS, GLS, etc..)\n method: str, optional\n the method used to perform the pvalue correction for multiple testing.\n default is the Benjamini/Hochberg, other available methods are:\n\n `bonferroni` : one-step correction\n `sidak` : on-step correction\n `holm-sidak` :\n `holm` :\n `simes-hochberg` :\n `hommel` :\n `fdr_bh` : Benjamini/Hochberg\n `fdr_by` : Benjamini/Yekutieli\n\n alpha: float, optional\n the significance level used for the pvalue correction (default 0.05)\n subset: bool array\n the selected rows to be used in the regression\n\n all the other parameters will be directed to the model creation.\n\n Returns\n -------\n summary : pandas.DataFrame\n a dataframe containing an extract from the summary of the model\n obtained for each columns. It will give the model complexive f test\n result and p-value, and the regression value and standard deviarion\n for each of the regressors. The DataFrame has a hierachical column\n structure, divided as:\n\n - params: contains the parameters resulting from the models. Has\n an additional column named _f_test containing the result of the\n F test.\n - pval: the pvalue results of the models. Has the _f_test column\n for the significativity of the whole test.\n - adj_pval: the corrected pvalues via the multitest function.\n - std: uncertainties of the model parameters\n - statistics: contains the r squared statistics and the adjusted\n r squared.\n\n Notes\n -----\n The main application of this function is on system biology to perform\n a linear model testing of a lot of different parameters, like the\n different genetic expression of several genes.\n\n See Also\n --------\n statsmodels.stats.multitest\n contains several functions to perform the multiple p-value correction\n\n Examples\n --------\n Using the longley data as dataframe example\n\n >>> import statsmodels.api as sm\n >>> data = sm.datasets.longley.load_pandas()\n >>> df = data.exog\n >>> df['TOTEMP'] = data.endog\n\n This will perform the specified linear model on all the\n other columns of the dataframe\n >>> multiOLS('GNP + 1', df)\n\n This select only a certain subset of the columns\n >>> multiOLS('GNP + 0', df, ['GNPDEFL', 'TOTEMP', 'POP'])\n\n It is possible to specify a trasformation also on the target column,\n conforming to the patsy formula specification\n >>> multiOLS('GNP + 0', df, ['I(GNPDEFL**2)', 'center(TOTEMP)'])\n\n It is possible to specify the subset of the dataframe\n on which perform the analysis\n >> multiOLS('GNP + 1', df, subset=df.GNPDEFL > 90)\n\n Even a single column name can be given without enclosing it in a list\n >>> multiOLS('GNP + 0', df, 'GNPDEFL')\n \"\"\"\n # data normalization\n # if None take all the numerical columns that are not present in the model\n # it's not waterproof but is a good enough criterion for everyday use\n if column_list is None:\n column_list = [name for name in dataframe.columns\n if dataframe[name].dtype != object and name not in model]\n # if it's a single string transform it in a single element list\n if isinstance(column_list, str):\n column_list = [column_list]\n if subset is not None:\n dataframe = dataframe.loc[subset]\n # perform each model and retrieve the statistics\n col_results = {}\n # as the model will use always the same endogenous variables\n # we can create them once and reuse\n model_exog = dmatrix(model, data=dataframe, return_type=\"dataframe\")\n for col_name in column_list:\n # it will try to interpret the column name as a valid dataframe\n # index as it can be several times faster. If it fails it\n # interpret it as a patsy formula (for example for centering)\n try:\n model_endog = dataframe[col_name]\n except KeyError:\n model_endog = dmatrix(col_name + ' + 0', data=dataframe)\n # retrieve the result and store them\n res = _model2dataframe(model_endog, model_exog, model_type, **kwargs)\n col_results[col_name] = res\n # mangle them togheter and sort by complexive p-value\n summary = pd.DataFrame(col_results)\n # order by the p-value: the most useful model first!\n summary = summary.T.sort_values([('pvals', '_f_test')])\n summary.index.name = 'endogenous vars'\n # implementing the pvalue correction method\n smt = stats.multipletests\n for (key1, key2) in summary:\n if key1 != 'pvals':\n continue\n p_values = summary[key1, key2]\n corrected = smt(p_values, method=method, alpha=alpha)[1]\n # extend the dataframe of results with the column\n # of the corrected p_values\n summary['adj_' + key1, key2] = corrected\n return summary\n\n\ndef _test_group(pvalues, group_name, group, exact=True):\n \"\"\"test if the objects in the group are different from the general set.\n\n The test is performed on the pvalues set (ad a pandas series) over\n the group specified via a fisher exact test.\n \"\"\"\n from scipy.stats import fisher_exact, chi2_contingency\n\n totals = 1.0 * len(pvalues)\n total_significant = 1.0 * np.sum(pvalues)\n cross_index = [c for c in group if c in pvalues.index]\n missing = [c for c in group if c not in pvalues.index]\n if missing:\n s = ('the test is not well defined if the group '\n 'has elements not presents in the significativity '\n 'array. group name: {}, missing elements: {}')\n logging.warning(s.format(group_name, missing))\n # how many are significant and not in the group\n group_total = 1.0 * len(cross_index)\n group_sign = 1.0 * len([c for c in cross_index if pvalues[c]])\n group_nonsign = 1.0 * (group_total - group_sign)\n # how many are significant and not outside the group\n extern_sign = 1.0 * (total_significant - group_sign)\n extern_nonsign = 1.0 * (totals - total_significant - group_nonsign)\n # make the fisher test or the chi squared\n test = fisher_exact if exact else chi2_contingency\n table = [[extern_nonsign, extern_sign], [group_nonsign, group_sign]]\n pvalue = test(np.array(table))[1]\n # is the group more represented or less?\n part = group_sign, group_nonsign, extern_sign, extern_nonsign\n #increase = (group_sign / group_total) > (total_significant / totals)\n increase = np.log((totals * group_sign)\n / (total_significant * group_total))\n return pvalue, increase, part\n\n\ndef multigroup(pvals, groups, exact=True, keep_all=True, alpha=0.05):\n \"\"\"Test if the given groups are different from the total partition.\n\n Given a boolean array test if each group has a proportion of positives\n different than the complexive proportion.\n The test can be done as an exact Fisher test or approximated as a\n Chi squared test for more speed.\n\n Parameters\n ----------\n pvals: pandas series of boolean\n the significativity of the variables under analysis\n groups: dict of list\n the name of each category of variables under exam.\n each one is a list of the variables included\n exact: bool, optional\n If True (default) use the fisher exact test, otherwise\n use the chi squared test for contingencies tables.\n For high number of elements in the array the fisher test can\n be significantly slower than the chi squared.\n keep_all: bool, optional\n if False it will drop those groups where the fraction\n of positive is below the expected result. If True (default)\n it will keep all the significant results.\n alpha: float, optional\n the significativity level for the pvalue correction\n on the whole set of groups (not inside the groups themselves).\n\n Returns\n -------\n result_df: pandas dataframe\n for each group returns:\n\n pvals - the fisher p value of the test\n adj_pvals - the adjusted pvals\n increase - the log of the odd ratio between the\n internal significant ratio versus the external one\n _in_sign - significative elements inside the group\n _in_non - non significative elements inside the group\n _out_sign - significative elements outside the group\n _out_non - non significative elements outside the group\n\n Notes\n -----\n This test allow to see if a category of variables is generally better\n suited to be described for the model. For example to see if a predictor\n gives more information on demographic or economical parameters,\n by creating two groups containing the endogenous variables of each\n category.\n\n This function is conceived for medical dataset with a lot of variables\n that can be easily grouped into functional groups. This is because\n The significativity of a group require a rather large number of\n composing elements.\n\n Examples\n --------\n A toy example on a real dataset, the Guerry dataset from R\n >>> url = \"https://raw.githubusercontent.com/vincentarelbundock/\"\n >>> url = url + \"Rdatasets/csv/HistData/Guerry.csv\"\n >>> df = pd.read_csv(url, index_col='dept')\n\n evaluate the relationship between the various paramenters whith the Wealth\n >>> pvals = multiOLS('Wealth', df)['adj_pvals', '_f_test']\n\n define the groups\n >>> groups = {}\n >>> groups['crime'] = ['Crime_prop', 'Infanticide',\n ... 'Crime_parents', 'Desertion', 'Crime_pers']\n >>> groups['religion'] = ['Donation_clergy', 'Clergy', 'Donations']\n >>> groups['wealth'] = ['Commerce', 'Lottery', 'Instruction', 'Literacy']\n\n do the analysis of the significativity\n >>> multigroup(pvals < 0.05, groups)\n \"\"\"\n pvals = pd.Series(pvals)\n if not (set(pvals.unique()) <= set([False, True])):\n raise ValueError(\"the series should be binary\")\n if hasattr(pvals.index, 'is_unique') and not pvals.index.is_unique:\n raise ValueError(\"series with duplicated index is not accepted\")\n results = {'pvals': {},\n 'increase': {},\n '_in_sign': {},\n '_in_non': {},\n '_out_sign': {},\n '_out_non': {}}\n for group_name, group_list in iteritems(groups):\n res = _test_group(pvals, group_name, group_list, exact)\n results['pvals'][group_name] = res[0]\n results['increase'][group_name] = res[1]\n results['_in_sign'][group_name] = res[2][0]\n results['_in_non'][group_name] = res[2][1]\n results['_out_sign'][group_name] = res[2][2]\n results['_out_non'][group_name] = res[2][3]\n result_df = pd.DataFrame(results).sort_values('pvals')\n if not keep_all:\n result_df = result_df[result_df.increase]\n smt = stats.multipletests\n corrected = smt(result_df['pvals'], method='fdr_bh', alpha=alpha)[1]\n result_df['adj_pvals'] = corrected\n return result_df\n", "path": "statsmodels/sandbox/multilinear.py"}]} |
gh_patches_debug_1542 | rasdani/github-patches | git_diff | holoviz__panel-645 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Port not being released after stopping threaded holoviz panel app server
Closing a threaded panel app holds on to the port it started on. This is different behavior than closing an app initialized without threading.
```
usgs_logo = pn.panel('../assets/usgs_logo.png', height=130)
column = pn.Column(usgs_logo)
app = column.show(port=8889)
app.stop()
```
Port 8889 is released.
```
app = row.show(port=8889, threaded=True)
app.stop()
```
Port 8889 is not released.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `panel/io/server.py`
Content:
```
1 """
2 Utilities for creating bokeh Server instances.
3 """
4 from __future__ import absolute_import, division, unicode_literals
5
6 import signal
7 import threading
8
9 from functools import partial
10
11 from bokeh.server.server import Server
12
13 from .state import state
14
15
16 #---------------------------------------------------------------------
17 # Private API
18 #---------------------------------------------------------------------
19
20 def _origin_url(url):
21 if url.startswith("http"):
22 url = url.split("//")[1]
23 return url
24
25
26 def _server_url(url, port):
27 if url.startswith("http"):
28 return '%s:%d%s' % (url.rsplit(':', 1)[0], port, "/")
29 else:
30 return 'http://%s:%d%s' % (url.split(':')[0], port, "/")
31
32 #---------------------------------------------------------------------
33 # Public API
34 #---------------------------------------------------------------------
35
36 def get_server(panel, port=0, websocket_origin=None, loop=None,
37 show=False, start=False, **kwargs):
38 """
39 Returns a Server instance with this panel attached as the root
40 app.
41
42 Arguments
43 ---------
44 port: int (optional, default=0)
45 Allows specifying a specific port
46 websocket_origin: str or list(str) (optional)
47 A list of hosts that can connect to the websocket.
48
49 This is typically required when embedding a server app in
50 an external web site.
51
52 If None, "localhost" is used.
53 loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())
54 The tornado IOLoop to run the Server on
55 show : boolean (optional, default=False)
56 Whether to open the server in a new browser tab on start
57 start : boolean(optional, default=False)
58 Whether to start the Server
59 kwargs: dict
60 Additional keyword arguments to pass to Server instance
61
62 Returns
63 -------
64 server : bokeh.server.server.Server
65 Bokeh Server instance running this panel
66 """
67 from tornado.ioloop import IOLoop
68 opts = dict(kwargs)
69 if loop:
70 loop.make_current()
71 opts['io_loop'] = loop
72 else:
73 opts['io_loop'] = IOLoop.current()
74
75 if websocket_origin:
76 if not isinstance(websocket_origin, list):
77 websocket_origin = [websocket_origin]
78 opts['allow_websocket_origin'] = websocket_origin
79
80 server_id = kwargs.pop('server_id', None)
81 server = Server({'/': partial(panel._modify_doc, server_id)}, port=port, **opts)
82 if server_id:
83 state._servers[server_id] = (server, panel, [])
84
85 if show:
86 def show_callback():
87 server.show('/')
88 server.io_loop.add_callback(show_callback)
89
90 def sig_exit(*args, **kwargs):
91 server.io_loop.add_callback_from_signal(do_stop)
92
93 def do_stop(*args, **kwargs):
94 server.io_loop.stop()
95
96 try:
97 signal.signal(signal.SIGINT, sig_exit)
98 except ValueError:
99 pass # Can't use signal on a thread
100
101 if start:
102 server.start()
103 try:
104 server.io_loop.start()
105 except RuntimeError:
106 pass
107 return server
108
109
110 class StoppableThread(threading.Thread):
111 """Thread class with a stop() method."""
112
113 def __init__(self, io_loop=None, timeout=1000, **kwargs):
114 from tornado import ioloop
115 super(StoppableThread, self).__init__(**kwargs)
116 self._stop_event = threading.Event()
117 self.io_loop = io_loop
118 self._cb = ioloop.PeriodicCallback(self._check_stopped, timeout)
119 self._cb.start()
120
121 def _check_stopped(self):
122 if self.stopped:
123 self._cb.stop()
124 self.io_loop.stop()
125
126 def stop(self):
127 self._stop_event.set()
128
129 @property
130 def stopped(self):
131 return self._stop_event.is_set()
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/panel/io/server.py b/panel/io/server.py
--- a/panel/io/server.py
+++ b/panel/io/server.py
@@ -122,6 +122,15 @@
if self.stopped:
self._cb.stop()
self.io_loop.stop()
+
+ def run(self):
+ try:
+ if self._target:
+ bokeh_server = self._target(*self._args, **self._kwargs)
+ finally:
+ if isinstance(bokeh_server, Server):
+ bokeh_server.stop()
+ del self._target, self._args, self._kwargs
def stop(self):
self._stop_event.set()
| {"golden_diff": "diff --git a/panel/io/server.py b/panel/io/server.py\n--- a/panel/io/server.py\n+++ b/panel/io/server.py\n@@ -122,6 +122,15 @@\n if self.stopped:\n self._cb.stop()\n self.io_loop.stop()\n+ \n+ def run(self):\n+ try:\n+ if self._target:\n+ bokeh_server = self._target(*self._args, **self._kwargs)\n+ finally:\n+ if isinstance(bokeh_server, Server):\n+ bokeh_server.stop()\n+ del self._target, self._args, self._kwargs\n \n def stop(self):\n self._stop_event.set()\n", "issue": "Port not being released after stopping threaded holoviz panel app server\nClosing a threaded panel app holds on to the port it started on. This is different behavior than closing an app initialized without threading.\r\n\r\n```\r\nusgs_logo = pn.panel('../assets/usgs_logo.png', height=130)\r\n\r\ncolumn = pn.Column(usgs_logo)\r\n\r\napp = column.show(port=8889)\r\n\r\napp.stop()\r\n```\r\n\r\nPort 8889 is released.\r\n\r\n```\r\napp = row.show(port=8889, threaded=True)\r\n\r\napp.stop()\r\n```\r\nPort 8889 is not released.\n", "before_files": [{"content": "\"\"\"\nUtilities for creating bokeh Server instances.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport signal\nimport threading\n\nfrom functools import partial\n\nfrom bokeh.server.server import Server\n\nfrom .state import state\n\n\n#---------------------------------------------------------------------\n# Private API\n#---------------------------------------------------------------------\n\ndef _origin_url(url):\n if url.startswith(\"http\"):\n url = url.split(\"//\")[1]\n return url\n\n\ndef _server_url(url, port):\n if url.startswith(\"http\"):\n return '%s:%d%s' % (url.rsplit(':', 1)[0], port, \"/\")\n else:\n return 'http://%s:%d%s' % (url.split(':')[0], port, \"/\")\n\n#---------------------------------------------------------------------\n# Public API\n#---------------------------------------------------------------------\n\ndef get_server(panel, port=0, websocket_origin=None, loop=None,\n show=False, start=False, **kwargs):\n \"\"\"\n Returns a Server instance with this panel attached as the root\n app.\n\n Arguments\n ---------\n port: int (optional, default=0)\n Allows specifying a specific port\n websocket_origin: str or list(str) (optional)\n A list of hosts that can connect to the websocket.\n\n This is typically required when embedding a server app in\n an external web site.\n\n If None, \"localhost\" is used.\n loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())\n The tornado IOLoop to run the Server on\n show : boolean (optional, default=False)\n Whether to open the server in a new browser tab on start\n start : boolean(optional, default=False)\n Whether to start the Server\n kwargs: dict\n Additional keyword arguments to pass to Server instance\n\n Returns\n -------\n server : bokeh.server.server.Server\n Bokeh Server instance running this panel\n \"\"\"\n from tornado.ioloop import IOLoop\n opts = dict(kwargs)\n if loop:\n loop.make_current()\n opts['io_loop'] = loop\n else:\n opts['io_loop'] = IOLoop.current()\n\n if websocket_origin:\n if not isinstance(websocket_origin, list):\n websocket_origin = [websocket_origin]\n opts['allow_websocket_origin'] = websocket_origin\n\n server_id = kwargs.pop('server_id', None)\n server = Server({'/': partial(panel._modify_doc, server_id)}, port=port, **opts)\n if server_id:\n state._servers[server_id] = (server, panel, [])\n\n if show:\n def show_callback():\n server.show('/')\n server.io_loop.add_callback(show_callback)\n\n def sig_exit(*args, **kwargs):\n server.io_loop.add_callback_from_signal(do_stop)\n\n def do_stop(*args, **kwargs):\n server.io_loop.stop()\n\n try:\n signal.signal(signal.SIGINT, sig_exit)\n except ValueError:\n pass # Can't use signal on a thread\n\n if start:\n server.start()\n try:\n server.io_loop.start()\n except RuntimeError:\n pass\n return server\n\n\nclass StoppableThread(threading.Thread):\n \"\"\"Thread class with a stop() method.\"\"\"\n\n def __init__(self, io_loop=None, timeout=1000, **kwargs):\n from tornado import ioloop\n super(StoppableThread, self).__init__(**kwargs)\n self._stop_event = threading.Event()\n self.io_loop = io_loop\n self._cb = ioloop.PeriodicCallback(self._check_stopped, timeout)\n self._cb.start()\n\n def _check_stopped(self):\n if self.stopped:\n self._cb.stop()\n self.io_loop.stop()\n\n def stop(self):\n self._stop_event.set()\n\n @property\n def stopped(self):\n return self._stop_event.is_set()\n", "path": "panel/io/server.py"}], "after_files": [{"content": "\"\"\"\nUtilities for creating bokeh Server instances.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport signal\nimport threading\n\nfrom functools import partial\n\nfrom bokeh.server.server import Server\n\nfrom .state import state\n\n\n#---------------------------------------------------------------------\n# Private API\n#---------------------------------------------------------------------\n\ndef _origin_url(url):\n if url.startswith(\"http\"):\n url = url.split(\"//\")[1]\n return url\n\n\ndef _server_url(url, port):\n if url.startswith(\"http\"):\n return '%s:%d%s' % (url.rsplit(':', 1)[0], port, \"/\")\n else:\n return 'http://%s:%d%s' % (url.split(':')[0], port, \"/\")\n\n#---------------------------------------------------------------------\n# Public API\n#---------------------------------------------------------------------\n\ndef get_server(panel, port=0, websocket_origin=None, loop=None,\n show=False, start=False, **kwargs):\n \"\"\"\n Returns a Server instance with this panel attached as the root\n app.\n\n Arguments\n ---------\n port: int (optional, default=0)\n Allows specifying a specific port\n websocket_origin: str or list(str) (optional)\n A list of hosts that can connect to the websocket.\n\n This is typically required when embedding a server app in\n an external web site.\n\n If None, \"localhost\" is used.\n loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())\n The tornado IOLoop to run the Server on\n show : boolean (optional, default=False)\n Whether to open the server in a new browser tab on start\n start : boolean(optional, default=False)\n Whether to start the Server\n kwargs: dict\n Additional keyword arguments to pass to Server instance\n\n Returns\n -------\n server : bokeh.server.server.Server\n Bokeh Server instance running this panel\n \"\"\"\n from tornado.ioloop import IOLoop\n opts = dict(kwargs)\n if loop:\n loop.make_current()\n opts['io_loop'] = loop\n else:\n opts['io_loop'] = IOLoop.current()\n\n if websocket_origin:\n if not isinstance(websocket_origin, list):\n websocket_origin = [websocket_origin]\n opts['allow_websocket_origin'] = websocket_origin\n\n server_id = kwargs.pop('server_id', None)\n server = Server({'/': partial(panel._modify_doc, server_id)}, port=port, **opts)\n if server_id:\n state._servers[server_id] = (server, panel, [])\n\n if show:\n def show_callback():\n server.show('/')\n server.io_loop.add_callback(show_callback)\n\n def sig_exit(*args, **kwargs):\n server.io_loop.add_callback_from_signal(do_stop)\n\n def do_stop(*args, **kwargs):\n server.io_loop.stop()\n\n try:\n signal.signal(signal.SIGINT, sig_exit)\n except ValueError:\n pass # Can't use signal on a thread\n\n if start:\n server.start()\n try:\n server.io_loop.start()\n except RuntimeError:\n pass\n return server\n\n\nclass StoppableThread(threading.Thread):\n \"\"\"Thread class with a stop() method.\"\"\"\n\n def __init__(self, io_loop=None, timeout=1000, **kwargs):\n from tornado import ioloop\n super(StoppableThread, self).__init__(**kwargs)\n self._stop_event = threading.Event()\n self.io_loop = io_loop\n self._cb = ioloop.PeriodicCallback(self._check_stopped, timeout)\n self._cb.start()\n\n def _check_stopped(self):\n if self.stopped:\n self._cb.stop()\n self.io_loop.stop()\n \n def run(self):\n try:\n if self._target:\n bokeh_server = self._target(*self._args, **self._kwargs)\n finally:\n if isinstance(bokeh_server, Server):\n bokeh_server.stop()\n del self._target, self._args, self._kwargs\n\n def stop(self):\n self._stop_event.set()\n\n @property\n def stopped(self):\n return self._stop_event.is_set()\n", "path": "panel/io/server.py"}]} |
gh_patches_debug_1543 | rasdani/github-patches | git_diff | hylang__hy-2328 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
F-strings don't allow `\N{…}`
```
Hy 0.24.0 using CPython(default) 3.9.6 on Darwin
=> (print "\N{slightly smiling face}")
🙂
=> (print f"\N{slightly smiling face}")
Traceback (most recent call last):
File "stdin-eda06fe2e57521e633661e996d6216e5bab61d9b", line 1
(print f"\N{slightly smiling face}")
^
hy.reader.exceptions.LexException: (unicode error) 'unicodeescape' codec can't decode bytes in position 0-1: malformed \N character escape (<string>, line 1)
```
Python 3.9 works fine:
```
>>> print("\N{slightly smiling face}")
🙂
>>> print(f"\N{slightly smiling face}")
🙂
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hy/reader/hy_reader.py`
Content:
```
1 "Character reader for parsing Hy source."
2
3 import hy
4 from hy.models import (
5 Bytes,
6 Complex,
7 Dict,
8 Expression,
9 FComponent,
10 Float,
11 FString,
12 Integer,
13 Keyword,
14 List,
15 Set,
16 String,
17 Symbol,
18 Tuple,
19 as_model,
20 )
21
22 from .exceptions import LexException, PrematureEndOfInput
23 from .mangling import mangle
24 from .reader import Reader, isnormalizedspace
25
26
27 def sym(name):
28 return Symbol(name, from_parser=True)
29
30
31 # Note: This is subtly different from
32 # the `mkexpr` in hy/compiler.py !
33 def mkexpr(root, *args):
34 return Expression((sym(root) if isinstance(root, str) else root, *args))
35
36
37 def symbol_like(ident, reader=None):
38 """Generate a Hy model from an identifier-like string.
39
40 Also verifies the syntax of dot notation and validity of symbol names.
41
42 Parameters
43 ----------
44 ident : str
45 Text to convert.
46
47 reader : Reader, optional
48 The reader to use, if any; used for generating position data for errors.
49
50 Returns
51 -------
52 out : a hy.models.Object subtype corresponding to the parsed text.
53 """
54 try:
55 return Integer(ident)
56 except ValueError:
57 pass
58 try:
59 return Float(ident)
60 except ValueError:
61 pass
62 if ident not in ("j", "J"):
63 try:
64 return Complex(ident)
65 except ValueError:
66 pass
67
68 if "." in ident:
69 for chunk in ident.split("."):
70 if chunk and not isinstance(symbol_like(chunk, reader=reader), Symbol):
71 msg = (
72 "Cannot access attribute on anything other"
73 " than a name (in order to get attributes of expressions,"
74 " use `(. <expression> <attr>)` or `(.<attr> <expression>)`)"
75 )
76 if reader is None:
77 raise ValueError(msg)
78 else:
79 raise LexException.from_reader(msg, reader)
80
81 if reader is None:
82 if (
83 not ident
84 or ident[:1] == ":"
85 or any(isnormalizedspace(c) for c in ident)
86 or HyReader.NON_IDENT.intersection(ident)
87 ):
88 raise ValueError(f"Syntactically illegal symbol: {ident!r}")
89
90 return sym(ident)
91
92
93 class HyReader(Reader):
94 """A modular reader for Hy source."""
95
96 ###
97 # Components necessary for Reader implementation
98 ###
99
100 NON_IDENT = set("()[]{};\"'")
101
102 def fill_pos(self, model, start):
103 """Attach line/col information to a model.
104
105 Sets the end location of `model` to the current cursor position.
106
107 Args:
108 model (hy.models.Object): model to set line/col info for.
109 start (tuple[int, int]): (line, column) tuple indicating the start
110 location to assign to `model`.
111 """
112 model.start_line, model.start_column = start
113 model.end_line, model.end_column = self.pos
114 return model
115
116 def read_default(self, key):
117 """Default reader handler when nothing in the table matches.
118
119 Try to read an identifier/symbol. If there's a double-quote immediately
120 following, then parse it as a string with the given prefix (e.g.,
121 `r"..."`). Otherwise, parse it as a symbol-like.
122 """
123 ident = key + self.read_ident()
124 if self.peek_and_getc('"'):
125 return self.prefixed_string('"', ident)
126 return symbol_like(ident, reader=self)
127
128 def parse(self, stream, filename=None):
129 """Yields all `hy.models.Object`'s in `source`
130
131 Additionally exposes `self` as ``hy.&reader`` during read/compile time.
132
133 Args:
134 source:
135 Hy source to be parsed.
136 filename (str | None):
137 Filename to use for error messages. If `None` then previously
138 set filename is used.
139 """
140 self._set_source(stream, filename)
141 rname = mangle("&reader")
142 old_reader = getattr(hy, rname, None)
143 setattr(hy, rname, self)
144
145 try:
146 yield from self.parse_forms_until("")
147 finally:
148 if old_reader is None:
149 delattr(hy, rname)
150 else:
151 setattr(hy, rname, old_reader)
152
153 ###
154 # Reading forms
155 ###
156
157 def try_parse_one_form(self):
158 """Attempt to parse a single Hy form.
159
160 Read one (non-space) character from the stream, then call the
161 corresponding handler.
162
163 Returns:
164 hy.models.Object | None:
165 Model optionally returned by the called handler. Handlers may
166 return `None` to signify no parsed form (e.g., for comments).
167
168 Raises:
169 PrematureEndOfInput: If the reader hits the end of the file before
170 fully parsing a form.
171 LexException: If there is an error during form parsing.
172 """
173 try:
174 self.slurp_space()
175 c = self.getc()
176 start = self._pos
177 if not c:
178 raise PrematureEndOfInput.from_reader(
179 "Premature end of input while attempting to parse one form", self
180 )
181 handler = self.reader_table.get(c)
182 model = handler(self, c) if handler else self.read_default(c)
183 return self.fill_pos(model, start) if model is not None else None
184 except LexException:
185 raise
186 except Exception as e:
187 raise LexException.from_reader(
188 str(e) or "Exception thrown attempting to parse one form", self
189 )
190
191 def parse_one_form(self):
192 """Read from the stream until a form is parsed.
193
194 Guaranteed to return a model (i.e., skips over comments).
195
196 Returns:
197 hy.models.Object
198 """
199 model = None
200 while model is None:
201 model = self.try_parse_one_form()
202 return model
203
204 def parse_forms_until(self, closer):
205 """Yields `hy.models.Object`'s until character `closer` is seen.
206
207 Useful for reading a sequence such as s-exprs or lists.
208 """
209 while True:
210 self.slurp_space()
211 if self.peek_and_getc(closer):
212 break
213 model = self.try_parse_one_form()
214 if model is not None:
215 yield model
216
217 ###
218 # Basic atoms
219 ###
220
221 @reader_for(")")
222 @reader_for("]")
223 @reader_for("}")
224 def INVALID(self, key):
225 raise LexException.from_reader(
226 f"Ran into a '{key}' where it wasn't expected.", self
227 )
228
229 @reader_for(";")
230 def line_comment(self, _):
231 any(c == "\n" for c in self.chars(eof_ok=True))
232 return None
233
234 @reader_for(":")
235 def keyword(self, _):
236 ident = self.read_ident()
237 if "." in ident:
238 raise LexException.from_reader(
239 "Cannot access attribute on anything other"
240 " than a name (in order to get attributes of expressions,"
241 " use `(. <expression> <attr>)` or `(.<attr> <expression>)`)",
242 self,
243 )
244 return Keyword(ident, from_parser=True)
245
246 @reader_for('"')
247 def prefixed_string(self, _, prefix=""):
248 prefix_chars = set(prefix)
249 if (
250 len(prefix_chars) != len(prefix)
251 or prefix_chars - set("bfr")
252 or set("bf") <= prefix_chars
253 ):
254 raise LexException.from_reader(f"invalid string prefix {prefix!r}", self)
255
256 escaping = False
257
258 def quote_closing(c):
259 nonlocal escaping
260 if c == "\\":
261 escaping = not escaping
262 return 0
263 if c == '"' and not escaping:
264 return 1
265 if (
266 escaping
267 and "r" not in prefix
268 and
269 # https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals
270 c
271 not in ("\n\r\\'\"abfnrtv01234567x" + ("" if "b" in prefix else "NuU"))
272 ):
273 raise LexException.from_reader("invalid escape sequence \\" + c, self)
274 escaping = False
275 return 0
276
277 return self.read_string_until(quote_closing, prefix, "f" in prefix.lower())
278
279 ###
280 # Special annotations
281 ###
282
283 @reader_for("'", ("quote",))
284 @reader_for("`", ("quasiquote",))
285 def tag_as(root):
286 def _tag_as(self, _):
287 nc = self.peekc()
288 if (
289 not nc
290 or isnormalizedspace(nc)
291 or self.reader_table.get(nc) == self.INVALID
292 ):
293 raise LexException.from_reader(
294 "Could not identify the next token.", self
295 )
296 model = self.parse_one_form()
297 return mkexpr(root, model)
298
299 return _tag_as
300
301 @reader_for("~")
302 def unquote(self, key):
303 nc = self.peekc()
304 if not nc or isnormalizedspace(nc) or self.reader_table.get(nc) == self.INVALID:
305 return sym(key)
306 return mkexpr(
307 "unquote" + ("-splice" if self.peek_and_getc("@") else ""),
308 self.parse_one_form(),
309 )
310
311 ###
312 # Sequences
313 ###
314
315 @reader_for("(", (Expression, ")"))
316 @reader_for("[", (List, "]"))
317 @reader_for("{", (Dict, "}"))
318 @reader_for("#{", (Set, "}"))
319 @reader_for("#(", (Tuple, ")"))
320 def sequence(seq_type, closer):
321 return lambda self, _: seq_type(self.parse_forms_until(closer))
322
323 ###
324 # Reader tag-macros
325 ###
326
327 @reader_for("#")
328 def tag_dispatch(self, key):
329 """General handler for reader macros (and tag macros).
330
331 Reads a full identifier after the `#` and calls the corresponding handler
332 (this allows, e.g., `#reads-multiple-forms foo bar baz`).
333
334 Failing that, reads a single character after the `#` and immediately
335 calls the corresponding handler (this allows, e.g., `#*args` to parse
336 as `#*` followed by `args`).
337 """
338
339 if not self.peekc():
340 raise PrematureEndOfInput.from_reader(
341 "Premature end of input while attempting dispatch", self
342 )
343
344 if self.peek_and_getc("^"):
345 typ = self.parse_one_form()
346 target = self.parse_one_form()
347 return mkexpr("annotate", target, typ)
348
349 tag = None
350 # try dispatching tagged ident
351 ident = self.read_ident(just_peeking=True)
352 if ident and mangle(key + ident) in self.reader_table:
353 self.getn(len(ident))
354 tag = mangle(key + ident)
355 # failing that, dispatch tag + single character
356 elif key + self.peekc() in self.reader_table:
357 tag = key + self.getc()
358 if tag:
359 tree = self.dispatch(tag)
360 return as_model(tree) if tree is not None else None
361
362 raise LexException.from_reader(
363 f"reader macro '{key + self.read_ident()}' is not defined", self
364 )
365
366 @reader_for("#_")
367 def discard(self, _):
368 """Discards the next parsed form."""
369 self.parse_one_form()
370 return None
371
372 @reader_for("#*")
373 def hash_star(self, _):
374 """Unpacking forms `#*` and `#**`, corresponding to `*` and `**` in Python."""
375 num_stars = 1
376 while self.peek_and_getc("*"):
377 num_stars += 1
378 if num_stars > 2:
379 raise LexException.from_reader("too many stars", self)
380 return mkexpr(
381 "unpack-" + ("iterable", "mapping")[num_stars - 1],
382 self.parse_one_form(),
383 )
384
385 ###
386 # Strings
387 # (these are more complicated because f-strings
388 # form their own sublanguage)
389 ###
390
391 @reader_for("#[")
392 def bracketed_string(self, _):
393 """Bracketed strings. See the Hy docs for full details."""
394 delim = []
395 for c in self.chars():
396 if c == "[":
397 break
398 elif c == "]":
399 raise LexException.from_reader(
400 "Ran into a ']' where it wasn't expected.", self
401 )
402 delim.append(c)
403 delim = "".join(delim)
404 is_fstring = delim == "f" or delim.startswith("f-")
405
406 # discard single initial newline, if any, accounting for all
407 # three styles of newline
408 self.peek_and_getc("\x0d")
409 self.peek_and_getc("\x0a")
410
411 index = -1
412
413 def delim_closing(c):
414 nonlocal index
415 if c == "]":
416 if index == len(delim):
417 # this is the second bracket at the end of the delim
418 return len(delim) + 2
419 else:
420 # reset state, this may be the first bracket of closing delim
421 index = 0
422 elif 0 <= index <= len(delim):
423 # we're inside a possible closing delim
424 if index < len(delim) and c == delim[index]:
425 index += 1
426 else:
427 # failed delim, reset state
428 index = -1
429 return 0
430
431 return self.read_string_until(delim_closing, None, is_fstring, brackets=delim)
432
433 def read_string_until(self, closing, prefix, is_fstring, **kwargs):
434 if is_fstring:
435 components = self.read_fcomponents_until(closing, prefix)
436 return FString(components, **kwargs)
437 s = self.read_chars_until(closing, prefix, is_fstring=False)
438 return (Bytes if isinstance(s, bytes) else String)(s, **kwargs)
439
440 def read_chars_until(self, closing, prefix, is_fstring):
441 s = []
442 for c in self.chars():
443 s.append(c)
444 # check if c is closing
445 n_closing_chars = closing(c)
446 if n_closing_chars:
447 # string has ended
448 s = s[:-n_closing_chars]
449 break
450 # check if c is start of component
451 if is_fstring and c == "{":
452 # check and handle "{{"
453 if self.peek_and_getc("{"):
454 s.append("{")
455 else:
456 # remove "{" from end of string component
457 s.pop()
458 break
459 res = "".join(s).replace("\x0d\x0a", "\x0a").replace("\x0d", "\x0a")
460
461 if prefix is not None:
462 res = eval(f'{prefix}"""{res}"""')
463 if is_fstring:
464 return res, n_closing_chars
465 return res
466
467 def read_fcomponents_until(self, closing, prefix):
468 components = []
469 start = self.pos
470 while True:
471 s, closed = self.read_chars_until(closing, prefix, is_fstring=True)
472 if s:
473 components.append(self.fill_pos(String(s), start))
474 if closed:
475 break
476 components.extend(self.read_fcomponent(prefix))
477 return components
478
479 def read_fcomponent(self, prefix):
480 """May return one or two components, since the `=` debugging syntax
481 will create a String component."""
482 start = self.pos
483 values = []
484 conversion = None
485 has_debug = False
486
487 # read the expression, saving the text verbatim
488 # in case we encounter debug `=`
489 space_before = self.slurp_space()
490 with self.saving_chars() as form_text:
491 model = self.parse_one_form()
492 space_between = self.slurp_space()
493
494 # check for and handle debug syntax:
495 # we emt the verbatim text before we emit the value
496 if self.peek_and_getc("="):
497 has_debug = True
498 space_after = self.slurp_space()
499 dbg_prefix = (
500 space_before + "".join(form_text) + space_between + "=" + space_after
501 )
502 values.append(self.fill_pos(String(dbg_prefix), start))
503
504 # handle conversion code
505 if self.peek_and_getc("!"):
506 conversion = self.getc()
507 self.slurp_space()
508
509 def component_closing(c):
510 if c == "}":
511 return 1
512 return 0
513
514 # handle formatting options
515 format_components = []
516 if self.peek_and_getc(":"):
517 format_components = self.read_fcomponents_until(component_closing, prefix)
518 else:
519 if has_debug and conversion is None:
520 conversion = "r"
521 if not self.getc() == "}":
522 raise LexException.from_reader("f-string: trailing junk in field", self)
523 return values + [
524 self.fill_pos(FComponent((model, *format_components), conversion), start)
525 ]
526
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hy/reader/hy_reader.py b/hy/reader/hy_reader.py
--- a/hy/reader/hy_reader.py
+++ b/hy/reader/hy_reader.py
@@ -448,7 +448,7 @@
s = s[:-n_closing_chars]
break
# check if c is start of component
- if is_fstring and c == "{":
+ if is_fstring and c == "{" and s[-3:] != ["\\", "N", "{"]:
# check and handle "{{"
if self.peek_and_getc("{"):
s.append("{")
| {"golden_diff": "diff --git a/hy/reader/hy_reader.py b/hy/reader/hy_reader.py\n--- a/hy/reader/hy_reader.py\n+++ b/hy/reader/hy_reader.py\n@@ -448,7 +448,7 @@\n s = s[:-n_closing_chars]\n break\n # check if c is start of component\n- if is_fstring and c == \"{\":\n+ if is_fstring and c == \"{\" and s[-3:] != [\"\\\\\", \"N\", \"{\"]:\n # check and handle \"{{\"\n if self.peek_and_getc(\"{\"):\n s.append(\"{\")\n", "issue": "F-strings don't allow `\\N{\u2026}`\n```\r\nHy 0.24.0 using CPython(default) 3.9.6 on Darwin\r\n=> (print \"\\N{slightly smiling face}\")\r\n\ud83d\ude42\r\n=> (print f\"\\N{slightly smiling face}\")\r\nTraceback (most recent call last):\r\n File \"stdin-eda06fe2e57521e633661e996d6216e5bab61d9b\", line 1\r\n (print f\"\\N{slightly smiling face}\")\r\n ^\r\nhy.reader.exceptions.LexException: (unicode error) 'unicodeescape' codec can't decode bytes in position 0-1: malformed \\N character escape (<string>, line 1)\r\n```\r\n\r\nPython 3.9 works fine:\r\n```\r\n>>> print(\"\\N{slightly smiling face}\")\r\n\ud83d\ude42\r\n>>> print(f\"\\N{slightly smiling face}\")\r\n\ud83d\ude42\r\n```\n", "before_files": [{"content": "\"Character reader for parsing Hy source.\"\n\nimport hy\nfrom hy.models import (\n Bytes,\n Complex,\n Dict,\n Expression,\n FComponent,\n Float,\n FString,\n Integer,\n Keyword,\n List,\n Set,\n String,\n Symbol,\n Tuple,\n as_model,\n)\n\nfrom .exceptions import LexException, PrematureEndOfInput\nfrom .mangling import mangle\nfrom .reader import Reader, isnormalizedspace\n\n\ndef sym(name):\n return Symbol(name, from_parser=True)\n\n\n# Note: This is subtly different from\n# the `mkexpr` in hy/compiler.py !\ndef mkexpr(root, *args):\n return Expression((sym(root) if isinstance(root, str) else root, *args))\n\n\ndef symbol_like(ident, reader=None):\n \"\"\"Generate a Hy model from an identifier-like string.\n\n Also verifies the syntax of dot notation and validity of symbol names.\n\n Parameters\n ----------\n ident : str\n Text to convert.\n\n reader : Reader, optional\n The reader to use, if any; used for generating position data for errors.\n\n Returns\n -------\n out : a hy.models.Object subtype corresponding to the parsed text.\n \"\"\"\n try:\n return Integer(ident)\n except ValueError:\n pass\n try:\n return Float(ident)\n except ValueError:\n pass\n if ident not in (\"j\", \"J\"):\n try:\n return Complex(ident)\n except ValueError:\n pass\n\n if \".\" in ident:\n for chunk in ident.split(\".\"):\n if chunk and not isinstance(symbol_like(chunk, reader=reader), Symbol):\n msg = (\n \"Cannot access attribute on anything other\"\n \" than a name (in order to get attributes of expressions,\"\n \" use `(. <expression> <attr>)` or `(.<attr> <expression>)`)\"\n )\n if reader is None:\n raise ValueError(msg)\n else:\n raise LexException.from_reader(msg, reader)\n\n if reader is None:\n if (\n not ident\n or ident[:1] == \":\"\n or any(isnormalizedspace(c) for c in ident)\n or HyReader.NON_IDENT.intersection(ident)\n ):\n raise ValueError(f\"Syntactically illegal symbol: {ident!r}\")\n\n return sym(ident)\n\n\nclass HyReader(Reader):\n \"\"\"A modular reader for Hy source.\"\"\"\n\n ###\n # Components necessary for Reader implementation\n ###\n\n NON_IDENT = set(\"()[]{};\\\"'\")\n\n def fill_pos(self, model, start):\n \"\"\"Attach line/col information to a model.\n\n Sets the end location of `model` to the current cursor position.\n\n Args:\n model (hy.models.Object): model to set line/col info for.\n start (tuple[int, int]): (line, column) tuple indicating the start\n location to assign to `model`.\n \"\"\"\n model.start_line, model.start_column = start\n model.end_line, model.end_column = self.pos\n return model\n\n def read_default(self, key):\n \"\"\"Default reader handler when nothing in the table matches.\n\n Try to read an identifier/symbol. If there's a double-quote immediately\n following, then parse it as a string with the given prefix (e.g.,\n `r\"...\"`). Otherwise, parse it as a symbol-like.\n \"\"\"\n ident = key + self.read_ident()\n if self.peek_and_getc('\"'):\n return self.prefixed_string('\"', ident)\n return symbol_like(ident, reader=self)\n\n def parse(self, stream, filename=None):\n \"\"\"Yields all `hy.models.Object`'s in `source`\n\n Additionally exposes `self` as ``hy.&reader`` during read/compile time.\n\n Args:\n source:\n Hy source to be parsed.\n filename (str | None):\n Filename to use for error messages. If `None` then previously\n set filename is used.\n \"\"\"\n self._set_source(stream, filename)\n rname = mangle(\"&reader\")\n old_reader = getattr(hy, rname, None)\n setattr(hy, rname, self)\n\n try:\n yield from self.parse_forms_until(\"\")\n finally:\n if old_reader is None:\n delattr(hy, rname)\n else:\n setattr(hy, rname, old_reader)\n\n ###\n # Reading forms\n ###\n\n def try_parse_one_form(self):\n \"\"\"Attempt to parse a single Hy form.\n\n Read one (non-space) character from the stream, then call the\n corresponding handler.\n\n Returns:\n hy.models.Object | None:\n Model optionally returned by the called handler. Handlers may\n return `None` to signify no parsed form (e.g., for comments).\n\n Raises:\n PrematureEndOfInput: If the reader hits the end of the file before\n fully parsing a form.\n LexException: If there is an error during form parsing.\n \"\"\"\n try:\n self.slurp_space()\n c = self.getc()\n start = self._pos\n if not c:\n raise PrematureEndOfInput.from_reader(\n \"Premature end of input while attempting to parse one form\", self\n )\n handler = self.reader_table.get(c)\n model = handler(self, c) if handler else self.read_default(c)\n return self.fill_pos(model, start) if model is not None else None\n except LexException:\n raise\n except Exception as e:\n raise LexException.from_reader(\n str(e) or \"Exception thrown attempting to parse one form\", self\n )\n\n def parse_one_form(self):\n \"\"\"Read from the stream until a form is parsed.\n\n Guaranteed to return a model (i.e., skips over comments).\n\n Returns:\n hy.models.Object\n \"\"\"\n model = None\n while model is None:\n model = self.try_parse_one_form()\n return model\n\n def parse_forms_until(self, closer):\n \"\"\"Yields `hy.models.Object`'s until character `closer` is seen.\n\n Useful for reading a sequence such as s-exprs or lists.\n \"\"\"\n while True:\n self.slurp_space()\n if self.peek_and_getc(closer):\n break\n model = self.try_parse_one_form()\n if model is not None:\n yield model\n\n ###\n # Basic atoms\n ###\n\n @reader_for(\")\")\n @reader_for(\"]\")\n @reader_for(\"}\")\n def INVALID(self, key):\n raise LexException.from_reader(\n f\"Ran into a '{key}' where it wasn't expected.\", self\n )\n\n @reader_for(\";\")\n def line_comment(self, _):\n any(c == \"\\n\" for c in self.chars(eof_ok=True))\n return None\n\n @reader_for(\":\")\n def keyword(self, _):\n ident = self.read_ident()\n if \".\" in ident:\n raise LexException.from_reader(\n \"Cannot access attribute on anything other\"\n \" than a name (in order to get attributes of expressions,\"\n \" use `(. <expression> <attr>)` or `(.<attr> <expression>)`)\",\n self,\n )\n return Keyword(ident, from_parser=True)\n\n @reader_for('\"')\n def prefixed_string(self, _, prefix=\"\"):\n prefix_chars = set(prefix)\n if (\n len(prefix_chars) != len(prefix)\n or prefix_chars - set(\"bfr\")\n or set(\"bf\") <= prefix_chars\n ):\n raise LexException.from_reader(f\"invalid string prefix {prefix!r}\", self)\n\n escaping = False\n\n def quote_closing(c):\n nonlocal escaping\n if c == \"\\\\\":\n escaping = not escaping\n return 0\n if c == '\"' and not escaping:\n return 1\n if (\n escaping\n and \"r\" not in prefix\n and\n # https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals\n c\n not in (\"\\n\\r\\\\'\\\"abfnrtv01234567x\" + (\"\" if \"b\" in prefix else \"NuU\"))\n ):\n raise LexException.from_reader(\"invalid escape sequence \\\\\" + c, self)\n escaping = False\n return 0\n\n return self.read_string_until(quote_closing, prefix, \"f\" in prefix.lower())\n\n ###\n # Special annotations\n ###\n\n @reader_for(\"'\", (\"quote\",))\n @reader_for(\"`\", (\"quasiquote\",))\n def tag_as(root):\n def _tag_as(self, _):\n nc = self.peekc()\n if (\n not nc\n or isnormalizedspace(nc)\n or self.reader_table.get(nc) == self.INVALID\n ):\n raise LexException.from_reader(\n \"Could not identify the next token.\", self\n )\n model = self.parse_one_form()\n return mkexpr(root, model)\n\n return _tag_as\n\n @reader_for(\"~\")\n def unquote(self, key):\n nc = self.peekc()\n if not nc or isnormalizedspace(nc) or self.reader_table.get(nc) == self.INVALID:\n return sym(key)\n return mkexpr(\n \"unquote\" + (\"-splice\" if self.peek_and_getc(\"@\") else \"\"),\n self.parse_one_form(),\n )\n\n ###\n # Sequences\n ###\n\n @reader_for(\"(\", (Expression, \")\"))\n @reader_for(\"[\", (List, \"]\"))\n @reader_for(\"{\", (Dict, \"}\"))\n @reader_for(\"#{\", (Set, \"}\"))\n @reader_for(\"#(\", (Tuple, \")\"))\n def sequence(seq_type, closer):\n return lambda self, _: seq_type(self.parse_forms_until(closer))\n\n ###\n # Reader tag-macros\n ###\n\n @reader_for(\"#\")\n def tag_dispatch(self, key):\n \"\"\"General handler for reader macros (and tag macros).\n\n Reads a full identifier after the `#` and calls the corresponding handler\n (this allows, e.g., `#reads-multiple-forms foo bar baz`).\n\n Failing that, reads a single character after the `#` and immediately\n calls the corresponding handler (this allows, e.g., `#*args` to parse\n as `#*` followed by `args`).\n \"\"\"\n\n if not self.peekc():\n raise PrematureEndOfInput.from_reader(\n \"Premature end of input while attempting dispatch\", self\n )\n\n if self.peek_and_getc(\"^\"):\n typ = self.parse_one_form()\n target = self.parse_one_form()\n return mkexpr(\"annotate\", target, typ)\n\n tag = None\n # try dispatching tagged ident\n ident = self.read_ident(just_peeking=True)\n if ident and mangle(key + ident) in self.reader_table:\n self.getn(len(ident))\n tag = mangle(key + ident)\n # failing that, dispatch tag + single character\n elif key + self.peekc() in self.reader_table:\n tag = key + self.getc()\n if tag:\n tree = self.dispatch(tag)\n return as_model(tree) if tree is not None else None\n\n raise LexException.from_reader(\n f\"reader macro '{key + self.read_ident()}' is not defined\", self\n )\n\n @reader_for(\"#_\")\n def discard(self, _):\n \"\"\"Discards the next parsed form.\"\"\"\n self.parse_one_form()\n return None\n\n @reader_for(\"#*\")\n def hash_star(self, _):\n \"\"\"Unpacking forms `#*` and `#**`, corresponding to `*` and `**` in Python.\"\"\"\n num_stars = 1\n while self.peek_and_getc(\"*\"):\n num_stars += 1\n if num_stars > 2:\n raise LexException.from_reader(\"too many stars\", self)\n return mkexpr(\n \"unpack-\" + (\"iterable\", \"mapping\")[num_stars - 1],\n self.parse_one_form(),\n )\n\n ###\n # Strings\n # (these are more complicated because f-strings\n # form their own sublanguage)\n ###\n\n @reader_for(\"#[\")\n def bracketed_string(self, _):\n \"\"\"Bracketed strings. See the Hy docs for full details.\"\"\"\n delim = []\n for c in self.chars():\n if c == \"[\":\n break\n elif c == \"]\":\n raise LexException.from_reader(\n \"Ran into a ']' where it wasn't expected.\", self\n )\n delim.append(c)\n delim = \"\".join(delim)\n is_fstring = delim == \"f\" or delim.startswith(\"f-\")\n\n # discard single initial newline, if any, accounting for all\n # three styles of newline\n self.peek_and_getc(\"\\x0d\")\n self.peek_and_getc(\"\\x0a\")\n\n index = -1\n\n def delim_closing(c):\n nonlocal index\n if c == \"]\":\n if index == len(delim):\n # this is the second bracket at the end of the delim\n return len(delim) + 2\n else:\n # reset state, this may be the first bracket of closing delim\n index = 0\n elif 0 <= index <= len(delim):\n # we're inside a possible closing delim\n if index < len(delim) and c == delim[index]:\n index += 1\n else:\n # failed delim, reset state\n index = -1\n return 0\n\n return self.read_string_until(delim_closing, None, is_fstring, brackets=delim)\n\n def read_string_until(self, closing, prefix, is_fstring, **kwargs):\n if is_fstring:\n components = self.read_fcomponents_until(closing, prefix)\n return FString(components, **kwargs)\n s = self.read_chars_until(closing, prefix, is_fstring=False)\n return (Bytes if isinstance(s, bytes) else String)(s, **kwargs)\n\n def read_chars_until(self, closing, prefix, is_fstring):\n s = []\n for c in self.chars():\n s.append(c)\n # check if c is closing\n n_closing_chars = closing(c)\n if n_closing_chars:\n # string has ended\n s = s[:-n_closing_chars]\n break\n # check if c is start of component\n if is_fstring and c == \"{\":\n # check and handle \"{{\"\n if self.peek_and_getc(\"{\"):\n s.append(\"{\")\n else:\n # remove \"{\" from end of string component\n s.pop()\n break\n res = \"\".join(s).replace(\"\\x0d\\x0a\", \"\\x0a\").replace(\"\\x0d\", \"\\x0a\")\n\n if prefix is not None:\n res = eval(f'{prefix}\"\"\"{res}\"\"\"')\n if is_fstring:\n return res, n_closing_chars\n return res\n\n def read_fcomponents_until(self, closing, prefix):\n components = []\n start = self.pos\n while True:\n s, closed = self.read_chars_until(closing, prefix, is_fstring=True)\n if s:\n components.append(self.fill_pos(String(s), start))\n if closed:\n break\n components.extend(self.read_fcomponent(prefix))\n return components\n\n def read_fcomponent(self, prefix):\n \"\"\"May return one or two components, since the `=` debugging syntax\n will create a String component.\"\"\"\n start = self.pos\n values = []\n conversion = None\n has_debug = False\n\n # read the expression, saving the text verbatim\n # in case we encounter debug `=`\n space_before = self.slurp_space()\n with self.saving_chars() as form_text:\n model = self.parse_one_form()\n space_between = self.slurp_space()\n\n # check for and handle debug syntax:\n # we emt the verbatim text before we emit the value\n if self.peek_and_getc(\"=\"):\n has_debug = True\n space_after = self.slurp_space()\n dbg_prefix = (\n space_before + \"\".join(form_text) + space_between + \"=\" + space_after\n )\n values.append(self.fill_pos(String(dbg_prefix), start))\n\n # handle conversion code\n if self.peek_and_getc(\"!\"):\n conversion = self.getc()\n self.slurp_space()\n\n def component_closing(c):\n if c == \"}\":\n return 1\n return 0\n\n # handle formatting options\n format_components = []\n if self.peek_and_getc(\":\"):\n format_components = self.read_fcomponents_until(component_closing, prefix)\n else:\n if has_debug and conversion is None:\n conversion = \"r\"\n if not self.getc() == \"}\":\n raise LexException.from_reader(\"f-string: trailing junk in field\", self)\n return values + [\n self.fill_pos(FComponent((model, *format_components), conversion), start)\n ]\n", "path": "hy/reader/hy_reader.py"}], "after_files": [{"content": "\"Character reader for parsing Hy source.\"\n\nimport hy\nfrom hy.models import (\n Bytes,\n Complex,\n Dict,\n Expression,\n FComponent,\n Float,\n FString,\n Integer,\n Keyword,\n List,\n Set,\n String,\n Symbol,\n Tuple,\n as_model,\n)\n\nfrom .exceptions import LexException, PrematureEndOfInput\nfrom .mangling import mangle\nfrom .reader import Reader, isnormalizedspace\n\n\ndef sym(name):\n return Symbol(name, from_parser=True)\n\n\n# Note: This is subtly different from\n# the `mkexpr` in hy/compiler.py !\ndef mkexpr(root, *args):\n return Expression((sym(root) if isinstance(root, str) else root, *args))\n\n\ndef symbol_like(ident, reader=None):\n \"\"\"Generate a Hy model from an identifier-like string.\n\n Also verifies the syntax of dot notation and validity of symbol names.\n\n Parameters\n ----------\n ident : str\n Text to convert.\n\n reader : Reader, optional\n The reader to use, if any; used for generating position data for errors.\n\n Returns\n -------\n out : a hy.models.Object subtype corresponding to the parsed text.\n \"\"\"\n try:\n return Integer(ident)\n except ValueError:\n pass\n try:\n return Float(ident)\n except ValueError:\n pass\n if ident not in (\"j\", \"J\"):\n try:\n return Complex(ident)\n except ValueError:\n pass\n\n if \".\" in ident:\n for chunk in ident.split(\".\"):\n if chunk and not isinstance(symbol_like(chunk, reader=reader), Symbol):\n msg = (\n \"Cannot access attribute on anything other\"\n \" than a name (in order to get attributes of expressions,\"\n \" use `(. <expression> <attr>)` or `(.<attr> <expression>)`)\"\n )\n if reader is None:\n raise ValueError(msg)\n else:\n raise LexException.from_reader(msg, reader)\n\n if reader is None:\n if (\n not ident\n or ident[:1] == \":\"\n or any(isnormalizedspace(c) for c in ident)\n or HyReader.NON_IDENT.intersection(ident)\n ):\n raise ValueError(f\"Syntactically illegal symbol: {ident!r}\")\n\n return sym(ident)\n\n\nclass HyReader(Reader):\n \"\"\"A modular reader for Hy source.\"\"\"\n\n ###\n # Components necessary for Reader implementation\n ###\n\n NON_IDENT = set(\"()[]{};\\\"'\")\n\n def fill_pos(self, model, start):\n \"\"\"Attach line/col information to a model.\n\n Sets the end location of `model` to the current cursor position.\n\n Args:\n model (hy.models.Object): model to set line/col info for.\n start (tuple[int, int]): (line, column) tuple indicating the start\n location to assign to `model`.\n \"\"\"\n model.start_line, model.start_column = start\n model.end_line, model.end_column = self.pos\n return model\n\n def read_default(self, key):\n \"\"\"Default reader handler when nothing in the table matches.\n\n Try to read an identifier/symbol. If there's a double-quote immediately\n following, then parse it as a string with the given prefix (e.g.,\n `r\"...\"`). Otherwise, parse it as a symbol-like.\n \"\"\"\n ident = key + self.read_ident()\n if self.peek_and_getc('\"'):\n return self.prefixed_string('\"', ident)\n return symbol_like(ident, reader=self)\n\n def parse(self, stream, filename=None):\n \"\"\"Yields all `hy.models.Object`'s in `source`\n\n Additionally exposes `self` as ``hy.&reader`` during read/compile time.\n\n Args:\n source:\n Hy source to be parsed.\n filename (str | None):\n Filename to use for error messages. If `None` then previously\n set filename is used.\n \"\"\"\n self._set_source(stream, filename)\n rname = mangle(\"&reader\")\n old_reader = getattr(hy, rname, None)\n setattr(hy, rname, self)\n\n try:\n yield from self.parse_forms_until(\"\")\n finally:\n if old_reader is None:\n delattr(hy, rname)\n else:\n setattr(hy, rname, old_reader)\n\n ###\n # Reading forms\n ###\n\n def try_parse_one_form(self):\n \"\"\"Attempt to parse a single Hy form.\n\n Read one (non-space) character from the stream, then call the\n corresponding handler.\n\n Returns:\n hy.models.Object | None:\n Model optionally returned by the called handler. Handlers may\n return `None` to signify no parsed form (e.g., for comments).\n\n Raises:\n PrematureEndOfInput: If the reader hits the end of the file before\n fully parsing a form.\n LexException: If there is an error during form parsing.\n \"\"\"\n try:\n self.slurp_space()\n c = self.getc()\n start = self._pos\n if not c:\n raise PrematureEndOfInput.from_reader(\n \"Premature end of input while attempting to parse one form\", self\n )\n handler = self.reader_table.get(c)\n model = handler(self, c) if handler else self.read_default(c)\n return self.fill_pos(model, start) if model is not None else None\n except LexException:\n raise\n except Exception as e:\n raise LexException.from_reader(\n str(e) or \"Exception thrown attempting to parse one form\", self\n )\n\n def parse_one_form(self):\n \"\"\"Read from the stream until a form is parsed.\n\n Guaranteed to return a model (i.e., skips over comments).\n\n Returns:\n hy.models.Object\n \"\"\"\n model = None\n while model is None:\n model = self.try_parse_one_form()\n return model\n\n def parse_forms_until(self, closer):\n \"\"\"Yields `hy.models.Object`'s until character `closer` is seen.\n\n Useful for reading a sequence such as s-exprs or lists.\n \"\"\"\n while True:\n self.slurp_space()\n if self.peek_and_getc(closer):\n break\n model = self.try_parse_one_form()\n if model is not None:\n yield model\n\n ###\n # Basic atoms\n ###\n\n @reader_for(\")\")\n @reader_for(\"]\")\n @reader_for(\"}\")\n def INVALID(self, key):\n raise LexException.from_reader(\n f\"Ran into a '{key}' where it wasn't expected.\", self\n )\n\n @reader_for(\";\")\n def line_comment(self, _):\n any(c == \"\\n\" for c in self.chars(eof_ok=True))\n return None\n\n @reader_for(\":\")\n def keyword(self, _):\n ident = self.read_ident()\n if \".\" in ident:\n raise LexException.from_reader(\n \"Cannot access attribute on anything other\"\n \" than a name (in order to get attributes of expressions,\"\n \" use `(. <expression> <attr>)` or `(.<attr> <expression>)`)\",\n self,\n )\n return Keyword(ident, from_parser=True)\n\n @reader_for('\"')\n def prefixed_string(self, _, prefix=\"\"):\n prefix_chars = set(prefix)\n if (\n len(prefix_chars) != len(prefix)\n or prefix_chars - set(\"bfr\")\n or set(\"bf\") <= prefix_chars\n ):\n raise LexException.from_reader(f\"invalid string prefix {prefix!r}\", self)\n\n escaping = False\n\n def quote_closing(c):\n nonlocal escaping\n if c == \"\\\\\":\n escaping = not escaping\n return 0\n if c == '\"' and not escaping:\n return 1\n if (\n escaping\n and \"r\" not in prefix\n and\n # https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals\n c\n not in (\"\\n\\r\\\\'\\\"abfnrtv01234567x\" + (\"\" if \"b\" in prefix else \"NuU\"))\n ):\n raise LexException.from_reader(\"invalid escape sequence \\\\\" + c, self)\n escaping = False\n return 0\n\n return self.read_string_until(quote_closing, prefix, \"f\" in prefix.lower())\n\n ###\n # Special annotations\n ###\n\n @reader_for(\"'\", (\"quote\",))\n @reader_for(\"`\", (\"quasiquote\",))\n def tag_as(root):\n def _tag_as(self, _):\n nc = self.peekc()\n if (\n not nc\n or isnormalizedspace(nc)\n or self.reader_table.get(nc) == self.INVALID\n ):\n raise LexException.from_reader(\n \"Could not identify the next token.\", self\n )\n model = self.parse_one_form()\n return mkexpr(root, model)\n\n return _tag_as\n\n @reader_for(\"~\")\n def unquote(self, key):\n nc = self.peekc()\n if not nc or isnormalizedspace(nc) or self.reader_table.get(nc) == self.INVALID:\n return sym(key)\n return mkexpr(\n \"unquote\" + (\"-splice\" if self.peek_and_getc(\"@\") else \"\"),\n self.parse_one_form(),\n )\n\n ###\n # Sequences\n ###\n\n @reader_for(\"(\", (Expression, \")\"))\n @reader_for(\"[\", (List, \"]\"))\n @reader_for(\"{\", (Dict, \"}\"))\n @reader_for(\"#{\", (Set, \"}\"))\n @reader_for(\"#(\", (Tuple, \")\"))\n def sequence(seq_type, closer):\n return lambda self, _: seq_type(self.parse_forms_until(closer))\n\n ###\n # Reader tag-macros\n ###\n\n @reader_for(\"#\")\n def tag_dispatch(self, key):\n \"\"\"General handler for reader macros (and tag macros).\n\n Reads a full identifier after the `#` and calls the corresponding handler\n (this allows, e.g., `#reads-multiple-forms foo bar baz`).\n\n Failing that, reads a single character after the `#` and immediately\n calls the corresponding handler (this allows, e.g., `#*args` to parse\n as `#*` followed by `args`).\n \"\"\"\n\n if not self.peekc():\n raise PrematureEndOfInput.from_reader(\n \"Premature end of input while attempting dispatch\", self\n )\n\n if self.peek_and_getc(\"^\"):\n typ = self.parse_one_form()\n target = self.parse_one_form()\n return mkexpr(\"annotate\", target, typ)\n\n tag = None\n # try dispatching tagged ident\n ident = self.read_ident(just_peeking=True)\n if ident and mangle(key + ident) in self.reader_table:\n self.getn(len(ident))\n tag = mangle(key + ident)\n # failing that, dispatch tag + single character\n elif key + self.peekc() in self.reader_table:\n tag = key + self.getc()\n if tag:\n tree = self.dispatch(tag)\n return as_model(tree) if tree is not None else None\n\n raise LexException.from_reader(\n f\"reader macro '{key + self.read_ident()}' is not defined\", self\n )\n\n @reader_for(\"#_\")\n def discard(self, _):\n \"\"\"Discards the next parsed form.\"\"\"\n self.parse_one_form()\n return None\n\n @reader_for(\"#*\")\n def hash_star(self, _):\n \"\"\"Unpacking forms `#*` and `#**`, corresponding to `*` and `**` in Python.\"\"\"\n num_stars = 1\n while self.peek_and_getc(\"*\"):\n num_stars += 1\n if num_stars > 2:\n raise LexException.from_reader(\"too many stars\", self)\n return mkexpr(\n \"unpack-\" + (\"iterable\", \"mapping\")[num_stars - 1],\n self.parse_one_form(),\n )\n\n ###\n # Strings\n # (these are more complicated because f-strings\n # form their own sublanguage)\n ###\n\n @reader_for(\"#[\")\n def bracketed_string(self, _):\n \"\"\"Bracketed strings. See the Hy docs for full details.\"\"\"\n delim = []\n for c in self.chars():\n if c == \"[\":\n break\n elif c == \"]\":\n raise LexException.from_reader(\n \"Ran into a ']' where it wasn't expected.\", self\n )\n delim.append(c)\n delim = \"\".join(delim)\n is_fstring = delim == \"f\" or delim.startswith(\"f-\")\n\n # discard single initial newline, if any, accounting for all\n # three styles of newline\n self.peek_and_getc(\"\\x0d\")\n self.peek_and_getc(\"\\x0a\")\n\n index = -1\n\n def delim_closing(c):\n nonlocal index\n if c == \"]\":\n if index == len(delim):\n # this is the second bracket at the end of the delim\n return len(delim) + 2\n else:\n # reset state, this may be the first bracket of closing delim\n index = 0\n elif 0 <= index <= len(delim):\n # we're inside a possible closing delim\n if index < len(delim) and c == delim[index]:\n index += 1\n else:\n # failed delim, reset state\n index = -1\n return 0\n\n return self.read_string_until(delim_closing, None, is_fstring, brackets=delim)\n\n def read_string_until(self, closing, prefix, is_fstring, **kwargs):\n if is_fstring:\n components = self.read_fcomponents_until(closing, prefix)\n return FString(components, **kwargs)\n s = self.read_chars_until(closing, prefix, is_fstring=False)\n return (Bytes if isinstance(s, bytes) else String)(s, **kwargs)\n\n def read_chars_until(self, closing, prefix, is_fstring):\n s = []\n for c in self.chars():\n s.append(c)\n # check if c is closing\n n_closing_chars = closing(c)\n if n_closing_chars:\n # string has ended\n s = s[:-n_closing_chars]\n break\n # check if c is start of component\n if is_fstring and c == \"{\" and s[-3:] != [\"\\\\\", \"N\", \"{\"]:\n # check and handle \"{{\"\n if self.peek_and_getc(\"{\"):\n s.append(\"{\")\n else:\n # remove \"{\" from end of string component\n s.pop()\n break\n res = \"\".join(s).replace(\"\\x0d\\x0a\", \"\\x0a\").replace(\"\\x0d\", \"\\x0a\")\n\n if prefix is not None:\n res = eval(f'{prefix}\"\"\"{res}\"\"\"')\n if is_fstring:\n return res, n_closing_chars\n return res\n\n def read_fcomponents_until(self, closing, prefix):\n components = []\n start = self.pos\n while True:\n s, closed = self.read_chars_until(closing, prefix, is_fstring=True)\n if s:\n components.append(self.fill_pos(String(s), start))\n if closed:\n break\n components.extend(self.read_fcomponent(prefix))\n return components\n\n def read_fcomponent(self, prefix):\n \"\"\"May return one or two components, since the `=` debugging syntax\n will create a String component.\"\"\"\n start = self.pos\n values = []\n conversion = None\n has_debug = False\n\n # read the expression, saving the text verbatim\n # in case we encounter debug `=`\n space_before = self.slurp_space()\n with self.saving_chars() as form_text:\n model = self.parse_one_form()\n space_between = self.slurp_space()\n\n # check for and handle debug syntax:\n # we emt the verbatim text before we emit the value\n if self.peek_and_getc(\"=\"):\n has_debug = True\n space_after = self.slurp_space()\n dbg_prefix = (\n space_before + \"\".join(form_text) + space_between + \"=\" + space_after\n )\n values.append(self.fill_pos(String(dbg_prefix), start))\n\n # handle conversion code\n if self.peek_and_getc(\"!\"):\n conversion = self.getc()\n self.slurp_space()\n\n def component_closing(c):\n if c == \"}\":\n return 1\n return 0\n\n # handle formatting options\n format_components = []\n if self.peek_and_getc(\":\"):\n format_components = self.read_fcomponents_until(component_closing, prefix)\n else:\n if has_debug and conversion is None:\n conversion = \"r\"\n if not self.getc() == \"}\":\n raise LexException.from_reader(\"f-string: trailing junk in field\", self)\n return values + [\n self.fill_pos(FComponent((model, *format_components), conversion), start)\n ]\n", "path": "hy/reader/hy_reader.py"}]} |
gh_patches_debug_1544 | rasdani/github-patches | git_diff | mosaicml__composer-182 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add venv into docker image to enable editable `pip install`
When trying to install composer with `pip install -e .` from within the docker image, we are seeing this error:
```
Traceback (most recent call last):
File "/usr/bin/composer", line 33, in <module>
sys.exit(load_entry_point('mosaicml', 'console_scripts', 'composer')())
File "/usr/bin/composer", line 22, in importlib_load_entry_point
for entry_point in distribution(dist_name).entry_points
File "/usr/lib/python3.8/importlib/metadata.py", line 445, in distribution
return Distribution.from_name(distribution_name)
File "/usr/lib/python3.8/importlib/metadata.py", line 169, in from_name
raise PackageNotFoundError(name)
importlib.metadata.PackageNotFoundError: mosaicml
```
This seems to be remedied by running the `pip install` from within a virtualenv. Can we bake a virtualenv into the docker image as a workaround?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2021 MosaicML. All Rights Reserved.
2
3 import os
4 import sys
5 import textwrap
6
7 import setuptools
8 from setuptools import setup
9
10
11 def package_files(directory: str):
12 # from https://stackoverflow.com/a/36693250
13 paths = []
14 for (path, _, filenames) in os.walk(directory):
15 for filename in filenames:
16 paths.append(os.path.join('..', path, filename))
17 return paths
18
19
20 with open("README.md", "r", encoding="utf-8") as fh:
21 long_description = fh.read()
22
23 install_requires = [
24 "pyyaml>=5.4.1",
25 "tqdm>=4.62.3",
26 "torchmetrics>=0.6.0",
27 "torch_optimizer==0.1.0",
28 "torchvision>=0.9.0",
29 "torch>=1.9",
30 "yahp>=0.0.14",
31 "numpy==1.21.5",
32 ]
33 extra_deps = {}
34
35 extra_deps['base'] = []
36
37 extra_deps['dev'] = [
38 "custom_inherit==2.3.2",
39 'junitparser>=2.1.1',
40 'coverage[toml]>=6.1.1',
41 'fasteners>=0.16.3', # run_directory_uploader tests require fasteners
42 'pytest>=6.2.0',
43 'yapf>=0.13.0',
44 'isort>=5.9.3',
45 'ipython>=7.29.0',
46 'ipykernel>=6.5.0',
47 'jupyter>=1.0.0',
48 'yamllint>=1.26.2',
49 'pytest-timeout>=1.4.2',
50 'recommonmark>=0.7.1',
51 'sphinx>=4.2.0',
52 'sphinx_copybutton>=0.4.0',
53 'sphinx_markdown_tables>=0.0.15',
54 'sphinx-argparse>=0.3.1',
55 'sphinxcontrib.katex>=0.8.6',
56 'sphinxext.opengraph>=0.4.2',
57 'sphinxemoji>=0.2.0',
58 'sphinx_rtd_theme>=1.0.0',
59 'testbook>=0.4.2',
60 'myst-parser>=0.15.2',
61 ]
62 extra_deps['logging'] = ['wandb>=0.12.2', 'apache-libcloud>=3.4.1']
63
64 extra_deps['nlp'] = [
65 'transformers>=4.11.3',
66 'datasets>=1.14.0',
67 ]
68
69 extra_deps['unet'] = [
70 'monai>=0.7.0',
71 'scikit-learn>=1.0.1',
72 ]
73
74 extra_deps['deepspeed'] = [
75 'deepspeed>=0.5.5',
76 ]
77
78 extra_deps['all'] = set(dep for deps in extra_deps.values() for dep in deps)
79
80 setup(
81 name="mosaicml",
82 version="0.3.1",
83 author="MosaicML",
84 author_email="[email protected]",
85 description="composing methods for ML training efficiency",
86 long_description=long_description,
87 long_description_content_type="text/markdown",
88 url="https://github.com/mosaicml/composer",
89 include_package_data=True,
90 package_data={
91 "composer": ['py.typed'],
92 "": package_files('composer/yamls'),
93 },
94 packages=setuptools.find_packages(exclude=["tests*"]),
95 classifiers=[
96 "Programming Language :: Python :: 3",
97 ],
98 install_requires=install_requires,
99 entry_points={
100 'console_scripts': ['composer = composer.cli.launcher:main',],
101 },
102 extras_require=extra_deps,
103 dependency_links=['https://developer.download.nvidia.com/compute/redist'],
104 python_requires='>=3.7',
105 ext_package="composer",
106 )
107
108 # only visible if user installs with verbose -v flag
109 # Printing to stdout as not to interfere with setup.py CLI flags (e.g. --version)
110 print("*" * 20, file=sys.stderr)
111 print(textwrap.dedent("""NOTE: For best performance, we recommend installing Pillow-SIMD
112 for accelerated image processing operations. To install:
113 \t pip uninstall pillow && pip install pillow-simd"""),
114 file=sys.stderr)
115 print("*" * 20, file=sys.stderr)
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,12 +1,16 @@
# Copyright 2021 MosaicML. All Rights Reserved.
import os
+import site
import sys
import textwrap
import setuptools
from setuptools import setup
+# From https://github.com/pypa/pip/issues/7953#issuecomment-645133255
+site.ENABLE_USER_SITE = "--user" in sys.argv[1:]
+
def package_files(directory: str):
# from https://stackoverflow.com/a/36693250
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,12 +1,16 @@\n # Copyright 2021 MosaicML. All Rights Reserved.\n \n import os\n+import site\n import sys\n import textwrap\n \n import setuptools\n from setuptools import setup\n \n+# From https://github.com/pypa/pip/issues/7953#issuecomment-645133255\n+site.ENABLE_USER_SITE = \"--user\" in sys.argv[1:]\n+\n \n def package_files(directory: str):\n # from https://stackoverflow.com/a/36693250\n", "issue": "Add venv into docker image to enable editable `pip install`\nWhen trying to install composer with `pip install -e .` from within the docker image, we are seeing this error:\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/bin/composer\", line 33, in <module>\r\n sys.exit(load_entry_point('mosaicml', 'console_scripts', 'composer')())\r\n File \"/usr/bin/composer\", line 22, in importlib_load_entry_point\r\n for entry_point in distribution(dist_name).entry_points\r\n File \"/usr/lib/python3.8/importlib/metadata.py\", line 445, in distribution\r\n return Distribution.from_name(distribution_name)\r\n File \"/usr/lib/python3.8/importlib/metadata.py\", line 169, in from_name\r\n raise PackageNotFoundError(name)\r\nimportlib.metadata.PackageNotFoundError: mosaicml\r\n```\r\nThis seems to be remedied by running the `pip install` from within a virtualenv. Can we bake a virtualenv into the docker image as a workaround?\n", "before_files": [{"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nimport os\nimport sys\nimport textwrap\n\nimport setuptools\nfrom setuptools import setup\n\n\ndef package_files(directory: str):\n # from https://stackoverflow.com/a/36693250\n paths = []\n for (path, _, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\ninstall_requires = [\n \"pyyaml>=5.4.1\",\n \"tqdm>=4.62.3\",\n \"torchmetrics>=0.6.0\",\n \"torch_optimizer==0.1.0\",\n \"torchvision>=0.9.0\",\n \"torch>=1.9\",\n \"yahp>=0.0.14\",\n \"numpy==1.21.5\",\n]\nextra_deps = {}\n\nextra_deps['base'] = []\n\nextra_deps['dev'] = [\n \"custom_inherit==2.3.2\",\n 'junitparser>=2.1.1',\n 'coverage[toml]>=6.1.1',\n 'fasteners>=0.16.3', # run_directory_uploader tests require fasteners\n 'pytest>=6.2.0',\n 'yapf>=0.13.0',\n 'isort>=5.9.3',\n 'ipython>=7.29.0',\n 'ipykernel>=6.5.0',\n 'jupyter>=1.0.0',\n 'yamllint>=1.26.2',\n 'pytest-timeout>=1.4.2',\n 'recommonmark>=0.7.1',\n 'sphinx>=4.2.0',\n 'sphinx_copybutton>=0.4.0',\n 'sphinx_markdown_tables>=0.0.15',\n 'sphinx-argparse>=0.3.1',\n 'sphinxcontrib.katex>=0.8.6',\n 'sphinxext.opengraph>=0.4.2',\n 'sphinxemoji>=0.2.0',\n 'sphinx_rtd_theme>=1.0.0',\n 'testbook>=0.4.2',\n 'myst-parser>=0.15.2',\n]\nextra_deps['logging'] = ['wandb>=0.12.2', 'apache-libcloud>=3.4.1']\n\nextra_deps['nlp'] = [\n 'transformers>=4.11.3',\n 'datasets>=1.14.0',\n]\n\nextra_deps['unet'] = [\n 'monai>=0.7.0',\n 'scikit-learn>=1.0.1',\n]\n\nextra_deps['deepspeed'] = [\n 'deepspeed>=0.5.5',\n]\n\nextra_deps['all'] = set(dep for deps in extra_deps.values() for dep in deps)\n\nsetup(\n name=\"mosaicml\",\n version=\"0.3.1\",\n author=\"MosaicML\",\n author_email=\"[email protected]\",\n description=\"composing methods for ML training efficiency\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/mosaicml/composer\",\n include_package_data=True,\n package_data={\n \"composer\": ['py.typed'],\n \"\": package_files('composer/yamls'),\n },\n packages=setuptools.find_packages(exclude=[\"tests*\"]),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n ],\n install_requires=install_requires,\n entry_points={\n 'console_scripts': ['composer = composer.cli.launcher:main',],\n },\n extras_require=extra_deps,\n dependency_links=['https://developer.download.nvidia.com/compute/redist'],\n python_requires='>=3.7',\n ext_package=\"composer\",\n)\n\n# only visible if user installs with verbose -v flag\n# Printing to stdout as not to interfere with setup.py CLI flags (e.g. --version)\nprint(\"*\" * 20, file=sys.stderr)\nprint(textwrap.dedent(\"\"\"NOTE: For best performance, we recommend installing Pillow-SIMD\n for accelerated image processing operations. To install:\n \\t pip uninstall pillow && pip install pillow-simd\"\"\"),\n file=sys.stderr)\nprint(\"*\" * 20, file=sys.stderr)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nimport os\nimport site\nimport sys\nimport textwrap\n\nimport setuptools\nfrom setuptools import setup\n\n# From https://github.com/pypa/pip/issues/7953#issuecomment-645133255\nsite.ENABLE_USER_SITE = \"--user\" in sys.argv[1:]\n\n\ndef package_files(directory: str):\n # from https://stackoverflow.com/a/36693250\n paths = []\n for (path, _, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\ninstall_requires = [\n \"pyyaml>=5.4.1\",\n \"tqdm>=4.62.3\",\n \"torchmetrics>=0.6.0\",\n \"torch_optimizer==0.1.0\",\n \"torchvision>=0.9.0\",\n \"torch>=1.9\",\n \"yahp>=0.0.14\",\n \"numpy==1.21.5\",\n]\nextra_deps = {}\n\nextra_deps['base'] = []\n\nextra_deps['dev'] = [\n \"custom_inherit==2.3.2\",\n 'junitparser>=2.1.1',\n 'coverage[toml]>=6.1.1',\n 'fasteners>=0.16.3', # run_directory_uploader tests require fasteners\n 'pytest>=6.2.0',\n 'yapf>=0.13.0',\n 'isort>=5.9.3',\n 'ipython>=7.29.0',\n 'ipykernel>=6.5.0',\n 'jupyter>=1.0.0',\n 'yamllint>=1.26.2',\n 'pytest-timeout>=1.4.2',\n 'recommonmark>=0.7.1',\n 'sphinx>=4.2.0',\n 'sphinx_copybutton>=0.4.0',\n 'sphinx_markdown_tables>=0.0.15',\n 'sphinx-argparse>=0.3.1',\n 'sphinxcontrib.katex>=0.8.6',\n 'sphinxext.opengraph>=0.4.2',\n 'sphinxemoji>=0.2.0',\n 'sphinx_rtd_theme>=1.0.0',\n 'testbook>=0.4.2',\n 'myst-parser>=0.15.2',\n]\nextra_deps['logging'] = ['wandb>=0.12.2', 'apache-libcloud>=3.4.1']\n\nextra_deps['nlp'] = [\n 'transformers>=4.11.3',\n 'datasets>=1.14.0',\n]\n\nextra_deps['unet'] = [\n 'monai>=0.7.0',\n 'scikit-learn>=1.0.1',\n]\n\nextra_deps['deepspeed'] = [\n 'deepspeed>=0.5.5',\n]\n\nextra_deps['all'] = set(dep for deps in extra_deps.values() for dep in deps)\n\nsetup(\n name=\"mosaicml\",\n version=\"0.3.1\",\n author=\"MosaicML\",\n author_email=\"[email protected]\",\n description=\"composing methods for ML training efficiency\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/mosaicml/composer\",\n include_package_data=True,\n package_data={\n \"composer\": ['py.typed'],\n \"\": package_files('composer/yamls'),\n },\n packages=setuptools.find_packages(exclude=[\"tests*\"]),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n ],\n install_requires=install_requires,\n entry_points={\n 'console_scripts': ['composer = composer.cli.launcher:main',],\n },\n extras_require=extra_deps,\n dependency_links=['https://developer.download.nvidia.com/compute/redist'],\n python_requires='>=3.7',\n ext_package=\"composer\",\n)\n\n# only visible if user installs with verbose -v flag\n# Printing to stdout as not to interfere with setup.py CLI flags (e.g. --version)\nprint(\"*\" * 20, file=sys.stderr)\nprint(textwrap.dedent(\"\"\"NOTE: For best performance, we recommend installing Pillow-SIMD\n for accelerated image processing operations. To install:\n \\t pip uninstall pillow && pip install pillow-simd\"\"\"),\n file=sys.stderr)\nprint(\"*\" * 20, file=sys.stderr)\n", "path": "setup.py"}]} |
gh_patches_debug_1545 | rasdani/github-patches | git_diff | python__mypy-4106 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
No constraint inferred for Any vs. Type[T]
I'd expect the revealed type to be `Any` instead of `<nothing>` for this example:
```py
from typing import Type, Any, TypeVar
T = TypeVar('T')
def f(c: Type[T]) -> T: ...
x: Any
reveal_type(f(x)) # <nothing>
```
It looks like constraint inference doesn't work correctly when matching `Any` against `Type[T]`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mypy/constraints.py`
Content:
```
1 """Type inference constraints."""
2
3 from typing import Iterable, List, Optional, Sequence
4
5 from mypy import experiments
6 from mypy.types import (
7 CallableType, Type, TypeVisitor, UnboundType, AnyType, NoneTyp, TypeVarType, Instance,
8 TupleType, TypedDictType, UnionType, Overloaded, ErasedType, PartialType, DeletedType,
9 UninhabitedType, TypeType, TypeVarId, TypeQuery, is_named_instance, TypeOfAny
10 )
11 from mypy.maptype import map_instance_to_supertype
12 from mypy import nodes
13 import mypy.subtypes
14 from mypy.sametypes import is_same_type
15 from mypy.erasetype import erase_typevars
16
17
18 SUBTYPE_OF = 0 # type: int
19 SUPERTYPE_OF = 1 # type: int
20
21
22 class Constraint:
23 """A representation of a type constraint.
24
25 It can be either T <: type or T :> type (T is a type variable).
26 """
27
28 type_var = None # type: TypeVarId
29 op = 0 # SUBTYPE_OF or SUPERTYPE_OF
30 target = None # type: Type
31
32 def __init__(self, type_var: TypeVarId, op: int, target: Type) -> None:
33 self.type_var = type_var
34 self.op = op
35 self.target = target
36
37 def __repr__(self) -> str:
38 op_str = '<:'
39 if self.op == SUPERTYPE_OF:
40 op_str = ':>'
41 return '{} {} {}'.format(self.type_var, op_str, self.target)
42
43
44 def infer_constraints_for_callable(
45 callee: CallableType, arg_types: Sequence[Optional[Type]], arg_kinds: List[int],
46 formal_to_actual: List[List[int]]) -> List[Constraint]:
47 """Infer type variable constraints for a callable and actual arguments.
48
49 Return a list of constraints.
50 """
51 constraints = [] # type: List[Constraint]
52 tuple_counter = [0]
53
54 for i, actuals in enumerate(formal_to_actual):
55 for actual in actuals:
56 actual_arg_type = arg_types[actual]
57 if actual_arg_type is None:
58 continue
59
60 actual_type = get_actual_type(actual_arg_type, arg_kinds[actual],
61 tuple_counter)
62 c = infer_constraints(callee.arg_types[i], actual_type,
63 SUPERTYPE_OF)
64 constraints.extend(c)
65
66 return constraints
67
68
69 def get_actual_type(arg_type: Type, kind: int,
70 tuple_counter: List[int]) -> Type:
71 """Return the type of an actual argument with the given kind.
72
73 If the argument is a *arg, return the individual argument item.
74 """
75
76 if kind == nodes.ARG_STAR:
77 if isinstance(arg_type, Instance):
78 if arg_type.type.fullname() == 'builtins.list':
79 # List *arg.
80 return arg_type.args[0]
81 elif arg_type.args:
82 # TODO try to map type arguments to Iterable
83 return arg_type.args[0]
84 else:
85 return AnyType(TypeOfAny.from_error)
86 elif isinstance(arg_type, TupleType):
87 # Get the next tuple item of a tuple *arg.
88 tuple_counter[0] += 1
89 return arg_type.items[tuple_counter[0] - 1]
90 else:
91 return AnyType(TypeOfAny.from_error)
92 elif kind == nodes.ARG_STAR2:
93 if isinstance(arg_type, Instance) and (arg_type.type.fullname() == 'builtins.dict'):
94 # Dict **arg. TODO more general (Mapping)
95 return arg_type.args[1]
96 else:
97 return AnyType(TypeOfAny.from_error)
98 else:
99 # No translation for other kinds.
100 return arg_type
101
102
103 def infer_constraints(template: Type, actual: Type,
104 direction: int) -> List[Constraint]:
105 """Infer type constraints.
106
107 Match a template type, which may contain type variable references,
108 recursively against a type which does not contain (the same) type
109 variable references. The result is a list of type constrains of
110 form 'T is a supertype/subtype of x', where T is a type variable
111 present in the template and x is a type without reference to type
112 variables present in the template.
113
114 Assume T and S are type variables. Now the following results can be
115 calculated (read as '(template, actual) --> result'):
116
117 (T, X) --> T :> X
118 (X[T], X[Y]) --> T <: Y and T :> Y
119 ((T, T), (X, Y)) --> T :> X and T :> Y
120 ((T, S), (X, Y)) --> T :> X and S :> Y
121 (X[T], Any) --> T <: Any and T :> Any
122
123 The constraints are represented as Constraint objects.
124 """
125
126 # If the template is simply a type variable, emit a Constraint directly.
127 # We need to handle this case before handling Unions for two reasons:
128 # 1. "T <: Union[U1, U2]" is not equivalent to "T <: U1 or T <: U2",
129 # because T can itself be a union (notably, Union[U1, U2] itself).
130 # 2. "T :> Union[U1, U2]" is logically equivalent to "T :> U1 and
131 # T :> U2", but they are not equivalent to the constraint solver,
132 # which never introduces new Union types (it uses join() instead).
133 if isinstance(template, TypeVarType):
134 return [Constraint(template.id, direction, actual)]
135
136 # Now handle the case of either template or actual being a Union.
137 # For a Union to be a subtype of another type, every item of the Union
138 # must be a subtype of that type, so concatenate the constraints.
139 if direction == SUBTYPE_OF and isinstance(template, UnionType):
140 res = []
141 for t_item in template.items:
142 res.extend(infer_constraints(t_item, actual, direction))
143 return res
144 if direction == SUPERTYPE_OF and isinstance(actual, UnionType):
145 res = []
146 for a_item in actual.items:
147 res.extend(infer_constraints(template, a_item, direction))
148 return res
149
150 # Now the potential subtype is known not to be a Union or a type
151 # variable that we are solving for. In that case, for a Union to
152 # be a supertype of the potential subtype, some item of the Union
153 # must be a supertype of it.
154 if direction == SUBTYPE_OF and isinstance(actual, UnionType):
155 # If some of items is not a complete type, disregard that.
156 items = simplify_away_incomplete_types(actual.items)
157 # We infer constraints eagerly -- try to find constraints for a type
158 # variable if possible. This seems to help with some real-world
159 # use cases.
160 return any_constraints(
161 [infer_constraints_if_possible(template, a_item, direction)
162 for a_item in items],
163 eager=True)
164 if direction == SUPERTYPE_OF and isinstance(template, UnionType):
165 # When the template is a union, we are okay with leaving some
166 # type variables indeterminate. This helps with some special
167 # cases, though this isn't very principled.
168 return any_constraints(
169 [infer_constraints_if_possible(t_item, actual, direction)
170 for t_item in template.items],
171 eager=False)
172
173 # Remaining cases are handled by ConstraintBuilderVisitor.
174 return template.accept(ConstraintBuilderVisitor(actual, direction))
175
176
177 def infer_constraints_if_possible(template: Type, actual: Type,
178 direction: int) -> Optional[List[Constraint]]:
179 """Like infer_constraints, but return None if the input relation is
180 known to be unsatisfiable, for example if template=List[T] and actual=int.
181 (In this case infer_constraints would return [], just like it would for
182 an automatically satisfied relation like template=List[T] and actual=object.)
183 """
184 if (direction == SUBTYPE_OF and
185 not mypy.subtypes.is_subtype(erase_typevars(template), actual)):
186 return None
187 if (direction == SUPERTYPE_OF and
188 not mypy.subtypes.is_subtype(actual, erase_typevars(template))):
189 return None
190 return infer_constraints(template, actual, direction)
191
192
193 def any_constraints(options: List[Optional[List[Constraint]]], eager: bool) -> List[Constraint]:
194 """Deduce what we can from a collection of constraint lists.
195
196 It's a given that at least one of the lists must be satisfied. A
197 None element in the list of options represents an unsatisfiable
198 constraint and is ignored. Ignore empty constraint lists if eager
199 is true -- they are always trivially satisfiable.
200 """
201 if eager:
202 valid_options = [option for option in options if option]
203 else:
204 valid_options = [option for option in options if option is not None]
205 if len(valid_options) == 1:
206 return valid_options[0]
207 elif (len(valid_options) > 1 and
208 all(is_same_constraints(valid_options[0], c)
209 for c in valid_options[1:])):
210 # Multiple sets of constraints that are all the same. Just pick any one of them.
211 # TODO: More generally, if a given (variable, direction) pair appears in
212 # every option, combine the bounds with meet/join.
213 return valid_options[0]
214
215 # Otherwise, there are either no valid options or multiple, inconsistent valid
216 # options. Give up and deduce nothing.
217 return []
218
219
220 def is_same_constraints(x: List[Constraint], y: List[Constraint]) -> bool:
221 for c1 in x:
222 if not any(is_same_constraint(c1, c2) for c2 in y):
223 return False
224 for c1 in y:
225 if not any(is_same_constraint(c1, c2) for c2 in x):
226 return False
227 return True
228
229
230 def is_same_constraint(c1: Constraint, c2: Constraint) -> bool:
231 return (c1.type_var == c2.type_var
232 and c1.op == c2.op
233 and is_same_type(c1.target, c2.target))
234
235
236 def simplify_away_incomplete_types(types: List[Type]) -> List[Type]:
237 complete = [typ for typ in types if is_complete_type(typ)]
238 if complete:
239 return complete
240 else:
241 return types
242
243
244 def is_complete_type(typ: Type) -> bool:
245 """Is a type complete?
246
247 A complete doesn't have uninhabited type components or (when not in strict
248 optional mode) None components.
249 """
250 return typ.accept(CompleteTypeVisitor())
251
252
253 class CompleteTypeVisitor(TypeQuery[bool]):
254 def __init__(self) -> None:
255 super().__init__(all)
256
257 def visit_uninhabited_type(self, t: UninhabitedType) -> bool:
258 return False
259
260
261 class ConstraintBuilderVisitor(TypeVisitor[List[Constraint]]):
262 """Visitor class for inferring type constraints."""
263
264 # The type that is compared against a template
265 # TODO: The value may be None. Is that actually correct?
266 actual = None # type: Type
267
268 def __init__(self, actual: Type, direction: int) -> None:
269 # Direction must be SUBTYPE_OF or SUPERTYPE_OF.
270 self.actual = actual
271 self.direction = direction
272
273 # Trivial leaf types
274
275 def visit_unbound_type(self, template: UnboundType) -> List[Constraint]:
276 return []
277
278 def visit_any(self, template: AnyType) -> List[Constraint]:
279 return []
280
281 def visit_none_type(self, template: NoneTyp) -> List[Constraint]:
282 return []
283
284 def visit_uninhabited_type(self, template: UninhabitedType) -> List[Constraint]:
285 return []
286
287 def visit_erased_type(self, template: ErasedType) -> List[Constraint]:
288 return []
289
290 def visit_deleted_type(self, template: DeletedType) -> List[Constraint]:
291 return []
292
293 # Errors
294
295 def visit_partial_type(self, template: PartialType) -> List[Constraint]:
296 # We can't do anything useful with a partial type here.
297 assert False, "Internal error"
298
299 # Non-trivial leaf type
300
301 def visit_type_var(self, template: TypeVarType) -> List[Constraint]:
302 assert False, ("Unexpected TypeVarType in ConstraintBuilderVisitor"
303 " (should have been handled in infer_constraints)")
304
305 # Non-leaf types
306
307 def visit_instance(self, template: Instance) -> List[Constraint]:
308 original_actual = actual = self.actual
309 res = [] # type: List[Constraint]
310 if isinstance(actual, CallableType) and actual.fallback is not None:
311 actual = actual.fallback
312 if isinstance(actual, TypedDictType):
313 actual = actual.as_anonymous().fallback
314 if isinstance(actual, Instance):
315 instance = actual
316 # We always try nominal inference if possible,
317 # it is much faster than the structural one.
318 if (self.direction == SUBTYPE_OF and
319 template.type.has_base(instance.type.fullname())):
320 mapped = map_instance_to_supertype(template, instance.type)
321 for i in range(len(instance.args)):
322 # The constraints for generic type parameters are
323 # invariant. Include constraints from both directions
324 # to achieve the effect.
325 res.extend(infer_constraints(
326 mapped.args[i], instance.args[i], self.direction))
327 res.extend(infer_constraints(
328 mapped.args[i], instance.args[i], neg_op(self.direction)))
329 return res
330 elif (self.direction == SUPERTYPE_OF and
331 instance.type.has_base(template.type.fullname())):
332 mapped = map_instance_to_supertype(instance, template.type)
333 for j in range(len(template.args)):
334 # The constraints for generic type parameters are
335 # invariant.
336 res.extend(infer_constraints(
337 template.args[j], mapped.args[j], self.direction))
338 res.extend(infer_constraints(
339 template.args[j], mapped.args[j], neg_op(self.direction)))
340 return res
341 if (template.type.is_protocol and self.direction == SUPERTYPE_OF and
342 # We avoid infinite recursion for structural subtypes by checking
343 # whether this type already appeared in the inference chain.
344 # This is a conservative way break the inference cycles.
345 # It never produces any "false" constraints but gives up soon
346 # on purely structural inference cycles, see #3829.
347 not any(is_same_type(template, t) for t in template.type.inferring) and
348 mypy.subtypes.is_subtype(instance, erase_typevars(template))):
349 template.type.inferring.append(template)
350 self.infer_constraints_from_protocol_members(res, instance, template,
351 original_actual, template)
352 template.type.inferring.pop()
353 return res
354 elif (instance.type.is_protocol and self.direction == SUBTYPE_OF and
355 # We avoid infinite recursion for structural subtypes also here.
356 not any(is_same_type(instance, i) for i in instance.type.inferring) and
357 mypy.subtypes.is_subtype(erase_typevars(template), instance)):
358 instance.type.inferring.append(instance)
359 self.infer_constraints_from_protocol_members(res, instance, template,
360 template, instance)
361 instance.type.inferring.pop()
362 return res
363 if isinstance(actual, AnyType):
364 # IDEA: Include both ways, i.e. add negation as well?
365 return self.infer_against_any(template.args, actual)
366 if (isinstance(actual, TupleType) and
367 (is_named_instance(template, 'typing.Iterable') or
368 is_named_instance(template, 'typing.Container') or
369 is_named_instance(template, 'typing.Sequence') or
370 is_named_instance(template, 'typing.Reversible'))
371 and self.direction == SUPERTYPE_OF):
372 for item in actual.items:
373 cb = infer_constraints(template.args[0], item, SUPERTYPE_OF)
374 res.extend(cb)
375 return res
376 elif (isinstance(actual, TupleType) and template.type.is_protocol and
377 self.direction == SUPERTYPE_OF):
378 if mypy.subtypes.is_subtype(actual.fallback, erase_typevars(template)):
379 res.extend(infer_constraints(template, actual.fallback, self.direction))
380 return res
381 return []
382 else:
383 return []
384
385 def infer_constraints_from_protocol_members(self, res: List[Constraint],
386 instance: Instance, template: Instance,
387 subtype: Type, protocol: Instance) -> None:
388 """Infer constraints for situations where either 'template' or 'instance' is a protocol.
389
390 The 'protocol' is the one of two that is an instance of protocol type, 'subtype'
391 is the type used to bind self during inference. Currently, we just infer constrains for
392 every protocol member type (both ways for settable members).
393 """
394 for member in protocol.type.protocol_members:
395 inst = mypy.subtypes.find_member(member, instance, subtype)
396 temp = mypy.subtypes.find_member(member, template, subtype)
397 assert inst is not None and temp is not None
398 # The above is safe since at this point we know that 'instance' is a subtype
399 # of (erased) 'template', therefore it defines all protocol members
400 res.extend(infer_constraints(temp, inst, self.direction))
401 if (mypy.subtypes.IS_SETTABLE in
402 mypy.subtypes.get_member_flags(member, protocol.type)):
403 # Settable members are invariant, add opposite constraints
404 res.extend(infer_constraints(temp, inst, neg_op(self.direction)))
405
406 def visit_callable_type(self, template: CallableType) -> List[Constraint]:
407 if isinstance(self.actual, CallableType):
408 cactual = self.actual
409 # FIX verify argument counts
410 # FIX what if one of the functions is generic
411 res = [] # type: List[Constraint]
412
413 # We can't infer constraints from arguments if the template is Callable[..., T] (with
414 # literal '...').
415 if not template.is_ellipsis_args:
416 # The lengths should match, but don't crash (it will error elsewhere).
417 for t, a in zip(template.arg_types, cactual.arg_types):
418 # Negate direction due to function argument type contravariance.
419 res.extend(infer_constraints(t, a, neg_op(self.direction)))
420 res.extend(infer_constraints(template.ret_type, cactual.ret_type,
421 self.direction))
422 return res
423 elif isinstance(self.actual, AnyType):
424 # FIX what if generic
425 res = self.infer_against_any(template.arg_types, self.actual)
426 any_type = AnyType(TypeOfAny.from_another_any, source_any=self.actual)
427 res.extend(infer_constraints(template.ret_type, any_type, self.direction))
428 return res
429 elif isinstance(self.actual, Overloaded):
430 return self.infer_against_overloaded(self.actual, template)
431 elif isinstance(self.actual, TypeType):
432 return infer_constraints(template.ret_type, self.actual.item, self.direction)
433 elif isinstance(self.actual, Instance):
434 # Instances with __call__ method defined are considered structural
435 # subtypes of Callable with a compatible signature.
436 call = mypy.subtypes.find_member('__call__', self.actual, self.actual)
437 if call:
438 return infer_constraints(template, call, self.direction)
439 else:
440 return []
441 else:
442 return []
443
444 def infer_against_overloaded(self, overloaded: Overloaded,
445 template: CallableType) -> List[Constraint]:
446 # Create constraints by matching an overloaded type against a template.
447 # This is tricky to do in general. We cheat by only matching against
448 # the first overload item, and by only matching the return type. This
449 # seems to work somewhat well, but we should really use a more
450 # reliable technique.
451 item = find_matching_overload_item(overloaded, template)
452 return infer_constraints(template.ret_type, item.ret_type,
453 self.direction)
454
455 def visit_tuple_type(self, template: TupleType) -> List[Constraint]:
456 actual = self.actual
457 if isinstance(actual, TupleType) and len(actual.items) == len(template.items):
458 res = [] # type: List[Constraint]
459 for i in range(len(template.items)):
460 res.extend(infer_constraints(template.items[i],
461 actual.items[i],
462 self.direction))
463 return res
464 elif isinstance(actual, AnyType):
465 return self.infer_against_any(template.items, actual)
466 else:
467 return []
468
469 def visit_typeddict_type(self, template: TypedDictType) -> List[Constraint]:
470 actual = self.actual
471 if isinstance(actual, TypedDictType):
472 res = [] # type: List[Constraint]
473 # NOTE: Non-matching keys are ignored. Compatibility is checked
474 # elsewhere so this shouldn't be unsafe.
475 for (item_name, template_item_type, actual_item_type) in template.zip(actual):
476 res.extend(infer_constraints(template_item_type,
477 actual_item_type,
478 self.direction))
479 return res
480 elif isinstance(actual, AnyType):
481 return self.infer_against_any(template.items.values(), actual)
482 else:
483 return []
484
485 def visit_union_type(self, template: UnionType) -> List[Constraint]:
486 assert False, ("Unexpected UnionType in ConstraintBuilderVisitor"
487 " (should have been handled in infer_constraints)")
488
489 def infer_against_any(self, types: Iterable[Type], any_type: AnyType) -> List[Constraint]:
490 res = [] # type: List[Constraint]
491 for t in types:
492 res.extend(infer_constraints(t, any_type, self.direction))
493 return res
494
495 def visit_overloaded(self, template: Overloaded) -> List[Constraint]:
496 res = [] # type: List[Constraint]
497 for t in template.items():
498 res.extend(infer_constraints(t, self.actual, self.direction))
499 return res
500
501 def visit_type_type(self, template: TypeType) -> List[Constraint]:
502 if isinstance(self.actual, CallableType):
503 return infer_constraints(template.item, self.actual.ret_type, self.direction)
504 elif isinstance(self.actual, Overloaded):
505 return infer_constraints(template.item, self.actual.items()[0].ret_type,
506 self.direction)
507 elif isinstance(self.actual, TypeType):
508 return infer_constraints(template.item, self.actual.item, self.direction)
509 else:
510 return []
511
512
513 def neg_op(op: int) -> int:
514 """Map SubtypeOf to SupertypeOf and vice versa."""
515
516 if op == SUBTYPE_OF:
517 return SUPERTYPE_OF
518 elif op == SUPERTYPE_OF:
519 return SUBTYPE_OF
520 else:
521 raise ValueError('Invalid operator {}'.format(op))
522
523
524 def find_matching_overload_item(overloaded: Overloaded, template: CallableType) -> CallableType:
525 """Disambiguate overload item against a template."""
526 items = overloaded.items()
527 for item in items:
528 # Return type may be indeterminate in the template, so ignore it when performing a
529 # subtype check.
530 if mypy.subtypes.is_callable_subtype(item, template, ignore_return=True):
531 return item
532 # Fall back to the first item if we can't find a match. This is totally arbitrary --
533 # maybe we should just bail out at this point.
534 return items[0]
535
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mypy/constraints.py b/mypy/constraints.py
--- a/mypy/constraints.py
+++ b/mypy/constraints.py
@@ -506,6 +506,8 @@
self.direction)
elif isinstance(self.actual, TypeType):
return infer_constraints(template.item, self.actual.item, self.direction)
+ elif isinstance(self.actual, AnyType):
+ return infer_constraints(template.item, self.actual, self.direction)
else:
return []
| {"golden_diff": "diff --git a/mypy/constraints.py b/mypy/constraints.py\n--- a/mypy/constraints.py\n+++ b/mypy/constraints.py\n@@ -506,6 +506,8 @@\n self.direction)\n elif isinstance(self.actual, TypeType):\n return infer_constraints(template.item, self.actual.item, self.direction)\n+ elif isinstance(self.actual, AnyType):\n+ return infer_constraints(template.item, self.actual, self.direction)\n else:\n return []\n", "issue": "No constraint inferred for Any vs. Type[T]\nI'd expect the revealed type to be `Any` instead of `<nothing>` for this example:\r\n\r\n```py\r\nfrom typing import Type, Any, TypeVar\r\n\r\nT = TypeVar('T')\r\n\r\ndef f(c: Type[T]) -> T: ...\r\n\r\nx: Any\r\nreveal_type(f(x)) # <nothing>\r\n```\r\n\r\nIt looks like constraint inference doesn't work correctly when matching `Any` against `Type[T]`.\n", "before_files": [{"content": "\"\"\"Type inference constraints.\"\"\"\n\nfrom typing import Iterable, List, Optional, Sequence\n\nfrom mypy import experiments\nfrom mypy.types import (\n CallableType, Type, TypeVisitor, UnboundType, AnyType, NoneTyp, TypeVarType, Instance,\n TupleType, TypedDictType, UnionType, Overloaded, ErasedType, PartialType, DeletedType,\n UninhabitedType, TypeType, TypeVarId, TypeQuery, is_named_instance, TypeOfAny\n)\nfrom mypy.maptype import map_instance_to_supertype\nfrom mypy import nodes\nimport mypy.subtypes\nfrom mypy.sametypes import is_same_type\nfrom mypy.erasetype import erase_typevars\n\n\nSUBTYPE_OF = 0 # type: int\nSUPERTYPE_OF = 1 # type: int\n\n\nclass Constraint:\n \"\"\"A representation of a type constraint.\n\n It can be either T <: type or T :> type (T is a type variable).\n \"\"\"\n\n type_var = None # type: TypeVarId\n op = 0 # SUBTYPE_OF or SUPERTYPE_OF\n target = None # type: Type\n\n def __init__(self, type_var: TypeVarId, op: int, target: Type) -> None:\n self.type_var = type_var\n self.op = op\n self.target = target\n\n def __repr__(self) -> str:\n op_str = '<:'\n if self.op == SUPERTYPE_OF:\n op_str = ':>'\n return '{} {} {}'.format(self.type_var, op_str, self.target)\n\n\ndef infer_constraints_for_callable(\n callee: CallableType, arg_types: Sequence[Optional[Type]], arg_kinds: List[int],\n formal_to_actual: List[List[int]]) -> List[Constraint]:\n \"\"\"Infer type variable constraints for a callable and actual arguments.\n\n Return a list of constraints.\n \"\"\"\n constraints = [] # type: List[Constraint]\n tuple_counter = [0]\n\n for i, actuals in enumerate(formal_to_actual):\n for actual in actuals:\n actual_arg_type = arg_types[actual]\n if actual_arg_type is None:\n continue\n\n actual_type = get_actual_type(actual_arg_type, arg_kinds[actual],\n tuple_counter)\n c = infer_constraints(callee.arg_types[i], actual_type,\n SUPERTYPE_OF)\n constraints.extend(c)\n\n return constraints\n\n\ndef get_actual_type(arg_type: Type, kind: int,\n tuple_counter: List[int]) -> Type:\n \"\"\"Return the type of an actual argument with the given kind.\n\n If the argument is a *arg, return the individual argument item.\n \"\"\"\n\n if kind == nodes.ARG_STAR:\n if isinstance(arg_type, Instance):\n if arg_type.type.fullname() == 'builtins.list':\n # List *arg.\n return arg_type.args[0]\n elif arg_type.args:\n # TODO try to map type arguments to Iterable\n return arg_type.args[0]\n else:\n return AnyType(TypeOfAny.from_error)\n elif isinstance(arg_type, TupleType):\n # Get the next tuple item of a tuple *arg.\n tuple_counter[0] += 1\n return arg_type.items[tuple_counter[0] - 1]\n else:\n return AnyType(TypeOfAny.from_error)\n elif kind == nodes.ARG_STAR2:\n if isinstance(arg_type, Instance) and (arg_type.type.fullname() == 'builtins.dict'):\n # Dict **arg. TODO more general (Mapping)\n return arg_type.args[1]\n else:\n return AnyType(TypeOfAny.from_error)\n else:\n # No translation for other kinds.\n return arg_type\n\n\ndef infer_constraints(template: Type, actual: Type,\n direction: int) -> List[Constraint]:\n \"\"\"Infer type constraints.\n\n Match a template type, which may contain type variable references,\n recursively against a type which does not contain (the same) type\n variable references. The result is a list of type constrains of\n form 'T is a supertype/subtype of x', where T is a type variable\n present in the template and x is a type without reference to type\n variables present in the template.\n\n Assume T and S are type variables. Now the following results can be\n calculated (read as '(template, actual) --> result'):\n\n (T, X) --> T :> X\n (X[T], X[Y]) --> T <: Y and T :> Y\n ((T, T), (X, Y)) --> T :> X and T :> Y\n ((T, S), (X, Y)) --> T :> X and S :> Y\n (X[T], Any) --> T <: Any and T :> Any\n\n The constraints are represented as Constraint objects.\n \"\"\"\n\n # If the template is simply a type variable, emit a Constraint directly.\n # We need to handle this case before handling Unions for two reasons:\n # 1. \"T <: Union[U1, U2]\" is not equivalent to \"T <: U1 or T <: U2\",\n # because T can itself be a union (notably, Union[U1, U2] itself).\n # 2. \"T :> Union[U1, U2]\" is logically equivalent to \"T :> U1 and\n # T :> U2\", but they are not equivalent to the constraint solver,\n # which never introduces new Union types (it uses join() instead).\n if isinstance(template, TypeVarType):\n return [Constraint(template.id, direction, actual)]\n\n # Now handle the case of either template or actual being a Union.\n # For a Union to be a subtype of another type, every item of the Union\n # must be a subtype of that type, so concatenate the constraints.\n if direction == SUBTYPE_OF and isinstance(template, UnionType):\n res = []\n for t_item in template.items:\n res.extend(infer_constraints(t_item, actual, direction))\n return res\n if direction == SUPERTYPE_OF and isinstance(actual, UnionType):\n res = []\n for a_item in actual.items:\n res.extend(infer_constraints(template, a_item, direction))\n return res\n\n # Now the potential subtype is known not to be a Union or a type\n # variable that we are solving for. In that case, for a Union to\n # be a supertype of the potential subtype, some item of the Union\n # must be a supertype of it.\n if direction == SUBTYPE_OF and isinstance(actual, UnionType):\n # If some of items is not a complete type, disregard that.\n items = simplify_away_incomplete_types(actual.items)\n # We infer constraints eagerly -- try to find constraints for a type\n # variable if possible. This seems to help with some real-world\n # use cases.\n return any_constraints(\n [infer_constraints_if_possible(template, a_item, direction)\n for a_item in items],\n eager=True)\n if direction == SUPERTYPE_OF and isinstance(template, UnionType):\n # When the template is a union, we are okay with leaving some\n # type variables indeterminate. This helps with some special\n # cases, though this isn't very principled.\n return any_constraints(\n [infer_constraints_if_possible(t_item, actual, direction)\n for t_item in template.items],\n eager=False)\n\n # Remaining cases are handled by ConstraintBuilderVisitor.\n return template.accept(ConstraintBuilderVisitor(actual, direction))\n\n\ndef infer_constraints_if_possible(template: Type, actual: Type,\n direction: int) -> Optional[List[Constraint]]:\n \"\"\"Like infer_constraints, but return None if the input relation is\n known to be unsatisfiable, for example if template=List[T] and actual=int.\n (In this case infer_constraints would return [], just like it would for\n an automatically satisfied relation like template=List[T] and actual=object.)\n \"\"\"\n if (direction == SUBTYPE_OF and\n not mypy.subtypes.is_subtype(erase_typevars(template), actual)):\n return None\n if (direction == SUPERTYPE_OF and\n not mypy.subtypes.is_subtype(actual, erase_typevars(template))):\n return None\n return infer_constraints(template, actual, direction)\n\n\ndef any_constraints(options: List[Optional[List[Constraint]]], eager: bool) -> List[Constraint]:\n \"\"\"Deduce what we can from a collection of constraint lists.\n\n It's a given that at least one of the lists must be satisfied. A\n None element in the list of options represents an unsatisfiable\n constraint and is ignored. Ignore empty constraint lists if eager\n is true -- they are always trivially satisfiable.\n \"\"\"\n if eager:\n valid_options = [option for option in options if option]\n else:\n valid_options = [option for option in options if option is not None]\n if len(valid_options) == 1:\n return valid_options[0]\n elif (len(valid_options) > 1 and\n all(is_same_constraints(valid_options[0], c)\n for c in valid_options[1:])):\n # Multiple sets of constraints that are all the same. Just pick any one of them.\n # TODO: More generally, if a given (variable, direction) pair appears in\n # every option, combine the bounds with meet/join.\n return valid_options[0]\n\n # Otherwise, there are either no valid options or multiple, inconsistent valid\n # options. Give up and deduce nothing.\n return []\n\n\ndef is_same_constraints(x: List[Constraint], y: List[Constraint]) -> bool:\n for c1 in x:\n if not any(is_same_constraint(c1, c2) for c2 in y):\n return False\n for c1 in y:\n if not any(is_same_constraint(c1, c2) for c2 in x):\n return False\n return True\n\n\ndef is_same_constraint(c1: Constraint, c2: Constraint) -> bool:\n return (c1.type_var == c2.type_var\n and c1.op == c2.op\n and is_same_type(c1.target, c2.target))\n\n\ndef simplify_away_incomplete_types(types: List[Type]) -> List[Type]:\n complete = [typ for typ in types if is_complete_type(typ)]\n if complete:\n return complete\n else:\n return types\n\n\ndef is_complete_type(typ: Type) -> bool:\n \"\"\"Is a type complete?\n\n A complete doesn't have uninhabited type components or (when not in strict\n optional mode) None components.\n \"\"\"\n return typ.accept(CompleteTypeVisitor())\n\n\nclass CompleteTypeVisitor(TypeQuery[bool]):\n def __init__(self) -> None:\n super().__init__(all)\n\n def visit_uninhabited_type(self, t: UninhabitedType) -> bool:\n return False\n\n\nclass ConstraintBuilderVisitor(TypeVisitor[List[Constraint]]):\n \"\"\"Visitor class for inferring type constraints.\"\"\"\n\n # The type that is compared against a template\n # TODO: The value may be None. Is that actually correct?\n actual = None # type: Type\n\n def __init__(self, actual: Type, direction: int) -> None:\n # Direction must be SUBTYPE_OF or SUPERTYPE_OF.\n self.actual = actual\n self.direction = direction\n\n # Trivial leaf types\n\n def visit_unbound_type(self, template: UnboundType) -> List[Constraint]:\n return []\n\n def visit_any(self, template: AnyType) -> List[Constraint]:\n return []\n\n def visit_none_type(self, template: NoneTyp) -> List[Constraint]:\n return []\n\n def visit_uninhabited_type(self, template: UninhabitedType) -> List[Constraint]:\n return []\n\n def visit_erased_type(self, template: ErasedType) -> List[Constraint]:\n return []\n\n def visit_deleted_type(self, template: DeletedType) -> List[Constraint]:\n return []\n\n # Errors\n\n def visit_partial_type(self, template: PartialType) -> List[Constraint]:\n # We can't do anything useful with a partial type here.\n assert False, \"Internal error\"\n\n # Non-trivial leaf type\n\n def visit_type_var(self, template: TypeVarType) -> List[Constraint]:\n assert False, (\"Unexpected TypeVarType in ConstraintBuilderVisitor\"\n \" (should have been handled in infer_constraints)\")\n\n # Non-leaf types\n\n def visit_instance(self, template: Instance) -> List[Constraint]:\n original_actual = actual = self.actual\n res = [] # type: List[Constraint]\n if isinstance(actual, CallableType) and actual.fallback is not None:\n actual = actual.fallback\n if isinstance(actual, TypedDictType):\n actual = actual.as_anonymous().fallback\n if isinstance(actual, Instance):\n instance = actual\n # We always try nominal inference if possible,\n # it is much faster than the structural one.\n if (self.direction == SUBTYPE_OF and\n template.type.has_base(instance.type.fullname())):\n mapped = map_instance_to_supertype(template, instance.type)\n for i in range(len(instance.args)):\n # The constraints for generic type parameters are\n # invariant. Include constraints from both directions\n # to achieve the effect.\n res.extend(infer_constraints(\n mapped.args[i], instance.args[i], self.direction))\n res.extend(infer_constraints(\n mapped.args[i], instance.args[i], neg_op(self.direction)))\n return res\n elif (self.direction == SUPERTYPE_OF and\n instance.type.has_base(template.type.fullname())):\n mapped = map_instance_to_supertype(instance, template.type)\n for j in range(len(template.args)):\n # The constraints for generic type parameters are\n # invariant.\n res.extend(infer_constraints(\n template.args[j], mapped.args[j], self.direction))\n res.extend(infer_constraints(\n template.args[j], mapped.args[j], neg_op(self.direction)))\n return res\n if (template.type.is_protocol and self.direction == SUPERTYPE_OF and\n # We avoid infinite recursion for structural subtypes by checking\n # whether this type already appeared in the inference chain.\n # This is a conservative way break the inference cycles.\n # It never produces any \"false\" constraints but gives up soon\n # on purely structural inference cycles, see #3829.\n not any(is_same_type(template, t) for t in template.type.inferring) and\n mypy.subtypes.is_subtype(instance, erase_typevars(template))):\n template.type.inferring.append(template)\n self.infer_constraints_from_protocol_members(res, instance, template,\n original_actual, template)\n template.type.inferring.pop()\n return res\n elif (instance.type.is_protocol and self.direction == SUBTYPE_OF and\n # We avoid infinite recursion for structural subtypes also here.\n not any(is_same_type(instance, i) for i in instance.type.inferring) and\n mypy.subtypes.is_subtype(erase_typevars(template), instance)):\n instance.type.inferring.append(instance)\n self.infer_constraints_from_protocol_members(res, instance, template,\n template, instance)\n instance.type.inferring.pop()\n return res\n if isinstance(actual, AnyType):\n # IDEA: Include both ways, i.e. add negation as well?\n return self.infer_against_any(template.args, actual)\n if (isinstance(actual, TupleType) and\n (is_named_instance(template, 'typing.Iterable') or\n is_named_instance(template, 'typing.Container') or\n is_named_instance(template, 'typing.Sequence') or\n is_named_instance(template, 'typing.Reversible'))\n and self.direction == SUPERTYPE_OF):\n for item in actual.items:\n cb = infer_constraints(template.args[0], item, SUPERTYPE_OF)\n res.extend(cb)\n return res\n elif (isinstance(actual, TupleType) and template.type.is_protocol and\n self.direction == SUPERTYPE_OF):\n if mypy.subtypes.is_subtype(actual.fallback, erase_typevars(template)):\n res.extend(infer_constraints(template, actual.fallback, self.direction))\n return res\n return []\n else:\n return []\n\n def infer_constraints_from_protocol_members(self, res: List[Constraint],\n instance: Instance, template: Instance,\n subtype: Type, protocol: Instance) -> None:\n \"\"\"Infer constraints for situations where either 'template' or 'instance' is a protocol.\n\n The 'protocol' is the one of two that is an instance of protocol type, 'subtype'\n is the type used to bind self during inference. Currently, we just infer constrains for\n every protocol member type (both ways for settable members).\n \"\"\"\n for member in protocol.type.protocol_members:\n inst = mypy.subtypes.find_member(member, instance, subtype)\n temp = mypy.subtypes.find_member(member, template, subtype)\n assert inst is not None and temp is not None\n # The above is safe since at this point we know that 'instance' is a subtype\n # of (erased) 'template', therefore it defines all protocol members\n res.extend(infer_constraints(temp, inst, self.direction))\n if (mypy.subtypes.IS_SETTABLE in\n mypy.subtypes.get_member_flags(member, protocol.type)):\n # Settable members are invariant, add opposite constraints\n res.extend(infer_constraints(temp, inst, neg_op(self.direction)))\n\n def visit_callable_type(self, template: CallableType) -> List[Constraint]:\n if isinstance(self.actual, CallableType):\n cactual = self.actual\n # FIX verify argument counts\n # FIX what if one of the functions is generic\n res = [] # type: List[Constraint]\n\n # We can't infer constraints from arguments if the template is Callable[..., T] (with\n # literal '...').\n if not template.is_ellipsis_args:\n # The lengths should match, but don't crash (it will error elsewhere).\n for t, a in zip(template.arg_types, cactual.arg_types):\n # Negate direction due to function argument type contravariance.\n res.extend(infer_constraints(t, a, neg_op(self.direction)))\n res.extend(infer_constraints(template.ret_type, cactual.ret_type,\n self.direction))\n return res\n elif isinstance(self.actual, AnyType):\n # FIX what if generic\n res = self.infer_against_any(template.arg_types, self.actual)\n any_type = AnyType(TypeOfAny.from_another_any, source_any=self.actual)\n res.extend(infer_constraints(template.ret_type, any_type, self.direction))\n return res\n elif isinstance(self.actual, Overloaded):\n return self.infer_against_overloaded(self.actual, template)\n elif isinstance(self.actual, TypeType):\n return infer_constraints(template.ret_type, self.actual.item, self.direction)\n elif isinstance(self.actual, Instance):\n # Instances with __call__ method defined are considered structural\n # subtypes of Callable with a compatible signature.\n call = mypy.subtypes.find_member('__call__', self.actual, self.actual)\n if call:\n return infer_constraints(template, call, self.direction)\n else:\n return []\n else:\n return []\n\n def infer_against_overloaded(self, overloaded: Overloaded,\n template: CallableType) -> List[Constraint]:\n # Create constraints by matching an overloaded type against a template.\n # This is tricky to do in general. We cheat by only matching against\n # the first overload item, and by only matching the return type. This\n # seems to work somewhat well, but we should really use a more\n # reliable technique.\n item = find_matching_overload_item(overloaded, template)\n return infer_constraints(template.ret_type, item.ret_type,\n self.direction)\n\n def visit_tuple_type(self, template: TupleType) -> List[Constraint]:\n actual = self.actual\n if isinstance(actual, TupleType) and len(actual.items) == len(template.items):\n res = [] # type: List[Constraint]\n for i in range(len(template.items)):\n res.extend(infer_constraints(template.items[i],\n actual.items[i],\n self.direction))\n return res\n elif isinstance(actual, AnyType):\n return self.infer_against_any(template.items, actual)\n else:\n return []\n\n def visit_typeddict_type(self, template: TypedDictType) -> List[Constraint]:\n actual = self.actual\n if isinstance(actual, TypedDictType):\n res = [] # type: List[Constraint]\n # NOTE: Non-matching keys are ignored. Compatibility is checked\n # elsewhere so this shouldn't be unsafe.\n for (item_name, template_item_type, actual_item_type) in template.zip(actual):\n res.extend(infer_constraints(template_item_type,\n actual_item_type,\n self.direction))\n return res\n elif isinstance(actual, AnyType):\n return self.infer_against_any(template.items.values(), actual)\n else:\n return []\n\n def visit_union_type(self, template: UnionType) -> List[Constraint]:\n assert False, (\"Unexpected UnionType in ConstraintBuilderVisitor\"\n \" (should have been handled in infer_constraints)\")\n\n def infer_against_any(self, types: Iterable[Type], any_type: AnyType) -> List[Constraint]:\n res = [] # type: List[Constraint]\n for t in types:\n res.extend(infer_constraints(t, any_type, self.direction))\n return res\n\n def visit_overloaded(self, template: Overloaded) -> List[Constraint]:\n res = [] # type: List[Constraint]\n for t in template.items():\n res.extend(infer_constraints(t, self.actual, self.direction))\n return res\n\n def visit_type_type(self, template: TypeType) -> List[Constraint]:\n if isinstance(self.actual, CallableType):\n return infer_constraints(template.item, self.actual.ret_type, self.direction)\n elif isinstance(self.actual, Overloaded):\n return infer_constraints(template.item, self.actual.items()[0].ret_type,\n self.direction)\n elif isinstance(self.actual, TypeType):\n return infer_constraints(template.item, self.actual.item, self.direction)\n else:\n return []\n\n\ndef neg_op(op: int) -> int:\n \"\"\"Map SubtypeOf to SupertypeOf and vice versa.\"\"\"\n\n if op == SUBTYPE_OF:\n return SUPERTYPE_OF\n elif op == SUPERTYPE_OF:\n return SUBTYPE_OF\n else:\n raise ValueError('Invalid operator {}'.format(op))\n\n\ndef find_matching_overload_item(overloaded: Overloaded, template: CallableType) -> CallableType:\n \"\"\"Disambiguate overload item against a template.\"\"\"\n items = overloaded.items()\n for item in items:\n # Return type may be indeterminate in the template, so ignore it when performing a\n # subtype check.\n if mypy.subtypes.is_callable_subtype(item, template, ignore_return=True):\n return item\n # Fall back to the first item if we can't find a match. This is totally arbitrary --\n # maybe we should just bail out at this point.\n return items[0]\n", "path": "mypy/constraints.py"}], "after_files": [{"content": "\"\"\"Type inference constraints.\"\"\"\n\nfrom typing import Iterable, List, Optional, Sequence\n\nfrom mypy import experiments\nfrom mypy.types import (\n CallableType, Type, TypeVisitor, UnboundType, AnyType, NoneTyp, TypeVarType, Instance,\n TupleType, TypedDictType, UnionType, Overloaded, ErasedType, PartialType, DeletedType,\n UninhabitedType, TypeType, TypeVarId, TypeQuery, is_named_instance, TypeOfAny\n)\nfrom mypy.maptype import map_instance_to_supertype\nfrom mypy import nodes\nimport mypy.subtypes\nfrom mypy.sametypes import is_same_type\nfrom mypy.erasetype import erase_typevars\n\n\nSUBTYPE_OF = 0 # type: int\nSUPERTYPE_OF = 1 # type: int\n\n\nclass Constraint:\n \"\"\"A representation of a type constraint.\n\n It can be either T <: type or T :> type (T is a type variable).\n \"\"\"\n\n type_var = None # type: TypeVarId\n op = 0 # SUBTYPE_OF or SUPERTYPE_OF\n target = None # type: Type\n\n def __init__(self, type_var: TypeVarId, op: int, target: Type) -> None:\n self.type_var = type_var\n self.op = op\n self.target = target\n\n def __repr__(self) -> str:\n op_str = '<:'\n if self.op == SUPERTYPE_OF:\n op_str = ':>'\n return '{} {} {}'.format(self.type_var, op_str, self.target)\n\n\ndef infer_constraints_for_callable(\n callee: CallableType, arg_types: Sequence[Optional[Type]], arg_kinds: List[int],\n formal_to_actual: List[List[int]]) -> List[Constraint]:\n \"\"\"Infer type variable constraints for a callable and actual arguments.\n\n Return a list of constraints.\n \"\"\"\n constraints = [] # type: List[Constraint]\n tuple_counter = [0]\n\n for i, actuals in enumerate(formal_to_actual):\n for actual in actuals:\n actual_arg_type = arg_types[actual]\n if actual_arg_type is None:\n continue\n\n actual_type = get_actual_type(actual_arg_type, arg_kinds[actual],\n tuple_counter)\n c = infer_constraints(callee.arg_types[i], actual_type,\n SUPERTYPE_OF)\n constraints.extend(c)\n\n return constraints\n\n\ndef get_actual_type(arg_type: Type, kind: int,\n tuple_counter: List[int]) -> Type:\n \"\"\"Return the type of an actual argument with the given kind.\n\n If the argument is a *arg, return the individual argument item.\n \"\"\"\n\n if kind == nodes.ARG_STAR:\n if isinstance(arg_type, Instance):\n if arg_type.type.fullname() == 'builtins.list':\n # List *arg.\n return arg_type.args[0]\n elif arg_type.args:\n # TODO try to map type arguments to Iterable\n return arg_type.args[0]\n else:\n return AnyType(TypeOfAny.from_error)\n elif isinstance(arg_type, TupleType):\n # Get the next tuple item of a tuple *arg.\n tuple_counter[0] += 1\n return arg_type.items[tuple_counter[0] - 1]\n else:\n return AnyType(TypeOfAny.from_error)\n elif kind == nodes.ARG_STAR2:\n if isinstance(arg_type, Instance) and (arg_type.type.fullname() == 'builtins.dict'):\n # Dict **arg. TODO more general (Mapping)\n return arg_type.args[1]\n else:\n return AnyType(TypeOfAny.from_error)\n else:\n # No translation for other kinds.\n return arg_type\n\n\ndef infer_constraints(template: Type, actual: Type,\n direction: int) -> List[Constraint]:\n \"\"\"Infer type constraints.\n\n Match a template type, which may contain type variable references,\n recursively against a type which does not contain (the same) type\n variable references. The result is a list of type constrains of\n form 'T is a supertype/subtype of x', where T is a type variable\n present in the template and x is a type without reference to type\n variables present in the template.\n\n Assume T and S are type variables. Now the following results can be\n calculated (read as '(template, actual) --> result'):\n\n (T, X) --> T :> X\n (X[T], X[Y]) --> T <: Y and T :> Y\n ((T, T), (X, Y)) --> T :> X and T :> Y\n ((T, S), (X, Y)) --> T :> X and S :> Y\n (X[T], Any) --> T <: Any and T :> Any\n\n The constraints are represented as Constraint objects.\n \"\"\"\n\n # If the template is simply a type variable, emit a Constraint directly.\n # We need to handle this case before handling Unions for two reasons:\n # 1. \"T <: Union[U1, U2]\" is not equivalent to \"T <: U1 or T <: U2\",\n # because T can itself be a union (notably, Union[U1, U2] itself).\n # 2. \"T :> Union[U1, U2]\" is logically equivalent to \"T :> U1 and\n # T :> U2\", but they are not equivalent to the constraint solver,\n # which never introduces new Union types (it uses join() instead).\n if isinstance(template, TypeVarType):\n return [Constraint(template.id, direction, actual)]\n\n # Now handle the case of either template or actual being a Union.\n # For a Union to be a subtype of another type, every item of the Union\n # must be a subtype of that type, so concatenate the constraints.\n if direction == SUBTYPE_OF and isinstance(template, UnionType):\n res = []\n for t_item in template.items:\n res.extend(infer_constraints(t_item, actual, direction))\n return res\n if direction == SUPERTYPE_OF and isinstance(actual, UnionType):\n res = []\n for a_item in actual.items:\n res.extend(infer_constraints(template, a_item, direction))\n return res\n\n # Now the potential subtype is known not to be a Union or a type\n # variable that we are solving for. In that case, for a Union to\n # be a supertype of the potential subtype, some item of the Union\n # must be a supertype of it.\n if direction == SUBTYPE_OF and isinstance(actual, UnionType):\n # If some of items is not a complete type, disregard that.\n items = simplify_away_incomplete_types(actual.items)\n # We infer constraints eagerly -- try to find constraints for a type\n # variable if possible. This seems to help with some real-world\n # use cases.\n return any_constraints(\n [infer_constraints_if_possible(template, a_item, direction)\n for a_item in items],\n eager=True)\n if direction == SUPERTYPE_OF and isinstance(template, UnionType):\n # When the template is a union, we are okay with leaving some\n # type variables indeterminate. This helps with some special\n # cases, though this isn't very principled.\n return any_constraints(\n [infer_constraints_if_possible(t_item, actual, direction)\n for t_item in template.items],\n eager=False)\n\n # Remaining cases are handled by ConstraintBuilderVisitor.\n return template.accept(ConstraintBuilderVisitor(actual, direction))\n\n\ndef infer_constraints_if_possible(template: Type, actual: Type,\n direction: int) -> Optional[List[Constraint]]:\n \"\"\"Like infer_constraints, but return None if the input relation is\n known to be unsatisfiable, for example if template=List[T] and actual=int.\n (In this case infer_constraints would return [], just like it would for\n an automatically satisfied relation like template=List[T] and actual=object.)\n \"\"\"\n if (direction == SUBTYPE_OF and\n not mypy.subtypes.is_subtype(erase_typevars(template), actual)):\n return None\n if (direction == SUPERTYPE_OF and\n not mypy.subtypes.is_subtype(actual, erase_typevars(template))):\n return None\n return infer_constraints(template, actual, direction)\n\n\ndef any_constraints(options: List[Optional[List[Constraint]]], eager: bool) -> List[Constraint]:\n \"\"\"Deduce what we can from a collection of constraint lists.\n\n It's a given that at least one of the lists must be satisfied. A\n None element in the list of options represents an unsatisfiable\n constraint and is ignored. Ignore empty constraint lists if eager\n is true -- they are always trivially satisfiable.\n \"\"\"\n if eager:\n valid_options = [option for option in options if option]\n else:\n valid_options = [option for option in options if option is not None]\n if len(valid_options) == 1:\n return valid_options[0]\n elif (len(valid_options) > 1 and\n all(is_same_constraints(valid_options[0], c)\n for c in valid_options[1:])):\n # Multiple sets of constraints that are all the same. Just pick any one of them.\n # TODO: More generally, if a given (variable, direction) pair appears in\n # every option, combine the bounds with meet/join.\n return valid_options[0]\n\n # Otherwise, there are either no valid options or multiple, inconsistent valid\n # options. Give up and deduce nothing.\n return []\n\n\ndef is_same_constraints(x: List[Constraint], y: List[Constraint]) -> bool:\n for c1 in x:\n if not any(is_same_constraint(c1, c2) for c2 in y):\n return False\n for c1 in y:\n if not any(is_same_constraint(c1, c2) for c2 in x):\n return False\n return True\n\n\ndef is_same_constraint(c1: Constraint, c2: Constraint) -> bool:\n return (c1.type_var == c2.type_var\n and c1.op == c2.op\n and is_same_type(c1.target, c2.target))\n\n\ndef simplify_away_incomplete_types(types: List[Type]) -> List[Type]:\n complete = [typ for typ in types if is_complete_type(typ)]\n if complete:\n return complete\n else:\n return types\n\n\ndef is_complete_type(typ: Type) -> bool:\n \"\"\"Is a type complete?\n\n A complete doesn't have uninhabited type components or (when not in strict\n optional mode) None components.\n \"\"\"\n return typ.accept(CompleteTypeVisitor())\n\n\nclass CompleteTypeVisitor(TypeQuery[bool]):\n def __init__(self) -> None:\n super().__init__(all)\n\n def visit_uninhabited_type(self, t: UninhabitedType) -> bool:\n return False\n\n\nclass ConstraintBuilderVisitor(TypeVisitor[List[Constraint]]):\n \"\"\"Visitor class for inferring type constraints.\"\"\"\n\n # The type that is compared against a template\n # TODO: The value may be None. Is that actually correct?\n actual = None # type: Type\n\n def __init__(self, actual: Type, direction: int) -> None:\n # Direction must be SUBTYPE_OF or SUPERTYPE_OF.\n self.actual = actual\n self.direction = direction\n\n # Trivial leaf types\n\n def visit_unbound_type(self, template: UnboundType) -> List[Constraint]:\n return []\n\n def visit_any(self, template: AnyType) -> List[Constraint]:\n return []\n\n def visit_none_type(self, template: NoneTyp) -> List[Constraint]:\n return []\n\n def visit_uninhabited_type(self, template: UninhabitedType) -> List[Constraint]:\n return []\n\n def visit_erased_type(self, template: ErasedType) -> List[Constraint]:\n return []\n\n def visit_deleted_type(self, template: DeletedType) -> List[Constraint]:\n return []\n\n # Errors\n\n def visit_partial_type(self, template: PartialType) -> List[Constraint]:\n # We can't do anything useful with a partial type here.\n assert False, \"Internal error\"\n\n # Non-trivial leaf type\n\n def visit_type_var(self, template: TypeVarType) -> List[Constraint]:\n assert False, (\"Unexpected TypeVarType in ConstraintBuilderVisitor\"\n \" (should have been handled in infer_constraints)\")\n\n # Non-leaf types\n\n def visit_instance(self, template: Instance) -> List[Constraint]:\n original_actual = actual = self.actual\n res = [] # type: List[Constraint]\n if isinstance(actual, CallableType) and actual.fallback is not None:\n actual = actual.fallback\n if isinstance(actual, TypedDictType):\n actual = actual.as_anonymous().fallback\n if isinstance(actual, Instance):\n instance = actual\n # We always try nominal inference if possible,\n # it is much faster than the structural one.\n if (self.direction == SUBTYPE_OF and\n template.type.has_base(instance.type.fullname())):\n mapped = map_instance_to_supertype(template, instance.type)\n for i in range(len(instance.args)):\n # The constraints for generic type parameters are\n # invariant. Include constraints from both directions\n # to achieve the effect.\n res.extend(infer_constraints(\n mapped.args[i], instance.args[i], self.direction))\n res.extend(infer_constraints(\n mapped.args[i], instance.args[i], neg_op(self.direction)))\n return res\n elif (self.direction == SUPERTYPE_OF and\n instance.type.has_base(template.type.fullname())):\n mapped = map_instance_to_supertype(instance, template.type)\n for j in range(len(template.args)):\n # The constraints for generic type parameters are\n # invariant.\n res.extend(infer_constraints(\n template.args[j], mapped.args[j], self.direction))\n res.extend(infer_constraints(\n template.args[j], mapped.args[j], neg_op(self.direction)))\n return res\n if (template.type.is_protocol and self.direction == SUPERTYPE_OF and\n # We avoid infinite recursion for structural subtypes by checking\n # whether this type already appeared in the inference chain.\n # This is a conservative way break the inference cycles.\n # It never produces any \"false\" constraints but gives up soon\n # on purely structural inference cycles, see #3829.\n not any(is_same_type(template, t) for t in template.type.inferring) and\n mypy.subtypes.is_subtype(instance, erase_typevars(template))):\n template.type.inferring.append(template)\n self.infer_constraints_from_protocol_members(res, instance, template,\n original_actual, template)\n template.type.inferring.pop()\n return res\n elif (instance.type.is_protocol and self.direction == SUBTYPE_OF and\n # We avoid infinite recursion for structural subtypes also here.\n not any(is_same_type(instance, i) for i in instance.type.inferring) and\n mypy.subtypes.is_subtype(erase_typevars(template), instance)):\n instance.type.inferring.append(instance)\n self.infer_constraints_from_protocol_members(res, instance, template,\n template, instance)\n instance.type.inferring.pop()\n return res\n if isinstance(actual, AnyType):\n # IDEA: Include both ways, i.e. add negation as well?\n return self.infer_against_any(template.args, actual)\n if (isinstance(actual, TupleType) and\n (is_named_instance(template, 'typing.Iterable') or\n is_named_instance(template, 'typing.Container') or\n is_named_instance(template, 'typing.Sequence') or\n is_named_instance(template, 'typing.Reversible'))\n and self.direction == SUPERTYPE_OF):\n for item in actual.items:\n cb = infer_constraints(template.args[0], item, SUPERTYPE_OF)\n res.extend(cb)\n return res\n elif (isinstance(actual, TupleType) and template.type.is_protocol and\n self.direction == SUPERTYPE_OF):\n if mypy.subtypes.is_subtype(actual.fallback, erase_typevars(template)):\n res.extend(infer_constraints(template, actual.fallback, self.direction))\n return res\n return []\n else:\n return []\n\n def infer_constraints_from_protocol_members(self, res: List[Constraint],\n instance: Instance, template: Instance,\n subtype: Type, protocol: Instance) -> None:\n \"\"\"Infer constraints for situations where either 'template' or 'instance' is a protocol.\n\n The 'protocol' is the one of two that is an instance of protocol type, 'subtype'\n is the type used to bind self during inference. Currently, we just infer constrains for\n every protocol member type (both ways for settable members).\n \"\"\"\n for member in protocol.type.protocol_members:\n inst = mypy.subtypes.find_member(member, instance, subtype)\n temp = mypy.subtypes.find_member(member, template, subtype)\n assert inst is not None and temp is not None\n # The above is safe since at this point we know that 'instance' is a subtype\n # of (erased) 'template', therefore it defines all protocol members\n res.extend(infer_constraints(temp, inst, self.direction))\n if (mypy.subtypes.IS_SETTABLE in\n mypy.subtypes.get_member_flags(member, protocol.type)):\n # Settable members are invariant, add opposite constraints\n res.extend(infer_constraints(temp, inst, neg_op(self.direction)))\n\n def visit_callable_type(self, template: CallableType) -> List[Constraint]:\n if isinstance(self.actual, CallableType):\n cactual = self.actual\n # FIX verify argument counts\n # FIX what if one of the functions is generic\n res = [] # type: List[Constraint]\n\n # We can't infer constraints from arguments if the template is Callable[..., T] (with\n # literal '...').\n if not template.is_ellipsis_args:\n # The lengths should match, but don't crash (it will error elsewhere).\n for t, a in zip(template.arg_types, cactual.arg_types):\n # Negate direction due to function argument type contravariance.\n res.extend(infer_constraints(t, a, neg_op(self.direction)))\n res.extend(infer_constraints(template.ret_type, cactual.ret_type,\n self.direction))\n return res\n elif isinstance(self.actual, AnyType):\n # FIX what if generic\n res = self.infer_against_any(template.arg_types, self.actual)\n any_type = AnyType(TypeOfAny.from_another_any, source_any=self.actual)\n res.extend(infer_constraints(template.ret_type, any_type, self.direction))\n return res\n elif isinstance(self.actual, Overloaded):\n return self.infer_against_overloaded(self.actual, template)\n elif isinstance(self.actual, TypeType):\n return infer_constraints(template.ret_type, self.actual.item, self.direction)\n elif isinstance(self.actual, Instance):\n # Instances with __call__ method defined are considered structural\n # subtypes of Callable with a compatible signature.\n call = mypy.subtypes.find_member('__call__', self.actual, self.actual)\n if call:\n return infer_constraints(template, call, self.direction)\n else:\n return []\n else:\n return []\n\n def infer_against_overloaded(self, overloaded: Overloaded,\n template: CallableType) -> List[Constraint]:\n # Create constraints by matching an overloaded type against a template.\n # This is tricky to do in general. We cheat by only matching against\n # the first overload item, and by only matching the return type. This\n # seems to work somewhat well, but we should really use a more\n # reliable technique.\n item = find_matching_overload_item(overloaded, template)\n return infer_constraints(template.ret_type, item.ret_type,\n self.direction)\n\n def visit_tuple_type(self, template: TupleType) -> List[Constraint]:\n actual = self.actual\n if isinstance(actual, TupleType) and len(actual.items) == len(template.items):\n res = [] # type: List[Constraint]\n for i in range(len(template.items)):\n res.extend(infer_constraints(template.items[i],\n actual.items[i],\n self.direction))\n return res\n elif isinstance(actual, AnyType):\n return self.infer_against_any(template.items, actual)\n else:\n return []\n\n def visit_typeddict_type(self, template: TypedDictType) -> List[Constraint]:\n actual = self.actual\n if isinstance(actual, TypedDictType):\n res = [] # type: List[Constraint]\n # NOTE: Non-matching keys are ignored. Compatibility is checked\n # elsewhere so this shouldn't be unsafe.\n for (item_name, template_item_type, actual_item_type) in template.zip(actual):\n res.extend(infer_constraints(template_item_type,\n actual_item_type,\n self.direction))\n return res\n elif isinstance(actual, AnyType):\n return self.infer_against_any(template.items.values(), actual)\n else:\n return []\n\n def visit_union_type(self, template: UnionType) -> List[Constraint]:\n assert False, (\"Unexpected UnionType in ConstraintBuilderVisitor\"\n \" (should have been handled in infer_constraints)\")\n\n def infer_against_any(self, types: Iterable[Type], any_type: AnyType) -> List[Constraint]:\n res = [] # type: List[Constraint]\n for t in types:\n res.extend(infer_constraints(t, any_type, self.direction))\n return res\n\n def visit_overloaded(self, template: Overloaded) -> List[Constraint]:\n res = [] # type: List[Constraint]\n for t in template.items():\n res.extend(infer_constraints(t, self.actual, self.direction))\n return res\n\n def visit_type_type(self, template: TypeType) -> List[Constraint]:\n if isinstance(self.actual, CallableType):\n return infer_constraints(template.item, self.actual.ret_type, self.direction)\n elif isinstance(self.actual, Overloaded):\n return infer_constraints(template.item, self.actual.items()[0].ret_type,\n self.direction)\n elif isinstance(self.actual, TypeType):\n return infer_constraints(template.item, self.actual.item, self.direction)\n elif isinstance(self.actual, AnyType):\n return infer_constraints(template.item, self.actual, self.direction)\n else:\n return []\n\n\ndef neg_op(op: int) -> int:\n \"\"\"Map SubtypeOf to SupertypeOf and vice versa.\"\"\"\n\n if op == SUBTYPE_OF:\n return SUPERTYPE_OF\n elif op == SUPERTYPE_OF:\n return SUBTYPE_OF\n else:\n raise ValueError('Invalid operator {}'.format(op))\n\n\ndef find_matching_overload_item(overloaded: Overloaded, template: CallableType) -> CallableType:\n \"\"\"Disambiguate overload item against a template.\"\"\"\n items = overloaded.items()\n for item in items:\n # Return type may be indeterminate in the template, so ignore it when performing a\n # subtype check.\n if mypy.subtypes.is_callable_subtype(item, template, ignore_return=True):\n return item\n # Fall back to the first item if we can't find a match. This is totally arbitrary --\n # maybe we should just bail out at this point.\n return items[0]\n", "path": "mypy/constraints.py"}]} |
gh_patches_debug_1546 | rasdani/github-patches | git_diff | elastic__helm-charts-516 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
helm upgrade fails due to 'cannot patch "elasticsearch-master" with kind StatefulSet'
**Chart version:**
7.6.0
**Kubernetes version:**
v1.14.9-eks-c0eccc
**Kubernetes provider:** E.g. GKE (Google Kubernetes Engine)
EKS
**Helm Version:**
v3.0.2
**`helm get release` output**
<details>
<summary>Output of helm get release</summary>
```
NAME: elasticsearch
LAST DEPLOYED: Fri Feb 21 16:30:05 2020
NAMESPACE: elasticsearch
STATUS: failed
REVISION: 29
USER-SUPPLIED VALUES:
antiAffinity: hard
antiAffinityTopologyKey: kubernetes.io/hostname
clusterHealthCheckParams: wait_for_status=green&timeout=1s
clusterName: elasticsearch
esConfig:
elasticsearch.yml: |
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
xpack.security.http.ssl.enabled: true
xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
network.host: 0.0.0.0
esJavaOpts: -Xmx1g -Xms1g
esMajorVersion: ""
extraEnvs:
- name: ELASTIC_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: elastic-credentials
- name: ELASTIC_USERNAME
valueFrom:
secretKeyRef:
key: username
name: elastic-credentials
extraInitContainers: ""
extraVolumeMounts: ""
extraVolumes: ""
fsGroup: ""
fullnameOverride: ""
httpPort: 9200
image: docker.elastic.co/elasticsearch/elasticsearch
imagePullPolicy: IfNotPresent
imagePullSecrets: []
imageTag: 7.6.0
ingress:
annotations: {}
enabled: false
hosts:
- elasticsearch.local
path: /
tls: []
initResources: {}
keystore: []
labels: {}
lifecycle: {}
masterService: ""
masterTerminationFix: false
maxUnavailable: 1
minimumMasterNodes: 2
nameOverride: ""
networkHost: 0.0.0.0
nodeAffinity: {}
nodeGroup: master
nodeSelector: {}
persistence:
annotations: {}
enabled: true
podAnnotations: {}
podManagementPolicy: Parallel
podSecurityContext:
fsGroup: 1000
runAsUser: 1000
podSecurityPolicy:
create: false
name: ""
spec:
fsGroup:
rule: RunAsAny
privileged: true
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- secret
- configMap
- persistentVolumeClaim
priorityClassName: ""
protocol: https
rbac:
create: false
serviceAccountName: ""
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 3
timeoutSeconds: 5
replicas: 3
resources:
limits:
cpu: 1000m
memory: 2Gi
requests:
cpu: 200m
memory: 2Gi
roles:
data: "true"
ingest: "true"
master: "true"
schedulerName: ""
secretMounts:
- name: elastic-certificates
path: /usr/share/elasticsearch/config/certs
secretName: elastic-certificates
securityContext:
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: 1000
service:
annotations: {}
httpPortName: http
labels: {}
labelsHeadless: {}
nodePort: ""
transportPortName: transport
type: ClusterIP
sidecarResources: {}
sysctlInitContainer:
enabled: true
sysctlVmMaxMapCount: 262144
terminationGracePeriod: 120
tolerations: []
transportPort: 9300
updateStrategy: RollingUpdate
volumeClaimTemplate:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 50Gi
COMPUTED VALUES:
antiAffinity: hard
antiAffinityTopologyKey: kubernetes.io/hostname
clusterHealthCheckParams: wait_for_status=green&timeout=1s
clusterName: elasticsearch
esConfig:
elasticsearch.yml: |
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
xpack.security.http.ssl.enabled: true
xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
network.host: 0.0.0.0
esJavaOpts: -Xmx1g -Xms1g
esMajorVersion: ""
extraContainers: ""
extraEnvs:
- name: ELASTIC_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: elastic-credentials
- name: ELASTIC_USERNAME
valueFrom:
secretKeyRef:
key: username
name: elastic-credentials
extraInitContainers: ""
extraVolumeMounts: ""
extraVolumes: ""
fsGroup: ""
fullnameOverride: ""
httpPort: 9200
image: docker.elastic.co/elasticsearch/elasticsearch
imagePullPolicy: IfNotPresent
imagePullSecrets: []
imageTag: 7.6.0
ingress:
annotations: {}
enabled: false
hosts:
- elasticsearch.local
path: /
tls: []
initResources: {}
keystore: []
labels: {}
lifecycle: {}
masterService: ""
masterTerminationFix: false
maxUnavailable: 1
minimumMasterNodes: 2
nameOverride: ""
networkHost: 0.0.0.0
nodeAffinity: {}
nodeGroup: master
nodeSelector: {}
persistence:
annotations: {}
enabled: true
podAnnotations: {}
podManagementPolicy: Parallel
podSecurityContext:
fsGroup: 1000
runAsUser: 1000
podSecurityPolicy:
create: false
name: ""
spec:
fsGroup:
rule: RunAsAny
privileged: true
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- secret
- configMap
- persistentVolumeClaim
priorityClassName: ""
protocol: https
rbac:
create: false
serviceAccountName: ""
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 3
timeoutSeconds: 5
replicas: 3
resources:
limits:
cpu: 1000m
memory: 2Gi
requests:
cpu: 200m
memory: 2Gi
roles:
data: "true"
ingest: "true"
master: "true"
schedulerName: ""
secretMounts:
- name: elastic-certificates
path: /usr/share/elasticsearch/config/certs
secretName: elastic-certificates
securityContext:
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: 1000
service:
annotations: {}
httpPortName: http
labels: {}
labelsHeadless: {}
nodePort: ""
transportPortName: transport
type: ClusterIP
sidecarResources: {}
sysctlInitContainer:
enabled: true
sysctlVmMaxMapCount: 262144
terminationGracePeriod: 120
tolerations: []
transportPort: 9300
updateStrategy: RollingUpdate
volumeClaimTemplate:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 50Gi
HOOKS:
---
# Source: elasticsearch/templates/test/test-elasticsearch-health.yaml
apiVersion: v1
kind: Pod
metadata:
name: "elasticsearch-sbxrc-test"
annotations:
"helm.sh/hook": test-success
spec:
containers:
- name: "elasticsearch-ualfr-test"
image: "docker.elastic.co/elasticsearch/elasticsearch:7.6.0"
command:
- "sh"
- "-c"
- |
#!/usr/bin/env bash -e
curl -XGET --fail 'elasticsearch-master:9200/_cluster/health?wait_for_status=green&timeout=1s'
restartPolicy: Never
MANIFEST:
---
# Source: elasticsearch/templates/poddisruptionbudget.yaml
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: "elasticsearch-master-pdb"
spec:
maxUnavailable: 1
selector:
matchLabels:
app: "elasticsearch-master"
---
# Source: elasticsearch/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: elasticsearch-master-config
labels:
heritage: "Helm"
release: "elasticsearch"
chart: "elasticsearch"
app: "elasticsearch-master"
data:
elasticsearch.yml: |
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
xpack.security.http.ssl.enabled: true
xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
network.host: 0.0.0.0
---
# Source: elasticsearch/templates/service.yaml
kind: Service
apiVersion: v1
metadata:
name: elasticsearch-master-headless
labels:
heritage: "Helm"
release: "elasticsearch"
chart: "elasticsearch"
app: "elasticsearch-master"
annotations:
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
clusterIP: None # This is needed for statefulset hostnames like elasticsearch-0 to resolve
# Create endpoints also if the related pod isn't ready
publishNotReadyAddresses: true
selector:
app: "elasticsearch-master"
ports:
- name: http
port: 9200
- name: transport
port: 9300
---
# Source: elasticsearch/templates/service.yaml
kind: Service
apiVersion: v1
metadata:
name: elasticsearch-master
labels:
heritage: "Helm"
release: "elasticsearch"
chart: "elasticsearch"
app: "elasticsearch-master"
annotations:
{}
spec:
type: ClusterIP
selector:
heritage: "Helm"
release: "elasticsearch"
chart: "elasticsearch"
app: "elasticsearch-master"
ports:
- name: http
protocol: TCP
port: 9200
- name: transport
protocol: TCP
port: 9300
---
# Source: elasticsearch/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: elasticsearch-master
labels:
heritage: "Helm"
release: "elasticsearch"
chart: "elasticsearch"
app: "elasticsearch-master"
annotations:
esMajorVersion: "7"
spec:
serviceName: elasticsearch-master-headless
selector:
matchLabels:
app: "elasticsearch-master"
replicas: 3
podManagementPolicy: Parallel
updateStrategy:
type: RollingUpdate
volumeClaimTemplates:
- metadata:
name: elasticsearch-master
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 50Gi
template:
metadata:
name: "elasticsearch-master"
labels:
heritage: "Helm"
release: "elasticsearch"
chart: "elasticsearch"
app: "elasticsearch-master"
annotations:
configchecksum: a925349ed01ac0903a539d33164dabb0c174b9b602c943057c90033eee58253
spec:
securityContext:
fsGroup: 1000
runAsUser: 1000
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- "elasticsearch-master"
topologyKey: kubernetes.io/hostname
terminationGracePeriodSeconds: 120
volumes:
- name: elastic-certificates
secret:
secretName: elastic-certificates
- name: esconfig
configMap:
name: elasticsearch-master-config
initContainers:
- name: configure-sysctl
securityContext:
runAsUser: 0
privileged: true
image: "docker.elastic.co/elasticsearch/elasticsearch:7.6.0"
imagePullPolicy: "IfNotPresent"
command: ["sysctl", "-w", "vm.max_map_count=262144"]
resources:
{}
containers:
- name: "elasticsearch"
securityContext:
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: 1000
image: "docker.elastic.co/elasticsearch/elasticsearch:7.6.0"
imagePullPolicy: "IfNotPresent"
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 3
timeoutSeconds: 5
exec:
command:
- sh
- -c
- |
#!/usr/bin/env bash -e
# If the node is starting up wait for the cluster to be ready (request params: 'wait_for_status=green&timeout=1s' )
# Once it has started only check that the node itself is responding
START_FILE=/tmp/.es_start_file
http () {
local path="${1}"
if [ -n "${ELASTIC_USERNAME}" ] && [ -n "${ELASTIC_PASSWORD}" ]; then
BASIC_AUTH="-u ${ELASTIC_USERNAME}:${ELASTIC_PASSWORD}"
else
BASIC_AUTH=''
fi
curl -XGET -s -k --fail ${BASIC_AUTH} https://127.0.0.1:9200${path}
}
if [ -f "${START_FILE}" ]; then
echo 'Elasticsearch is already running, lets check the node is healthy and there are master nodes available'
http "/_cluster/health?timeout=0s"
else
echo 'Waiting for elasticsearch cluster to become ready (request params: "wait_for_status=green&timeout=1s" )'
if http "/_cluster/health?wait_for_status=green&timeout=1s" ; then
touch ${START_FILE}
exit 0
else
echo 'Cluster is not yet ready (request params: "wait_for_status=green&timeout=1s" )'
exit 1
fi
fi
ports:
- name: http
containerPort: 9200
- name: transport
containerPort: 9300
resources:
limits:
cpu: 1000m
memory: 2Gi
requests:
cpu: 200m
memory: 2Gi
env:
- name: node.name
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: cluster.initial_master_nodes
value: "elasticsearch-master-0,elasticsearch-master-1,elasticsearch-master-2,"
- name: discovery.seed_hosts
value: "elasticsearch-master-headless"
- name: cluster.name
value: "elasticsearch"
- name: network.host
value: "0.0.0.0"
- name: ES_JAVA_OPTS
value: "-Xmx1g -Xms1g"
- name: node.data
value: "true"
- name: node.ingest
value: "true"
- name: node.master
value: "true"
- name: ELASTIC_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: elastic-credentials
- name: ELASTIC_USERNAME
valueFrom:
secretKeyRef:
key: username
name: elastic-credentials
volumeMounts:
- name: "elasticsearch-master"
mountPath: /usr/share/elasticsearch/data
- name: elastic-certificates
mountPath: /usr/share/elasticsearch/config/certs
- name: esconfig
mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
subPath: elasticsearch.yml
NOTES:
1. Watch all cluster members come up.
$ kubectl get pods --namespace=elasticsearch -l app=elasticsearch-master -w
2. Test cluster health using Helm test.
$ helm test elasticsearch
```
NOTE: the images above show 7.6.0 as I have manually updated the statefulset as a workaround.
</details>
**Describe the bug:**
Performing 'helm upgrade' returns the following error;
Error: UPGRADE FAILED: cannot patch "elasticsearch-master" with kind StatefulSet: StatefulSet.apps "elasticsearch-master" is invalid: spec: Forbidden: updates to statefulset spec for fields other than 'replicas', 'template', and 'updateStrategy' are forbidden
**Steps to reproduce:**
1. helm install elasticsearch elastic/elasticsearch -n elasticsearch -f values.yaml
2. helm upgrade elasticsearch elastic/elasticsearch -n elasticsearch -f values.yaml
**Expected behavior:**
Successful upgrade to newer image
**Provide logs and/or server output (if relevant):**
```
cat values.yaml
---
clusterName: "elasticsearch"
nodeGroup: "master"
# The service that non master groups will try to connect to when joining the cluster
# This should be set to clusterName + "-" + nodeGroup for your master group
masterService: ""
# Elasticsearch roles that will be applied to this nodeGroup
# These will be set as environment variables. E.g. node.master=true
roles:
master: "true"
ingest: "true"
data: "true"
replicas: 3
minimumMasterNodes: 2
esMajorVersion: ""
# Allows you to add any config files in /usr/share/elasticsearch/config/
# such as elasticsearch.yml and log4j2.properties
esConfig:
elasticsearch.yml: |
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
xpack.security.http.ssl.enabled: true
xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
network.host: 0.0.0.0
# log4j2.properties: |
# key = value
# Extra environment variables to append to this nodeGroup
# This will be appended to the current 'env:' key. You can use any of the kubernetes env
# syntax here
extraEnvs:
- name: ELASTIC_PASSWORD
valueFrom:
secretKeyRef:
name: elastic-credentials
key: password
- name: ELASTIC_USERNAME
valueFrom:
secretKeyRef:
name: elastic-credentials
key: username
# A list of secrets and their paths to mount inside the pod
# This is useful for mounting certificates for security and for mounting
# the X-Pack license
secretMounts:
- name: elastic-certificates
secretName: elastic-certificates
path: /usr/share/elasticsearch/config/certs
image: "docker.elastic.co/elasticsearch/elasticsearch"
imageTag: "7.6.0"
imagePullPolicy: "IfNotPresent"
podAnnotations: {}
# iam.amazonaws.com/role: es-cluster
# additionals labels
labels: {}
esJavaOpts: "-Xmx1g -Xms1g"
resources:
requests:
cpu: "200m"
memory: "2Gi"
limits:
cpu: "1000m"
memory: "2Gi"
initResources: {}
# limits:
# cpu: "25m"
# # memory: "128Mi"
# requests:
# cpu: "25m"
# memory: "128Mi"
sidecarResources: {}
# limits:
# cpu: "25m"
# # memory: "128Mi"
# requests:
# cpu: "25m"
# memory: "128Mi"
networkHost: "0.0.0.0"
volumeClaimTemplate:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 50Gi
rbac:
create: false
serviceAccountName: ""
podSecurityPolicy:
create: false
name: ""
spec:
privileged: true
fsGroup:
rule: RunAsAny
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- secret
- configMap
- persistentVolumeClaim
persistence:
enabled: true
annotations: {}
extraVolumes: ""
# - name: extras
# emptyDir: {}
extraVolumeMounts: ""
# - name: extras
# mountPath: /usr/share/extras
# readOnly: true
extraInitContainers: ""
# - name: do-something
# image: busybox
# command: ['do', 'something']
# This is the PriorityClass settings as defined in
# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""
# By default this will make sure two pods don't end up on the same node
# Changing this to a region would allow you to spread pods across regions
antiAffinityTopologyKey: "kubernetes.io/hostname"
# Hard means that by default pods will only be scheduled if there are enough nodes for them
# and that they will never end up on the same node. Setting this to soft will do this "best effort"
antiAffinity: "hard"
# This is the node affinity settings as defined in
# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature
nodeAffinity: {}
# The default is to deploy all pods serially. By setting this to parallel all pods are started at
# the same time when bootstrapping the cluster
podManagementPolicy: "Parallel"
protocol: https
httpPort: 9200
transportPort: 9300
service:
labels: {}
labelsHeadless: {}
type: ClusterIP
nodePort: ""
annotations: {}
httpPortName: http
transportPortName: transport
updateStrategy: RollingUpdate
# This is the max unavailable setting for the pod disruption budget
# The default value of 1 will make sure that kubernetes won't allow more than 1
# of your pods to be unavailable during maintenance
maxUnavailable: 1
podSecurityContext:
fsGroup: 1000
runAsUser: 1000
# The following value is deprecated,
# please use the above podSecurityContext.fsGroup instead
fsGroup: ""
securityContext:
capabilities:
drop:
- ALL
# readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
# How long to wait for elasticsearch to stop gracefully
terminationGracePeriod: 120
sysctlVmMaxMapCount: 262144
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 3
timeoutSeconds: 5
# https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params wait_for_status
clusterHealthCheckParams: "wait_for_status=green&timeout=1s"
## Use an alternate scheduler.
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""
imagePullSecrets: []
nodeSelector: {}
tolerations: []
# Enabling this will publically expose your Elasticsearch instance.
# Only enable this if you have security enabled on your cluster
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts:
- elasticsearch.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
nameOverride: ""
fullnameOverride: ""
# https://github.com/elastic/helm-charts/issues/63
masterTerminationFix: false
lifecycle: {}
# preStop:
# exec:
# command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
# postStart:
# exec:
# command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
sysctlInitContainer:
enabled: true
keystore: []
```
**Any additional context:**
I manually updated the statefulset to get around the problem, hoping helm would then recognise the new image version was in place, however that has gained me nothing. The upgrade still fails
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `helpers/helpers.py`
Content:
```
1 import tempfile
2 import yaml
3 import os
4 import json
5 from subprocess import check_output
6
7
8 def helm_template(config):
9 with tempfile.NamedTemporaryFile() as temp:
10 with open(temp.name, "w") as values:
11 values.write(config)
12 helm_cmd = "helm template -f {0} --namespace default ./".format(temp.name)
13 result = yaml.load_all(check_output(helm_cmd.split()))
14
15 results = {}
16 for r in result:
17 if r:
18 kind = r["kind"].lower()
19 if kind not in results:
20 results[kind] = {}
21 results[kind][r["metadata"]["name"]] = r
22
23 if os.environ.get("DEBUG"):
24 print(json.dumps(results, indent=4, sort_keys=True))
25 return results
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/helpers/helpers.py b/helpers/helpers.py
--- a/helpers/helpers.py
+++ b/helpers/helpers.py
@@ -9,7 +9,7 @@
with tempfile.NamedTemporaryFile() as temp:
with open(temp.name, "w") as values:
values.write(config)
- helm_cmd = "helm template -f {0} --namespace default ./".format(temp.name)
+ helm_cmd = "helm template release-name -f {0} ./".format(temp.name)
result = yaml.load_all(check_output(helm_cmd.split()))
results = {}
| {"golden_diff": "diff --git a/helpers/helpers.py b/helpers/helpers.py\n--- a/helpers/helpers.py\n+++ b/helpers/helpers.py\n@@ -9,7 +9,7 @@\n with tempfile.NamedTemporaryFile() as temp:\n with open(temp.name, \"w\") as values:\n values.write(config)\n- helm_cmd = \"helm template -f {0} --namespace default ./\".format(temp.name)\n+ helm_cmd = \"helm template release-name -f {0} ./\".format(temp.name)\n result = yaml.load_all(check_output(helm_cmd.split()))\n \n results = {}\n", "issue": "helm upgrade fails due to 'cannot patch \"elasticsearch-master\" with kind StatefulSet'\n**Chart version:**\r\n7.6.0\r\n\r\n**Kubernetes version:**\r\nv1.14.9-eks-c0eccc\r\n\r\n**Kubernetes provider:** E.g. GKE (Google Kubernetes Engine)\r\nEKS\r\n\r\n**Helm Version:**\r\nv3.0.2\r\n\r\n**`helm get release` output**\r\n<details>\r\n<summary>Output of helm get release</summary>\r\n\r\n```\r\nNAME: elasticsearch\r\nLAST DEPLOYED: Fri Feb 21 16:30:05 2020\r\nNAMESPACE: elasticsearch\r\nSTATUS: failed\r\nREVISION: 29\r\nUSER-SUPPLIED VALUES:\r\nantiAffinity: hard\r\nantiAffinityTopologyKey: kubernetes.io/hostname\r\nclusterHealthCheckParams: wait_for_status=green&timeout=1s\r\nclusterName: elasticsearch\r\nesConfig:\r\n elasticsearch.yml: |\r\n xpack.security.enabled: true\r\n xpack.security.transport.ssl.enabled: true\r\n xpack.security.transport.ssl.verification_mode: certificate\r\n xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12\r\n xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12\r\n xpack.security.http.ssl.enabled: true\r\n xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12\r\n xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12\r\n network.host: 0.0.0.0\r\nesJavaOpts: -Xmx1g -Xms1g\r\nesMajorVersion: \"\"\r\nextraEnvs:\r\n- name: ELASTIC_PASSWORD\r\n valueFrom:\r\n secretKeyRef:\r\n key: password\r\n name: elastic-credentials\r\n- name: ELASTIC_USERNAME\r\n valueFrom:\r\n secretKeyRef:\r\n key: username\r\n name: elastic-credentials\r\nextraInitContainers: \"\"\r\nextraVolumeMounts: \"\"\r\nextraVolumes: \"\"\r\nfsGroup: \"\"\r\nfullnameOverride: \"\"\r\nhttpPort: 9200\r\nimage: docker.elastic.co/elasticsearch/elasticsearch\r\nimagePullPolicy: IfNotPresent\r\nimagePullSecrets: []\r\nimageTag: 7.6.0\r\ningress:\r\n annotations: {}\r\n enabled: false\r\n hosts:\r\n - elasticsearch.local\r\n path: /\r\n tls: []\r\ninitResources: {}\r\nkeystore: []\r\nlabels: {}\r\nlifecycle: {}\r\nmasterService: \"\"\r\nmasterTerminationFix: false\r\nmaxUnavailable: 1\r\nminimumMasterNodes: 2\r\nnameOverride: \"\"\r\nnetworkHost: 0.0.0.0\r\nnodeAffinity: {}\r\nnodeGroup: master\r\nnodeSelector: {}\r\npersistence:\r\n annotations: {}\r\n enabled: true\r\npodAnnotations: {}\r\npodManagementPolicy: Parallel\r\npodSecurityContext:\r\n fsGroup: 1000\r\n runAsUser: 1000\r\npodSecurityPolicy:\r\n create: false\r\n name: \"\"\r\n spec:\r\n fsGroup:\r\n rule: RunAsAny\r\n privileged: true\r\n runAsUser:\r\n rule: RunAsAny\r\n seLinux:\r\n rule: RunAsAny\r\n supplementalGroups:\r\n rule: RunAsAny\r\n volumes:\r\n - secret\r\n - configMap\r\n - persistentVolumeClaim\r\npriorityClassName: \"\"\r\nprotocol: https\r\nrbac:\r\n create: false\r\n serviceAccountName: \"\"\r\nreadinessProbe:\r\n failureThreshold: 3\r\n initialDelaySeconds: 10\r\n periodSeconds: 10\r\n successThreshold: 3\r\n timeoutSeconds: 5\r\nreplicas: 3\r\nresources:\r\n limits:\r\n cpu: 1000m\r\n memory: 2Gi\r\n requests:\r\n cpu: 200m\r\n memory: 2Gi\r\nroles:\r\n data: \"true\"\r\n ingest: \"true\"\r\n master: \"true\"\r\nschedulerName: \"\"\r\nsecretMounts:\r\n- name: elastic-certificates\r\n path: /usr/share/elasticsearch/config/certs\r\n secretName: elastic-certificates\r\nsecurityContext:\r\n capabilities:\r\n drop:\r\n - ALL\r\n runAsNonRoot: true\r\n runAsUser: 1000\r\nservice:\r\n annotations: {}\r\n httpPortName: http\r\n labels: {}\r\n labelsHeadless: {}\r\n nodePort: \"\"\r\n transportPortName: transport\r\n type: ClusterIP\r\nsidecarResources: {}\r\nsysctlInitContainer:\r\n enabled: true\r\nsysctlVmMaxMapCount: 262144\r\nterminationGracePeriod: 120\r\ntolerations: []\r\ntransportPort: 9300\r\nupdateStrategy: RollingUpdate\r\nvolumeClaimTemplate:\r\n accessModes:\r\n - ReadWriteOnce\r\n resources:\r\n requests:\r\n storage: 50Gi\r\n\r\nCOMPUTED VALUES:\r\nantiAffinity: hard\r\nantiAffinityTopologyKey: kubernetes.io/hostname\r\nclusterHealthCheckParams: wait_for_status=green&timeout=1s\r\nclusterName: elasticsearch\r\nesConfig:\r\n elasticsearch.yml: |\r\n xpack.security.enabled: true\r\n xpack.security.transport.ssl.enabled: true\r\n xpack.security.transport.ssl.verification_mode: certificate\r\n xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12\r\n xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12\r\n xpack.security.http.ssl.enabled: true\r\n xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12\r\n xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12\r\n network.host: 0.0.0.0\r\nesJavaOpts: -Xmx1g -Xms1g\r\nesMajorVersion: \"\"\r\nextraContainers: \"\"\r\nextraEnvs:\r\n- name: ELASTIC_PASSWORD\r\n valueFrom:\r\n secretKeyRef:\r\n key: password\r\n name: elastic-credentials\r\n- name: ELASTIC_USERNAME\r\n valueFrom:\r\n secretKeyRef:\r\n key: username\r\n name: elastic-credentials\r\nextraInitContainers: \"\"\r\nextraVolumeMounts: \"\"\r\nextraVolumes: \"\"\r\nfsGroup: \"\"\r\nfullnameOverride: \"\"\r\nhttpPort: 9200\r\nimage: docker.elastic.co/elasticsearch/elasticsearch\r\nimagePullPolicy: IfNotPresent\r\nimagePullSecrets: []\r\nimageTag: 7.6.0\r\ningress:\r\n annotations: {}\r\n enabled: false\r\n hosts:\r\n - elasticsearch.local\r\n path: /\r\n tls: []\r\ninitResources: {}\r\nkeystore: []\r\nlabels: {}\r\nlifecycle: {}\r\nmasterService: \"\"\r\nmasterTerminationFix: false\r\nmaxUnavailable: 1\r\nminimumMasterNodes: 2\r\nnameOverride: \"\"\r\nnetworkHost: 0.0.0.0\r\nnodeAffinity: {}\r\nnodeGroup: master\r\nnodeSelector: {}\r\npersistence:\r\n annotations: {}\r\n enabled: true\r\npodAnnotations: {}\r\npodManagementPolicy: Parallel\r\npodSecurityContext:\r\n fsGroup: 1000\r\n runAsUser: 1000\r\npodSecurityPolicy:\r\n create: false\r\n name: \"\"\r\n spec:\r\n fsGroup:\r\n rule: RunAsAny\r\n privileged: true\r\n runAsUser:\r\n rule: RunAsAny\r\n seLinux:\r\n rule: RunAsAny\r\n supplementalGroups:\r\n rule: RunAsAny\r\n volumes:\r\n - secret\r\n - configMap\r\n - persistentVolumeClaim\r\npriorityClassName: \"\"\r\nprotocol: https\r\nrbac:\r\n create: false\r\n serviceAccountName: \"\"\r\nreadinessProbe:\r\n failureThreshold: 3\r\n initialDelaySeconds: 10\r\n periodSeconds: 10\r\n successThreshold: 3\r\n timeoutSeconds: 5\r\nreplicas: 3\r\nresources:\r\n limits:\r\n cpu: 1000m\r\n memory: 2Gi\r\n requests:\r\n cpu: 200m\r\n memory: 2Gi\r\nroles:\r\n data: \"true\"\r\n ingest: \"true\"\r\n master: \"true\"\r\nschedulerName: \"\"\r\nsecretMounts:\r\n- name: elastic-certificates\r\n path: /usr/share/elasticsearch/config/certs\r\n secretName: elastic-certificates\r\nsecurityContext:\r\n capabilities:\r\n drop:\r\n - ALL\r\n runAsNonRoot: true\r\n runAsUser: 1000\r\nservice:\r\n annotations: {}\r\n httpPortName: http\r\n labels: {}\r\n labelsHeadless: {}\r\n nodePort: \"\"\r\n transportPortName: transport\r\n type: ClusterIP\r\nsidecarResources: {}\r\nsysctlInitContainer:\r\n enabled: true\r\nsysctlVmMaxMapCount: 262144\r\nterminationGracePeriod: 120\r\ntolerations: []\r\ntransportPort: 9300\r\nupdateStrategy: RollingUpdate\r\nvolumeClaimTemplate:\r\n accessModes:\r\n - ReadWriteOnce\r\n resources:\r\n requests:\r\n storage: 50Gi\r\n\r\nHOOKS:\r\n---\r\n# Source: elasticsearch/templates/test/test-elasticsearch-health.yaml\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n name: \"elasticsearch-sbxrc-test\"\r\n annotations:\r\n \"helm.sh/hook\": test-success\r\nspec:\r\n containers:\r\n - name: \"elasticsearch-ualfr-test\"\r\n image: \"docker.elastic.co/elasticsearch/elasticsearch:7.6.0\"\r\n command:\r\n - \"sh\"\r\n - \"-c\"\r\n - |\r\n #!/usr/bin/env bash -e\r\n curl -XGET --fail 'elasticsearch-master:9200/_cluster/health?wait_for_status=green&timeout=1s'\r\n restartPolicy: Never\r\nMANIFEST:\r\n---\r\n# Source: elasticsearch/templates/poddisruptionbudget.yaml\r\napiVersion: policy/v1beta1\r\nkind: PodDisruptionBudget\r\nmetadata:\r\n name: \"elasticsearch-master-pdb\"\r\nspec:\r\n maxUnavailable: 1\r\n selector:\r\n matchLabels:\r\n app: \"elasticsearch-master\"\r\n---\r\n# Source: elasticsearch/templates/configmap.yaml\r\napiVersion: v1\r\nkind: ConfigMap\r\nmetadata:\r\n name: elasticsearch-master-config\r\n labels:\r\n heritage: \"Helm\"\r\n release: \"elasticsearch\"\r\n chart: \"elasticsearch\"\r\n app: \"elasticsearch-master\"\r\ndata:\r\n elasticsearch.yml: |\r\n xpack.security.enabled: true\r\n xpack.security.transport.ssl.enabled: true\r\n xpack.security.transport.ssl.verification_mode: certificate\r\n xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12\r\n xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12\r\n xpack.security.http.ssl.enabled: true\r\n xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12\r\n xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12\r\n network.host: 0.0.0.0\r\n---\r\n# Source: elasticsearch/templates/service.yaml\r\nkind: Service\r\napiVersion: v1\r\nmetadata:\r\n name: elasticsearch-master-headless\r\n labels:\r\n heritage: \"Helm\"\r\n release: \"elasticsearch\"\r\n chart: \"elasticsearch\"\r\n app: \"elasticsearch-master\"\r\n annotations:\r\n service.alpha.kubernetes.io/tolerate-unready-endpoints: \"true\"\r\nspec:\r\n clusterIP: None # This is needed for statefulset hostnames like elasticsearch-0 to resolve\r\n # Create endpoints also if the related pod isn't ready\r\n publishNotReadyAddresses: true\r\n selector:\r\n app: \"elasticsearch-master\"\r\n ports:\r\n - name: http\r\n port: 9200\r\n - name: transport\r\n port: 9300\r\n---\r\n# Source: elasticsearch/templates/service.yaml\r\nkind: Service\r\napiVersion: v1\r\nmetadata:\r\n name: elasticsearch-master\r\n labels:\r\n heritage: \"Helm\"\r\n release: \"elasticsearch\"\r\n chart: \"elasticsearch\"\r\n app: \"elasticsearch-master\"\r\n annotations:\r\n {}\r\nspec:\r\n type: ClusterIP\r\n selector:\r\n heritage: \"Helm\"\r\n release: \"elasticsearch\"\r\n chart: \"elasticsearch\"\r\n app: \"elasticsearch-master\"\r\n ports:\r\n - name: http\r\n protocol: TCP\r\n port: 9200\r\n - name: transport\r\n protocol: TCP\r\n port: 9300\r\n---\r\n# Source: elasticsearch/templates/statefulset.yaml\r\napiVersion: apps/v1\r\nkind: StatefulSet\r\nmetadata:\r\n name: elasticsearch-master\r\n labels:\r\n heritage: \"Helm\"\r\n release: \"elasticsearch\"\r\n chart: \"elasticsearch\"\r\n app: \"elasticsearch-master\"\r\n annotations:\r\n esMajorVersion: \"7\"\r\nspec:\r\n serviceName: elasticsearch-master-headless\r\n selector:\r\n matchLabels:\r\n app: \"elasticsearch-master\"\r\n replicas: 3\r\n podManagementPolicy: Parallel\r\n updateStrategy:\r\n type: RollingUpdate\r\n volumeClaimTemplates:\r\n - metadata:\r\n name: elasticsearch-master\r\n spec:\r\n accessModes:\r\n - ReadWriteOnce\r\n resources:\r\n requests:\r\n storage: 50Gi\r\n template:\r\n metadata:\r\n name: \"elasticsearch-master\"\r\n labels:\r\n heritage: \"Helm\"\r\n release: \"elasticsearch\"\r\n chart: \"elasticsearch\"\r\n app: \"elasticsearch-master\"\r\n annotations:\r\n \r\n configchecksum: a925349ed01ac0903a539d33164dabb0c174b9b602c943057c90033eee58253\r\n spec:\r\n securityContext:\r\n fsGroup: 1000\r\n runAsUser: 1000\r\n affinity:\r\n podAntiAffinity:\r\n requiredDuringSchedulingIgnoredDuringExecution:\r\n - labelSelector:\r\n matchExpressions:\r\n - key: app\r\n operator: In\r\n values:\r\n - \"elasticsearch-master\"\r\n topologyKey: kubernetes.io/hostname\r\n terminationGracePeriodSeconds: 120\r\n volumes:\r\n - name: elastic-certificates\r\n secret:\r\n secretName: elastic-certificates\r\n - name: esconfig\r\n configMap:\r\n name: elasticsearch-master-config\r\n initContainers:\r\n - name: configure-sysctl\r\n securityContext:\r\n runAsUser: 0\r\n privileged: true\r\n image: \"docker.elastic.co/elasticsearch/elasticsearch:7.6.0\"\r\n imagePullPolicy: \"IfNotPresent\"\r\n command: [\"sysctl\", \"-w\", \"vm.max_map_count=262144\"]\r\n resources:\r\n {}\r\n\r\n containers:\r\n - name: \"elasticsearch\"\r\n securityContext:\r\n capabilities:\r\n drop:\r\n - ALL\r\n runAsNonRoot: true\r\n runAsUser: 1000\r\n image: \"docker.elastic.co/elasticsearch/elasticsearch:7.6.0\"\r\n imagePullPolicy: \"IfNotPresent\"\r\n readinessProbe:\r\n failureThreshold: 3\r\n initialDelaySeconds: 10\r\n periodSeconds: 10\r\n successThreshold: 3\r\n timeoutSeconds: 5\r\n exec:\r\n command:\r\n - sh\r\n - -c\r\n - |\r\n #!/usr/bin/env bash -e\r\n # If the node is starting up wait for the cluster to be ready (request params: 'wait_for_status=green&timeout=1s' )\r\n # Once it has started only check that the node itself is responding\r\n START_FILE=/tmp/.es_start_file\r\n\r\n http () {\r\n local path=\"${1}\"\r\n if [ -n \"${ELASTIC_USERNAME}\" ] && [ -n \"${ELASTIC_PASSWORD}\" ]; then\r\n BASIC_AUTH=\"-u ${ELASTIC_USERNAME}:${ELASTIC_PASSWORD}\"\r\n else\r\n BASIC_AUTH=''\r\n fi\r\n curl -XGET -s -k --fail ${BASIC_AUTH} https://127.0.0.1:9200${path}\r\n }\r\n\r\n if [ -f \"${START_FILE}\" ]; then\r\n echo 'Elasticsearch is already running, lets check the node is healthy and there are master nodes available'\r\n http \"/_cluster/health?timeout=0s\"\r\n else\r\n echo 'Waiting for elasticsearch cluster to become ready (request params: \"wait_for_status=green&timeout=1s\" )'\r\n if http \"/_cluster/health?wait_for_status=green&timeout=1s\" ; then\r\n touch ${START_FILE}\r\n exit 0\r\n else\r\n echo 'Cluster is not yet ready (request params: \"wait_for_status=green&timeout=1s\" )'\r\n exit 1\r\n fi\r\n fi\r\n ports:\r\n - name: http\r\n containerPort: 9200\r\n - name: transport\r\n containerPort: 9300\r\n resources:\r\n limits:\r\n cpu: 1000m\r\n memory: 2Gi\r\n requests:\r\n cpu: 200m\r\n memory: 2Gi\r\n env:\r\n - name: node.name\r\n valueFrom:\r\n fieldRef:\r\n fieldPath: metadata.name\r\n - name: cluster.initial_master_nodes\r\n value: \"elasticsearch-master-0,elasticsearch-master-1,elasticsearch-master-2,\"\r\n - name: discovery.seed_hosts\r\n value: \"elasticsearch-master-headless\"\r\n - name: cluster.name\r\n value: \"elasticsearch\"\r\n - name: network.host\r\n value: \"0.0.0.0\"\r\n - name: ES_JAVA_OPTS\r\n value: \"-Xmx1g -Xms1g\"\r\n - name: node.data\r\n value: \"true\"\r\n - name: node.ingest\r\n value: \"true\"\r\n - name: node.master\r\n value: \"true\"\r\n - name: ELASTIC_PASSWORD\r\n valueFrom:\r\n secretKeyRef:\r\n key: password\r\n name: elastic-credentials\r\n - name: ELASTIC_USERNAME\r\n valueFrom:\r\n secretKeyRef:\r\n key: username\r\n name: elastic-credentials\r\n volumeMounts:\r\n - name: \"elasticsearch-master\"\r\n mountPath: /usr/share/elasticsearch/data\r\n\r\n - name: elastic-certificates\r\n mountPath: /usr/share/elasticsearch/config/certs\r\n - name: esconfig\r\n mountPath: /usr/share/elasticsearch/config/elasticsearch.yml\r\n subPath: elasticsearch.yml\r\n\r\nNOTES:\r\n1. Watch all cluster members come up.\r\n $ kubectl get pods --namespace=elasticsearch -l app=elasticsearch-master -w\r\n2. Test cluster health using Helm test.\r\n $ helm test elasticsearch\r\n\r\n```\r\nNOTE: the images above show 7.6.0 as I have manually updated the statefulset as a workaround. \r\n</details>\r\n\r\n**Describe the bug:**\r\nPerforming 'helm upgrade' returns the following error;\r\n\r\nError: UPGRADE FAILED: cannot patch \"elasticsearch-master\" with kind StatefulSet: StatefulSet.apps \"elasticsearch-master\" is invalid: spec: Forbidden: updates to statefulset spec for fields other than 'replicas', 'template', and 'updateStrategy' are forbidden\r\n\r\n\r\n**Steps to reproduce:**\r\n\r\n1. helm install elasticsearch elastic/elasticsearch -n elasticsearch -f values.yaml\r\n2. helm upgrade elasticsearch elastic/elasticsearch -n elasticsearch -f values.yaml\r\n\r\n**Expected behavior:**\r\nSuccessful upgrade to newer image\r\n\r\n**Provide logs and/or server output (if relevant):**\r\n\r\n```\r\n cat values.yaml\r\n---\r\nclusterName: \"elasticsearch\"\r\nnodeGroup: \"master\"\r\n\r\n# The service that non master groups will try to connect to when joining the cluster\r\n# This should be set to clusterName + \"-\" + nodeGroup for your master group\r\nmasterService: \"\"\r\n\r\n# Elasticsearch roles that will be applied to this nodeGroup\r\n# These will be set as environment variables. E.g. node.master=true\r\nroles:\r\n master: \"true\"\r\n ingest: \"true\"\r\n data: \"true\"\r\n\r\nreplicas: 3\r\nminimumMasterNodes: 2\r\n\r\nesMajorVersion: \"\"\r\n\r\n# Allows you to add any config files in /usr/share/elasticsearch/config/\r\n# such as elasticsearch.yml and log4j2.properties\r\nesConfig: \r\n elasticsearch.yml: |\r\n xpack.security.enabled: true\r\n xpack.security.transport.ssl.enabled: true\r\n xpack.security.transport.ssl.verification_mode: certificate\r\n xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12\r\n xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12\r\n xpack.security.http.ssl.enabled: true\r\n xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12\r\n xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12\r\n network.host: 0.0.0.0\r\n# log4j2.properties: |\r\n# key = value\r\n\r\n# Extra environment variables to append to this nodeGroup\r\n# This will be appended to the current 'env:' key. You can use any of the kubernetes env\r\n# syntax here\r\nextraEnvs:\r\n - name: ELASTIC_PASSWORD\r\n valueFrom:\r\n secretKeyRef:\r\n name: elastic-credentials\r\n key: password\r\n - name: ELASTIC_USERNAME\r\n valueFrom:\r\n secretKeyRef:\r\n name: elastic-credentials\r\n key: username\r\n\r\n# A list of secrets and their paths to mount inside the pod\r\n# This is useful for mounting certificates for security and for mounting\r\n# the X-Pack license\r\nsecretMounts:\r\n - name: elastic-certificates\r\n secretName: elastic-certificates\r\n path: /usr/share/elasticsearch/config/certs \r\n\r\n\r\nimage: \"docker.elastic.co/elasticsearch/elasticsearch\"\r\nimageTag: \"7.6.0\"\r\nimagePullPolicy: \"IfNotPresent\"\r\n\r\npodAnnotations: {}\r\n # iam.amazonaws.com/role: es-cluster\r\n\r\n# additionals labels\r\nlabels: {}\r\n\r\nesJavaOpts: \"-Xmx1g -Xms1g\"\r\n\r\nresources:\r\n requests:\r\n cpu: \"200m\"\r\n memory: \"2Gi\"\r\n limits:\r\n cpu: \"1000m\"\r\n memory: \"2Gi\"\r\n\r\ninitResources: {}\r\n # limits:\r\n # cpu: \"25m\"\r\n # # memory: \"128Mi\"\r\n # requests:\r\n # cpu: \"25m\"\r\n # memory: \"128Mi\"\r\n\r\nsidecarResources: {}\r\n # limits:\r\n # cpu: \"25m\"\r\n # # memory: \"128Mi\"\r\n # requests:\r\n # cpu: \"25m\"\r\n # memory: \"128Mi\"\r\n\r\nnetworkHost: \"0.0.0.0\"\r\n\r\nvolumeClaimTemplate:\r\n accessModes: [ \"ReadWriteOnce\" ]\r\n resources:\r\n requests:\r\n storage: 50Gi\r\n\r\nrbac:\r\n create: false\r\n serviceAccountName: \"\"\r\n\r\npodSecurityPolicy:\r\n create: false\r\n name: \"\"\r\n spec:\r\n privileged: true\r\n fsGroup:\r\n rule: RunAsAny\r\n runAsUser:\r\n rule: RunAsAny\r\n seLinux:\r\n rule: RunAsAny\r\n supplementalGroups:\r\n rule: RunAsAny\r\n volumes:\r\n - secret\r\n - configMap\r\n - persistentVolumeClaim\r\n\r\npersistence:\r\n enabled: true\r\n annotations: {}\r\n\r\nextraVolumes: \"\"\r\n # - name: extras\r\n # emptyDir: {}\r\n\r\nextraVolumeMounts: \"\"\r\n # - name: extras\r\n # mountPath: /usr/share/extras\r\n # readOnly: true\r\n\r\nextraInitContainers: \"\"\r\n # - name: do-something\r\n # image: busybox\r\n # command: ['do', 'something']\r\n\r\n# This is the PriorityClass settings as defined in\r\n# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass\r\npriorityClassName: \"\"\r\n\r\n# By default this will make sure two pods don't end up on the same node\r\n# Changing this to a region would allow you to spread pods across regions\r\nantiAffinityTopologyKey: \"kubernetes.io/hostname\"\r\n\r\n# Hard means that by default pods will only be scheduled if there are enough nodes for them\r\n# and that they will never end up on the same node. Setting this to soft will do this \"best effort\"\r\nantiAffinity: \"hard\"\r\n\r\n# This is the node affinity settings as defined in\r\n# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature\r\nnodeAffinity: {}\r\n\r\n# The default is to deploy all pods serially. By setting this to parallel all pods are started at\r\n# the same time when bootstrapping the cluster\r\npodManagementPolicy: \"Parallel\"\r\n\r\nprotocol: https\r\nhttpPort: 9200\r\ntransportPort: 9300\r\n\r\nservice:\r\n labels: {}\r\n labelsHeadless: {}\r\n type: ClusterIP\r\n nodePort: \"\"\r\n annotations: {}\r\n httpPortName: http\r\n transportPortName: transport\r\n\r\nupdateStrategy: RollingUpdate\r\n\r\n# This is the max unavailable setting for the pod disruption budget\r\n# The default value of 1 will make sure that kubernetes won't allow more than 1\r\n# of your pods to be unavailable during maintenance\r\nmaxUnavailable: 1\r\n\r\npodSecurityContext:\r\n fsGroup: 1000\r\n runAsUser: 1000\r\n\r\n# The following value is deprecated,\r\n# please use the above podSecurityContext.fsGroup instead\r\nfsGroup: \"\"\r\n\r\nsecurityContext:\r\n capabilities:\r\n drop:\r\n - ALL\r\n # readOnlyRootFilesystem: true\r\n runAsNonRoot: true\r\n runAsUser: 1000\r\n\r\n# How long to wait for elasticsearch to stop gracefully\r\nterminationGracePeriod: 120\r\n\r\nsysctlVmMaxMapCount: 262144\r\n\r\nreadinessProbe:\r\n failureThreshold: 3\r\n initialDelaySeconds: 10\r\n periodSeconds: 10\r\n successThreshold: 3\r\n timeoutSeconds: 5\r\n\r\n# https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params wait_for_status\r\nclusterHealthCheckParams: \"wait_for_status=green&timeout=1s\"\r\n\r\n## Use an alternate scheduler.\r\n## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/\r\n##\r\nschedulerName: \"\"\r\n\r\nimagePullSecrets: []\r\nnodeSelector: {}\r\ntolerations: []\r\n\r\n# Enabling this will publically expose your Elasticsearch instance.\r\n# Only enable this if you have security enabled on your cluster\r\ningress:\r\n enabled: false\r\n annotations: {}\r\n # kubernetes.io/ingress.class: nginx\r\n # kubernetes.io/tls-acme: \"true\"\r\n path: /\r\n hosts:\r\n - elasticsearch.local\r\n tls: []\r\n # - secretName: chart-example-tls\r\n # hosts:\r\n # - chart-example.local\r\n\r\nnameOverride: \"\"\r\nfullnameOverride: \"\"\r\n\r\n# https://github.com/elastic/helm-charts/issues/63\r\nmasterTerminationFix: false\r\n\r\nlifecycle: {}\r\n # preStop:\r\n # exec:\r\n # command: [\"/bin/sh\", \"-c\", \"echo Hello from the postStart handler > /usr/share/message\"]\r\n # postStart:\r\n # exec:\r\n # command: [\"/bin/sh\", \"-c\", \"echo Hello from the postStart handler > /usr/share/message\"]\r\n\r\nsysctlInitContainer:\r\n enabled: true\r\n\r\nkeystore: []\r\n\r\n```\r\n\r\n**Any additional context:**\r\nI manually updated the statefulset to get around the problem, hoping helm would then recognise the new image version was in place, however that has gained me nothing. The upgrade still fails\n", "before_files": [{"content": "import tempfile\nimport yaml\nimport os\nimport json\nfrom subprocess import check_output\n\n\ndef helm_template(config):\n with tempfile.NamedTemporaryFile() as temp:\n with open(temp.name, \"w\") as values:\n values.write(config)\n helm_cmd = \"helm template -f {0} --namespace default ./\".format(temp.name)\n result = yaml.load_all(check_output(helm_cmd.split()))\n\n results = {}\n for r in result:\n if r:\n kind = r[\"kind\"].lower()\n if kind not in results:\n results[kind] = {}\n results[kind][r[\"metadata\"][\"name\"]] = r\n\n if os.environ.get(\"DEBUG\"):\n print(json.dumps(results, indent=4, sort_keys=True))\n return results\n", "path": "helpers/helpers.py"}], "after_files": [{"content": "import tempfile\nimport yaml\nimport os\nimport json\nfrom subprocess import check_output\n\n\ndef helm_template(config):\n with tempfile.NamedTemporaryFile() as temp:\n with open(temp.name, \"w\") as values:\n values.write(config)\n helm_cmd = \"helm template release-name -f {0} ./\".format(temp.name)\n result = yaml.load_all(check_output(helm_cmd.split()))\n\n results = {}\n for r in result:\n if r:\n kind = r[\"kind\"].lower()\n if kind not in results:\n results[kind] = {}\n results[kind][r[\"metadata\"][\"name\"]] = r\n\n if os.environ.get(\"DEBUG\"):\n print(json.dumps(results, indent=4, sort_keys=True))\n return results\n", "path": "helpers/helpers.py"}]} |
gh_patches_debug_1547 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1709 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
running `pre-commit autoupdate` fails because tip of HEAD is missing hook
Hello 👋
I'm setting up `pre-commit` on a project and came across an issue when adding hook `destroyed-symlinks`. The error message suggested running `pre-commit autoupdate`. I ran that and saw that it cannot update because the tip of HEAD is missing that hook. I'm not sure what that means so posting here.
```console
$ echo ' - id: destroyed-symlinks' >> .pre-commit-config.yaml
$ git add -p !$
git add -p .pre-commit-config.yaml
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index bfde4717..949f3ffc 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -21,3 +21,4 @@ repos:
- id: check-vcs-permalinks
- id: check-xml
- id: debug-statements
+ - id: destroyed-symlinks
(1/1) Stage this hunk [y,n,q,a,d,e,?]? y
$ git commit -m 'new hook destroyed-symlinks'
[ERROR] `destroyed-symlinks` is not present in repository https://github.com/pre-commit/pre-commit-hooks. Typo? Perhaps it is introduced in a newer version? Often `pre-commit autoupdate` fixes this.
$ git status
On branch pre-commit
Changes to be committed:
(use "git restore --staged <file>..." to unstage)
modified: .pre-commit-config.yaml
Untracked files:
(use "git add <file>..." to include in what will be committed)
tests/__init__.py
$ pre-commit autoupdate
Updating https://github.com/pre-commit/pre-commit-hooks ... [INFO] Initializing environment for https://github.com/pre-commit/pre-commit-hooks.
Cannot update because the tip of HEAD is missing these hooks:
destroyed-symlinks
$ git checkout .
Updated 0 paths from the index
$ pre-commit autoupdate
Updating https://github.com/pre-commit/pre-commit-hooks ... Cannot update because the tip of HEAD is missing these hooks:
destroyed-symlinks
$ pre-commit --version
pre-commit 2.9.0
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/commands/autoupdate.py`
Content:
```
1 import os.path
2 import re
3 from typing import Any
4 from typing import Dict
5 from typing import List
6 from typing import NamedTuple
7 from typing import Optional
8 from typing import Sequence
9 from typing import Tuple
10
11 import pre_commit.constants as C
12 from pre_commit import git
13 from pre_commit import output
14 from pre_commit.clientlib import InvalidManifestError
15 from pre_commit.clientlib import load_config
16 from pre_commit.clientlib import load_manifest
17 from pre_commit.clientlib import LOCAL
18 from pre_commit.clientlib import META
19 from pre_commit.commands.migrate_config import migrate_config
20 from pre_commit.store import Store
21 from pre_commit.util import CalledProcessError
22 from pre_commit.util import cmd_output
23 from pre_commit.util import cmd_output_b
24 from pre_commit.util import tmpdir
25 from pre_commit.util import yaml_dump
26 from pre_commit.util import yaml_load
27
28
29 class RevInfo(NamedTuple):
30 repo: str
31 rev: str
32 frozen: Optional[str]
33
34 @classmethod
35 def from_config(cls, config: Dict[str, Any]) -> 'RevInfo':
36 return cls(config['repo'], config['rev'], None)
37
38 def update(self, tags_only: bool, freeze: bool) -> 'RevInfo':
39 if tags_only:
40 tag_cmd = ('git', 'describe', 'FETCH_HEAD', '--tags', '--abbrev=0')
41 else:
42 tag_cmd = ('git', 'describe', 'FETCH_HEAD', '--tags', '--exact')
43
44 with tmpdir() as tmp:
45 git.init_repo(tmp, self.repo)
46 cmd_output_b('git', 'fetch', 'origin', 'HEAD', '--tags', cwd=tmp)
47
48 try:
49 rev = cmd_output(*tag_cmd, cwd=tmp)[1].strip()
50 except CalledProcessError:
51 cmd = ('git', 'rev-parse', 'FETCH_HEAD')
52 rev = cmd_output(*cmd, cwd=tmp)[1].strip()
53
54 frozen = None
55 if freeze:
56 exact = cmd_output('git', 'rev-parse', rev, cwd=tmp)[1].strip()
57 if exact != rev:
58 rev, frozen = exact, rev
59 return self._replace(rev=rev, frozen=frozen)
60
61
62 class RepositoryCannotBeUpdatedError(RuntimeError):
63 pass
64
65
66 def _check_hooks_still_exist_at_rev(
67 repo_config: Dict[str, Any],
68 info: RevInfo,
69 store: Store,
70 ) -> None:
71 try:
72 path = store.clone(repo_config['repo'], info.rev)
73 manifest = load_manifest(os.path.join(path, C.MANIFEST_FILE))
74 except InvalidManifestError as e:
75 raise RepositoryCannotBeUpdatedError(str(e))
76
77 # See if any of our hooks were deleted with the new commits
78 hooks = {hook['id'] for hook in repo_config['hooks']}
79 hooks_missing = hooks - {hook['id'] for hook in manifest}
80 if hooks_missing:
81 raise RepositoryCannotBeUpdatedError(
82 f'Cannot update because the tip of HEAD is missing these hooks:\n'
83 f'{", ".join(sorted(hooks_missing))}',
84 )
85
86
87 REV_LINE_RE = re.compile(r'^(\s+)rev:(\s*)([\'"]?)([^\s#]+)(.*)(\r?\n)$')
88
89
90 def _original_lines(
91 path: str,
92 rev_infos: List[Optional[RevInfo]],
93 retry: bool = False,
94 ) -> Tuple[List[str], List[int]]:
95 """detect `rev:` lines or reformat the file"""
96 with open(path, newline='') as f:
97 original = f.read()
98
99 lines = original.splitlines(True)
100 idxs = [i for i, line in enumerate(lines) if REV_LINE_RE.match(line)]
101 if len(idxs) == len(rev_infos):
102 return lines, idxs
103 elif retry:
104 raise AssertionError('could not find rev lines')
105 else:
106 with open(path, 'w') as f:
107 f.write(yaml_dump(yaml_load(original)))
108 return _original_lines(path, rev_infos, retry=True)
109
110
111 def _write_new_config(path: str, rev_infos: List[Optional[RevInfo]]) -> None:
112 lines, idxs = _original_lines(path, rev_infos)
113
114 for idx, rev_info in zip(idxs, rev_infos):
115 if rev_info is None:
116 continue
117 match = REV_LINE_RE.match(lines[idx])
118 assert match is not None
119 new_rev_s = yaml_dump({'rev': rev_info.rev}, default_style=match[3])
120 new_rev = new_rev_s.split(':', 1)[1].strip()
121 if rev_info.frozen is not None:
122 comment = f' # frozen: {rev_info.frozen}'
123 elif match[5].strip().startswith('# frozen:'):
124 comment = ''
125 else:
126 comment = match[5]
127 lines[idx] = f'{match[1]}rev:{match[2]}{new_rev}{comment}{match[6]}'
128
129 with open(path, 'w', newline='') as f:
130 f.write(''.join(lines))
131
132
133 def autoupdate(
134 config_file: str,
135 store: Store,
136 tags_only: bool,
137 freeze: bool,
138 repos: Sequence[str] = (),
139 ) -> int:
140 """Auto-update the pre-commit config to the latest versions of repos."""
141 migrate_config(config_file, quiet=True)
142 retv = 0
143 rev_infos: List[Optional[RevInfo]] = []
144 changed = False
145
146 config = load_config(config_file)
147 for repo_config in config['repos']:
148 if repo_config['repo'] in {LOCAL, META}:
149 continue
150
151 info = RevInfo.from_config(repo_config)
152 if repos and info.repo not in repos:
153 rev_infos.append(None)
154 continue
155
156 output.write(f'Updating {info.repo} ... ')
157 new_info = info.update(tags_only=tags_only, freeze=freeze)
158 try:
159 _check_hooks_still_exist_at_rev(repo_config, new_info, store)
160 except RepositoryCannotBeUpdatedError as error:
161 output.write_line(error.args[0])
162 rev_infos.append(None)
163 retv = 1
164 continue
165
166 if new_info.rev != info.rev:
167 changed = True
168 if new_info.frozen:
169 updated_to = f'{new_info.frozen} (frozen)'
170 else:
171 updated_to = new_info.rev
172 msg = f'updating {info.rev} -> {updated_to}.'
173 output.write_line(msg)
174 rev_infos.append(new_info)
175 else:
176 output.write_line('already up to date.')
177 rev_infos.append(None)
178
179 if changed:
180 _write_new_config(config_file, rev_infos)
181
182 return retv
183
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/commands/autoupdate.py b/pre_commit/commands/autoupdate.py
--- a/pre_commit/commands/autoupdate.py
+++ b/pre_commit/commands/autoupdate.py
@@ -79,8 +79,8 @@
hooks_missing = hooks - {hook['id'] for hook in manifest}
if hooks_missing:
raise RepositoryCannotBeUpdatedError(
- f'Cannot update because the tip of HEAD is missing these hooks:\n'
- f'{", ".join(sorted(hooks_missing))}',
+ f'Cannot update because the update target is missing these '
+ f'hooks:\n{", ".join(sorted(hooks_missing))}',
)
| {"golden_diff": "diff --git a/pre_commit/commands/autoupdate.py b/pre_commit/commands/autoupdate.py\n--- a/pre_commit/commands/autoupdate.py\n+++ b/pre_commit/commands/autoupdate.py\n@@ -79,8 +79,8 @@\n hooks_missing = hooks - {hook['id'] for hook in manifest}\n if hooks_missing:\n raise RepositoryCannotBeUpdatedError(\n- f'Cannot update because the tip of HEAD is missing these hooks:\\n'\n- f'{\", \".join(sorted(hooks_missing))}',\n+ f'Cannot update because the update target is missing these '\n+ f'hooks:\\n{\", \".join(sorted(hooks_missing))}',\n )\n", "issue": "running `pre-commit autoupdate` fails because tip of HEAD is missing hook\nHello \ud83d\udc4b \r\nI'm setting up `pre-commit` on a project and came across an issue when adding hook `destroyed-symlinks`. The error message suggested running `pre-commit autoupdate`. I ran that and saw that it cannot update because the tip of HEAD is missing that hook. I'm not sure what that means so posting here.\r\n\r\n```console\r\n$ echo ' - id: destroyed-symlinks' >> .pre-commit-config.yaml\r\n$ git add -p !$\r\ngit add -p .pre-commit-config.yaml\r\ndiff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml\r\nindex bfde4717..949f3ffc 100644\r\n--- a/.pre-commit-config.yaml\r\n+++ b/.pre-commit-config.yaml\r\n@@ -21,3 +21,4 @@ repos:\r\n - id: check-vcs-permalinks\r\n - id: check-xml\r\n - id: debug-statements\r\n+ - id: destroyed-symlinks\r\n(1/1) Stage this hunk [y,n,q,a,d,e,?]? y\r\n\r\n$ git commit -m 'new hook destroyed-symlinks'\r\n[ERROR] `destroyed-symlinks` is not present in repository https://github.com/pre-commit/pre-commit-hooks. Typo? Perhaps it is introduced in a newer version? Often `pre-commit autoupdate` fixes this.\r\n$ git status\r\nOn branch pre-commit\r\nChanges to be committed:\r\n (use \"git restore --staged <file>...\" to unstage)\r\n modified: .pre-commit-config.yaml\r\n\r\nUntracked files:\r\n (use \"git add <file>...\" to include in what will be committed)\r\n tests/__init__.py\r\n\r\n$ pre-commit autoupdate\r\nUpdating https://github.com/pre-commit/pre-commit-hooks ... [INFO] Initializing environment for https://github.com/pre-commit/pre-commit-hooks.\r\nCannot update because the tip of HEAD is missing these hooks:\r\ndestroyed-symlinks\r\n$ git checkout .\r\nUpdated 0 paths from the index\r\n$ pre-commit autoupdate\r\nUpdating https://github.com/pre-commit/pre-commit-hooks ... Cannot update because the tip of HEAD is missing these hooks:\r\ndestroyed-symlinks\r\n$ pre-commit --version\r\npre-commit 2.9.0\r\n```\n", "before_files": [{"content": "import os.path\nimport re\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import NamedTuple\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.clientlib import InvalidManifestError\nfrom pre_commit.clientlib import load_config\nfrom pre_commit.clientlib import load_manifest\nfrom pre_commit.clientlib import LOCAL\nfrom pre_commit.clientlib import META\nfrom pre_commit.commands.migrate_config import migrate_config\nfrom pre_commit.store import Store\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.util import tmpdir\nfrom pre_commit.util import yaml_dump\nfrom pre_commit.util import yaml_load\n\n\nclass RevInfo(NamedTuple):\n repo: str\n rev: str\n frozen: Optional[str]\n\n @classmethod\n def from_config(cls, config: Dict[str, Any]) -> 'RevInfo':\n return cls(config['repo'], config['rev'], None)\n\n def update(self, tags_only: bool, freeze: bool) -> 'RevInfo':\n if tags_only:\n tag_cmd = ('git', 'describe', 'FETCH_HEAD', '--tags', '--abbrev=0')\n else:\n tag_cmd = ('git', 'describe', 'FETCH_HEAD', '--tags', '--exact')\n\n with tmpdir() as tmp:\n git.init_repo(tmp, self.repo)\n cmd_output_b('git', 'fetch', 'origin', 'HEAD', '--tags', cwd=tmp)\n\n try:\n rev = cmd_output(*tag_cmd, cwd=tmp)[1].strip()\n except CalledProcessError:\n cmd = ('git', 'rev-parse', 'FETCH_HEAD')\n rev = cmd_output(*cmd, cwd=tmp)[1].strip()\n\n frozen = None\n if freeze:\n exact = cmd_output('git', 'rev-parse', rev, cwd=tmp)[1].strip()\n if exact != rev:\n rev, frozen = exact, rev\n return self._replace(rev=rev, frozen=frozen)\n\n\nclass RepositoryCannotBeUpdatedError(RuntimeError):\n pass\n\n\ndef _check_hooks_still_exist_at_rev(\n repo_config: Dict[str, Any],\n info: RevInfo,\n store: Store,\n) -> None:\n try:\n path = store.clone(repo_config['repo'], info.rev)\n manifest = load_manifest(os.path.join(path, C.MANIFEST_FILE))\n except InvalidManifestError as e:\n raise RepositoryCannotBeUpdatedError(str(e))\n\n # See if any of our hooks were deleted with the new commits\n hooks = {hook['id'] for hook in repo_config['hooks']}\n hooks_missing = hooks - {hook['id'] for hook in manifest}\n if hooks_missing:\n raise RepositoryCannotBeUpdatedError(\n f'Cannot update because the tip of HEAD is missing these hooks:\\n'\n f'{\", \".join(sorted(hooks_missing))}',\n )\n\n\nREV_LINE_RE = re.compile(r'^(\\s+)rev:(\\s*)([\\'\"]?)([^\\s#]+)(.*)(\\r?\\n)$')\n\n\ndef _original_lines(\n path: str,\n rev_infos: List[Optional[RevInfo]],\n retry: bool = False,\n) -> Tuple[List[str], List[int]]:\n \"\"\"detect `rev:` lines or reformat the file\"\"\"\n with open(path, newline='') as f:\n original = f.read()\n\n lines = original.splitlines(True)\n idxs = [i for i, line in enumerate(lines) if REV_LINE_RE.match(line)]\n if len(idxs) == len(rev_infos):\n return lines, idxs\n elif retry:\n raise AssertionError('could not find rev lines')\n else:\n with open(path, 'w') as f:\n f.write(yaml_dump(yaml_load(original)))\n return _original_lines(path, rev_infos, retry=True)\n\n\ndef _write_new_config(path: str, rev_infos: List[Optional[RevInfo]]) -> None:\n lines, idxs = _original_lines(path, rev_infos)\n\n for idx, rev_info in zip(idxs, rev_infos):\n if rev_info is None:\n continue\n match = REV_LINE_RE.match(lines[idx])\n assert match is not None\n new_rev_s = yaml_dump({'rev': rev_info.rev}, default_style=match[3])\n new_rev = new_rev_s.split(':', 1)[1].strip()\n if rev_info.frozen is not None:\n comment = f' # frozen: {rev_info.frozen}'\n elif match[5].strip().startswith('# frozen:'):\n comment = ''\n else:\n comment = match[5]\n lines[idx] = f'{match[1]}rev:{match[2]}{new_rev}{comment}{match[6]}'\n\n with open(path, 'w', newline='') as f:\n f.write(''.join(lines))\n\n\ndef autoupdate(\n config_file: str,\n store: Store,\n tags_only: bool,\n freeze: bool,\n repos: Sequence[str] = (),\n) -> int:\n \"\"\"Auto-update the pre-commit config to the latest versions of repos.\"\"\"\n migrate_config(config_file, quiet=True)\n retv = 0\n rev_infos: List[Optional[RevInfo]] = []\n changed = False\n\n config = load_config(config_file)\n for repo_config in config['repos']:\n if repo_config['repo'] in {LOCAL, META}:\n continue\n\n info = RevInfo.from_config(repo_config)\n if repos and info.repo not in repos:\n rev_infos.append(None)\n continue\n\n output.write(f'Updating {info.repo} ... ')\n new_info = info.update(tags_only=tags_only, freeze=freeze)\n try:\n _check_hooks_still_exist_at_rev(repo_config, new_info, store)\n except RepositoryCannotBeUpdatedError as error:\n output.write_line(error.args[0])\n rev_infos.append(None)\n retv = 1\n continue\n\n if new_info.rev != info.rev:\n changed = True\n if new_info.frozen:\n updated_to = f'{new_info.frozen} (frozen)'\n else:\n updated_to = new_info.rev\n msg = f'updating {info.rev} -> {updated_to}.'\n output.write_line(msg)\n rev_infos.append(new_info)\n else:\n output.write_line('already up to date.')\n rev_infos.append(None)\n\n if changed:\n _write_new_config(config_file, rev_infos)\n\n return retv\n", "path": "pre_commit/commands/autoupdate.py"}], "after_files": [{"content": "import os.path\nimport re\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import NamedTuple\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.clientlib import InvalidManifestError\nfrom pre_commit.clientlib import load_config\nfrom pre_commit.clientlib import load_manifest\nfrom pre_commit.clientlib import LOCAL\nfrom pre_commit.clientlib import META\nfrom pre_commit.commands.migrate_config import migrate_config\nfrom pre_commit.store import Store\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.util import tmpdir\nfrom pre_commit.util import yaml_dump\nfrom pre_commit.util import yaml_load\n\n\nclass RevInfo(NamedTuple):\n repo: str\n rev: str\n frozen: Optional[str]\n\n @classmethod\n def from_config(cls, config: Dict[str, Any]) -> 'RevInfo':\n return cls(config['repo'], config['rev'], None)\n\n def update(self, tags_only: bool, freeze: bool) -> 'RevInfo':\n if tags_only:\n tag_cmd = ('git', 'describe', 'FETCH_HEAD', '--tags', '--abbrev=0')\n else:\n tag_cmd = ('git', 'describe', 'FETCH_HEAD', '--tags', '--exact')\n\n with tmpdir() as tmp:\n git.init_repo(tmp, self.repo)\n cmd_output_b('git', 'fetch', 'origin', 'HEAD', '--tags', cwd=tmp)\n\n try:\n rev = cmd_output(*tag_cmd, cwd=tmp)[1].strip()\n except CalledProcessError:\n cmd = ('git', 'rev-parse', 'FETCH_HEAD')\n rev = cmd_output(*cmd, cwd=tmp)[1].strip()\n\n frozen = None\n if freeze:\n exact = cmd_output('git', 'rev-parse', rev, cwd=tmp)[1].strip()\n if exact != rev:\n rev, frozen = exact, rev\n return self._replace(rev=rev, frozen=frozen)\n\n\nclass RepositoryCannotBeUpdatedError(RuntimeError):\n pass\n\n\ndef _check_hooks_still_exist_at_rev(\n repo_config: Dict[str, Any],\n info: RevInfo,\n store: Store,\n) -> None:\n try:\n path = store.clone(repo_config['repo'], info.rev)\n manifest = load_manifest(os.path.join(path, C.MANIFEST_FILE))\n except InvalidManifestError as e:\n raise RepositoryCannotBeUpdatedError(str(e))\n\n # See if any of our hooks were deleted with the new commits\n hooks = {hook['id'] for hook in repo_config['hooks']}\n hooks_missing = hooks - {hook['id'] for hook in manifest}\n if hooks_missing:\n raise RepositoryCannotBeUpdatedError(\n f'Cannot update because the update target is missing these '\n f'hooks:\\n{\", \".join(sorted(hooks_missing))}',\n )\n\n\nREV_LINE_RE = re.compile(r'^(\\s+)rev:(\\s*)([\\'\"]?)([^\\s#]+)(.*)(\\r?\\n)$')\n\n\ndef _original_lines(\n path: str,\n rev_infos: List[Optional[RevInfo]],\n retry: bool = False,\n) -> Tuple[List[str], List[int]]:\n \"\"\"detect `rev:` lines or reformat the file\"\"\"\n with open(path, newline='') as f:\n original = f.read()\n\n lines = original.splitlines(True)\n idxs = [i for i, line in enumerate(lines) if REV_LINE_RE.match(line)]\n if len(idxs) == len(rev_infos):\n return lines, idxs\n elif retry:\n raise AssertionError('could not find rev lines')\n else:\n with open(path, 'w') as f:\n f.write(yaml_dump(yaml_load(original)))\n return _original_lines(path, rev_infos, retry=True)\n\n\ndef _write_new_config(path: str, rev_infos: List[Optional[RevInfo]]) -> None:\n lines, idxs = _original_lines(path, rev_infos)\n\n for idx, rev_info in zip(idxs, rev_infos):\n if rev_info is None:\n continue\n match = REV_LINE_RE.match(lines[idx])\n assert match is not None\n new_rev_s = yaml_dump({'rev': rev_info.rev}, default_style=match[3])\n new_rev = new_rev_s.split(':', 1)[1].strip()\n if rev_info.frozen is not None:\n comment = f' # frozen: {rev_info.frozen}'\n elif match[5].strip().startswith('# frozen:'):\n comment = ''\n else:\n comment = match[5]\n lines[idx] = f'{match[1]}rev:{match[2]}{new_rev}{comment}{match[6]}'\n\n with open(path, 'w', newline='') as f:\n f.write(''.join(lines))\n\n\ndef autoupdate(\n config_file: str,\n store: Store,\n tags_only: bool,\n freeze: bool,\n repos: Sequence[str] = (),\n) -> int:\n \"\"\"Auto-update the pre-commit config to the latest versions of repos.\"\"\"\n migrate_config(config_file, quiet=True)\n retv = 0\n rev_infos: List[Optional[RevInfo]] = []\n changed = False\n\n config = load_config(config_file)\n for repo_config in config['repos']:\n if repo_config['repo'] in {LOCAL, META}:\n continue\n\n info = RevInfo.from_config(repo_config)\n if repos and info.repo not in repos:\n rev_infos.append(None)\n continue\n\n output.write(f'Updating {info.repo} ... ')\n new_info = info.update(tags_only=tags_only, freeze=freeze)\n try:\n _check_hooks_still_exist_at_rev(repo_config, new_info, store)\n except RepositoryCannotBeUpdatedError as error:\n output.write_line(error.args[0])\n rev_infos.append(None)\n retv = 1\n continue\n\n if new_info.rev != info.rev:\n changed = True\n if new_info.frozen:\n updated_to = f'{new_info.frozen} (frozen)'\n else:\n updated_to = new_info.rev\n msg = f'updating {info.rev} -> {updated_to}.'\n output.write_line(msg)\n rev_infos.append(new_info)\n else:\n output.write_line('already up to date.')\n rev_infos.append(None)\n\n if changed:\n _write_new_config(config_file, rev_infos)\n\n return retv\n", "path": "pre_commit/commands/autoupdate.py"}]} |
gh_patches_debug_1548 | rasdani/github-patches | git_diff | secdev__scapy-1007 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BERcodec_STRING With RandString cause length incorrect
Scapy Version: 2.3.3.dev929
System: OSX 10.13.1
Python Version: 2.7.13
Using RandString with ASN1_STRING will cause length enc incorrect.
This code can reappear that problem.
```python
from scapy.all import *
for i in range(10):
data = str(ASN1_STRING(RandString()))
enc_len = ord(data[1])
str_len = len(data[2:])
if enc_len != str_len:
print("Got enc length incorrect, enc_length:%s, data_length:%s" % (enc_len, str_len))
print("Hex string is: %s" % data[2:].encode('hex'))
```
When BERcodec_STRING input should be string but got RandString instance.
len(s) and raw(s) in BERcodec_STRING will trigger RandString._fix() twice and got two different string.
A simple fix may look like this.
```python
class BERcodec_STRING(BERcodec_Object):
tag = ASN1_Class_UNIVERSAL.STRING
@classmethod
def enc(cls,s):
s=str(s)
return chb(hash(cls.tag))+BER_len_enc(len(s))+raw(s) # Be sure we are encoding bytes
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scapy/asn1/ber.py`
Content:
```
1 ## This file is part of Scapy
2 ## See http://www.secdev.org/projects/scapy for more informations
3 ## Copyright (C) Philippe Biondi <[email protected]>
4 ## Modified by Maxence Tury <[email protected]>
5 ## Acknowledgment: Ralph Broenink
6 ## This program is published under a GPLv2 license
7
8 """
9 Basic Encoding Rules (BER) for ASN.1
10 """
11
12 from __future__ import absolute_import
13 from scapy.error import warning
14 from scapy.compat import *
15 from scapy.utils import binrepr,inet_aton,inet_ntoa
16 from scapy.asn1.asn1 import ASN1_Decoding_Error,ASN1_Encoding_Error,ASN1_BadTag_Decoding_Error,ASN1_Codecs,ASN1_Class_UNIVERSAL,ASN1_Error,ASN1_DECODING_ERROR,ASN1_BADTAG
17 import scapy.modules.six as six
18
19 ##################
20 ## BER encoding ##
21 ##################
22
23
24
25 #####[ BER tools ]#####
26
27
28 class BER_Exception(Exception):
29 pass
30
31 class BER_Encoding_Error(ASN1_Encoding_Error):
32 def __init__(self, msg, encoded=None, remaining=None):
33 Exception.__init__(self, msg)
34 self.remaining = remaining
35 self.encoded = encoded
36 def __str__(self):
37 s = Exception.__str__(self)
38 if isinstance(self.encoded, BERcodec_Object):
39 s+="\n### Already encoded ###\n%s" % self.encoded.strshow()
40 else:
41 s+="\n### Already encoded ###\n%r" % self.encoded
42 s+="\n### Remaining ###\n%r" % self.remaining
43 return s
44
45 class BER_Decoding_Error(ASN1_Decoding_Error):
46 def __init__(self, msg, decoded=None, remaining=None):
47 Exception.__init__(self, msg)
48 self.remaining = remaining
49 self.decoded = decoded
50 def __str__(self):
51 s = Exception.__str__(self)
52 if isinstance(self.decoded, BERcodec_Object):
53 s+="\n### Already decoded ###\n%s" % self.decoded.strshow()
54 else:
55 s+="\n### Already decoded ###\n%r" % self.decoded
56 s+="\n### Remaining ###\n%r" % self.remaining
57 return s
58
59 class BER_BadTag_Decoding_Error(BER_Decoding_Error, ASN1_BadTag_Decoding_Error):
60 pass
61
62 def BER_len_enc(l, size=0):
63 if l <= 127 and size==0:
64 return chb(l)
65 s = b""
66 while l or size>0:
67 s = chb(l&0xff)+s
68 l >>= 8
69 size -= 1
70 if len(s) > 127:
71 raise BER_Exception("BER_len_enc: Length too long (%i) to be encoded [%r]" % (len(s),s))
72 return chb(len(s)|0x80)+s
73 def BER_len_dec(s):
74 l = orb(s[0])
75 if not l & 0x80:
76 return l,s[1:]
77 l &= 0x7f
78 if len(s) <= l:
79 raise BER_Decoding_Error("BER_len_dec: Got %i bytes while expecting %i" % (len(s)-1, l),remaining=s)
80 ll = 0
81 for c in s[1:l+1]:
82 ll <<= 8
83 ll |= orb(c)
84 return ll,s[l+1:]
85
86 def BER_num_enc(l, size=1):
87 x=[]
88 while l or size>0:
89 x.insert(0, l & 0x7f)
90 if len(x) > 1:
91 x[0] |= 0x80
92 l >>= 7
93 size -= 1
94 return b"".join(chb(k) for k in x)
95 def BER_num_dec(s, cls_id=0):
96 if len(s) == 0:
97 raise BER_Decoding_Error("BER_num_dec: got empty string", remaining=s)
98 x = cls_id
99 for i, c in enumerate(s):
100 c = orb(c)
101 x <<= 7
102 x |= c&0x7f
103 if not c&0x80:
104 break
105 if c&0x80:
106 raise BER_Decoding_Error("BER_num_dec: unfinished number description", remaining=s)
107 return x, s[i+1:]
108
109 def BER_id_dec(s):
110 # This returns the tag ALONG WITH THE PADDED CLASS+CONSTRUCTIVE INFO.
111 # Let's recall that bits 8-7 from the first byte of the tag encode
112 # the class information, while bit 6 means primitive or constructive.
113 #
114 # For instance, with low-tag-number b'\x81', class would be 0b10
115 # ('context-specific') and tag 0x01, but we return 0x81 as a whole.
116 # For b'\xff\x22', class would be 0b11 ('private'), constructed, then
117 # padding, then tag 0x22, but we return (0xff>>5)*128^1 + 0x22*128^0.
118 # Why the 5-bit-shifting? Because it provides an unequivocal encoding
119 # on base 128 (note that 0xff would equal 1*128^1 + 127*128^0...),
120 # as we know that bits 5 to 1 are fixed to 1 anyway.
121 #
122 # As long as there is no class differentiation, we have to keep this info
123 # encoded in scapy's tag in order to reuse it for packet building.
124 # Note that tags thus may have to be hard-coded with their extended
125 # information, e.g. a SEQUENCE from asn1.py has a direct tag 0x20|16.
126 x = orb(s[0])
127 if x & 0x1f != 0x1f:
128 # low-tag-number
129 return x,s[1:]
130 else:
131 # high-tag-number
132 return BER_num_dec(s[1:], cls_id=x>>5)
133 def BER_id_enc(n):
134 if n < 256:
135 # low-tag-number
136 return chb(n)
137 else:
138 # high-tag-number
139 s = BER_num_enc(n)
140 tag = orb(s[0]) # first byte, as an int
141 tag &= 0x07 # reset every bit from 8 to 4
142 tag <<= 5 # move back the info bits on top
143 tag |= 0x1f # pad with 1s every bit from 5 to 1
144 return chb(tag) + s[1:]
145
146 # The functions below provide implicit and explicit tagging support.
147 def BER_tagging_dec(s, hidden_tag=None, implicit_tag=None,
148 explicit_tag=None, safe=False):
149 # We output the 'real_tag' if it is different from the (im|ex)plicit_tag.
150 real_tag = None
151 if len(s) > 0:
152 err_msg = "BER_tagging_dec: observed tag does not match expected tag"
153 if implicit_tag is not None:
154 ber_id,s = BER_id_dec(s)
155 if ber_id != implicit_tag:
156 if not safe:
157 raise BER_Decoding_Error(err_msg, remaining=s)
158 else:
159 real_tag = ber_id
160 s = chb(hash(hidden_tag)) + s
161 elif explicit_tag is not None:
162 ber_id,s = BER_id_dec(s)
163 if ber_id != explicit_tag:
164 if not safe:
165 raise BER_Decoding_Error(err_msg, remaining=s)
166 else:
167 real_tag = ber_id
168 l,s = BER_len_dec(s)
169 return real_tag, s
170 def BER_tagging_enc(s, implicit_tag=None, explicit_tag=None):
171 if len(s) > 0:
172 if implicit_tag is not None:
173 s = BER_id_enc(implicit_tag) + s[1:]
174 elif explicit_tag is not None:
175 s = BER_id_enc(explicit_tag) + BER_len_enc(len(s)) + s
176 return s
177
178 #####[ BER classes ]#####
179
180 class BERcodec_metaclass(type):
181 def __new__(cls, name, bases, dct):
182 c = super(BERcodec_metaclass, cls).__new__(cls, name, bases, dct)
183 try:
184 c.tag.register(c.codec, c)
185 except:
186 warning("Error registering %r for %r" % (c.tag, c.codec))
187 return c
188
189
190 class BERcodec_Object(six.with_metaclass(BERcodec_metaclass)):
191 codec = ASN1_Codecs.BER
192 tag = ASN1_Class_UNIVERSAL.ANY
193
194 @classmethod
195 def asn1_object(cls, val):
196 return cls.tag.asn1_object(val)
197
198 @classmethod
199 def check_string(cls, s):
200 if not s:
201 raise BER_Decoding_Error("%s: Got empty object while expecting tag %r" %
202 (cls.__name__,cls.tag), remaining=s)
203 @classmethod
204 def check_type(cls, s):
205 cls.check_string(s)
206 tag, remainder = BER_id_dec(s)
207 if cls.tag != tag:
208 raise BER_BadTag_Decoding_Error("%s: Got tag [%i/%#x] while expecting %r" %
209 (cls.__name__, tag, tag, cls.tag), remaining=s)
210 return remainder
211 @classmethod
212 def check_type_get_len(cls, s):
213 s2 = cls.check_type(s)
214 if not s2:
215 raise BER_Decoding_Error("%s: No bytes while expecting a length" %
216 cls.__name__, remaining=s)
217 return BER_len_dec(s2)
218 @classmethod
219 def check_type_check_len(cls, s):
220 l,s3 = cls.check_type_get_len(s)
221 if len(s3) < l:
222 raise BER_Decoding_Error("%s: Got %i bytes while expecting %i" %
223 (cls.__name__, len(s3), l), remaining=s)
224 return l,s3[:l],s3[l:]
225
226 @classmethod
227 def do_dec(cls, s, context=None, safe=False):
228 if context is None:
229 context = cls.tag.context
230 cls.check_string(s)
231 p,_ = BER_id_dec(s)
232 if p not in context:
233 t = s
234 if len(t) > 18:
235 t = t[:15]+b"..."
236 raise BER_Decoding_Error("Unknown prefix [%02x] for [%r]" % (p,t), remaining=s)
237 codec = context[p].get_codec(ASN1_Codecs.BER)
238 return codec.dec(s,context,safe)
239
240 @classmethod
241 def dec(cls, s, context=None, safe=False):
242 if not safe:
243 return cls.do_dec(s, context, safe)
244 try:
245 return cls.do_dec(s, context, safe)
246 except BER_BadTag_Decoding_Error as e:
247 o,remain = BERcodec_Object.dec(e.remaining, context, safe)
248 return ASN1_BADTAG(o),remain
249 except BER_Decoding_Error as e:
250 return ASN1_DECODING_ERROR(s, exc=e),""
251 except ASN1_Error as e:
252 return ASN1_DECODING_ERROR(s, exc=e),""
253
254 @classmethod
255 def safedec(cls, s, context=None):
256 return cls.dec(s, context, safe=True)
257
258
259 @classmethod
260 def enc(cls, s):
261 if isinstance(s, six.string_types):
262 return BERcodec_STRING.enc(s)
263 else:
264 return BERcodec_INTEGER.enc(int(s))
265
266 ASN1_Codecs.BER.register_stem(BERcodec_Object)
267
268
269 ##########################
270 #### BERcodec objects ####
271 ##########################
272
273 class BERcodec_INTEGER(BERcodec_Object):
274 tag = ASN1_Class_UNIVERSAL.INTEGER
275 @classmethod
276 def enc(cls, i):
277 s = []
278 while True:
279 s.append(i&0xff)
280 if -127 <= i < 0:
281 break
282 if 128 <= i <= 255:
283 s.append(0)
284 i >>= 8
285 if not i:
286 break
287 s = [chb(hash(c)) for c in s]
288 s.append(BER_len_enc(len(s)))
289 s.append(chb(hash(cls.tag)))
290 s.reverse()
291 return b"".join(s)
292 @classmethod
293 def do_dec(cls, s, context=None, safe=False):
294 l,s,t = cls.check_type_check_len(s)
295 x = 0
296 if s:
297 if orb(s[0])&0x80: # negative int
298 x = -1
299 for c in s:
300 x <<= 8
301 x |= orb(c)
302 return cls.asn1_object(x),t
303
304 class BERcodec_BOOLEAN(BERcodec_INTEGER):
305 tag = ASN1_Class_UNIVERSAL.BOOLEAN
306
307 class BERcodec_BIT_STRING(BERcodec_Object):
308 tag = ASN1_Class_UNIVERSAL.BIT_STRING
309 @classmethod
310 def do_dec(cls, s, context=None, safe=False):
311 # /!\ the unused_bits information is lost after this decoding
312 l,s,t = cls.check_type_check_len(s)
313 if len(s) > 0:
314 unused_bits = orb(s[0])
315 if safe and unused_bits > 7:
316 raise BER_Decoding_Error("BERcodec_BIT_STRING: too many unused_bits advertised", remaining=s)
317 s = "".join(binrepr(orb(x)).zfill(8) for x in s[1:])
318 if unused_bits > 0:
319 s = s[:-unused_bits]
320 return cls.tag.asn1_object(s),t
321 else:
322 raise BER_Decoding_Error("BERcodec_BIT_STRING found no content (not even unused_bits byte)", remaining=s)
323 @classmethod
324 def enc(cls,s):
325 # /!\ this is DER encoding (bit strings are only zero-bit padded)
326 s = raw(s)
327 if len(s) % 8 == 0:
328 unused_bits = 0
329 else:
330 unused_bits = 8 - len(s)%8
331 s += b"0"*unused_bits
332 s = b"".join(chb(int(b"".join(chb(y) for y in x),2)) for x in zip(*[iter(s)]*8))
333 s = chb(unused_bits) + s
334 return chb(hash(cls.tag))+BER_len_enc(len(s))+s
335
336 class BERcodec_STRING(BERcodec_Object):
337 tag = ASN1_Class_UNIVERSAL.STRING
338 @classmethod
339 def enc(cls,s):
340 return chb(hash(cls.tag))+BER_len_enc(len(s))+raw(s) # Be sure we are encoding bytes
341 @classmethod
342 def do_dec(cls, s, context=None, safe=False):
343 l,s,t = cls.check_type_check_len(s)
344 return cls.tag.asn1_object(s),t
345
346 class BERcodec_NULL(BERcodec_INTEGER):
347 tag = ASN1_Class_UNIVERSAL.NULL
348 @classmethod
349 def enc(cls, i):
350 if i == 0:
351 return chb(hash(cls.tag))+b"\0"
352 else:
353 return super(cls,cls).enc(i)
354
355 class BERcodec_OID(BERcodec_Object):
356 tag = ASN1_Class_UNIVERSAL.OID
357 @classmethod
358 def enc(cls, oid):
359 oid = raw(oid)
360 lst = [int(x) for x in oid.strip(b".").split(b".")]
361 if len(lst) >= 2:
362 lst[1] += 40*lst[0]
363 del(lst[0])
364 s = b"".join(BER_num_enc(k) for k in lst)
365 return chb(hash(cls.tag))+BER_len_enc(len(s))+s
366 @classmethod
367 def do_dec(cls, s, context=None, safe=False):
368 l,s,t = cls.check_type_check_len(s)
369 lst = []
370 while s:
371 l,s = BER_num_dec(s)
372 lst.append(l)
373 if (len(lst) > 0):
374 lst.insert(0,lst[0]//40)
375 lst[1] %= 40
376 return cls.asn1_object(b".".join(str(k).encode('ascii') for k in lst)), t
377
378 class BERcodec_ENUMERATED(BERcodec_INTEGER):
379 tag = ASN1_Class_UNIVERSAL.ENUMERATED
380
381 class BERcodec_UTF8_STRING(BERcodec_STRING):
382 tag = ASN1_Class_UNIVERSAL.UTF8_STRING
383
384 class BERcodec_NUMERIC_STRING(BERcodec_STRING):
385 tag = ASN1_Class_UNIVERSAL.NUMERIC_STRING
386
387 class BERcodec_PRINTABLE_STRING(BERcodec_STRING):
388 tag = ASN1_Class_UNIVERSAL.PRINTABLE_STRING
389
390 class BERcodec_T61_STRING(BERcodec_STRING):
391 tag = ASN1_Class_UNIVERSAL.T61_STRING
392
393 class BERcodec_VIDEOTEX_STRING(BERcodec_STRING):
394 tag = ASN1_Class_UNIVERSAL.VIDEOTEX_STRING
395
396 class BERcodec_IA5_STRING(BERcodec_STRING):
397 tag = ASN1_Class_UNIVERSAL.IA5_STRING
398
399 class BERcodec_UTC_TIME(BERcodec_STRING):
400 tag = ASN1_Class_UNIVERSAL.UTC_TIME
401
402 class BERcodec_GENERALIZED_TIME(BERcodec_STRING):
403 tag = ASN1_Class_UNIVERSAL.GENERALIZED_TIME
404
405 class BERcodec_ISO646_STRING(BERcodec_STRING):
406 tag = ASN1_Class_UNIVERSAL.ISO646_STRING
407
408 class BERcodec_UNIVERSAL_STRING(BERcodec_STRING):
409 tag = ASN1_Class_UNIVERSAL.UNIVERSAL_STRING
410
411 class BERcodec_BMP_STRING(BERcodec_STRING):
412 tag = ASN1_Class_UNIVERSAL.BMP_STRING
413
414 class BERcodec_SEQUENCE(BERcodec_Object):
415 tag = ASN1_Class_UNIVERSAL.SEQUENCE
416 @classmethod
417 def enc(cls, l):
418 if not isinstance(l, bytes):
419 l = b"".join(x.enc(cls.codec) for x in l)
420 return chb(hash(cls.tag))+BER_len_enc(len(l))+l
421 @classmethod
422 def do_dec(cls, s, context=None, safe=False):
423 if context is None:
424 context = cls.tag.context
425 l,st = cls.check_type_get_len(s) # we may have len(s) < l
426 s,t = st[:l],st[l:]
427 obj = []
428 while s:
429 try:
430 o,s = BERcodec_Object.dec(s, context, safe)
431 except BER_Decoding_Error as err:
432 err.remaining += t
433 if err.decoded is not None:
434 obj.append(err.decoded)
435 err.decoded = obj
436 raise
437 obj.append(o)
438 if len(st) < l:
439 raise BER_Decoding_Error("Not enough bytes to decode sequence", decoded=obj)
440 return cls.asn1_object(obj),t
441
442 class BERcodec_SET(BERcodec_SEQUENCE):
443 tag = ASN1_Class_UNIVERSAL.SET
444
445 class BERcodec_IPADDRESS(BERcodec_STRING):
446 tag = ASN1_Class_UNIVERSAL.IPADDRESS
447 @classmethod
448 def enc(cls, ipaddr_ascii):
449 try:
450 s = inet_aton(ipaddr_ascii)
451 except Exception:
452 raise BER_Encoding_Error("IPv4 address could not be encoded")
453 return chb(hash(cls.tag))+BER_len_enc(len(s))+s
454 @classmethod
455 def do_dec(cls, s, context=None, safe=False):
456 l,s,t = cls.check_type_check_len(s)
457 try:
458 ipaddr_ascii = inet_ntoa(s)
459 except Exception:
460 raise BER_Decoding_Error("IP address could not be decoded", remaining=s)
461 return cls.asn1_object(ipaddr_ascii), t
462
463 class BERcodec_COUNTER32(BERcodec_INTEGER):
464 tag = ASN1_Class_UNIVERSAL.COUNTER32
465
466 class BERcodec_GAUGE32(BERcodec_INTEGER):
467 tag = ASN1_Class_UNIVERSAL.GAUGE32
468
469 class BERcodec_TIME_TICKS(BERcodec_INTEGER):
470 tag = ASN1_Class_UNIVERSAL.TIME_TICKS
471
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scapy/asn1/ber.py b/scapy/asn1/ber.py
--- a/scapy/asn1/ber.py
+++ b/scapy/asn1/ber.py
@@ -337,7 +337,8 @@
tag = ASN1_Class_UNIVERSAL.STRING
@classmethod
def enc(cls,s):
- return chb(hash(cls.tag))+BER_len_enc(len(s))+raw(s) # Be sure we are encoding bytes
+ s = raw(s)
+ return chb(hash(cls.tag))+BER_len_enc(len(s))+s # Be sure we are encoding bytes
@classmethod
def do_dec(cls, s, context=None, safe=False):
l,s,t = cls.check_type_check_len(s)
| {"golden_diff": "diff --git a/scapy/asn1/ber.py b/scapy/asn1/ber.py\n--- a/scapy/asn1/ber.py\n+++ b/scapy/asn1/ber.py\n@@ -337,7 +337,8 @@\n tag = ASN1_Class_UNIVERSAL.STRING\n @classmethod\n def enc(cls,s):\n- return chb(hash(cls.tag))+BER_len_enc(len(s))+raw(s) # Be sure we are encoding bytes\n+ s = raw(s)\n+ return chb(hash(cls.tag))+BER_len_enc(len(s))+s # Be sure we are encoding bytes\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n l,s,t = cls.check_type_check_len(s)\n", "issue": "BERcodec_STRING With RandString cause length incorrect\nScapy Version: 2.3.3.dev929\r\nSystem: OSX 10.13.1\r\nPython Version: 2.7.13\r\n\r\nUsing RandString with ASN1_STRING will cause length enc incorrect.\r\nThis code can reappear that problem.\r\n\r\n```python\r\nfrom scapy.all import *\r\n\r\nfor i in range(10):\r\n data = str(ASN1_STRING(RandString()))\r\n enc_len = ord(data[1])\r\n str_len = len(data[2:])\r\n if enc_len != str_len:\r\n print(\"Got enc length incorrect, enc_length:%s, data_length:%s\" % (enc_len, str_len))\r\n print(\"Hex string is: %s\" % data[2:].encode('hex'))\r\n```\r\n\r\nWhen BERcodec_STRING input should be string but got RandString instance. \r\nlen(s) and raw(s) in BERcodec_STRING will trigger RandString._fix() twice and got two different string.\r\n\r\nA simple fix may look like this.\r\n```python\r\nclass BERcodec_STRING(BERcodec_Object):\r\n tag = ASN1_Class_UNIVERSAL.STRING\r\n @classmethod\r\n def enc(cls,s):\r\n s=str(s)\r\n return chb(hash(cls.tag))+BER_len_enc(len(s))+raw(s) # Be sure we are encoding bytes\r\n```\n", "before_files": [{"content": "## This file is part of Scapy\n## See http://www.secdev.org/projects/scapy for more informations\n## Copyright (C) Philippe Biondi <[email protected]>\n## Modified by Maxence Tury <[email protected]>\n## Acknowledgment: Ralph Broenink\n## This program is published under a GPLv2 license\n\n\"\"\"\nBasic Encoding Rules (BER) for ASN.1\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom scapy.error import warning\nfrom scapy.compat import *\nfrom scapy.utils import binrepr,inet_aton,inet_ntoa\nfrom scapy.asn1.asn1 import ASN1_Decoding_Error,ASN1_Encoding_Error,ASN1_BadTag_Decoding_Error,ASN1_Codecs,ASN1_Class_UNIVERSAL,ASN1_Error,ASN1_DECODING_ERROR,ASN1_BADTAG\nimport scapy.modules.six as six\n\n##################\n## BER encoding ##\n##################\n\n\n\n#####[ BER tools ]#####\n\n\nclass BER_Exception(Exception):\n pass\n\nclass BER_Encoding_Error(ASN1_Encoding_Error):\n def __init__(self, msg, encoded=None, remaining=None):\n Exception.__init__(self, msg)\n self.remaining = remaining\n self.encoded = encoded\n def __str__(self):\n s = Exception.__str__(self)\n if isinstance(self.encoded, BERcodec_Object):\n s+=\"\\n### Already encoded ###\\n%s\" % self.encoded.strshow()\n else:\n s+=\"\\n### Already encoded ###\\n%r\" % self.encoded\n s+=\"\\n### Remaining ###\\n%r\" % self.remaining\n return s\n\nclass BER_Decoding_Error(ASN1_Decoding_Error):\n def __init__(self, msg, decoded=None, remaining=None):\n Exception.__init__(self, msg)\n self.remaining = remaining\n self.decoded = decoded\n def __str__(self):\n s = Exception.__str__(self)\n if isinstance(self.decoded, BERcodec_Object):\n s+=\"\\n### Already decoded ###\\n%s\" % self.decoded.strshow()\n else:\n s+=\"\\n### Already decoded ###\\n%r\" % self.decoded\n s+=\"\\n### Remaining ###\\n%r\" % self.remaining\n return s\n\nclass BER_BadTag_Decoding_Error(BER_Decoding_Error, ASN1_BadTag_Decoding_Error):\n pass\n\ndef BER_len_enc(l, size=0):\n if l <= 127 and size==0:\n return chb(l)\n s = b\"\"\n while l or size>0:\n s = chb(l&0xff)+s\n l >>= 8\n size -= 1\n if len(s) > 127:\n raise BER_Exception(\"BER_len_enc: Length too long (%i) to be encoded [%r]\" % (len(s),s))\n return chb(len(s)|0x80)+s\ndef BER_len_dec(s):\n l = orb(s[0])\n if not l & 0x80:\n return l,s[1:]\n l &= 0x7f\n if len(s) <= l:\n raise BER_Decoding_Error(\"BER_len_dec: Got %i bytes while expecting %i\" % (len(s)-1, l),remaining=s)\n ll = 0\n for c in s[1:l+1]:\n ll <<= 8\n ll |= orb(c)\n return ll,s[l+1:]\n \ndef BER_num_enc(l, size=1):\n x=[]\n while l or size>0:\n x.insert(0, l & 0x7f)\n if len(x) > 1:\n x[0] |= 0x80\n l >>= 7\n size -= 1\n return b\"\".join(chb(k) for k in x)\ndef BER_num_dec(s, cls_id=0):\n if len(s) == 0:\n raise BER_Decoding_Error(\"BER_num_dec: got empty string\", remaining=s)\n x = cls_id\n for i, c in enumerate(s):\n c = orb(c)\n x <<= 7\n x |= c&0x7f\n if not c&0x80:\n break\n if c&0x80:\n raise BER_Decoding_Error(\"BER_num_dec: unfinished number description\", remaining=s)\n return x, s[i+1:]\n\ndef BER_id_dec(s):\n # This returns the tag ALONG WITH THE PADDED CLASS+CONSTRUCTIVE INFO.\n # Let's recall that bits 8-7 from the first byte of the tag encode\n # the class information, while bit 6 means primitive or constructive.\n #\n # For instance, with low-tag-number b'\\x81', class would be 0b10\n # ('context-specific') and tag 0x01, but we return 0x81 as a whole.\n # For b'\\xff\\x22', class would be 0b11 ('private'), constructed, then\n # padding, then tag 0x22, but we return (0xff>>5)*128^1 + 0x22*128^0.\n # Why the 5-bit-shifting? Because it provides an unequivocal encoding\n # on base 128 (note that 0xff would equal 1*128^1 + 127*128^0...),\n # as we know that bits 5 to 1 are fixed to 1 anyway.\n #\n # As long as there is no class differentiation, we have to keep this info\n # encoded in scapy's tag in order to reuse it for packet building.\n # Note that tags thus may have to be hard-coded with their extended\n # information, e.g. a SEQUENCE from asn1.py has a direct tag 0x20|16.\n x = orb(s[0])\n if x & 0x1f != 0x1f:\n # low-tag-number\n return x,s[1:]\n else:\n # high-tag-number\n return BER_num_dec(s[1:], cls_id=x>>5)\ndef BER_id_enc(n):\n if n < 256:\n # low-tag-number\n return chb(n)\n else:\n # high-tag-number\n s = BER_num_enc(n)\n tag = orb(s[0]) # first byte, as an int\n tag &= 0x07 # reset every bit from 8 to 4\n tag <<= 5 # move back the info bits on top\n tag |= 0x1f # pad with 1s every bit from 5 to 1\n return chb(tag) + s[1:]\n\n# The functions below provide implicit and explicit tagging support.\ndef BER_tagging_dec(s, hidden_tag=None, implicit_tag=None,\n explicit_tag=None, safe=False):\n # We output the 'real_tag' if it is different from the (im|ex)plicit_tag.\n real_tag = None\n if len(s) > 0:\n err_msg = \"BER_tagging_dec: observed tag does not match expected tag\"\n if implicit_tag is not None:\n ber_id,s = BER_id_dec(s)\n if ber_id != implicit_tag:\n if not safe:\n raise BER_Decoding_Error(err_msg, remaining=s)\n else:\n real_tag = ber_id\n s = chb(hash(hidden_tag)) + s\n elif explicit_tag is not None:\n ber_id,s = BER_id_dec(s)\n if ber_id != explicit_tag:\n if not safe:\n raise BER_Decoding_Error(err_msg, remaining=s)\n else:\n real_tag = ber_id\n l,s = BER_len_dec(s)\n return real_tag, s\ndef BER_tagging_enc(s, implicit_tag=None, explicit_tag=None):\n if len(s) > 0:\n if implicit_tag is not None:\n s = BER_id_enc(implicit_tag) + s[1:]\n elif explicit_tag is not None:\n s = BER_id_enc(explicit_tag) + BER_len_enc(len(s)) + s\n return s\n\n#####[ BER classes ]#####\n\nclass BERcodec_metaclass(type):\n def __new__(cls, name, bases, dct):\n c = super(BERcodec_metaclass, cls).__new__(cls, name, bases, dct)\n try:\n c.tag.register(c.codec, c)\n except:\n warning(\"Error registering %r for %r\" % (c.tag, c.codec))\n return c\n\n\nclass BERcodec_Object(six.with_metaclass(BERcodec_metaclass)):\n codec = ASN1_Codecs.BER\n tag = ASN1_Class_UNIVERSAL.ANY\n\n @classmethod\n def asn1_object(cls, val):\n return cls.tag.asn1_object(val)\n\n @classmethod\n def check_string(cls, s):\n if not s:\n raise BER_Decoding_Error(\"%s: Got empty object while expecting tag %r\" %\n (cls.__name__,cls.tag), remaining=s) \n @classmethod\n def check_type(cls, s):\n cls.check_string(s)\n tag, remainder = BER_id_dec(s)\n if cls.tag != tag:\n raise BER_BadTag_Decoding_Error(\"%s: Got tag [%i/%#x] while expecting %r\" %\n (cls.__name__, tag, tag, cls.tag), remaining=s)\n return remainder\n @classmethod\n def check_type_get_len(cls, s):\n s2 = cls.check_type(s)\n if not s2:\n raise BER_Decoding_Error(\"%s: No bytes while expecting a length\" %\n cls.__name__, remaining=s)\n return BER_len_dec(s2)\n @classmethod\n def check_type_check_len(cls, s):\n l,s3 = cls.check_type_get_len(s)\n if len(s3) < l:\n raise BER_Decoding_Error(\"%s: Got %i bytes while expecting %i\" %\n (cls.__name__, len(s3), l), remaining=s)\n return l,s3[:l],s3[l:]\n\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n if context is None:\n context = cls.tag.context\n cls.check_string(s)\n p,_ = BER_id_dec(s)\n if p not in context:\n t = s\n if len(t) > 18:\n t = t[:15]+b\"...\"\n raise BER_Decoding_Error(\"Unknown prefix [%02x] for [%r]\" % (p,t), remaining=s)\n codec = context[p].get_codec(ASN1_Codecs.BER)\n return codec.dec(s,context,safe)\n\n @classmethod\n def dec(cls, s, context=None, safe=False):\n if not safe:\n return cls.do_dec(s, context, safe)\n try:\n return cls.do_dec(s, context, safe)\n except BER_BadTag_Decoding_Error as e:\n o,remain = BERcodec_Object.dec(e.remaining, context, safe)\n return ASN1_BADTAG(o),remain\n except BER_Decoding_Error as e:\n return ASN1_DECODING_ERROR(s, exc=e),\"\"\n except ASN1_Error as e:\n return ASN1_DECODING_ERROR(s, exc=e),\"\"\n\n @classmethod\n def safedec(cls, s, context=None):\n return cls.dec(s, context, safe=True)\n\n\n @classmethod\n def enc(cls, s):\n if isinstance(s, six.string_types):\n return BERcodec_STRING.enc(s)\n else:\n return BERcodec_INTEGER.enc(int(s))\n\nASN1_Codecs.BER.register_stem(BERcodec_Object)\n\n\n##########################\n#### BERcodec objects ####\n##########################\n\nclass BERcodec_INTEGER(BERcodec_Object):\n tag = ASN1_Class_UNIVERSAL.INTEGER\n @classmethod\n def enc(cls, i):\n s = []\n while True:\n s.append(i&0xff)\n if -127 <= i < 0:\n break\n if 128 <= i <= 255:\n s.append(0)\n i >>= 8\n if not i:\n break\n s = [chb(hash(c)) for c in s]\n s.append(BER_len_enc(len(s)))\n s.append(chb(hash(cls.tag)))\n s.reverse()\n return b\"\".join(s)\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n l,s,t = cls.check_type_check_len(s)\n x = 0\n if s:\n if orb(s[0])&0x80: # negative int\n x = -1\n for c in s:\n x <<= 8\n x |= orb(c)\n return cls.asn1_object(x),t\n \nclass BERcodec_BOOLEAN(BERcodec_INTEGER):\n tag = ASN1_Class_UNIVERSAL.BOOLEAN\n\nclass BERcodec_BIT_STRING(BERcodec_Object):\n tag = ASN1_Class_UNIVERSAL.BIT_STRING\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n # /!\\ the unused_bits information is lost after this decoding\n l,s,t = cls.check_type_check_len(s)\n if len(s) > 0:\n unused_bits = orb(s[0])\n if safe and unused_bits > 7:\n raise BER_Decoding_Error(\"BERcodec_BIT_STRING: too many unused_bits advertised\", remaining=s)\n s = \"\".join(binrepr(orb(x)).zfill(8) for x in s[1:])\n if unused_bits > 0:\n s = s[:-unused_bits]\n return cls.tag.asn1_object(s),t\n else:\n raise BER_Decoding_Error(\"BERcodec_BIT_STRING found no content (not even unused_bits byte)\", remaining=s)\n @classmethod\n def enc(cls,s):\n # /!\\ this is DER encoding (bit strings are only zero-bit padded)\n s = raw(s)\n if len(s) % 8 == 0:\n unused_bits = 0\n else:\n unused_bits = 8 - len(s)%8\n s += b\"0\"*unused_bits\n s = b\"\".join(chb(int(b\"\".join(chb(y) for y in x),2)) for x in zip(*[iter(s)]*8))\n s = chb(unused_bits) + s\n return chb(hash(cls.tag))+BER_len_enc(len(s))+s\n\nclass BERcodec_STRING(BERcodec_Object):\n tag = ASN1_Class_UNIVERSAL.STRING\n @classmethod\n def enc(cls,s):\n return chb(hash(cls.tag))+BER_len_enc(len(s))+raw(s) # Be sure we are encoding bytes\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n l,s,t = cls.check_type_check_len(s)\n return cls.tag.asn1_object(s),t\n\nclass BERcodec_NULL(BERcodec_INTEGER):\n tag = ASN1_Class_UNIVERSAL.NULL\n @classmethod\n def enc(cls, i):\n if i == 0:\n return chb(hash(cls.tag))+b\"\\0\"\n else:\n return super(cls,cls).enc(i)\n\nclass BERcodec_OID(BERcodec_Object):\n tag = ASN1_Class_UNIVERSAL.OID\n @classmethod\n def enc(cls, oid):\n oid = raw(oid)\n lst = [int(x) for x in oid.strip(b\".\").split(b\".\")]\n if len(lst) >= 2:\n lst[1] += 40*lst[0]\n del(lst[0])\n s = b\"\".join(BER_num_enc(k) for k in lst)\n return chb(hash(cls.tag))+BER_len_enc(len(s))+s\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n l,s,t = cls.check_type_check_len(s)\n lst = []\n while s:\n l,s = BER_num_dec(s)\n lst.append(l)\n if (len(lst) > 0):\n lst.insert(0,lst[0]//40)\n lst[1] %= 40\n return cls.asn1_object(b\".\".join(str(k).encode('ascii') for k in lst)), t\n\nclass BERcodec_ENUMERATED(BERcodec_INTEGER):\n tag = ASN1_Class_UNIVERSAL.ENUMERATED\n\nclass BERcodec_UTF8_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.UTF8_STRING\n\nclass BERcodec_NUMERIC_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.NUMERIC_STRING\n\nclass BERcodec_PRINTABLE_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.PRINTABLE_STRING\n\nclass BERcodec_T61_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.T61_STRING\n\nclass BERcodec_VIDEOTEX_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.VIDEOTEX_STRING\n\nclass BERcodec_IA5_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.IA5_STRING\n\nclass BERcodec_UTC_TIME(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.UTC_TIME\n\nclass BERcodec_GENERALIZED_TIME(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.GENERALIZED_TIME\n\nclass BERcodec_ISO646_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.ISO646_STRING\n\nclass BERcodec_UNIVERSAL_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.UNIVERSAL_STRING\n\nclass BERcodec_BMP_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.BMP_STRING\n\nclass BERcodec_SEQUENCE(BERcodec_Object):\n tag = ASN1_Class_UNIVERSAL.SEQUENCE\n @classmethod\n def enc(cls, l):\n if not isinstance(l, bytes):\n l = b\"\".join(x.enc(cls.codec) for x in l)\n return chb(hash(cls.tag))+BER_len_enc(len(l))+l\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n if context is None:\n context = cls.tag.context\n l,st = cls.check_type_get_len(s) # we may have len(s) < l\n s,t = st[:l],st[l:]\n obj = []\n while s:\n try:\n o,s = BERcodec_Object.dec(s, context, safe)\n except BER_Decoding_Error as err:\n err.remaining += t\n if err.decoded is not None:\n obj.append(err.decoded)\n err.decoded = obj\n raise \n obj.append(o)\n if len(st) < l:\n raise BER_Decoding_Error(\"Not enough bytes to decode sequence\", decoded=obj)\n return cls.asn1_object(obj),t\n\nclass BERcodec_SET(BERcodec_SEQUENCE):\n tag = ASN1_Class_UNIVERSAL.SET\n\nclass BERcodec_IPADDRESS(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.IPADDRESS\n @classmethod\n def enc(cls, ipaddr_ascii):\n try:\n s = inet_aton(ipaddr_ascii)\n except Exception:\n raise BER_Encoding_Error(\"IPv4 address could not be encoded\") \n return chb(hash(cls.tag))+BER_len_enc(len(s))+s\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n l,s,t = cls.check_type_check_len(s)\n try:\n ipaddr_ascii = inet_ntoa(s)\n except Exception:\n raise BER_Decoding_Error(\"IP address could not be decoded\", remaining=s)\n return cls.asn1_object(ipaddr_ascii), t\n\nclass BERcodec_COUNTER32(BERcodec_INTEGER):\n tag = ASN1_Class_UNIVERSAL.COUNTER32\n\nclass BERcodec_GAUGE32(BERcodec_INTEGER):\n tag = ASN1_Class_UNIVERSAL.GAUGE32\n\nclass BERcodec_TIME_TICKS(BERcodec_INTEGER):\n tag = ASN1_Class_UNIVERSAL.TIME_TICKS\n", "path": "scapy/asn1/ber.py"}], "after_files": [{"content": "## This file is part of Scapy\n## See http://www.secdev.org/projects/scapy for more informations\n## Copyright (C) Philippe Biondi <[email protected]>\n## Modified by Maxence Tury <[email protected]>\n## Acknowledgment: Ralph Broenink\n## This program is published under a GPLv2 license\n\n\"\"\"\nBasic Encoding Rules (BER) for ASN.1\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom scapy.error import warning\nfrom scapy.compat import *\nfrom scapy.utils import binrepr,inet_aton,inet_ntoa\nfrom scapy.asn1.asn1 import ASN1_Decoding_Error,ASN1_Encoding_Error,ASN1_BadTag_Decoding_Error,ASN1_Codecs,ASN1_Class_UNIVERSAL,ASN1_Error,ASN1_DECODING_ERROR,ASN1_BADTAG\nimport scapy.modules.six as six\n\n##################\n## BER encoding ##\n##################\n\n\n\n#####[ BER tools ]#####\n\n\nclass BER_Exception(Exception):\n pass\n\nclass BER_Encoding_Error(ASN1_Encoding_Error):\n def __init__(self, msg, encoded=None, remaining=None):\n Exception.__init__(self, msg)\n self.remaining = remaining\n self.encoded = encoded\n def __str__(self):\n s = Exception.__str__(self)\n if isinstance(self.encoded, BERcodec_Object):\n s+=\"\\n### Already encoded ###\\n%s\" % self.encoded.strshow()\n else:\n s+=\"\\n### Already encoded ###\\n%r\" % self.encoded\n s+=\"\\n### Remaining ###\\n%r\" % self.remaining\n return s\n\nclass BER_Decoding_Error(ASN1_Decoding_Error):\n def __init__(self, msg, decoded=None, remaining=None):\n Exception.__init__(self, msg)\n self.remaining = remaining\n self.decoded = decoded\n def __str__(self):\n s = Exception.__str__(self)\n if isinstance(self.decoded, BERcodec_Object):\n s+=\"\\n### Already decoded ###\\n%s\" % self.decoded.strshow()\n else:\n s+=\"\\n### Already decoded ###\\n%r\" % self.decoded\n s+=\"\\n### Remaining ###\\n%r\" % self.remaining\n return s\n\nclass BER_BadTag_Decoding_Error(BER_Decoding_Error, ASN1_BadTag_Decoding_Error):\n pass\n\ndef BER_len_enc(l, size=0):\n if l <= 127 and size==0:\n return chb(l)\n s = b\"\"\n while l or size>0:\n s = chb(l&0xff)+s\n l >>= 8\n size -= 1\n if len(s) > 127:\n raise BER_Exception(\"BER_len_enc: Length too long (%i) to be encoded [%r]\" % (len(s),s))\n return chb(len(s)|0x80)+s\ndef BER_len_dec(s):\n l = orb(s[0])\n if not l & 0x80:\n return l,s[1:]\n l &= 0x7f\n if len(s) <= l:\n raise BER_Decoding_Error(\"BER_len_dec: Got %i bytes while expecting %i\" % (len(s)-1, l),remaining=s)\n ll = 0\n for c in s[1:l+1]:\n ll <<= 8\n ll |= orb(c)\n return ll,s[l+1:]\n \ndef BER_num_enc(l, size=1):\n x=[]\n while l or size>0:\n x.insert(0, l & 0x7f)\n if len(x) > 1:\n x[0] |= 0x80\n l >>= 7\n size -= 1\n return b\"\".join(chb(k) for k in x)\ndef BER_num_dec(s, cls_id=0):\n if len(s) == 0:\n raise BER_Decoding_Error(\"BER_num_dec: got empty string\", remaining=s)\n x = cls_id\n for i, c in enumerate(s):\n c = orb(c)\n x <<= 7\n x |= c&0x7f\n if not c&0x80:\n break\n if c&0x80:\n raise BER_Decoding_Error(\"BER_num_dec: unfinished number description\", remaining=s)\n return x, s[i+1:]\n\ndef BER_id_dec(s):\n # This returns the tag ALONG WITH THE PADDED CLASS+CONSTRUCTIVE INFO.\n # Let's recall that bits 8-7 from the first byte of the tag encode\n # the class information, while bit 6 means primitive or constructive.\n #\n # For instance, with low-tag-number b'\\x81', class would be 0b10\n # ('context-specific') and tag 0x01, but we return 0x81 as a whole.\n # For b'\\xff\\x22', class would be 0b11 ('private'), constructed, then\n # padding, then tag 0x22, but we return (0xff>>5)*128^1 + 0x22*128^0.\n # Why the 5-bit-shifting? Because it provides an unequivocal encoding\n # on base 128 (note that 0xff would equal 1*128^1 + 127*128^0...),\n # as we know that bits 5 to 1 are fixed to 1 anyway.\n #\n # As long as there is no class differentiation, we have to keep this info\n # encoded in scapy's tag in order to reuse it for packet building.\n # Note that tags thus may have to be hard-coded with their extended\n # information, e.g. a SEQUENCE from asn1.py has a direct tag 0x20|16.\n x = orb(s[0])\n if x & 0x1f != 0x1f:\n # low-tag-number\n return x,s[1:]\n else:\n # high-tag-number\n return BER_num_dec(s[1:], cls_id=x>>5)\ndef BER_id_enc(n):\n if n < 256:\n # low-tag-number\n return chb(n)\n else:\n # high-tag-number\n s = BER_num_enc(n)\n tag = orb(s[0]) # first byte, as an int\n tag &= 0x07 # reset every bit from 8 to 4\n tag <<= 5 # move back the info bits on top\n tag |= 0x1f # pad with 1s every bit from 5 to 1\n return chb(tag) + s[1:]\n\n# The functions below provide implicit and explicit tagging support.\ndef BER_tagging_dec(s, hidden_tag=None, implicit_tag=None,\n explicit_tag=None, safe=False):\n # We output the 'real_tag' if it is different from the (im|ex)plicit_tag.\n real_tag = None\n if len(s) > 0:\n err_msg = \"BER_tagging_dec: observed tag does not match expected tag\"\n if implicit_tag is not None:\n ber_id,s = BER_id_dec(s)\n if ber_id != implicit_tag:\n if not safe:\n raise BER_Decoding_Error(err_msg, remaining=s)\n else:\n real_tag = ber_id\n s = chb(hash(hidden_tag)) + s\n elif explicit_tag is not None:\n ber_id,s = BER_id_dec(s)\n if ber_id != explicit_tag:\n if not safe:\n raise BER_Decoding_Error(err_msg, remaining=s)\n else:\n real_tag = ber_id\n l,s = BER_len_dec(s)\n return real_tag, s\ndef BER_tagging_enc(s, implicit_tag=None, explicit_tag=None):\n if len(s) > 0:\n if implicit_tag is not None:\n s = BER_id_enc(implicit_tag) + s[1:]\n elif explicit_tag is not None:\n s = BER_id_enc(explicit_tag) + BER_len_enc(len(s)) + s\n return s\n\n#####[ BER classes ]#####\n\nclass BERcodec_metaclass(type):\n def __new__(cls, name, bases, dct):\n c = super(BERcodec_metaclass, cls).__new__(cls, name, bases, dct)\n try:\n c.tag.register(c.codec, c)\n except:\n warning(\"Error registering %r for %r\" % (c.tag, c.codec))\n return c\n\n\nclass BERcodec_Object(six.with_metaclass(BERcodec_metaclass)):\n codec = ASN1_Codecs.BER\n tag = ASN1_Class_UNIVERSAL.ANY\n\n @classmethod\n def asn1_object(cls, val):\n return cls.tag.asn1_object(val)\n\n @classmethod\n def check_string(cls, s):\n if not s:\n raise BER_Decoding_Error(\"%s: Got empty object while expecting tag %r\" %\n (cls.__name__,cls.tag), remaining=s) \n @classmethod\n def check_type(cls, s):\n cls.check_string(s)\n tag, remainder = BER_id_dec(s)\n if cls.tag != tag:\n raise BER_BadTag_Decoding_Error(\"%s: Got tag [%i/%#x] while expecting %r\" %\n (cls.__name__, tag, tag, cls.tag), remaining=s)\n return remainder\n @classmethod\n def check_type_get_len(cls, s):\n s2 = cls.check_type(s)\n if not s2:\n raise BER_Decoding_Error(\"%s: No bytes while expecting a length\" %\n cls.__name__, remaining=s)\n return BER_len_dec(s2)\n @classmethod\n def check_type_check_len(cls, s):\n l,s3 = cls.check_type_get_len(s)\n if len(s3) < l:\n raise BER_Decoding_Error(\"%s: Got %i bytes while expecting %i\" %\n (cls.__name__, len(s3), l), remaining=s)\n return l,s3[:l],s3[l:]\n\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n if context is None:\n context = cls.tag.context\n cls.check_string(s)\n p,_ = BER_id_dec(s)\n if p not in context:\n t = s\n if len(t) > 18:\n t = t[:15]+b\"...\"\n raise BER_Decoding_Error(\"Unknown prefix [%02x] for [%r]\" % (p,t), remaining=s)\n codec = context[p].get_codec(ASN1_Codecs.BER)\n return codec.dec(s,context,safe)\n\n @classmethod\n def dec(cls, s, context=None, safe=False):\n if not safe:\n return cls.do_dec(s, context, safe)\n try:\n return cls.do_dec(s, context, safe)\n except BER_BadTag_Decoding_Error as e:\n o,remain = BERcodec_Object.dec(e.remaining, context, safe)\n return ASN1_BADTAG(o),remain\n except BER_Decoding_Error as e:\n return ASN1_DECODING_ERROR(s, exc=e),\"\"\n except ASN1_Error as e:\n return ASN1_DECODING_ERROR(s, exc=e),\"\"\n\n @classmethod\n def safedec(cls, s, context=None):\n return cls.dec(s, context, safe=True)\n\n\n @classmethod\n def enc(cls, s):\n if isinstance(s, six.string_types):\n return BERcodec_STRING.enc(s)\n else:\n return BERcodec_INTEGER.enc(int(s))\n\nASN1_Codecs.BER.register_stem(BERcodec_Object)\n\n\n##########################\n#### BERcodec objects ####\n##########################\n\nclass BERcodec_INTEGER(BERcodec_Object):\n tag = ASN1_Class_UNIVERSAL.INTEGER\n @classmethod\n def enc(cls, i):\n s = []\n while True:\n s.append(i&0xff)\n if -127 <= i < 0:\n break\n if 128 <= i <= 255:\n s.append(0)\n i >>= 8\n if not i:\n break\n s = [chb(hash(c)) for c in s]\n s.append(BER_len_enc(len(s)))\n s.append(chb(hash(cls.tag)))\n s.reverse()\n return b\"\".join(s)\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n l,s,t = cls.check_type_check_len(s)\n x = 0\n if s:\n if orb(s[0])&0x80: # negative int\n x = -1\n for c in s:\n x <<= 8\n x |= orb(c)\n return cls.asn1_object(x),t\n \nclass BERcodec_BOOLEAN(BERcodec_INTEGER):\n tag = ASN1_Class_UNIVERSAL.BOOLEAN\n\nclass BERcodec_BIT_STRING(BERcodec_Object):\n tag = ASN1_Class_UNIVERSAL.BIT_STRING\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n # /!\\ the unused_bits information is lost after this decoding\n l,s,t = cls.check_type_check_len(s)\n if len(s) > 0:\n unused_bits = orb(s[0])\n if safe and unused_bits > 7:\n raise BER_Decoding_Error(\"BERcodec_BIT_STRING: too many unused_bits advertised\", remaining=s)\n s = \"\".join(binrepr(orb(x)).zfill(8) for x in s[1:])\n if unused_bits > 0:\n s = s[:-unused_bits]\n return cls.tag.asn1_object(s),t\n else:\n raise BER_Decoding_Error(\"BERcodec_BIT_STRING found no content (not even unused_bits byte)\", remaining=s)\n @classmethod\n def enc(cls,s):\n # /!\\ this is DER encoding (bit strings are only zero-bit padded)\n s = raw(s)\n if len(s) % 8 == 0:\n unused_bits = 0\n else:\n unused_bits = 8 - len(s)%8\n s += b\"0\"*unused_bits\n s = b\"\".join(chb(int(b\"\".join(chb(y) for y in x),2)) for x in zip(*[iter(s)]*8))\n s = chb(unused_bits) + s\n return chb(hash(cls.tag))+BER_len_enc(len(s))+s\n\nclass BERcodec_STRING(BERcodec_Object):\n tag = ASN1_Class_UNIVERSAL.STRING\n @classmethod\n def enc(cls,s):\n s = raw(s)\n return chb(hash(cls.tag))+BER_len_enc(len(s))+s # Be sure we are encoding bytes\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n l,s,t = cls.check_type_check_len(s)\n return cls.tag.asn1_object(s),t\n\nclass BERcodec_NULL(BERcodec_INTEGER):\n tag = ASN1_Class_UNIVERSAL.NULL\n @classmethod\n def enc(cls, i):\n if i == 0:\n return chb(hash(cls.tag))+b\"\\0\"\n else:\n return super(cls,cls).enc(i)\n\nclass BERcodec_OID(BERcodec_Object):\n tag = ASN1_Class_UNIVERSAL.OID\n @classmethod\n def enc(cls, oid):\n oid = raw(oid)\n lst = [int(x) for x in oid.strip(b\".\").split(b\".\")]\n if len(lst) >= 2:\n lst[1] += 40*lst[0]\n del(lst[0])\n s = b\"\".join(BER_num_enc(k) for k in lst)\n return chb(hash(cls.tag))+BER_len_enc(len(s))+s\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n l,s,t = cls.check_type_check_len(s)\n lst = []\n while s:\n l,s = BER_num_dec(s)\n lst.append(l)\n if (len(lst) > 0):\n lst.insert(0,lst[0]//40)\n lst[1] %= 40\n return cls.asn1_object(b\".\".join(str(k).encode('ascii') for k in lst)), t\n\nclass BERcodec_ENUMERATED(BERcodec_INTEGER):\n tag = ASN1_Class_UNIVERSAL.ENUMERATED\n\nclass BERcodec_UTF8_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.UTF8_STRING\n\nclass BERcodec_NUMERIC_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.NUMERIC_STRING\n\nclass BERcodec_PRINTABLE_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.PRINTABLE_STRING\n\nclass BERcodec_T61_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.T61_STRING\n\nclass BERcodec_VIDEOTEX_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.VIDEOTEX_STRING\n\nclass BERcodec_IA5_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.IA5_STRING\n\nclass BERcodec_UTC_TIME(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.UTC_TIME\n\nclass BERcodec_GENERALIZED_TIME(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.GENERALIZED_TIME\n\nclass BERcodec_ISO646_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.ISO646_STRING\n\nclass BERcodec_UNIVERSAL_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.UNIVERSAL_STRING\n\nclass BERcodec_BMP_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.BMP_STRING\n\nclass BERcodec_SEQUENCE(BERcodec_Object):\n tag = ASN1_Class_UNIVERSAL.SEQUENCE\n @classmethod\n def enc(cls, l):\n if not isinstance(l, bytes):\n l = b\"\".join(x.enc(cls.codec) for x in l)\n return chb(hash(cls.tag))+BER_len_enc(len(l))+l\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n if context is None:\n context = cls.tag.context\n l,st = cls.check_type_get_len(s) # we may have len(s) < l\n s,t = st[:l],st[l:]\n obj = []\n while s:\n try:\n o,s = BERcodec_Object.dec(s, context, safe)\n except BER_Decoding_Error as err:\n err.remaining += t\n if err.decoded is not None:\n obj.append(err.decoded)\n err.decoded = obj\n raise \n obj.append(o)\n if len(st) < l:\n raise BER_Decoding_Error(\"Not enough bytes to decode sequence\", decoded=obj)\n return cls.asn1_object(obj),t\n\nclass BERcodec_SET(BERcodec_SEQUENCE):\n tag = ASN1_Class_UNIVERSAL.SET\n\nclass BERcodec_IPADDRESS(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.IPADDRESS\n @classmethod\n def enc(cls, ipaddr_ascii):\n try:\n s = inet_aton(ipaddr_ascii)\n except Exception:\n raise BER_Encoding_Error(\"IPv4 address could not be encoded\") \n return chb(hash(cls.tag))+BER_len_enc(len(s))+s\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n l,s,t = cls.check_type_check_len(s)\n try:\n ipaddr_ascii = inet_ntoa(s)\n except Exception:\n raise BER_Decoding_Error(\"IP address could not be decoded\", remaining=s)\n return cls.asn1_object(ipaddr_ascii), t\n\nclass BERcodec_COUNTER32(BERcodec_INTEGER):\n tag = ASN1_Class_UNIVERSAL.COUNTER32\n\nclass BERcodec_GAUGE32(BERcodec_INTEGER):\n tag = ASN1_Class_UNIVERSAL.GAUGE32\n\nclass BERcodec_TIME_TICKS(BERcodec_INTEGER):\n tag = ASN1_Class_UNIVERSAL.TIME_TICKS\n", "path": "scapy/asn1/ber.py"}]} |
gh_patches_debug_1549 | rasdani/github-patches | git_diff | pytorch__audio-1339 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Making `AudioMetaData` print friendly
`AudioMetaData` class reports meta-data of audio source. It is however not print friendly.
```python
print(torchaudio.info(src))
>>> <torchaudio.backend.common.AudioMetaData object at 0x7f1bc5cd2890>
```
It is nice if we can simply print the attributes like `dataclass` objects do.
```python
print(torchaudio.info(src))
>>> AudioMetaData(sample_rate=900, encoding="PCM", ...)
```
## Steps
There are two approaches I can think of
1. Add `__str__` method.
2. Use `dataclasses.dataclass`
For 2, the `info` function has to be TorchScript-compatible. This means that its return type `AudioMetaData` has to be TorchScript-able. For this reason, `dataclass` might not be applicable. This can be checked with the following test;
```bash
(cd test && pytest torchaudio_unittest/backend/sox_io/torchscript_test.py)
```
## Build and test
Please refer to the [contribution guide](https://github.com/pytorch/audio/blob/master/CONTRIBUTING.md) for how to setup development environment.
To test,
```bash
(cd test && pytest torchaudio_unittest/backend/sox_io/torchscript_test.py torchaudio_unittest/backend/sox_io/info_test.py torchaudio_unittest/backend/soundfile_io/info_test.py)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchaudio/backend/common.py`
Content:
```
1 class AudioMetaData:
2 """Return type of ``torchaudio.info`` function.
3
4 This class is used by :ref:`"sox_io" backend<sox_io_backend>` and
5 :ref:`"soundfile" backend with the new interface<soundfile_backend>`.
6
7 :ivar int sample_rate: Sample rate
8 :ivar int num_frames: The number of frames
9 :ivar int num_channels: The number of channels
10 :ivar int bits_per_sample: The number of bits per sample. This is 0 for lossy formats,
11 or when it cannot be accurately inferred.
12 :ivar str encoding: Audio encoding
13 The values encoding can take are one of the following:
14
15 * ``PCM_S``: Signed integer linear PCM
16 * ``PCM_U``: Unsigned integer linear PCM
17 * ``PCM_F``: Floating point linear PCM
18 * ``FLAC``: Flac, Free Lossless Audio Codec
19 * ``ULAW``: Mu-law
20 * ``ALAW``: A-law
21 * ``MP3`` : MP3, MPEG-1 Audio Layer III
22 * ``VORBIS``: OGG Vorbis
23 * ``AMR_WB``: Adaptive Multi-Rate
24 * ``AMR_NB``: Adaptive Multi-Rate Wideband
25 * ``OPUS``: Opus
26 * ``UNKNOWN`` : None of above
27 """
28 def __init__(
29 self,
30 sample_rate: int,
31 num_frames: int,
32 num_channels: int,
33 bits_per_sample: int,
34 encoding: str,
35 ):
36 self.sample_rate = sample_rate
37 self.num_frames = num_frames
38 self.num_channels = num_channels
39 self.bits_per_sample = bits_per_sample
40 self.encoding = encoding
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchaudio/backend/common.py b/torchaudio/backend/common.py
--- a/torchaudio/backend/common.py
+++ b/torchaudio/backend/common.py
@@ -38,3 +38,14 @@
self.num_channels = num_channels
self.bits_per_sample = bits_per_sample
self.encoding = encoding
+
+ def __str__(self):
+ return (
+ f"AudioMetaData("
+ f"sample_rate={self.sample_rate}, "
+ f"num_frames={self.num_frames}, "
+ f"num_channels={self.num_channels}, "
+ f"bits_per_sample={self.bits_per_sample}, "
+ f"encoding={self.encoding}"
+ f")"
+ )
| {"golden_diff": "diff --git a/torchaudio/backend/common.py b/torchaudio/backend/common.py\n--- a/torchaudio/backend/common.py\n+++ b/torchaudio/backend/common.py\n@@ -38,3 +38,14 @@\n self.num_channels = num_channels\n self.bits_per_sample = bits_per_sample\n self.encoding = encoding\n+\n+ def __str__(self):\n+ return (\n+ f\"AudioMetaData(\"\n+ f\"sample_rate={self.sample_rate}, \"\n+ f\"num_frames={self.num_frames}, \"\n+ f\"num_channels={self.num_channels}, \"\n+ f\"bits_per_sample={self.bits_per_sample}, \"\n+ f\"encoding={self.encoding}\"\n+ f\")\"\n+ )\n", "issue": "Making `AudioMetaData` print friendly\n`AudioMetaData` class reports meta-data of audio source. It is however not print friendly.\r\n\r\n```python\r\nprint(torchaudio.info(src))\r\n>>> <torchaudio.backend.common.AudioMetaData object at 0x7f1bc5cd2890>\r\n```\r\n\r\nIt is nice if we can simply print the attributes like `dataclass` objects do.\r\n\r\n```python\r\nprint(torchaudio.info(src))\r\n>>> AudioMetaData(sample_rate=900, encoding=\"PCM\", ...)\r\n```\r\n\r\n## Steps\r\n\r\nThere are two approaches I can think of\r\n1. Add `__str__` method.\r\n2. Use `dataclasses.dataclass`\r\n\r\nFor 2, the `info` function has to be TorchScript-compatible. This means that its return type `AudioMetaData` has to be TorchScript-able. For this reason, `dataclass` might not be applicable. This can be checked with the following test;\r\n\r\n```bash\r\n(cd test && pytest torchaudio_unittest/backend/sox_io/torchscript_test.py)\r\n```\r\n\r\n## Build and test\r\n\r\nPlease refer to the [contribution guide](https://github.com/pytorch/audio/blob/master/CONTRIBUTING.md) for how to setup development environment.\r\n\r\nTo test, \r\n\r\n```bash\r\n(cd test && pytest torchaudio_unittest/backend/sox_io/torchscript_test.py torchaudio_unittest/backend/sox_io/info_test.py torchaudio_unittest/backend/soundfile_io/info_test.py)\r\n```\n", "before_files": [{"content": "class AudioMetaData:\n \"\"\"Return type of ``torchaudio.info`` function.\n\n This class is used by :ref:`\"sox_io\" backend<sox_io_backend>` and\n :ref:`\"soundfile\" backend with the new interface<soundfile_backend>`.\n\n :ivar int sample_rate: Sample rate\n :ivar int num_frames: The number of frames\n :ivar int num_channels: The number of channels\n :ivar int bits_per_sample: The number of bits per sample. This is 0 for lossy formats,\n or when it cannot be accurately inferred.\n :ivar str encoding: Audio encoding\n The values encoding can take are one of the following:\n\n * ``PCM_S``: Signed integer linear PCM\n * ``PCM_U``: Unsigned integer linear PCM\n * ``PCM_F``: Floating point linear PCM\n * ``FLAC``: Flac, Free Lossless Audio Codec\n * ``ULAW``: Mu-law\n * ``ALAW``: A-law\n * ``MP3`` : MP3, MPEG-1 Audio Layer III\n * ``VORBIS``: OGG Vorbis\n * ``AMR_WB``: Adaptive Multi-Rate\n * ``AMR_NB``: Adaptive Multi-Rate Wideband\n * ``OPUS``: Opus\n * ``UNKNOWN`` : None of above\n \"\"\"\n def __init__(\n self,\n sample_rate: int,\n num_frames: int,\n num_channels: int,\n bits_per_sample: int,\n encoding: str,\n ):\n self.sample_rate = sample_rate\n self.num_frames = num_frames\n self.num_channels = num_channels\n self.bits_per_sample = bits_per_sample\n self.encoding = encoding\n", "path": "torchaudio/backend/common.py"}], "after_files": [{"content": "class AudioMetaData:\n \"\"\"Return type of ``torchaudio.info`` function.\n\n This class is used by :ref:`\"sox_io\" backend<sox_io_backend>` and\n :ref:`\"soundfile\" backend with the new interface<soundfile_backend>`.\n\n :ivar int sample_rate: Sample rate\n :ivar int num_frames: The number of frames\n :ivar int num_channels: The number of channels\n :ivar int bits_per_sample: The number of bits per sample. This is 0 for lossy formats,\n or when it cannot be accurately inferred.\n :ivar str encoding: Audio encoding\n The values encoding can take are one of the following:\n\n * ``PCM_S``: Signed integer linear PCM\n * ``PCM_U``: Unsigned integer linear PCM\n * ``PCM_F``: Floating point linear PCM\n * ``FLAC``: Flac, Free Lossless Audio Codec\n * ``ULAW``: Mu-law\n * ``ALAW``: A-law\n * ``MP3`` : MP3, MPEG-1 Audio Layer III\n * ``VORBIS``: OGG Vorbis\n * ``AMR_WB``: Adaptive Multi-Rate\n * ``AMR_NB``: Adaptive Multi-Rate Wideband\n * ``OPUS``: Opus\n * ``UNKNOWN`` : None of above\n \"\"\"\n def __init__(\n self,\n sample_rate: int,\n num_frames: int,\n num_channels: int,\n bits_per_sample: int,\n encoding: str,\n ):\n self.sample_rate = sample_rate\n self.num_frames = num_frames\n self.num_channels = num_channels\n self.bits_per_sample = bits_per_sample\n self.encoding = encoding\n\n def __str__(self):\n return (\n f\"AudioMetaData(\"\n f\"sample_rate={self.sample_rate}, \"\n f\"num_frames={self.num_frames}, \"\n f\"num_channels={self.num_channels}, \"\n f\"bits_per_sample={self.bits_per_sample}, \"\n f\"encoding={self.encoding}\"\n f\")\"\n )\n", "path": "torchaudio/backend/common.py"}]} |
gh_patches_debug_1550 | rasdani/github-patches | git_diff | cython__cython-2497 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
submodule with name "cython.pyx" doesn't build
The[ fix for #2422](https://github.com/cython/cython/commit/6c91bf8e5bc99b625405919f9318d5626ecfa782#diff-26945d164aa2d5cb24bbe2cb4b8903ed) introduced a regression: submodules called cython.pyx are no longer built, i.e. for such a test case:
```
######## my_module/__init__.py ########
######## mymodule/cython.pyx ########
```
It might be a little bit inconsistent to build cython.pyx in one case (submodule) but not in the other, but it is probably better not to break existing workflows.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Cython/Utils.py`
Content:
```
1 #
2 # Cython -- Things that don't belong
3 # anywhere else in particular
4 #
5
6 from __future__ import absolute_import
7
8 try:
9 from __builtin__ import basestring
10 except ImportError:
11 basestring = str
12
13 import os
14 import sys
15 import re
16 import io
17 import codecs
18 import shutil
19 from contextlib import contextmanager
20
21 modification_time = os.path.getmtime
22
23 _function_caches = []
24 def clear_function_caches():
25 for cache in _function_caches:
26 cache.clear()
27
28 def cached_function(f):
29 cache = {}
30 _function_caches.append(cache)
31 uncomputed = object()
32 def wrapper(*args):
33 res = cache.get(args, uncomputed)
34 if res is uncomputed:
35 res = cache[args] = f(*args)
36 return res
37 wrapper.uncached = f
38 return wrapper
39
40 def cached_method(f):
41 cache_name = '__%s_cache' % f.__name__
42 def wrapper(self, *args):
43 cache = getattr(self, cache_name, None)
44 if cache is None:
45 cache = {}
46 setattr(self, cache_name, cache)
47 if args in cache:
48 return cache[args]
49 res = cache[args] = f(self, *args)
50 return res
51 return wrapper
52
53 def replace_suffix(path, newsuf):
54 base, _ = os.path.splitext(path)
55 return base + newsuf
56
57
58 def open_new_file(path):
59 if os.path.exists(path):
60 # Make sure to create a new file here so we can
61 # safely hard link the output files.
62 os.unlink(path)
63
64 # we use the ISO-8859-1 encoding here because we only write pure
65 # ASCII strings or (e.g. for file names) byte encoded strings as
66 # Unicode, so we need a direct mapping from the first 256 Unicode
67 # characters to a byte sequence, which ISO-8859-1 provides
68
69 # note: can't use io.open() in Py2 as we may be writing str objects
70 return codecs.open(path, "w", encoding="ISO-8859-1")
71
72
73 def castrate_file(path, st):
74 # Remove junk contents from an output file after a
75 # failed compilation.
76 # Also sets access and modification times back to
77 # those specified by st (a stat struct).
78 try:
79 f = open_new_file(path)
80 except EnvironmentError:
81 pass
82 else:
83 f.write(
84 "#error Do not use this file, it is the result of a failed Cython compilation.\n")
85 f.close()
86 if st:
87 os.utime(path, (st.st_atime, st.st_mtime-1))
88
89 def file_newer_than(path, time):
90 ftime = modification_time(path)
91 return ftime > time
92
93
94 def safe_makedirs(path):
95 try:
96 os.makedirs(path)
97 except OSError:
98 if not os.path.isdir(path):
99 raise
100
101
102 def copy_file_to_dir_if_newer(sourcefile, destdir):
103 """
104 Copy file sourcefile to directory destdir (creating it if needed),
105 preserving metadata. If the destination file exists and is not
106 older than the source file, the copying is skipped.
107 """
108 destfile = os.path.join(destdir, os.path.basename(sourcefile))
109 try:
110 desttime = modification_time(destfile)
111 except OSError:
112 # New file does not exist, destdir may or may not exist
113 safe_makedirs(destdir)
114 else:
115 # New file already exists
116 if not file_newer_than(sourcefile, desttime):
117 return
118 shutil.copy2(sourcefile, destfile)
119
120
121 @cached_function
122 def search_include_directories(dirs, qualified_name, suffix, pos,
123 include=False, sys_path=False):
124 # Search the list of include directories for the given
125 # file name. If a source file position is given, first
126 # searches the directory containing that file. Returns
127 # None if not found, but does not report an error.
128 # The 'include' option will disable package dereferencing.
129 # If 'sys_path' is True, also search sys.path.
130 if sys_path:
131 dirs = dirs + tuple(sys.path)
132 if pos:
133 file_desc = pos[0]
134 from Cython.Compiler.Scanning import FileSourceDescriptor
135 if not isinstance(file_desc, FileSourceDescriptor):
136 raise RuntimeError("Only file sources for code supported")
137 if include:
138 dirs = (os.path.dirname(file_desc.filename),) + dirs
139 else:
140 dirs = (find_root_package_dir(file_desc.filename),) + dirs
141
142 dotted_filename = qualified_name
143 if suffix:
144 dotted_filename += suffix
145 if not include:
146 names = qualified_name.split('.')
147 package_names = tuple(names[:-1])
148 module_name = names[-1]
149 module_filename = module_name + suffix
150 package_filename = "__init__" + suffix
151
152 for dir in dirs:
153 path = os.path.join(dir, dotted_filename)
154 if path_exists(path):
155 return path
156 if not include:
157 package_dir = check_package_dir(dir, package_names)
158 if package_dir is not None:
159 path = os.path.join(package_dir, module_filename)
160 if path_exists(path):
161 return path
162 path = os.path.join(dir, package_dir, module_name,
163 package_filename)
164 if path_exists(path):
165 return path
166 return None
167
168
169 @cached_function
170 def find_root_package_dir(file_path):
171 dir = os.path.dirname(file_path)
172 if file_path == dir:
173 return dir
174 elif is_package_dir(dir):
175 return find_root_package_dir(dir)
176 else:
177 return dir
178
179 @cached_function
180 def check_package_dir(dir, package_names):
181 for dirname in package_names:
182 dir = os.path.join(dir, dirname)
183 if not is_package_dir(dir):
184 return None
185 return dir
186
187 @cached_function
188 def is_package_dir(dir_path):
189 for filename in ("__init__.py",
190 "__init__.pyc",
191 "__init__.pyx",
192 "__init__.pxd"):
193 path = os.path.join(dir_path, filename)
194 if path_exists(path):
195 return 1
196
197 @cached_function
198 def path_exists(path):
199 # try on the filesystem first
200 if os.path.exists(path):
201 return True
202 # figure out if a PEP 302 loader is around
203 try:
204 loader = __loader__
205 # XXX the code below assumes a 'zipimport.zipimporter' instance
206 # XXX should be easy to generalize, but too lazy right now to write it
207 archive_path = getattr(loader, 'archive', None)
208 if archive_path:
209 normpath = os.path.normpath(path)
210 if normpath.startswith(archive_path):
211 arcname = normpath[len(archive_path)+1:]
212 try:
213 loader.get_data(arcname)
214 return True
215 except IOError:
216 return False
217 except NameError:
218 pass
219 return False
220
221 # file name encodings
222
223 def decode_filename(filename):
224 if isinstance(filename, bytes):
225 try:
226 filename_encoding = sys.getfilesystemencoding()
227 if filename_encoding is None:
228 filename_encoding = sys.getdefaultencoding()
229 filename = filename.decode(filename_encoding)
230 except UnicodeDecodeError:
231 pass
232 return filename
233
234 # support for source file encoding detection
235
236 _match_file_encoding = re.compile(u"coding[:=]\s*([-\w.]+)").search
237
238
239 def detect_file_encoding(source_filename):
240 f = open_source_file(source_filename, encoding="UTF-8", error_handling='ignore')
241 try:
242 return detect_opened_file_encoding(f)
243 finally:
244 f.close()
245
246
247 def detect_opened_file_encoding(f):
248 # PEPs 263 and 3120
249 # Most of the time the first two lines fall in the first 250 chars,
250 # and this bulk read/split is much faster.
251 lines = f.read(250).split(u"\n")
252 if len(lines) > 1:
253 m = _match_file_encoding(lines[0])
254 if m:
255 return m.group(1)
256 elif len(lines) > 2:
257 m = _match_file_encoding(lines[1])
258 if m:
259 return m.group(1)
260 else:
261 return "UTF-8"
262 # Fallback to one-char-at-a-time detection.
263 f.seek(0)
264 chars = []
265 for i in range(2):
266 c = f.read(1)
267 while c and c != u'\n':
268 chars.append(c)
269 c = f.read(1)
270 encoding = _match_file_encoding(u''.join(chars))
271 if encoding:
272 return encoding.group(1)
273 return "UTF-8"
274
275
276 def skip_bom(f):
277 """
278 Read past a BOM at the beginning of a source file.
279 This could be added to the scanner, but it's *substantially* easier
280 to keep it at this level.
281 """
282 if f.read(1) != u'\uFEFF':
283 f.seek(0)
284
285
286 def open_source_file(source_filename, mode="r",
287 encoding=None, error_handling=None):
288 if encoding is None:
289 # Most of the time the coding is unspecified, so be optimistic that
290 # it's UTF-8.
291 f = open_source_file(source_filename, encoding="UTF-8", mode=mode, error_handling='ignore')
292 encoding = detect_opened_file_encoding(f)
293 if encoding == "UTF-8" and error_handling == 'ignore':
294 f.seek(0)
295 skip_bom(f)
296 return f
297 else:
298 f.close()
299
300 if not os.path.exists(source_filename):
301 try:
302 loader = __loader__
303 if source_filename.startswith(loader.archive):
304 return open_source_from_loader(
305 loader, source_filename,
306 encoding, error_handling)
307 except (NameError, AttributeError):
308 pass
309
310 stream = io.open(source_filename, mode=mode,
311 encoding=encoding, errors=error_handling)
312 skip_bom(stream)
313 return stream
314
315
316 def open_source_from_loader(loader,
317 source_filename,
318 encoding=None, error_handling=None):
319 nrmpath = os.path.normpath(source_filename)
320 arcname = nrmpath[len(loader.archive)+1:]
321 data = loader.get_data(arcname)
322 return io.TextIOWrapper(io.BytesIO(data),
323 encoding=encoding,
324 errors=error_handling)
325
326
327 def str_to_number(value):
328 # note: this expects a string as input that was accepted by the
329 # parser already, with an optional "-" sign in front
330 is_neg = False
331 if value[:1] == '-':
332 is_neg = True
333 value = value[1:]
334 if len(value) < 2:
335 value = int(value, 0)
336 elif value[0] == '0':
337 literal_type = value[1] # 0'o' - 0'b' - 0'x'
338 if literal_type in 'xX':
339 # hex notation ('0x1AF')
340 value = int(value[2:], 16)
341 elif literal_type in 'oO':
342 # Py3 octal notation ('0o136')
343 value = int(value[2:], 8)
344 elif literal_type in 'bB':
345 # Py3 binary notation ('0b101')
346 value = int(value[2:], 2)
347 else:
348 # Py2 octal notation ('0136')
349 value = int(value, 8)
350 else:
351 value = int(value, 0)
352 return -value if is_neg else value
353
354
355 def long_literal(value):
356 if isinstance(value, basestring):
357 value = str_to_number(value)
358 return not -2**31 <= value < 2**31
359
360
361 @cached_function
362 def get_cython_cache_dir():
363 r"""
364 Return the base directory containing Cython's caches.
365
366 Priority:
367
368 1. CYTHON_CACHE_DIR
369 2. (OS X): ~/Library/Caches/Cython
370 (posix not OS X): XDG_CACHE_HOME/cython if XDG_CACHE_HOME defined
371 3. ~/.cython
372
373 """
374 if 'CYTHON_CACHE_DIR' in os.environ:
375 return os.environ['CYTHON_CACHE_DIR']
376
377 parent = None
378 if os.name == 'posix':
379 if sys.platform == 'darwin':
380 parent = os.path.expanduser('~/Library/Caches')
381 else:
382 # this could fallback on ~/.cache
383 parent = os.environ.get('XDG_CACHE_HOME')
384
385 if parent and os.path.isdir(parent):
386 return os.path.join(parent, 'cython')
387
388 # last fallback: ~/.cython
389 return os.path.expanduser(os.path.join('~', '.cython'))
390
391
392 @contextmanager
393 def captured_fd(stream=2, encoding=None):
394 pipe_in = t = None
395 orig_stream = os.dup(stream) # keep copy of original stream
396 try:
397 pipe_in, pipe_out = os.pipe()
398 os.dup2(pipe_out, stream) # replace stream by copy of pipe
399 try:
400 os.close(pipe_out) # close original pipe-out stream
401 data = []
402
403 def copy():
404 try:
405 while True:
406 d = os.read(pipe_in, 1000)
407 if d:
408 data.append(d)
409 else:
410 break
411 finally:
412 os.close(pipe_in)
413
414 def get_output():
415 output = b''.join(data)
416 if encoding:
417 output = output.decode(encoding)
418 return output
419
420 from threading import Thread
421 t = Thread(target=copy)
422 t.daemon = True # just in case
423 t.start()
424 yield get_output
425 finally:
426 os.dup2(orig_stream, stream) # restore original stream
427 if t is not None:
428 t.join()
429 finally:
430 os.close(orig_stream)
431
432
433 def print_bytes(s, end=b'\n', file=sys.stdout, flush=True):
434 file.flush()
435 try:
436 out = file.buffer # Py3
437 except AttributeError:
438 out = file # Py2
439 out.write(s)
440 if end:
441 out.write(end)
442 if flush:
443 out.flush()
444
445 class LazyStr:
446 def __init__(self, callback):
447 self.callback = callback
448 def __str__(self):
449 return self.callback()
450 def __repr__(self):
451 return self.callback()
452 def __add__(self, right):
453 return self.callback() + right
454 def __radd__(self, left):
455 return left + self.callback()
456
457
458 class OrderedSet(object):
459 def __init__(self, elements=()):
460 self._list = []
461 self._set = set()
462 self.update(elements)
463 def __iter__(self):
464 return iter(self._list)
465 def update(self, elements):
466 for e in elements:
467 self.add(e)
468 def add(self, e):
469 if e not in self._set:
470 self._list.append(e)
471 self._set.add(e)
472
473
474 # Class decorator that adds a metaclass and recreates the class with it.
475 # Copied from 'six'.
476 def add_metaclass(metaclass):
477 """Class decorator for creating a class with a metaclass."""
478 def wrapper(cls):
479 orig_vars = cls.__dict__.copy()
480 slots = orig_vars.get('__slots__')
481 if slots is not None:
482 if isinstance(slots, str):
483 slots = [slots]
484 for slots_var in slots:
485 orig_vars.pop(slots_var)
486 orig_vars.pop('__dict__', None)
487 orig_vars.pop('__weakref__', None)
488 return metaclass(cls.__name__, cls.__bases__, orig_vars)
489 return wrapper
490
491
492 def raise_error_if_module_name_forbidden(full_module_name):
493 #it is bad idea to call the pyx-file cython.pyx, so fail early
494 if full_module_name == 'cython' or full_module_name.endswith('.cython'):
495 raise ValueError('cython is a special module, cannot be used as a module name')
496
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Cython/Utils.py b/Cython/Utils.py
--- a/Cython/Utils.py
+++ b/Cython/Utils.py
@@ -491,5 +491,5 @@
def raise_error_if_module_name_forbidden(full_module_name):
#it is bad idea to call the pyx-file cython.pyx, so fail early
- if full_module_name == 'cython' or full_module_name.endswith('.cython'):
+ if full_module_name == 'cython' or full_module_name.startswith('cython.'):
raise ValueError('cython is a special module, cannot be used as a module name')
| {"golden_diff": "diff --git a/Cython/Utils.py b/Cython/Utils.py\n--- a/Cython/Utils.py\n+++ b/Cython/Utils.py\n@@ -491,5 +491,5 @@\n \n def raise_error_if_module_name_forbidden(full_module_name):\n #it is bad idea to call the pyx-file cython.pyx, so fail early\n- if full_module_name == 'cython' or full_module_name.endswith('.cython'):\n+ if full_module_name == 'cython' or full_module_name.startswith('cython.'):\n raise ValueError('cython is a special module, cannot be used as a module name')\n", "issue": "submodule with name \"cython.pyx\" doesn't build\nThe[ fix for #2422](https://github.com/cython/cython/commit/6c91bf8e5bc99b625405919f9318d5626ecfa782#diff-26945d164aa2d5cb24bbe2cb4b8903ed) introduced a regression: submodules called cython.pyx are no longer built, i.e. for such a test case:\r\n\r\n```\r\n######## my_module/__init__.py ########\r\n######## mymodule/cython.pyx ########\r\n```\r\n\r\nIt might be a little bit inconsistent to build cython.pyx in one case (submodule) but not in the other, but it is probably better not to break existing workflows.\n", "before_files": [{"content": "#\n# Cython -- Things that don't belong\n# anywhere else in particular\n#\n\nfrom __future__ import absolute_import\n\ntry:\n from __builtin__ import basestring\nexcept ImportError:\n basestring = str\n\nimport os\nimport sys\nimport re\nimport io\nimport codecs\nimport shutil\nfrom contextlib import contextmanager\n\nmodification_time = os.path.getmtime\n\n_function_caches = []\ndef clear_function_caches():\n for cache in _function_caches:\n cache.clear()\n\ndef cached_function(f):\n cache = {}\n _function_caches.append(cache)\n uncomputed = object()\n def wrapper(*args):\n res = cache.get(args, uncomputed)\n if res is uncomputed:\n res = cache[args] = f(*args)\n return res\n wrapper.uncached = f\n return wrapper\n\ndef cached_method(f):\n cache_name = '__%s_cache' % f.__name__\n def wrapper(self, *args):\n cache = getattr(self, cache_name, None)\n if cache is None:\n cache = {}\n setattr(self, cache_name, cache)\n if args in cache:\n return cache[args]\n res = cache[args] = f(self, *args)\n return res\n return wrapper\n\ndef replace_suffix(path, newsuf):\n base, _ = os.path.splitext(path)\n return base + newsuf\n\n\ndef open_new_file(path):\n if os.path.exists(path):\n # Make sure to create a new file here so we can\n # safely hard link the output files.\n os.unlink(path)\n\n # we use the ISO-8859-1 encoding here because we only write pure\n # ASCII strings or (e.g. for file names) byte encoded strings as\n # Unicode, so we need a direct mapping from the first 256 Unicode\n # characters to a byte sequence, which ISO-8859-1 provides\n\n # note: can't use io.open() in Py2 as we may be writing str objects\n return codecs.open(path, \"w\", encoding=\"ISO-8859-1\")\n\n\ndef castrate_file(path, st):\n # Remove junk contents from an output file after a\n # failed compilation.\n # Also sets access and modification times back to\n # those specified by st (a stat struct).\n try:\n f = open_new_file(path)\n except EnvironmentError:\n pass\n else:\n f.write(\n \"#error Do not use this file, it is the result of a failed Cython compilation.\\n\")\n f.close()\n if st:\n os.utime(path, (st.st_atime, st.st_mtime-1))\n\ndef file_newer_than(path, time):\n ftime = modification_time(path)\n return ftime > time\n\n\ndef safe_makedirs(path):\n try:\n os.makedirs(path)\n except OSError:\n if not os.path.isdir(path):\n raise\n\n\ndef copy_file_to_dir_if_newer(sourcefile, destdir):\n \"\"\"\n Copy file sourcefile to directory destdir (creating it if needed),\n preserving metadata. If the destination file exists and is not\n older than the source file, the copying is skipped.\n \"\"\"\n destfile = os.path.join(destdir, os.path.basename(sourcefile))\n try:\n desttime = modification_time(destfile)\n except OSError:\n # New file does not exist, destdir may or may not exist\n safe_makedirs(destdir)\n else:\n # New file already exists\n if not file_newer_than(sourcefile, desttime):\n return\n shutil.copy2(sourcefile, destfile)\n\n\n@cached_function\ndef search_include_directories(dirs, qualified_name, suffix, pos,\n include=False, sys_path=False):\n # Search the list of include directories for the given\n # file name. If a source file position is given, first\n # searches the directory containing that file. Returns\n # None if not found, but does not report an error.\n # The 'include' option will disable package dereferencing.\n # If 'sys_path' is True, also search sys.path.\n if sys_path:\n dirs = dirs + tuple(sys.path)\n if pos:\n file_desc = pos[0]\n from Cython.Compiler.Scanning import FileSourceDescriptor\n if not isinstance(file_desc, FileSourceDescriptor):\n raise RuntimeError(\"Only file sources for code supported\")\n if include:\n dirs = (os.path.dirname(file_desc.filename),) + dirs\n else:\n dirs = (find_root_package_dir(file_desc.filename),) + dirs\n\n dotted_filename = qualified_name\n if suffix:\n dotted_filename += suffix\n if not include:\n names = qualified_name.split('.')\n package_names = tuple(names[:-1])\n module_name = names[-1]\n module_filename = module_name + suffix\n package_filename = \"__init__\" + suffix\n\n for dir in dirs:\n path = os.path.join(dir, dotted_filename)\n if path_exists(path):\n return path\n if not include:\n package_dir = check_package_dir(dir, package_names)\n if package_dir is not None:\n path = os.path.join(package_dir, module_filename)\n if path_exists(path):\n return path\n path = os.path.join(dir, package_dir, module_name,\n package_filename)\n if path_exists(path):\n return path\n return None\n\n\n@cached_function\ndef find_root_package_dir(file_path):\n dir = os.path.dirname(file_path)\n if file_path == dir:\n return dir\n elif is_package_dir(dir):\n return find_root_package_dir(dir)\n else:\n return dir\n\n@cached_function\ndef check_package_dir(dir, package_names):\n for dirname in package_names:\n dir = os.path.join(dir, dirname)\n if not is_package_dir(dir):\n return None\n return dir\n\n@cached_function\ndef is_package_dir(dir_path):\n for filename in (\"__init__.py\",\n \"__init__.pyc\",\n \"__init__.pyx\",\n \"__init__.pxd\"):\n path = os.path.join(dir_path, filename)\n if path_exists(path):\n return 1\n\n@cached_function\ndef path_exists(path):\n # try on the filesystem first\n if os.path.exists(path):\n return True\n # figure out if a PEP 302 loader is around\n try:\n loader = __loader__\n # XXX the code below assumes a 'zipimport.zipimporter' instance\n # XXX should be easy to generalize, but too lazy right now to write it\n archive_path = getattr(loader, 'archive', None)\n if archive_path:\n normpath = os.path.normpath(path)\n if normpath.startswith(archive_path):\n arcname = normpath[len(archive_path)+1:]\n try:\n loader.get_data(arcname)\n return True\n except IOError:\n return False\n except NameError:\n pass\n return False\n\n# file name encodings\n\ndef decode_filename(filename):\n if isinstance(filename, bytes):\n try:\n filename_encoding = sys.getfilesystemencoding()\n if filename_encoding is None:\n filename_encoding = sys.getdefaultencoding()\n filename = filename.decode(filename_encoding)\n except UnicodeDecodeError:\n pass\n return filename\n\n# support for source file encoding detection\n\n_match_file_encoding = re.compile(u\"coding[:=]\\s*([-\\w.]+)\").search\n\n\ndef detect_file_encoding(source_filename):\n f = open_source_file(source_filename, encoding=\"UTF-8\", error_handling='ignore')\n try:\n return detect_opened_file_encoding(f)\n finally:\n f.close()\n\n\ndef detect_opened_file_encoding(f):\n # PEPs 263 and 3120\n # Most of the time the first two lines fall in the first 250 chars,\n # and this bulk read/split is much faster.\n lines = f.read(250).split(u\"\\n\")\n if len(lines) > 1:\n m = _match_file_encoding(lines[0])\n if m:\n return m.group(1)\n elif len(lines) > 2:\n m = _match_file_encoding(lines[1])\n if m:\n return m.group(1)\n else:\n return \"UTF-8\"\n # Fallback to one-char-at-a-time detection.\n f.seek(0)\n chars = []\n for i in range(2):\n c = f.read(1)\n while c and c != u'\\n':\n chars.append(c)\n c = f.read(1)\n encoding = _match_file_encoding(u''.join(chars))\n if encoding:\n return encoding.group(1)\n return \"UTF-8\"\n\n\ndef skip_bom(f):\n \"\"\"\n Read past a BOM at the beginning of a source file.\n This could be added to the scanner, but it's *substantially* easier\n to keep it at this level.\n \"\"\"\n if f.read(1) != u'\\uFEFF':\n f.seek(0)\n\n\ndef open_source_file(source_filename, mode=\"r\",\n encoding=None, error_handling=None):\n if encoding is None:\n # Most of the time the coding is unspecified, so be optimistic that\n # it's UTF-8.\n f = open_source_file(source_filename, encoding=\"UTF-8\", mode=mode, error_handling='ignore')\n encoding = detect_opened_file_encoding(f)\n if encoding == \"UTF-8\" and error_handling == 'ignore':\n f.seek(0)\n skip_bom(f)\n return f\n else:\n f.close()\n\n if not os.path.exists(source_filename):\n try:\n loader = __loader__\n if source_filename.startswith(loader.archive):\n return open_source_from_loader(\n loader, source_filename,\n encoding, error_handling)\n except (NameError, AttributeError):\n pass\n\n stream = io.open(source_filename, mode=mode,\n encoding=encoding, errors=error_handling)\n skip_bom(stream)\n return stream\n\n\ndef open_source_from_loader(loader,\n source_filename,\n encoding=None, error_handling=None):\n nrmpath = os.path.normpath(source_filename)\n arcname = nrmpath[len(loader.archive)+1:]\n data = loader.get_data(arcname)\n return io.TextIOWrapper(io.BytesIO(data),\n encoding=encoding,\n errors=error_handling)\n\n\ndef str_to_number(value):\n # note: this expects a string as input that was accepted by the\n # parser already, with an optional \"-\" sign in front\n is_neg = False\n if value[:1] == '-':\n is_neg = True\n value = value[1:]\n if len(value) < 2:\n value = int(value, 0)\n elif value[0] == '0':\n literal_type = value[1] # 0'o' - 0'b' - 0'x'\n if literal_type in 'xX':\n # hex notation ('0x1AF')\n value = int(value[2:], 16)\n elif literal_type in 'oO':\n # Py3 octal notation ('0o136')\n value = int(value[2:], 8)\n elif literal_type in 'bB':\n # Py3 binary notation ('0b101')\n value = int(value[2:], 2)\n else:\n # Py2 octal notation ('0136')\n value = int(value, 8)\n else:\n value = int(value, 0)\n return -value if is_neg else value\n\n\ndef long_literal(value):\n if isinstance(value, basestring):\n value = str_to_number(value)\n return not -2**31 <= value < 2**31\n\n\n@cached_function\ndef get_cython_cache_dir():\n r\"\"\"\n Return the base directory containing Cython's caches.\n\n Priority:\n\n 1. CYTHON_CACHE_DIR\n 2. (OS X): ~/Library/Caches/Cython\n (posix not OS X): XDG_CACHE_HOME/cython if XDG_CACHE_HOME defined\n 3. ~/.cython\n\n \"\"\"\n if 'CYTHON_CACHE_DIR' in os.environ:\n return os.environ['CYTHON_CACHE_DIR']\n\n parent = None\n if os.name == 'posix':\n if sys.platform == 'darwin':\n parent = os.path.expanduser('~/Library/Caches')\n else:\n # this could fallback on ~/.cache\n parent = os.environ.get('XDG_CACHE_HOME')\n\n if parent and os.path.isdir(parent):\n return os.path.join(parent, 'cython')\n\n # last fallback: ~/.cython\n return os.path.expanduser(os.path.join('~', '.cython'))\n\n\n@contextmanager\ndef captured_fd(stream=2, encoding=None):\n pipe_in = t = None\n orig_stream = os.dup(stream) # keep copy of original stream\n try:\n pipe_in, pipe_out = os.pipe()\n os.dup2(pipe_out, stream) # replace stream by copy of pipe\n try:\n os.close(pipe_out) # close original pipe-out stream\n data = []\n\n def copy():\n try:\n while True:\n d = os.read(pipe_in, 1000)\n if d:\n data.append(d)\n else:\n break\n finally:\n os.close(pipe_in)\n\n def get_output():\n output = b''.join(data)\n if encoding:\n output = output.decode(encoding)\n return output\n\n from threading import Thread\n t = Thread(target=copy)\n t.daemon = True # just in case\n t.start()\n yield get_output\n finally:\n os.dup2(orig_stream, stream) # restore original stream\n if t is not None:\n t.join()\n finally:\n os.close(orig_stream)\n\n\ndef print_bytes(s, end=b'\\n', file=sys.stdout, flush=True):\n file.flush()\n try:\n out = file.buffer # Py3\n except AttributeError:\n out = file # Py2\n out.write(s)\n if end:\n out.write(end)\n if flush:\n out.flush()\n\nclass LazyStr:\n def __init__(self, callback):\n self.callback = callback\n def __str__(self):\n return self.callback()\n def __repr__(self):\n return self.callback()\n def __add__(self, right):\n return self.callback() + right\n def __radd__(self, left):\n return left + self.callback()\n\n\nclass OrderedSet(object):\n def __init__(self, elements=()):\n self._list = []\n self._set = set()\n self.update(elements)\n def __iter__(self):\n return iter(self._list)\n def update(self, elements):\n for e in elements:\n self.add(e)\n def add(self, e):\n if e not in self._set:\n self._list.append(e)\n self._set.add(e)\n\n\n# Class decorator that adds a metaclass and recreates the class with it.\n# Copied from 'six'.\ndef add_metaclass(metaclass):\n \"\"\"Class decorator for creating a class with a metaclass.\"\"\"\n def wrapper(cls):\n orig_vars = cls.__dict__.copy()\n slots = orig_vars.get('__slots__')\n if slots is not None:\n if isinstance(slots, str):\n slots = [slots]\n for slots_var in slots:\n orig_vars.pop(slots_var)\n orig_vars.pop('__dict__', None)\n orig_vars.pop('__weakref__', None)\n return metaclass(cls.__name__, cls.__bases__, orig_vars)\n return wrapper\n\n\ndef raise_error_if_module_name_forbidden(full_module_name):\n #it is bad idea to call the pyx-file cython.pyx, so fail early\n if full_module_name == 'cython' or full_module_name.endswith('.cython'):\n raise ValueError('cython is a special module, cannot be used as a module name')\n", "path": "Cython/Utils.py"}], "after_files": [{"content": "#\n# Cython -- Things that don't belong\n# anywhere else in particular\n#\n\nfrom __future__ import absolute_import\n\ntry:\n from __builtin__ import basestring\nexcept ImportError:\n basestring = str\n\nimport os\nimport sys\nimport re\nimport io\nimport codecs\nimport shutil\nfrom contextlib import contextmanager\n\nmodification_time = os.path.getmtime\n\n_function_caches = []\ndef clear_function_caches():\n for cache in _function_caches:\n cache.clear()\n\ndef cached_function(f):\n cache = {}\n _function_caches.append(cache)\n uncomputed = object()\n def wrapper(*args):\n res = cache.get(args, uncomputed)\n if res is uncomputed:\n res = cache[args] = f(*args)\n return res\n wrapper.uncached = f\n return wrapper\n\ndef cached_method(f):\n cache_name = '__%s_cache' % f.__name__\n def wrapper(self, *args):\n cache = getattr(self, cache_name, None)\n if cache is None:\n cache = {}\n setattr(self, cache_name, cache)\n if args in cache:\n return cache[args]\n res = cache[args] = f(self, *args)\n return res\n return wrapper\n\ndef replace_suffix(path, newsuf):\n base, _ = os.path.splitext(path)\n return base + newsuf\n\n\ndef open_new_file(path):\n if os.path.exists(path):\n # Make sure to create a new file here so we can\n # safely hard link the output files.\n os.unlink(path)\n\n # we use the ISO-8859-1 encoding here because we only write pure\n # ASCII strings or (e.g. for file names) byte encoded strings as\n # Unicode, so we need a direct mapping from the first 256 Unicode\n # characters to a byte sequence, which ISO-8859-1 provides\n\n # note: can't use io.open() in Py2 as we may be writing str objects\n return codecs.open(path, \"w\", encoding=\"ISO-8859-1\")\n\n\ndef castrate_file(path, st):\n # Remove junk contents from an output file after a\n # failed compilation.\n # Also sets access and modification times back to\n # those specified by st (a stat struct).\n try:\n f = open_new_file(path)\n except EnvironmentError:\n pass\n else:\n f.write(\n \"#error Do not use this file, it is the result of a failed Cython compilation.\\n\")\n f.close()\n if st:\n os.utime(path, (st.st_atime, st.st_mtime-1))\n\ndef file_newer_than(path, time):\n ftime = modification_time(path)\n return ftime > time\n\n\ndef safe_makedirs(path):\n try:\n os.makedirs(path)\n except OSError:\n if not os.path.isdir(path):\n raise\n\n\ndef copy_file_to_dir_if_newer(sourcefile, destdir):\n \"\"\"\n Copy file sourcefile to directory destdir (creating it if needed),\n preserving metadata. If the destination file exists and is not\n older than the source file, the copying is skipped.\n \"\"\"\n destfile = os.path.join(destdir, os.path.basename(sourcefile))\n try:\n desttime = modification_time(destfile)\n except OSError:\n # New file does not exist, destdir may or may not exist\n safe_makedirs(destdir)\n else:\n # New file already exists\n if not file_newer_than(sourcefile, desttime):\n return\n shutil.copy2(sourcefile, destfile)\n\n\n@cached_function\ndef search_include_directories(dirs, qualified_name, suffix, pos,\n include=False, sys_path=False):\n # Search the list of include directories for the given\n # file name. If a source file position is given, first\n # searches the directory containing that file. Returns\n # None if not found, but does not report an error.\n # The 'include' option will disable package dereferencing.\n # If 'sys_path' is True, also search sys.path.\n if sys_path:\n dirs = dirs + tuple(sys.path)\n if pos:\n file_desc = pos[0]\n from Cython.Compiler.Scanning import FileSourceDescriptor\n if not isinstance(file_desc, FileSourceDescriptor):\n raise RuntimeError(\"Only file sources for code supported\")\n if include:\n dirs = (os.path.dirname(file_desc.filename),) + dirs\n else:\n dirs = (find_root_package_dir(file_desc.filename),) + dirs\n\n dotted_filename = qualified_name\n if suffix:\n dotted_filename += suffix\n if not include:\n names = qualified_name.split('.')\n package_names = tuple(names[:-1])\n module_name = names[-1]\n module_filename = module_name + suffix\n package_filename = \"__init__\" + suffix\n\n for dir in dirs:\n path = os.path.join(dir, dotted_filename)\n if path_exists(path):\n return path\n if not include:\n package_dir = check_package_dir(dir, package_names)\n if package_dir is not None:\n path = os.path.join(package_dir, module_filename)\n if path_exists(path):\n return path\n path = os.path.join(dir, package_dir, module_name,\n package_filename)\n if path_exists(path):\n return path\n return None\n\n\n@cached_function\ndef find_root_package_dir(file_path):\n dir = os.path.dirname(file_path)\n if file_path == dir:\n return dir\n elif is_package_dir(dir):\n return find_root_package_dir(dir)\n else:\n return dir\n\n@cached_function\ndef check_package_dir(dir, package_names):\n for dirname in package_names:\n dir = os.path.join(dir, dirname)\n if not is_package_dir(dir):\n return None\n return dir\n\n@cached_function\ndef is_package_dir(dir_path):\n for filename in (\"__init__.py\",\n \"__init__.pyc\",\n \"__init__.pyx\",\n \"__init__.pxd\"):\n path = os.path.join(dir_path, filename)\n if path_exists(path):\n return 1\n\n@cached_function\ndef path_exists(path):\n # try on the filesystem first\n if os.path.exists(path):\n return True\n # figure out if a PEP 302 loader is around\n try:\n loader = __loader__\n # XXX the code below assumes a 'zipimport.zipimporter' instance\n # XXX should be easy to generalize, but too lazy right now to write it\n archive_path = getattr(loader, 'archive', None)\n if archive_path:\n normpath = os.path.normpath(path)\n if normpath.startswith(archive_path):\n arcname = normpath[len(archive_path)+1:]\n try:\n loader.get_data(arcname)\n return True\n except IOError:\n return False\n except NameError:\n pass\n return False\n\n# file name encodings\n\ndef decode_filename(filename):\n if isinstance(filename, bytes):\n try:\n filename_encoding = sys.getfilesystemencoding()\n if filename_encoding is None:\n filename_encoding = sys.getdefaultencoding()\n filename = filename.decode(filename_encoding)\n except UnicodeDecodeError:\n pass\n return filename\n\n# support for source file encoding detection\n\n_match_file_encoding = re.compile(u\"coding[:=]\\s*([-\\w.]+)\").search\n\n\ndef detect_file_encoding(source_filename):\n f = open_source_file(source_filename, encoding=\"UTF-8\", error_handling='ignore')\n try:\n return detect_opened_file_encoding(f)\n finally:\n f.close()\n\n\ndef detect_opened_file_encoding(f):\n # PEPs 263 and 3120\n # Most of the time the first two lines fall in the first 250 chars,\n # and this bulk read/split is much faster.\n lines = f.read(250).split(u\"\\n\")\n if len(lines) > 1:\n m = _match_file_encoding(lines[0])\n if m:\n return m.group(1)\n elif len(lines) > 2:\n m = _match_file_encoding(lines[1])\n if m:\n return m.group(1)\n else:\n return \"UTF-8\"\n # Fallback to one-char-at-a-time detection.\n f.seek(0)\n chars = []\n for i in range(2):\n c = f.read(1)\n while c and c != u'\\n':\n chars.append(c)\n c = f.read(1)\n encoding = _match_file_encoding(u''.join(chars))\n if encoding:\n return encoding.group(1)\n return \"UTF-8\"\n\n\ndef skip_bom(f):\n \"\"\"\n Read past a BOM at the beginning of a source file.\n This could be added to the scanner, but it's *substantially* easier\n to keep it at this level.\n \"\"\"\n if f.read(1) != u'\\uFEFF':\n f.seek(0)\n\n\ndef open_source_file(source_filename, mode=\"r\",\n encoding=None, error_handling=None):\n if encoding is None:\n # Most of the time the coding is unspecified, so be optimistic that\n # it's UTF-8.\n f = open_source_file(source_filename, encoding=\"UTF-8\", mode=mode, error_handling='ignore')\n encoding = detect_opened_file_encoding(f)\n if encoding == \"UTF-8\" and error_handling == 'ignore':\n f.seek(0)\n skip_bom(f)\n return f\n else:\n f.close()\n\n if not os.path.exists(source_filename):\n try:\n loader = __loader__\n if source_filename.startswith(loader.archive):\n return open_source_from_loader(\n loader, source_filename,\n encoding, error_handling)\n except (NameError, AttributeError):\n pass\n\n stream = io.open(source_filename, mode=mode,\n encoding=encoding, errors=error_handling)\n skip_bom(stream)\n return stream\n\n\ndef open_source_from_loader(loader,\n source_filename,\n encoding=None, error_handling=None):\n nrmpath = os.path.normpath(source_filename)\n arcname = nrmpath[len(loader.archive)+1:]\n data = loader.get_data(arcname)\n return io.TextIOWrapper(io.BytesIO(data),\n encoding=encoding,\n errors=error_handling)\n\n\ndef str_to_number(value):\n # note: this expects a string as input that was accepted by the\n # parser already, with an optional \"-\" sign in front\n is_neg = False\n if value[:1] == '-':\n is_neg = True\n value = value[1:]\n if len(value) < 2:\n value = int(value, 0)\n elif value[0] == '0':\n literal_type = value[1] # 0'o' - 0'b' - 0'x'\n if literal_type in 'xX':\n # hex notation ('0x1AF')\n value = int(value[2:], 16)\n elif literal_type in 'oO':\n # Py3 octal notation ('0o136')\n value = int(value[2:], 8)\n elif literal_type in 'bB':\n # Py3 binary notation ('0b101')\n value = int(value[2:], 2)\n else:\n # Py2 octal notation ('0136')\n value = int(value, 8)\n else:\n value = int(value, 0)\n return -value if is_neg else value\n\n\ndef long_literal(value):\n if isinstance(value, basestring):\n value = str_to_number(value)\n return not -2**31 <= value < 2**31\n\n\n@cached_function\ndef get_cython_cache_dir():\n r\"\"\"\n Return the base directory containing Cython's caches.\n\n Priority:\n\n 1. CYTHON_CACHE_DIR\n 2. (OS X): ~/Library/Caches/Cython\n (posix not OS X): XDG_CACHE_HOME/cython if XDG_CACHE_HOME defined\n 3. ~/.cython\n\n \"\"\"\n if 'CYTHON_CACHE_DIR' in os.environ:\n return os.environ['CYTHON_CACHE_DIR']\n\n parent = None\n if os.name == 'posix':\n if sys.platform == 'darwin':\n parent = os.path.expanduser('~/Library/Caches')\n else:\n # this could fallback on ~/.cache\n parent = os.environ.get('XDG_CACHE_HOME')\n\n if parent and os.path.isdir(parent):\n return os.path.join(parent, 'cython')\n\n # last fallback: ~/.cython\n return os.path.expanduser(os.path.join('~', '.cython'))\n\n\n@contextmanager\ndef captured_fd(stream=2, encoding=None):\n pipe_in = t = None\n orig_stream = os.dup(stream) # keep copy of original stream\n try:\n pipe_in, pipe_out = os.pipe()\n os.dup2(pipe_out, stream) # replace stream by copy of pipe\n try:\n os.close(pipe_out) # close original pipe-out stream\n data = []\n\n def copy():\n try:\n while True:\n d = os.read(pipe_in, 1000)\n if d:\n data.append(d)\n else:\n break\n finally:\n os.close(pipe_in)\n\n def get_output():\n output = b''.join(data)\n if encoding:\n output = output.decode(encoding)\n return output\n\n from threading import Thread\n t = Thread(target=copy)\n t.daemon = True # just in case\n t.start()\n yield get_output\n finally:\n os.dup2(orig_stream, stream) # restore original stream\n if t is not None:\n t.join()\n finally:\n os.close(orig_stream)\n\n\ndef print_bytes(s, end=b'\\n', file=sys.stdout, flush=True):\n file.flush()\n try:\n out = file.buffer # Py3\n except AttributeError:\n out = file # Py2\n out.write(s)\n if end:\n out.write(end)\n if flush:\n out.flush()\n\nclass LazyStr:\n def __init__(self, callback):\n self.callback = callback\n def __str__(self):\n return self.callback()\n def __repr__(self):\n return self.callback()\n def __add__(self, right):\n return self.callback() + right\n def __radd__(self, left):\n return left + self.callback()\n\n\nclass OrderedSet(object):\n def __init__(self, elements=()):\n self._list = []\n self._set = set()\n self.update(elements)\n def __iter__(self):\n return iter(self._list)\n def update(self, elements):\n for e in elements:\n self.add(e)\n def add(self, e):\n if e not in self._set:\n self._list.append(e)\n self._set.add(e)\n\n\n# Class decorator that adds a metaclass and recreates the class with it.\n# Copied from 'six'.\ndef add_metaclass(metaclass):\n \"\"\"Class decorator for creating a class with a metaclass.\"\"\"\n def wrapper(cls):\n orig_vars = cls.__dict__.copy()\n slots = orig_vars.get('__slots__')\n if slots is not None:\n if isinstance(slots, str):\n slots = [slots]\n for slots_var in slots:\n orig_vars.pop(slots_var)\n orig_vars.pop('__dict__', None)\n orig_vars.pop('__weakref__', None)\n return metaclass(cls.__name__, cls.__bases__, orig_vars)\n return wrapper\n\n\ndef raise_error_if_module_name_forbidden(full_module_name):\n #it is bad idea to call the pyx-file cython.pyx, so fail early\n if full_module_name == 'cython' or full_module_name.startswith('cython.'):\n raise ValueError('cython is a special module, cannot be used as a module name')\n", "path": "Cython/Utils.py"}]} |
gh_patches_debug_1551 | rasdani/github-patches | git_diff | vyperlang__vyper-1465 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Vyper treats strings as having length 32
### Version Information
* vyper Version: 0.1.0b9
* OS: osx
### What's your issue about?
When using `concat` Vyper treats all strings as having length 32. Therefore this
```
@public
def conc(a: string[33], b: string[33]) -> string[64]:
c: string[64] = concat(a, b)
return c
```
will compile and even run (returning a string of length 66) and the following
```
@public
def conc_fail(a: string[5], b: string[4]) -> string[9]:
c: string[9] = concat(a, b)
return c
```
leads to a compiler error "Cannot cast from greater max-length 64 to shorter max-length 9".
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vyper/functions/functions.py`
Content:
```
1 import hashlib
2
3 from vyper import ast
4 from vyper.exceptions import (
5 ConstancyViolationException,
6 InvalidLiteralException,
7 ParserException,
8 StructureException,
9 TypeMismatchException,
10 )
11 from vyper.parser.expr import (
12 Expr,
13 )
14 from vyper.parser.parser_utils import (
15 LLLnode,
16 add_variable_offset,
17 byte_array_to_num,
18 get_length,
19 get_number_as_fraction,
20 getpos,
21 make_byte_array_copier,
22 make_byte_slice_copier,
23 unwrap_location,
24 )
25 from vyper.signatures.function_signature import (
26 VariableRecord,
27 )
28 from vyper.types import (
29 BaseType,
30 ByteArrayLike,
31 ByteArrayType,
32 ListType,
33 StringType,
34 TupleType,
35 are_units_compatible,
36 get_size_of_type,
37 is_base_type,
38 )
39 from vyper.types.convert import (
40 convert,
41 )
42 from vyper.utils import (
43 DECIMAL_DIVISOR,
44 RLP_DECODER_ADDRESS,
45 MemoryPositions,
46 SizeLimits,
47 bytes_to_int,
48 fourbytes_to_int,
49 sha3,
50 )
51
52 from .signatures import (
53 Optional,
54 signature,
55 )
56
57 SHA256_ADDRESS = 2
58 SHA256_BASE_GAS = 60
59 SHA256_PER_WORD_GAS = 12
60
61
62 def enforce_units(typ, obj, expected):
63 if not are_units_compatible(typ, expected):
64 raise TypeMismatchException("Invalid units", obj)
65
66
67 def get_keyword(expr, keyword):
68 for kw in expr.keywords:
69 if kw.arg == keyword:
70 return kw.value
71 # This should never happen, as kwargs['value'] will KeyError first.
72 # Leaving exception for other use cases.
73 raise Exception("Keyword %s not found" % keyword) # pragma: no cover
74
75
76 @signature('decimal')
77 def floor(expr, args, kwargs, context):
78 return LLLnode.from_list(
79 [
80 'if',
81 ['slt', args[0], 0],
82 ['sdiv', ['sub', args[0], DECIMAL_DIVISOR - 1], DECIMAL_DIVISOR],
83 ['sdiv', args[0], DECIMAL_DIVISOR]
84 ],
85 typ=BaseType('int128', args[0].typ.unit, args[0].typ.positional),
86 pos=getpos(expr)
87 )
88
89
90 @signature('decimal')
91 def ceil(expr, args, kwards, context):
92 return LLLnode.from_list(
93 [
94 'if',
95 ['slt', args[0], 0],
96 ['sdiv', args[0], DECIMAL_DIVISOR],
97 ['sdiv', ['add', args[0], DECIMAL_DIVISOR - 1], DECIMAL_DIVISOR]
98 ],
99 typ=BaseType('int128', args[0].typ.unit, args[0].typ.positional),
100 pos=getpos(expr)
101 )
102
103
104 @signature(('uint256', 'int128', 'decimal'))
105 def as_unitless_number(expr, args, kwargs, context):
106 return LLLnode(
107 value=args[0].value,
108 args=args[0].args,
109 typ=BaseType(args[0].typ.typ, {}),
110 pos=getpos(expr),
111 )
112
113
114 def _convert(expr, context):
115 return convert(expr, context)
116
117
118 @signature(('bytes32', 'bytes', 'string'), start='int128', len='int128')
119 def _slice(expr, args, kwargs, context):
120
121 sub, start, length = args[0], kwargs['start'], kwargs['len']
122 if not are_units_compatible(start.typ, BaseType('int128')):
123 raise TypeMismatchException("Type for slice start index must be a unitless number", expr)
124 # Expression representing the length of the slice
125 if not are_units_compatible(length.typ, BaseType('int128')):
126 raise TypeMismatchException("Type for slice length must be a unitless number", expr)
127
128 if is_base_type(sub.typ, 'bytes32'):
129 if (start.typ.is_literal and length.typ.is_literal) and \
130 not (0 <= start.value + length.value <= 32):
131 raise InvalidLiteralException(
132 'Invalid start / length values needs to be between 0 and 32.',
133 expr,
134 )
135 sub_typ_maxlen = 32
136 else:
137 sub_typ_maxlen = sub.typ.maxlen
138
139 # Get returntype string or bytes
140 if isinstance(args[0].typ, ByteArrayType) or is_base_type(sub.typ, 'bytes32'):
141 ReturnType = ByteArrayType
142 else:
143 ReturnType = StringType
144
145 # Node representing the position of the output in memory
146 np = context.new_placeholder(ReturnType(maxlen=sub_typ_maxlen + 32))
147 placeholder_node = LLLnode.from_list(np, typ=sub.typ, location='memory')
148 placeholder_plus_32_node = LLLnode.from_list(np + 32, typ=sub.typ, location='memory')
149 # Copies over bytearray data
150 if sub.location == 'storage':
151 adj_sub = LLLnode.from_list(
152 ['add', ['sha3_32', sub], ['add', ['div', '_start', 32], 1]],
153 typ=sub.typ,
154 location=sub.location,
155 )
156 else:
157 adj_sub = LLLnode.from_list(
158 ['add', sub, ['add', ['sub', '_start', ['mod', '_start', 32]], 32]],
159 typ=sub.typ,
160 location=sub.location,
161 )
162
163 if is_base_type(sub.typ, 'bytes32'):
164 adj_sub = LLLnode.from_list(
165 sub.args[0], typ=sub.typ, location="memory"
166 )
167
168 copier = make_byte_slice_copier(
169 placeholder_plus_32_node,
170 adj_sub,
171 ['add', '_length', 32],
172 sub_typ_maxlen,
173 pos=getpos(expr),
174 )
175 # New maximum length in the type of the result
176 newmaxlen = length.value if not len(length.args) else sub_typ_maxlen
177 if is_base_type(sub.typ, 'bytes32'):
178 maxlen = 32
179 else:
180 maxlen = ['mload', Expr(sub, context=context).lll_node] # Retrieve length of the bytes.
181
182 out = [
183 'with', '_start', start, [
184 'with', '_length', length, [
185 'with', '_opos', ['add', placeholder_node, ['mod', '_start', 32]], [
186 'seq',
187 ['assert', ['le', ['add', '_start', '_length'], maxlen]],
188 copier,
189 ['mstore', '_opos', '_length'],
190 '_opos'
191 ],
192 ],
193 ],
194 ]
195 return LLLnode.from_list(out, typ=ReturnType(newmaxlen), location='memory', pos=getpos(expr))
196
197
198 @signature(('bytes', 'string'))
199 def _len(expr, args, kwargs, context):
200 return get_length(args[0])
201
202
203 def concat(expr, context):
204 args = [Expr(arg, context).lll_node for arg in expr.args]
205 if len(args) < 2:
206 raise StructureException("Concat expects at least two arguments", expr)
207
208 prev_type = ''
209 for _, (expr_arg, arg) in enumerate(zip(expr.args, args)):
210 if not isinstance(arg.typ, ByteArrayLike) and not is_base_type(arg.typ, 'bytes32'):
211 raise TypeMismatchException("Concat expects string, bytes or bytes32 objects", expr_arg)
212
213 current_type = (
214 'bytes'
215 if isinstance(arg.typ, ByteArrayType) or is_base_type(arg.typ, 'bytes32')
216 else 'string'
217 )
218 if prev_type and current_type != prev_type:
219 raise TypeMismatchException(
220 (
221 "Concat expects consistant use of string or byte types, "
222 "user either bytes or string."
223 ),
224 expr_arg,
225 )
226 prev_type = current_type
227
228 if current_type == 'string':
229 ReturnType = StringType
230 else:
231 ReturnType = ByteArrayType
232
233 # Maximum length of the output
234 total_maxlen = sum([
235 arg.typ.maxlen if isinstance(arg.typ, ByteArrayType) else 32 for arg in args
236 ])
237 # Node representing the position of the output in memory
238 placeholder = context.new_placeholder(ReturnType(total_maxlen))
239 # Object representing the output
240 seq = []
241 # For each argument we are concatenating...
242 for arg in args:
243 # Start pasting into a position the starts at zero, and keeps
244 # incrementing as we concatenate arguments
245 placeholder_node = LLLnode.from_list(
246 ['add', placeholder, '_poz'],
247 typ=ReturnType(total_maxlen),
248 location='memory',
249 )
250 placeholder_node_plus_32 = LLLnode.from_list(
251 ['add', ['add', placeholder, '_poz'], 32],
252 typ=ReturnType(total_maxlen),
253 location='memory',
254 )
255 if isinstance(arg.typ, ReturnType):
256 # Ignore empty strings
257 if arg.typ.maxlen == 0:
258 continue
259 # Get the length of the current argument
260 if arg.location == "memory":
261 length = LLLnode.from_list(['mload', '_arg'], typ=BaseType('int128'))
262 argstart = LLLnode.from_list(
263 ['add', '_arg', 32],
264 typ=arg.typ,
265 location=arg.location,
266 )
267 elif arg.location == "storage":
268 length = LLLnode.from_list(['sload', ['sha3_32', '_arg']], typ=BaseType('int128'))
269 argstart = LLLnode.from_list(
270 ['add', ['sha3_32', '_arg'], 1],
271 typ=arg.typ,
272 location=arg.location,
273 )
274 # Make a copier to copy over data from that argument
275 seq.append([
276 'with', '_arg', arg, [
277 'seq',
278 make_byte_slice_copier(
279 placeholder_node_plus_32,
280 argstart,
281 length,
282 arg.typ.maxlen, pos=getpos(expr),
283 ),
284 # Change the position to start at the correct
285 # place to paste the next value
286 ['set', '_poz', ['add', '_poz', length]],
287 ],
288 ])
289 else:
290 seq.append([
291 'seq',
292 ['mstore', ['add', placeholder_node, 32], unwrap_location(arg)],
293 ['set', '_poz', ['add', '_poz', 32]],
294 ])
295 # The position, after all arguments are processing, equals the total
296 # length. Paste this in to make the output a proper bytearray
297 seq.append(['mstore', placeholder, '_poz'])
298 # Memory location of the output
299 seq.append(placeholder)
300 return LLLnode.from_list(
301 ['with', '_poz', 0, ['seq'] + seq],
302 typ=ReturnType(total_maxlen),
303 location='memory',
304 pos=getpos(expr),
305 annotation='concat',
306 )
307
308
309 @signature(('bytes_literal', 'str_literal', 'bytes', 'string', 'bytes32'))
310 def _sha3(expr, args, kwargs, context):
311 sub = args[0]
312 # Can hash literals
313 if isinstance(sub, bytes):
314 return LLLnode.from_list(
315 bytes_to_int(sha3(sub)),
316 typ=BaseType('bytes32'),
317 pos=getpos(expr)
318 )
319 # Can hash bytes32 objects
320 if is_base_type(sub.typ, 'bytes32'):
321 return LLLnode.from_list(
322 [
323 'seq',
324 ['mstore', MemoryPositions.FREE_VAR_SPACE, sub],
325 ['sha3', MemoryPositions.FREE_VAR_SPACE, 32]
326 ],
327 typ=BaseType('bytes32'),
328 pos=getpos(expr),
329 )
330 # Copy the data to an in-memory array
331 if sub.location == "memory":
332 # If we are hashing a value in memory, no need to copy it, just hash in-place
333 return LLLnode.from_list(
334 ['with', '_sub', sub, ['sha3', ['add', '_sub', 32], ['mload', '_sub']]],
335 typ=BaseType('bytes32'),
336 pos=getpos(expr),
337 )
338 elif sub.location == "storage":
339 lengetter = LLLnode.from_list(['sload', ['sha3_32', '_sub']], typ=BaseType('int128'))
340 else:
341 # This should never happen, but just left here for future compiler-writers.
342 raise Exception("Unsupported location: %s" % sub.location) # pragma: no test
343 placeholder = context.new_placeholder(sub.typ)
344 placeholder_node = LLLnode.from_list(placeholder, typ=sub.typ, location='memory')
345 copier = make_byte_array_copier(
346 placeholder_node,
347 LLLnode.from_list('_sub', typ=sub.typ, location=sub.location),
348 )
349 return LLLnode.from_list(
350 [
351 'with', '_sub', sub, [
352 'seq',
353 copier,
354 ['sha3', ['add', placeholder, 32], lengetter]
355 ],
356 ],
357 typ=BaseType('bytes32'),
358 pos=getpos(expr)
359 )
360
361
362 def _make_sha256_call(inp_start, inp_len, out_start, out_len):
363 return [
364 'assert', [
365 'call',
366 ['gas'], # gas
367 SHA256_ADDRESS, # address
368 0, # value
369 inp_start,
370 inp_len,
371 out_start,
372 out_len
373 ]
374 ]
375
376
377 @signature(('bytes_literal', 'str_literal', 'bytes', 'string', 'bytes32'))
378 def sha256(expr, args, kwargs, context):
379 sub = args[0]
380 # Literal input
381 if isinstance(sub, bytes):
382 return LLLnode.from_list(
383 bytes_to_int(hashlib.sha256(sub).digest()),
384 typ=BaseType('bytes32'),
385 pos=getpos(expr)
386 )
387 # bytes32 input
388 elif is_base_type(sub.typ, 'bytes32'):
389 return LLLnode.from_list(
390 [
391 'seq',
392 ['mstore', MemoryPositions.FREE_VAR_SPACE, sub],
393 _make_sha256_call(
394 inp_start=MemoryPositions.FREE_VAR_SPACE,
395 inp_len=32,
396 out_start=MemoryPositions.FREE_VAR_SPACE,
397 out_len=32
398 ),
399 ['mload', MemoryPositions.FREE_VAR_SPACE] # push value onto stack
400 ],
401 typ=BaseType('bytes32'),
402 pos=getpos(expr),
403 add_gas_estimate=SHA256_BASE_GAS + 1 * SHA256_PER_WORD_GAS
404 )
405 # bytearay-like input
406 if sub.location == "storage":
407 # Copy storage to memory
408 placeholder = context.new_placeholder(sub.typ)
409 placeholder_node = LLLnode.from_list(placeholder, typ=sub.typ, location='memory')
410 copier = make_byte_array_copier(
411 placeholder_node,
412 LLLnode.from_list('_sub', typ=sub.typ, location=sub.location),
413 )
414 return LLLnode.from_list(
415 [
416 'with', '_sub', sub, [
417 'seq',
418 copier,
419 _make_sha256_call(
420 inp_start=['add', placeholder, 32],
421 inp_len=['mload', placeholder],
422 out_start=MemoryPositions.FREE_VAR_SPACE,
423 out_len=32
424 ),
425 ['mload', MemoryPositions.FREE_VAR_SPACE]
426 ],
427 ],
428 typ=BaseType('bytes32'),
429 pos=getpos(expr),
430 add_gas_estimate=SHA256_BASE_GAS + sub.typ.maxlen * SHA256_PER_WORD_GAS
431 )
432 elif sub.location == "memory":
433 return LLLnode.from_list(
434 [
435 'with', '_sub', sub, [
436 'seq',
437 _make_sha256_call(
438 inp_start=['add', '_sub', 32],
439 inp_len=['mload', '_sub'],
440 out_start=MemoryPositions.FREE_VAR_SPACE,
441 out_len=32
442 ),
443 ['mload', MemoryPositions.FREE_VAR_SPACE]
444 ]
445 ],
446 typ=BaseType('bytes32'),
447 pos=getpos(expr),
448 add_gas_estimate=SHA256_BASE_GAS + sub.typ.maxlen * SHA256_PER_WORD_GAS
449 )
450 else:
451 # This should never happen, but just left here for future compiler-writers.
452 raise Exception("Unsupported location: %s" % sub.location) # pragma: no test
453
454
455 @signature('str_literal', 'name_literal')
456 def method_id(expr, args, kwargs, context):
457 if b' ' in args[0]:
458 raise TypeMismatchException('Invalid function signature no spaces allowed.')
459 method_id = fourbytes_to_int(sha3(args[0])[:4])
460 if args[1] == 'bytes32':
461 return LLLnode(method_id, typ=BaseType('bytes32'), pos=getpos(expr))
462 elif args[1] == 'bytes[4]':
463 placeholder = LLLnode.from_list(context.new_placeholder(ByteArrayType(4)))
464 return LLLnode.from_list(
465 ['seq',
466 ['mstore', ['add', placeholder, 4], method_id],
467 ['mstore', placeholder, 4], placeholder],
468 typ=ByteArrayType(4), location='memory', pos=getpos(expr))
469 else:
470 raise StructureException('Can only produce bytes32 or bytes[4] as outputs')
471
472
473 @signature('bytes32', 'uint256', 'uint256', 'uint256')
474 def ecrecover(expr, args, kwargs, context):
475 placeholder_node = LLLnode.from_list(
476 context.new_placeholder(ByteArrayType(128)), typ=ByteArrayType(128), location='memory'
477 )
478 return LLLnode.from_list([
479 'seq',
480 ['mstore', placeholder_node, args[0]],
481 ['mstore', ['add', placeholder_node, 32], args[1]],
482 ['mstore', ['add', placeholder_node, 64], args[2]],
483 ['mstore', ['add', placeholder_node, 96], args[3]],
484 ['pop', ['call', 3000, 1, 0, placeholder_node, 128, MemoryPositions.FREE_VAR_SPACE, 32]],
485 ['mload', MemoryPositions.FREE_VAR_SPACE],
486 ], typ=BaseType('address'), pos=getpos(expr))
487
488
489 def avo(arg, ind, pos):
490 return unwrap_location(add_variable_offset(arg, LLLnode.from_list(ind, 'int128'), pos=pos))
491
492
493 @signature('uint256[2]', 'uint256[2]')
494 def ecadd(expr, args, kwargs, context):
495 placeholder_node = LLLnode.from_list(
496 context.new_placeholder(ByteArrayType(128)), typ=ByteArrayType(128), location='memory'
497 )
498 pos = getpos(expr)
499 o = LLLnode.from_list([
500 'seq',
501 ['mstore', placeholder_node, avo(args[0], 0, pos)],
502 ['mstore', ['add', placeholder_node, 32], avo(args[0], 1, pos)],
503 ['mstore', ['add', placeholder_node, 64], avo(args[1], 0, pos)],
504 ['mstore', ['add', placeholder_node, 96], avo(args[1], 1, pos)],
505 ['assert', ['call', 500, 6, 0, placeholder_node, 128, placeholder_node, 64]],
506 placeholder_node,
507 ], typ=ListType(BaseType('uint256'), 2), pos=getpos(expr), location='memory')
508 return o
509
510
511 @signature('uint256[2]', 'uint256')
512 def ecmul(expr, args, kwargs, context):
513 placeholder_node = LLLnode.from_list(
514 context.new_placeholder(ByteArrayType(128)), typ=ByteArrayType(128), location='memory'
515 )
516 pos = getpos(expr)
517 o = LLLnode.from_list([
518 'seq',
519 ['mstore', placeholder_node, avo(args[0], 0, pos)],
520 ['mstore', ['add', placeholder_node, 32], avo(args[0], 1, pos)],
521 ['mstore', ['add', placeholder_node, 64], args[1]],
522 ['assert', ['call', 40000, 7, 0, placeholder_node, 96, placeholder_node, 64]],
523 placeholder_node,
524 ], typ=ListType(BaseType('uint256'), 2), pos=pos, location='memory')
525 return o
526
527
528 def _memory_element_getter(index):
529 return LLLnode.from_list(
530 ['mload', ['add', '_sub', ['add', 32, ['mul', 32, index]]]],
531 typ=BaseType('int128'),
532 )
533
534
535 def _storage_element_getter(index):
536 return LLLnode.from_list(
537 ['sload', ['add', ['sha3_32', '_sub'], ['add', 1, index]]],
538 typ=BaseType('int128'),
539 )
540
541
542 @signature('bytes', 'int128', type=Optional('name_literal', 'bytes32'))
543 def extract32(expr, args, kwargs, context):
544 sub, index = args
545 ret_type = kwargs['type']
546 # Get length and specific element
547 if sub.location == "memory":
548 lengetter = LLLnode.from_list(['mload', '_sub'], typ=BaseType('int128'))
549 elementgetter = _memory_element_getter
550 elif sub.location == "storage":
551 lengetter = LLLnode.from_list(['sload', ['sha3_32', '_sub']], typ=BaseType('int128'))
552 elementgetter = _storage_element_getter
553 # TODO: unclosed if/elif clause. Undefined behavior if `sub.location`
554 # isn't one of `memory`/`storage`
555
556 # Special case: index known to be a multiple of 32
557 if isinstance(index.value, int) and not index.value % 32:
558 o = LLLnode.from_list(
559 [
560 'with', '_sub', sub,
561 elementgetter(['div', ['clamp', 0, index, ['sub', lengetter, 32]], 32])
562 ],
563 typ=BaseType(ret_type),
564 annotation='extracting 32 bytes',
565 )
566 # General case
567 else:
568 o = LLLnode.from_list([
569 'with', '_sub', sub, [
570 'with', '_len', lengetter, [
571 'with', '_index', ['clamp', 0, index, ['sub', '_len', 32]], [
572 'with', '_mi32', ['mod', '_index', 32], [
573 'with', '_di32', ['div', '_index', 32],
574 [
575 'if',
576 '_mi32',
577 [
578 'add',
579 ['mul', elementgetter('_di32'), ['exp', 256, '_mi32']],
580 [
581 'div',
582 elementgetter(['add', '_di32', 1]),
583 ['exp', 256, ['sub', 32, '_mi32']],
584 ],
585 ],
586 elementgetter('_di32'),
587 ],
588 ],
589 ],
590 ],
591 ],
592 ], typ=BaseType(ret_type), pos=getpos(expr), annotation='extracting 32 bytes')
593 if ret_type == 'int128':
594 return LLLnode.from_list(
595 ['clamp', ['mload', MemoryPositions.MINNUM], o, ['mload', MemoryPositions.MAXNUM]],
596 typ=BaseType('int128'),
597 pos=getpos(expr),
598 )
599 elif ret_type == 'address':
600 return LLLnode.from_list(
601 ['uclamplt', o, ['mload', MemoryPositions.ADDRSIZE]],
602 typ=BaseType(ret_type),
603 pos=getpos(expr),
604 )
605 else:
606 return o
607
608
609 @signature(('num_literal', 'int128', 'uint256', 'decimal'), 'str_literal')
610 def as_wei_value(expr, args, kwargs, context):
611 # Denominations
612 names_denom = {
613 (b"wei", ): 1,
614 (b"femtoether", b"kwei", b"babbage"): 10**3,
615 (b"picoether", b"mwei", b"lovelace"): 10**6,
616 (b"nanoether", b"gwei", b"shannon"): 10**9,
617 (b"microether", b"szabo", ): 10**12,
618 (b"milliether", b"finney", ): 10**15,
619 (b"ether", ): 10**18,
620 (b"kether", b"grand"): 10**21,
621 }
622
623 for names, denom in names_denom.items():
624 if args[1] in names:
625 denomination = denom
626 break
627 else:
628 raise InvalidLiteralException(
629 "Invalid denomination: %s, valid denominations are: %s" % (
630 args[1],
631 ",".join(x[0].decode() for x in names_denom)
632 ),
633 expr.args[1]
634 )
635 # Compute the amount of wei and return that value
636 if isinstance(args[0], (int, float)):
637 expr_args_0 = expr.args[0]
638 # On constant reference fetch value node of constant assignment.
639 if context.constants.ast_is_constant(expr.args[0]):
640 expr_args_0 = context.constants._constants_ast[expr.args[0].id]
641 numstring, num, den = get_number_as_fraction(expr_args_0, context)
642 if denomination % den:
643 raise InvalidLiteralException("Too many decimal places: %s" % numstring, expr.args[0])
644 sub = num * denomination // den
645 elif args[0].typ.is_literal:
646 if args[0].value <= 0:
647 raise InvalidLiteralException("Negative wei value not allowed", expr)
648 sub = ['mul', args[0].value, denomination]
649 elif args[0].typ.typ == 'uint256':
650 sub = ['mul', args[0], denomination]
651 else:
652 sub = ['div', ['mul', args[0], denomination], DECIMAL_DIVISOR]
653
654 return LLLnode.from_list(
655 sub,
656 typ=BaseType('uint256', {'wei': 1}),
657 location=None,
658 pos=getpos(expr),
659 )
660
661
662 zero_value = LLLnode.from_list(0, typ=BaseType('uint256', {'wei': 1}))
663 false_value = LLLnode.from_list(0, typ=BaseType('bool', is_literal=True))
664
665
666 @signature(
667 'address',
668 'bytes',
669 outsize='num_literal',
670 gas='uint256',
671 value=Optional('uint256', zero_value),
672 delegate_call=Optional('bool', false_value),
673 )
674 def raw_call(expr, args, kwargs, context):
675 to, data = args
676 gas, value, outsize, delegate_call = (
677 kwargs['gas'],
678 kwargs['value'],
679 kwargs['outsize'],
680 kwargs['delegate_call'],
681 )
682 if delegate_call.typ.is_literal is False:
683 raise TypeMismatchException(
684 'The delegate_call parameter has to be a static/literal boolean value.'
685 )
686 if context.is_constant():
687 raise ConstancyViolationException(
688 "Cannot make calls from %s" % context.pp_constancy(),
689 expr,
690 )
691 if value != zero_value:
692 enforce_units(
693 value.typ,
694 get_keyword(expr, 'value'),
695 BaseType('uint256', {'wei': 1}),
696 )
697 placeholder = context.new_placeholder(data.typ)
698 placeholder_node = LLLnode.from_list(placeholder, typ=data.typ, location='memory')
699 copier = make_byte_array_copier(placeholder_node, data, pos=getpos(expr))
700 output_placeholder = context.new_placeholder(ByteArrayType(outsize))
701 output_node = LLLnode.from_list(
702 output_placeholder,
703 typ=ByteArrayType(outsize),
704 location='memory',
705 )
706
707 if delegate_call.value == 1:
708 z = LLLnode.from_list(
709 [
710 'seq',
711 copier,
712 [
713 'assert',
714 [
715 'delegatecall',
716 gas,
717 to,
718 ['add', placeholder_node, 32],
719 ['mload', placeholder_node],
720 ['add', output_node, 32],
721 outsize,
722 ],
723 ],
724 ['mstore', output_node, outsize],
725 output_node,
726 ],
727 typ=ByteArrayType(outsize),
728 location='memory',
729 pos=getpos(expr),
730 )
731 else:
732 z = LLLnode.from_list(
733 [
734 'seq',
735 copier,
736 [
737 'assert',
738 [
739 'call',
740 gas,
741 to,
742 value,
743 ['add', placeholder_node, 32],
744 ['mload', placeholder_node],
745 ['add', output_node, 32],
746 outsize,
747 ],
748 ],
749 ['mstore', output_node, outsize],
750 output_node,
751 ],
752 typ=ByteArrayType(outsize), location='memory', pos=getpos(expr)
753 )
754 return z
755
756
757 @signature('address', 'uint256')
758 def send(expr, args, kwargs, context):
759 to, value = args
760 if context.is_constant():
761 raise ConstancyViolationException(
762 "Cannot send ether inside %s!" % context.pp_constancy(),
763 expr,
764 )
765 enforce_units(value.typ, expr.args[1], BaseType('uint256', {'wei': 1}))
766 return LLLnode.from_list(
767 ['assert', ['call', 0, to, value, 0, 0, 0, 0]],
768 typ=None,
769 pos=getpos(expr),
770 )
771
772
773 @signature('address')
774 def selfdestruct(expr, args, kwargs, context):
775 if context.is_constant():
776 raise ConstancyViolationException(
777 "Cannot %s inside %s!" % (expr.func.id, context.pp_constancy()),
778 expr.func,
779 )
780 return LLLnode.from_list(['selfdestruct', args[0]], typ=None, pos=getpos(expr))
781
782
783 @signature(('uint256'))
784 def blockhash(expr, args, kwargs, contact):
785 return LLLnode.from_list(
786 ['blockhash', ['uclamplt', ['clampge', args[0], ['sub', ['number'], 256]], 'number']],
787 typ=BaseType('bytes32'),
788 pos=getpos(expr),
789 )
790
791
792 @signature('bytes', '*')
793 def _RLPlist(expr, args, kwargs, context):
794 # Second argument must be a list of types
795 if not isinstance(args[1], ast.List):
796 raise TypeMismatchException("Expecting list of types for second argument", args[1])
797 if len(args[1].elts) == 0:
798 raise TypeMismatchException("RLP list must have at least one item", expr)
799 if len(args[1].elts) > 32:
800 raise TypeMismatchException("RLP list must have at most 32 items", expr)
801 # Get the output format
802 _format = []
803 for arg in args[1].elts:
804 if isinstance(arg, ast.Name) and arg.id == "bytes":
805 subtyp = ByteArrayType(args[0].typ.maxlen)
806 else:
807 subtyp = context.parse_type(arg, 'memory')
808 if not isinstance(subtyp, BaseType):
809 raise TypeMismatchException("RLP lists only accept BaseTypes and byte arrays", arg)
810 if not is_base_type(subtyp, ('int128', 'uint256', 'bytes32', 'address', 'bool')):
811 raise TypeMismatchException("Unsupported base type: %s" % subtyp.typ, arg)
812 _format.append(subtyp)
813 output_type = TupleType(_format)
814 output_placeholder_type = ByteArrayType(
815 (2 * len(_format) + 1 + get_size_of_type(output_type)) * 32,
816 )
817 output_placeholder = context.new_placeholder(output_placeholder_type)
818 output_node = LLLnode.from_list(
819 output_placeholder,
820 typ=output_placeholder_type,
821 location='memory',
822 )
823 # Create a decoder for each element in the tuple
824 decoder = []
825 for i, typ in enumerate(_format):
826 # Decoder for bytes32
827 if is_base_type(typ, 'bytes32'):
828 decoder.append(LLLnode.from_list(
829 [
830 'seq',
831 [
832 'assert',
833 [
834 'eq',
835 [
836 'mload',
837 [
838 'add',
839 output_node,
840 ['mload', ['add', output_node, 32 * i]],
841 ],
842 ],
843 32,
844 ],
845 ],
846 [
847 'mload',
848 [
849 'add',
850 32,
851 [
852 'add',
853 output_node,
854 ['mload', ['add', output_node, 32 * i]],
855 ],
856 ],
857 ],
858 ],
859 typ,
860 annotation='getting and checking bytes32 item',
861 ))
862 # Decoder for address
863 elif is_base_type(typ, 'address'):
864 decoder.append(LLLnode.from_list(
865 [
866 'seq',
867 [
868 'assert',
869 [
870 'eq',
871 [
872 'mload',
873 [
874 'add',
875 output_node,
876 ['mload', ['add', output_node, 32 * i]],
877 ],
878 ],
879 20,
880 ]
881 ],
882 [
883 'mod',
884 [
885 'mload',
886 [
887 'add',
888 20,
889 ['add', output_node, ['mload', ['add', output_node, 32 * i]]],
890 ],
891 ],
892 ['mload', MemoryPositions.ADDRSIZE],
893 ]
894 ],
895 typ,
896 annotation='getting and checking address item',
897 ))
898 # Decoder for bytes
899 elif isinstance(typ, ByteArrayType):
900 decoder.append(LLLnode.from_list(
901 [
902 'add',
903 output_node,
904 ['mload', ['add', output_node, 32 * i]],
905 ],
906 typ,
907 location='memory',
908 annotation='getting byte array',
909 ))
910 # Decoder for num and uint256
911 elif is_base_type(typ, ('int128', 'uint256')):
912 bytez = LLLnode.from_list(
913 [
914 'add',
915 output_node,
916 ['mload', ['add', output_node, 32 * i]],
917 ],
918 typ,
919 location='memory',
920 annotation='getting and checking %s' % typ.typ,
921 )
922 decoder.append(byte_array_to_num(bytez, expr, typ.typ))
923 # Decoder for bools
924 elif is_base_type(typ, ('bool')):
925 # This is basically a really clever way to test for a
926 # length-prefixed one or zero. We take the 32 bytes starting one
927 # byte *after* the start of the length declaration; this includes
928 # the last 31 bytes of the length and the first byte of the value.
929 # 0 corresponds to length 0, first byte 0, and 257 corresponds to
930 # length 1, first byte \x01
931 decoder.append(LLLnode.from_list(
932 [
933 'with', '_ans', [
934 'mload',
935 [
936 'add',
937 1,
938 ['add', output_node, ['mload', ['add', output_node, 32 * i]]]
939 ],
940 ],
941 [
942 'seq',
943 ['assert', ['or', ['eq', '_ans', 0], ['eq', '_ans', 257]]],
944 ['div', '_ans', 257],
945 ],
946 ],
947 typ,
948 annotation='getting and checking bool',
949 ))
950 else:
951 # Should never reach because of top level base level check.
952 raise Exception("Type not yet supported") # pragma: no cover
953 # Copy the input data to memory
954 if args[0].location == "memory":
955 variable_pointer = args[0]
956 elif args[0].location == "storage":
957 placeholder = context.new_placeholder(args[0].typ)
958 placeholder_node = LLLnode.from_list(placeholder, typ=args[0].typ, location='memory')
959 copier = make_byte_array_copier(
960 placeholder_node,
961 LLLnode.from_list('_ptr', typ=args[0].typ, location=args[0].location),
962 )
963 variable_pointer = ['with', '_ptr', args[0], ['seq', copier, placeholder_node]]
964 else:
965 # Should never reach because of top level base level check.
966 raise Exception("Location not yet supported") # pragma: no cover
967 # Decode the input data
968 initial_setter = LLLnode.from_list(
969 ['seq',
970 ['with', '_sub', variable_pointer,
971 ['pop', ['call',
972 1500 + 400 * len(_format) + 10 * len(args),
973 LLLnode.from_list(RLP_DECODER_ADDRESS, annotation='RLP decoder'),
974 0,
975 ['add', '_sub', 32],
976 ['mload', '_sub'],
977 output_node,
978 64 * len(_format) + 32 + 32 * get_size_of_type(output_type)]]],
979 ['assert', ['eq', ['mload', output_node], 32 * len(_format) + 32]]],
980 typ=None)
981 # Shove the input data decoder in front of the first variable decoder
982 decoder[0] = LLLnode.from_list(
983 ['seq', initial_setter, decoder[0]],
984 typ=decoder[0].typ,
985 location=decoder[0].location,
986 )
987 return LLLnode.from_list(
988 ["multi"] + decoder,
989 typ=output_type,
990 location='memory',
991 pos=getpos(expr),
992 )
993
994
995 @signature('*', 'bytes')
996 def raw_log(expr, args, kwargs, context):
997 if not isinstance(args[0], ast.List) or len(args[0].elts) > 4:
998 raise StructureException("Expecting a list of 0-4 topics as first argument", args[0])
999 topics = []
1000 for elt in args[0].elts:
1001 arg = Expr.parse_value_expr(elt, context)
1002 if not is_base_type(arg.typ, 'bytes32'):
1003 raise TypeMismatchException("Expecting a bytes32 argument as topic", elt)
1004 topics.append(arg)
1005 if args[1].location == "memory":
1006 return LLLnode.from_list([
1007 "with", "_arr", args[1], [
1008 "log" + str(len(topics)),
1009 ["add", "_arr", 32],
1010 ["mload", "_arr"],
1011 ] + topics
1012 ], typ=None, pos=getpos(expr))
1013 placeholder = context.new_placeholder(args[1].typ)
1014 placeholder_node = LLLnode.from_list(placeholder, typ=args[1].typ, location='memory')
1015 copier = make_byte_array_copier(
1016 placeholder_node,
1017 LLLnode.from_list('_sub', typ=args[1].typ, location=args[1].location),
1018 pos=getpos(expr),
1019 )
1020 return LLLnode.from_list(
1021 [
1022 "with", "_sub", args[1],
1023 [
1024 "seq",
1025 copier,
1026 [
1027 "log" + str(len(topics)),
1028 ["add", placeholder_node, 32],
1029 ["mload", placeholder_node],
1030 ] + topics
1031 ],
1032 ],
1033 typ=None,
1034 pos=getpos(expr),
1035 )
1036
1037
1038 @signature('uint256', 'uint256')
1039 def bitwise_and(expr, args, kwargs, context):
1040 return LLLnode.from_list(['and', args[0], args[1]], typ=BaseType('uint256'), pos=getpos(expr))
1041
1042
1043 @signature('uint256', 'uint256')
1044 def bitwise_or(expr, args, kwargs, context):
1045 return LLLnode.from_list(['or', args[0], args[1]], typ=BaseType('uint256'), pos=getpos(expr))
1046
1047
1048 @signature('uint256', 'uint256')
1049 def bitwise_xor(expr, args, kwargs, context):
1050 return LLLnode.from_list(['xor', args[0], args[1]], typ=BaseType('uint256'), pos=getpos(expr))
1051
1052
1053 @signature('uint256', 'uint256', 'uint256')
1054 def uint256_addmod(expr, args, kwargs, context):
1055 return LLLnode.from_list(
1056 [
1057 'seq',
1058 ['assert', args[2]],
1059 ['assert', ['or', ['iszero', args[1]], ['gt', ['add', args[0], args[1]], args[0]]]],
1060 ['addmod', args[0], args[1], args[2]],
1061 ],
1062 typ=BaseType('uint256'),
1063 pos=getpos(expr),
1064 )
1065
1066
1067 @signature('uint256', 'uint256', 'uint256')
1068 def uint256_mulmod(expr, args, kwargs, context):
1069 return LLLnode.from_list(
1070 [
1071 'seq',
1072 ['assert', args[2]],
1073 ['assert', [
1074 'or',
1075 ['iszero', args[0]],
1076 ['eq', ['div', ['mul', args[0], args[1]], args[0]], args[1]],
1077 ]],
1078 ['mulmod', args[0], args[1], args[2]],
1079 ],
1080 typ=BaseType('uint256'),
1081 pos=getpos(expr),
1082 )
1083
1084
1085 @signature('uint256')
1086 def bitwise_not(expr, args, kwargs, context):
1087 return LLLnode.from_list(['not', args[0]], typ=BaseType('uint256'), pos=getpos(expr))
1088
1089
1090 @signature('uint256', 'int128')
1091 def shift(expr, args, kwargs, context):
1092 return LLLnode.from_list(
1093 [
1094 'with', '_v', args[0], [
1095 'with', '_s', args[1], [
1096 # If second argument is positive, left-shift so multiply by a power of two
1097 # If it is negative, divide by a power of two
1098 # node that if the abs of the second argument >= 256, then in the EVM
1099 # 2**(second arg) = 0, and multiplying OR dividing by 0 gives 0
1100 'if',
1101 ['slt', '_s', 0],
1102 ['div', '_v', ['exp', 2, ['sub', 0, '_s']]],
1103 ['mul', '_v', ['exp', 2, '_s']]
1104 ],
1105 ],
1106 ],
1107 typ=BaseType('uint256'),
1108 pos=getpos(expr),
1109 )
1110
1111
1112 def get_create_forwarder_to_bytecode():
1113 from vyper.compile_lll import (
1114 assembly_to_evm,
1115 num_to_bytearray
1116 )
1117 code_a = [
1118 'PUSH1', 0x33,
1119 'PUSH1', 0x0c,
1120 'PUSH1', 0x00,
1121 'CODECOPY',
1122 'PUSH1', 0x33,
1123 'PUSH1', 0x00,
1124 'RETURN',
1125 'CALLDATASIZE',
1126 'PUSH1', 0x00,
1127 'PUSH1', 0x00,
1128 'CALLDATACOPY',
1129 'PUSH2', num_to_bytearray(0x1000),
1130 'PUSH1', 0x00,
1131 'CALLDATASIZE',
1132 'PUSH1', 0x00,
1133 'PUSH20', # [address to delegate to]
1134 ]
1135 code_b = [
1136 'GAS',
1137 'DELEGATECALL',
1138 'PUSH1', 0x2c, # jumpdest of whole program.
1139 'JUMPI',
1140 'PUSH1', 0x0,
1141 'DUP1',
1142 'REVERT',
1143 'JUMPDEST',
1144 'PUSH2', num_to_bytearray(0x1000),
1145 'PUSH1', 0x00,
1146 'RETURN'
1147 ]
1148 return assembly_to_evm(code_a)[0] + (b'\x00' * 20) + assembly_to_evm(code_b)[0]
1149
1150
1151 @signature('address', value=Optional('uint256', zero_value))
1152 def create_forwarder_to(expr, args, kwargs, context):
1153
1154 value = kwargs['value']
1155 if value != zero_value:
1156 enforce_units(value.typ, get_keyword(expr, 'value'),
1157 BaseType('uint256', {'wei': 1}))
1158 if context.is_constant():
1159 raise ConstancyViolationException(
1160 "Cannot make calls from %s" % context.pp_constancy(),
1161 expr,
1162 )
1163 placeholder = context.new_placeholder(ByteArrayType(96))
1164
1165 kode = get_create_forwarder_to_bytecode()
1166 high = bytes_to_int(kode[:32])
1167 low = bytes_to_int((kode + b'\x00' * 32)[47:79])
1168
1169 return LLLnode.from_list(
1170 [
1171 'seq',
1172 ['mstore', placeholder, high],
1173 ['mstore', ['add', placeholder, 27], ['mul', args[0], 2**96]],
1174 ['mstore', ['add', placeholder, 47], low],
1175 ['clamp_nonzero', ['create', value, placeholder, 96]],
1176 ],
1177 typ=BaseType('address'),
1178 pos=getpos(expr),
1179 add_gas_estimate=11000,
1180 )
1181
1182
1183 @signature(('int128', 'decimal', 'uint256'), ('int128', 'decimal', 'uint256'))
1184 def _min(expr, args, kwargs, context):
1185 return minmax(expr, args, kwargs, context, True)
1186
1187
1188 @signature(('int128', 'decimal', 'uint256'), ('int128', 'decimal', 'uint256'))
1189 def _max(expr, args, kwargs, context):
1190 return minmax(expr, args, kwargs, context, False)
1191
1192
1193 def minmax(expr, args, kwargs, context, is_min):
1194 def _can_compare_with_uint256(operand):
1195 if operand.typ.typ == 'uint256':
1196 return True
1197 elif operand.typ.typ == 'int128' and operand.typ.is_literal and SizeLimits.in_bounds('uint256', operand.value): # noqa: E501
1198 return True
1199 return False
1200
1201 left, right = args[0], args[1]
1202 if not are_units_compatible(left.typ, right.typ) and not are_units_compatible(right.typ, left.typ): # noqa: E501
1203 raise TypeMismatchException("Units must be compatible", expr)
1204 if left.typ.typ == 'uint256':
1205 comparator = 'gt' if is_min else 'lt'
1206 else:
1207 comparator = 'sgt' if is_min else 'slt'
1208 if left.typ.typ == right.typ.typ:
1209 o = ['if', [comparator, '_l', '_r'], '_r', '_l']
1210 otyp = left.typ
1211 otyp.is_literal = False
1212 elif _can_compare_with_uint256(left) and _can_compare_with_uint256(right):
1213 o = ['if', [comparator, '_l', '_r'], '_r', '_l']
1214 if right.typ.typ == 'uint256':
1215 otyp = right.typ
1216 else:
1217 otyp = left.typ
1218 otyp.is_literal = False
1219 else:
1220 raise TypeMismatchException(
1221 "Minmax types incompatible: %s %s" % (left.typ.typ, right.typ.typ)
1222 )
1223 return LLLnode.from_list(
1224 ['with', '_l', left, ['with', '_r', right, o]],
1225 typ=otyp,
1226 pos=getpos(expr),
1227 )
1228
1229
1230 @signature('decimal')
1231 def sqrt(expr, args, kwargs, context):
1232 from vyper.functions.utils import (
1233 generate_inline_function,
1234 )
1235 arg = args[0]
1236 sqrt_code = """
1237 assert x >= 0.0
1238 z: decimal
1239
1240 if x == 0.0:
1241 z = 0.0
1242 else:
1243 z = (x + 1.0) / 2.0
1244 y: decimal = x
1245
1246 for i in range(256):
1247 if z == y:
1248 break
1249 y = z
1250 z = (x / z + z) / 2.0
1251 """
1252
1253 x_type = BaseType('decimal')
1254 placeholder_copy = ['pass']
1255 # Steal current position if variable is already allocated.
1256 if arg.value == 'mload':
1257 new_var_pos = arg.args[0]
1258 # Other locations need to be copied.
1259 else:
1260 new_var_pos = context.new_placeholder(x_type)
1261 placeholder_copy = ['mstore', new_var_pos, arg]
1262 # Create input variables.
1263 variables = {
1264 'x': VariableRecord(
1265 name='x',
1266 pos=new_var_pos,
1267 typ=x_type,
1268 mutable=False
1269 )
1270 }
1271 # Generate inline LLL.
1272 new_ctx, sqrt_lll = generate_inline_function(
1273 code=sqrt_code,
1274 variables=variables,
1275 memory_allocator=context.memory_allocator
1276 )
1277 return LLLnode.from_list(
1278 [
1279 'seq_unchecked',
1280 placeholder_copy, # load x variable
1281 sqrt_lll,
1282 ['mload', new_ctx.vars['z'].pos] # unload z variable into the stack,
1283 ],
1284 typ=BaseType('decimal'),
1285 pos=getpos(expr),
1286 )
1287
1288
1289 def _clear():
1290 raise ParserException(
1291 "This function should never be called! `clear()` is currently handled "
1292 "differently than other functions as it self modifies its input argument "
1293 "statement. Please see `_clear()` in `stmt.py`"
1294 )
1295
1296
1297 dispatch_table = {
1298 'floor': floor,
1299 'ceil': ceil,
1300 'as_unitless_number': as_unitless_number,
1301 'convert': _convert,
1302 'slice': _slice,
1303 'len': _len,
1304 'concat': concat,
1305 'sha3': _sha3,
1306 'sha256': sha256,
1307 'method_id': method_id,
1308 'keccak256': _sha3,
1309 'ecrecover': ecrecover,
1310 'ecadd': ecadd,
1311 'ecmul': ecmul,
1312 'extract32': extract32,
1313 'as_wei_value': as_wei_value,
1314 'raw_call': raw_call,
1315 'RLPList': _RLPlist,
1316 'blockhash': blockhash,
1317 'bitwise_and': bitwise_and,
1318 'bitwise_or': bitwise_or,
1319 'bitwise_xor': bitwise_xor,
1320 'bitwise_not': bitwise_not,
1321 'uint256_addmod': uint256_addmod,
1322 'uint256_mulmod': uint256_mulmod,
1323 'sqrt': sqrt,
1324 'shift': shift,
1325 'create_forwarder_to': create_forwarder_to,
1326 'min': _min,
1327 'max': _max,
1328 }
1329
1330 stmt_dispatch_table = {
1331 'clear': _clear,
1332 'send': send,
1333 'selfdestruct': selfdestruct,
1334 'raw_call': raw_call,
1335 'raw_log': raw_log,
1336 'create_forwarder_to': create_forwarder_to,
1337 }
1338
1339 built_in_functions = [
1340 x for x in stmt_dispatch_table.keys()
1341 ] + [
1342 x for x in dispatch_table.keys()
1343 ]
1344
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/vyper/functions/functions.py b/vyper/functions/functions.py
--- a/vyper/functions/functions.py
+++ b/vyper/functions/functions.py
@@ -232,7 +232,7 @@
# Maximum length of the output
total_maxlen = sum([
- arg.typ.maxlen if isinstance(arg.typ, ByteArrayType) else 32 for arg in args
+ arg.typ.maxlen if isinstance(arg.typ, ByteArrayLike) else 32 for arg in args
])
# Node representing the position of the output in memory
placeholder = context.new_placeholder(ReturnType(total_maxlen))
| {"golden_diff": "diff --git a/vyper/functions/functions.py b/vyper/functions/functions.py\n--- a/vyper/functions/functions.py\n+++ b/vyper/functions/functions.py\n@@ -232,7 +232,7 @@\n \n # Maximum length of the output\n total_maxlen = sum([\n- arg.typ.maxlen if isinstance(arg.typ, ByteArrayType) else 32 for arg in args\n+ arg.typ.maxlen if isinstance(arg.typ, ByteArrayLike) else 32 for arg in args\n ])\n # Node representing the position of the output in memory\n placeholder = context.new_placeholder(ReturnType(total_maxlen))\n", "issue": "Vyper treats strings as having length 32\n### Version Information\r\n\r\n* vyper Version: 0.1.0b9\r\n* OS: osx\r\n\r\n### What's your issue about?\r\n\r\nWhen using `concat` Vyper treats all strings as having length 32. Therefore this\r\n```\r\n@public\r\ndef conc(a: string[33], b: string[33]) -> string[64]:\r\n c: string[64] = concat(a, b)\r\n return c\r\n ```\r\nwill compile and even run (returning a string of length 66) and the following\r\n```\r\n@public\r\ndef conc_fail(a: string[5], b: string[4]) -> string[9]:\r\n c: string[9] = concat(a, b)\r\n return c\r\n```\r\nleads to a compiler error \"Cannot cast from greater max-length 64 to shorter max-length 9\".\r\n\n", "before_files": [{"content": "import hashlib\n\nfrom vyper import ast\nfrom vyper.exceptions import (\n ConstancyViolationException,\n InvalidLiteralException,\n ParserException,\n StructureException,\n TypeMismatchException,\n)\nfrom vyper.parser.expr import (\n Expr,\n)\nfrom vyper.parser.parser_utils import (\n LLLnode,\n add_variable_offset,\n byte_array_to_num,\n get_length,\n get_number_as_fraction,\n getpos,\n make_byte_array_copier,\n make_byte_slice_copier,\n unwrap_location,\n)\nfrom vyper.signatures.function_signature import (\n VariableRecord,\n)\nfrom vyper.types import (\n BaseType,\n ByteArrayLike,\n ByteArrayType,\n ListType,\n StringType,\n TupleType,\n are_units_compatible,\n get_size_of_type,\n is_base_type,\n)\nfrom vyper.types.convert import (\n convert,\n)\nfrom vyper.utils import (\n DECIMAL_DIVISOR,\n RLP_DECODER_ADDRESS,\n MemoryPositions,\n SizeLimits,\n bytes_to_int,\n fourbytes_to_int,\n sha3,\n)\n\nfrom .signatures import (\n Optional,\n signature,\n)\n\nSHA256_ADDRESS = 2\nSHA256_BASE_GAS = 60\nSHA256_PER_WORD_GAS = 12\n\n\ndef enforce_units(typ, obj, expected):\n if not are_units_compatible(typ, expected):\n raise TypeMismatchException(\"Invalid units\", obj)\n\n\ndef get_keyword(expr, keyword):\n for kw in expr.keywords:\n if kw.arg == keyword:\n return kw.value\n # This should never happen, as kwargs['value'] will KeyError first.\n # Leaving exception for other use cases.\n raise Exception(\"Keyword %s not found\" % keyword) # pragma: no cover\n\n\n@signature('decimal')\ndef floor(expr, args, kwargs, context):\n return LLLnode.from_list(\n [\n 'if',\n ['slt', args[0], 0],\n ['sdiv', ['sub', args[0], DECIMAL_DIVISOR - 1], DECIMAL_DIVISOR],\n ['sdiv', args[0], DECIMAL_DIVISOR]\n ],\n typ=BaseType('int128', args[0].typ.unit, args[0].typ.positional),\n pos=getpos(expr)\n )\n\n\n@signature('decimal')\ndef ceil(expr, args, kwards, context):\n return LLLnode.from_list(\n [\n 'if',\n ['slt', args[0], 0],\n ['sdiv', args[0], DECIMAL_DIVISOR],\n ['sdiv', ['add', args[0], DECIMAL_DIVISOR - 1], DECIMAL_DIVISOR]\n ],\n typ=BaseType('int128', args[0].typ.unit, args[0].typ.positional),\n pos=getpos(expr)\n )\n\n\n@signature(('uint256', 'int128', 'decimal'))\ndef as_unitless_number(expr, args, kwargs, context):\n return LLLnode(\n value=args[0].value,\n args=args[0].args,\n typ=BaseType(args[0].typ.typ, {}),\n pos=getpos(expr),\n )\n\n\ndef _convert(expr, context):\n return convert(expr, context)\n\n\n@signature(('bytes32', 'bytes', 'string'), start='int128', len='int128')\ndef _slice(expr, args, kwargs, context):\n\n sub, start, length = args[0], kwargs['start'], kwargs['len']\n if not are_units_compatible(start.typ, BaseType('int128')):\n raise TypeMismatchException(\"Type for slice start index must be a unitless number\", expr)\n # Expression representing the length of the slice\n if not are_units_compatible(length.typ, BaseType('int128')):\n raise TypeMismatchException(\"Type for slice length must be a unitless number\", expr)\n\n if is_base_type(sub.typ, 'bytes32'):\n if (start.typ.is_literal and length.typ.is_literal) and \\\n not (0 <= start.value + length.value <= 32):\n raise InvalidLiteralException(\n 'Invalid start / length values needs to be between 0 and 32.',\n expr,\n )\n sub_typ_maxlen = 32\n else:\n sub_typ_maxlen = sub.typ.maxlen\n\n # Get returntype string or bytes\n if isinstance(args[0].typ, ByteArrayType) or is_base_type(sub.typ, 'bytes32'):\n ReturnType = ByteArrayType\n else:\n ReturnType = StringType\n\n # Node representing the position of the output in memory\n np = context.new_placeholder(ReturnType(maxlen=sub_typ_maxlen + 32))\n placeholder_node = LLLnode.from_list(np, typ=sub.typ, location='memory')\n placeholder_plus_32_node = LLLnode.from_list(np + 32, typ=sub.typ, location='memory')\n # Copies over bytearray data\n if sub.location == 'storage':\n adj_sub = LLLnode.from_list(\n ['add', ['sha3_32', sub], ['add', ['div', '_start', 32], 1]],\n typ=sub.typ,\n location=sub.location,\n )\n else:\n adj_sub = LLLnode.from_list(\n ['add', sub, ['add', ['sub', '_start', ['mod', '_start', 32]], 32]],\n typ=sub.typ,\n location=sub.location,\n )\n\n if is_base_type(sub.typ, 'bytes32'):\n adj_sub = LLLnode.from_list(\n sub.args[0], typ=sub.typ, location=\"memory\"\n )\n\n copier = make_byte_slice_copier(\n placeholder_plus_32_node,\n adj_sub,\n ['add', '_length', 32],\n sub_typ_maxlen,\n pos=getpos(expr),\n )\n # New maximum length in the type of the result\n newmaxlen = length.value if not len(length.args) else sub_typ_maxlen\n if is_base_type(sub.typ, 'bytes32'):\n maxlen = 32\n else:\n maxlen = ['mload', Expr(sub, context=context).lll_node] # Retrieve length of the bytes.\n\n out = [\n 'with', '_start', start, [\n 'with', '_length', length, [\n 'with', '_opos', ['add', placeholder_node, ['mod', '_start', 32]], [\n 'seq',\n ['assert', ['le', ['add', '_start', '_length'], maxlen]],\n copier,\n ['mstore', '_opos', '_length'],\n '_opos'\n ],\n ],\n ],\n ]\n return LLLnode.from_list(out, typ=ReturnType(newmaxlen), location='memory', pos=getpos(expr))\n\n\n@signature(('bytes', 'string'))\ndef _len(expr, args, kwargs, context):\n return get_length(args[0])\n\n\ndef concat(expr, context):\n args = [Expr(arg, context).lll_node for arg in expr.args]\n if len(args) < 2:\n raise StructureException(\"Concat expects at least two arguments\", expr)\n\n prev_type = ''\n for _, (expr_arg, arg) in enumerate(zip(expr.args, args)):\n if not isinstance(arg.typ, ByteArrayLike) and not is_base_type(arg.typ, 'bytes32'):\n raise TypeMismatchException(\"Concat expects string, bytes or bytes32 objects\", expr_arg)\n\n current_type = (\n 'bytes'\n if isinstance(arg.typ, ByteArrayType) or is_base_type(arg.typ, 'bytes32')\n else 'string'\n )\n if prev_type and current_type != prev_type:\n raise TypeMismatchException(\n (\n \"Concat expects consistant use of string or byte types, \"\n \"user either bytes or string.\"\n ),\n expr_arg,\n )\n prev_type = current_type\n\n if current_type == 'string':\n ReturnType = StringType\n else:\n ReturnType = ByteArrayType\n\n # Maximum length of the output\n total_maxlen = sum([\n arg.typ.maxlen if isinstance(arg.typ, ByteArrayType) else 32 for arg in args\n ])\n # Node representing the position of the output in memory\n placeholder = context.new_placeholder(ReturnType(total_maxlen))\n # Object representing the output\n seq = []\n # For each argument we are concatenating...\n for arg in args:\n # Start pasting into a position the starts at zero, and keeps\n # incrementing as we concatenate arguments\n placeholder_node = LLLnode.from_list(\n ['add', placeholder, '_poz'],\n typ=ReturnType(total_maxlen),\n location='memory',\n )\n placeholder_node_plus_32 = LLLnode.from_list(\n ['add', ['add', placeholder, '_poz'], 32],\n typ=ReturnType(total_maxlen),\n location='memory',\n )\n if isinstance(arg.typ, ReturnType):\n # Ignore empty strings\n if arg.typ.maxlen == 0:\n continue\n # Get the length of the current argument\n if arg.location == \"memory\":\n length = LLLnode.from_list(['mload', '_arg'], typ=BaseType('int128'))\n argstart = LLLnode.from_list(\n ['add', '_arg', 32],\n typ=arg.typ,\n location=arg.location,\n )\n elif arg.location == \"storage\":\n length = LLLnode.from_list(['sload', ['sha3_32', '_arg']], typ=BaseType('int128'))\n argstart = LLLnode.from_list(\n ['add', ['sha3_32', '_arg'], 1],\n typ=arg.typ,\n location=arg.location,\n )\n # Make a copier to copy over data from that argument\n seq.append([\n 'with', '_arg', arg, [\n 'seq',\n make_byte_slice_copier(\n placeholder_node_plus_32,\n argstart,\n length,\n arg.typ.maxlen, pos=getpos(expr),\n ),\n # Change the position to start at the correct\n # place to paste the next value\n ['set', '_poz', ['add', '_poz', length]],\n ],\n ])\n else:\n seq.append([\n 'seq',\n ['mstore', ['add', placeholder_node, 32], unwrap_location(arg)],\n ['set', '_poz', ['add', '_poz', 32]],\n ])\n # The position, after all arguments are processing, equals the total\n # length. Paste this in to make the output a proper bytearray\n seq.append(['mstore', placeholder, '_poz'])\n # Memory location of the output\n seq.append(placeholder)\n return LLLnode.from_list(\n ['with', '_poz', 0, ['seq'] + seq],\n typ=ReturnType(total_maxlen),\n location='memory',\n pos=getpos(expr),\n annotation='concat',\n )\n\n\n@signature(('bytes_literal', 'str_literal', 'bytes', 'string', 'bytes32'))\ndef _sha3(expr, args, kwargs, context):\n sub = args[0]\n # Can hash literals\n if isinstance(sub, bytes):\n return LLLnode.from_list(\n bytes_to_int(sha3(sub)),\n typ=BaseType('bytes32'),\n pos=getpos(expr)\n )\n # Can hash bytes32 objects\n if is_base_type(sub.typ, 'bytes32'):\n return LLLnode.from_list(\n [\n 'seq',\n ['mstore', MemoryPositions.FREE_VAR_SPACE, sub],\n ['sha3', MemoryPositions.FREE_VAR_SPACE, 32]\n ],\n typ=BaseType('bytes32'),\n pos=getpos(expr),\n )\n # Copy the data to an in-memory array\n if sub.location == \"memory\":\n # If we are hashing a value in memory, no need to copy it, just hash in-place\n return LLLnode.from_list(\n ['with', '_sub', sub, ['sha3', ['add', '_sub', 32], ['mload', '_sub']]],\n typ=BaseType('bytes32'),\n pos=getpos(expr),\n )\n elif sub.location == \"storage\":\n lengetter = LLLnode.from_list(['sload', ['sha3_32', '_sub']], typ=BaseType('int128'))\n else:\n # This should never happen, but just left here for future compiler-writers.\n raise Exception(\"Unsupported location: %s\" % sub.location) # pragma: no test\n placeholder = context.new_placeholder(sub.typ)\n placeholder_node = LLLnode.from_list(placeholder, typ=sub.typ, location='memory')\n copier = make_byte_array_copier(\n placeholder_node,\n LLLnode.from_list('_sub', typ=sub.typ, location=sub.location),\n )\n return LLLnode.from_list(\n [\n 'with', '_sub', sub, [\n 'seq',\n copier,\n ['sha3', ['add', placeholder, 32], lengetter]\n ],\n ],\n typ=BaseType('bytes32'),\n pos=getpos(expr)\n )\n\n\ndef _make_sha256_call(inp_start, inp_len, out_start, out_len):\n return [\n 'assert', [\n 'call',\n ['gas'], # gas\n SHA256_ADDRESS, # address\n 0, # value\n inp_start,\n inp_len,\n out_start,\n out_len\n ]\n ]\n\n\n@signature(('bytes_literal', 'str_literal', 'bytes', 'string', 'bytes32'))\ndef sha256(expr, args, kwargs, context):\n sub = args[0]\n # Literal input\n if isinstance(sub, bytes):\n return LLLnode.from_list(\n bytes_to_int(hashlib.sha256(sub).digest()),\n typ=BaseType('bytes32'),\n pos=getpos(expr)\n )\n # bytes32 input\n elif is_base_type(sub.typ, 'bytes32'):\n return LLLnode.from_list(\n [\n 'seq',\n ['mstore', MemoryPositions.FREE_VAR_SPACE, sub],\n _make_sha256_call(\n inp_start=MemoryPositions.FREE_VAR_SPACE,\n inp_len=32,\n out_start=MemoryPositions.FREE_VAR_SPACE,\n out_len=32\n ),\n ['mload', MemoryPositions.FREE_VAR_SPACE] # push value onto stack\n ],\n typ=BaseType('bytes32'),\n pos=getpos(expr),\n add_gas_estimate=SHA256_BASE_GAS + 1 * SHA256_PER_WORD_GAS\n )\n # bytearay-like input\n if sub.location == \"storage\":\n # Copy storage to memory\n placeholder = context.new_placeholder(sub.typ)\n placeholder_node = LLLnode.from_list(placeholder, typ=sub.typ, location='memory')\n copier = make_byte_array_copier(\n placeholder_node,\n LLLnode.from_list('_sub', typ=sub.typ, location=sub.location),\n )\n return LLLnode.from_list(\n [\n 'with', '_sub', sub, [\n 'seq',\n copier,\n _make_sha256_call(\n inp_start=['add', placeholder, 32],\n inp_len=['mload', placeholder],\n out_start=MemoryPositions.FREE_VAR_SPACE,\n out_len=32\n ),\n ['mload', MemoryPositions.FREE_VAR_SPACE]\n ],\n ],\n typ=BaseType('bytes32'),\n pos=getpos(expr),\n add_gas_estimate=SHA256_BASE_GAS + sub.typ.maxlen * SHA256_PER_WORD_GAS\n )\n elif sub.location == \"memory\":\n return LLLnode.from_list(\n [\n 'with', '_sub', sub, [\n 'seq',\n _make_sha256_call(\n inp_start=['add', '_sub', 32],\n inp_len=['mload', '_sub'],\n out_start=MemoryPositions.FREE_VAR_SPACE,\n out_len=32\n ),\n ['mload', MemoryPositions.FREE_VAR_SPACE]\n ]\n ],\n typ=BaseType('bytes32'),\n pos=getpos(expr),\n add_gas_estimate=SHA256_BASE_GAS + sub.typ.maxlen * SHA256_PER_WORD_GAS\n )\n else:\n # This should never happen, but just left here for future compiler-writers.\n raise Exception(\"Unsupported location: %s\" % sub.location) # pragma: no test\n\n\n@signature('str_literal', 'name_literal')\ndef method_id(expr, args, kwargs, context):\n if b' ' in args[0]:\n raise TypeMismatchException('Invalid function signature no spaces allowed.')\n method_id = fourbytes_to_int(sha3(args[0])[:4])\n if args[1] == 'bytes32':\n return LLLnode(method_id, typ=BaseType('bytes32'), pos=getpos(expr))\n elif args[1] == 'bytes[4]':\n placeholder = LLLnode.from_list(context.new_placeholder(ByteArrayType(4)))\n return LLLnode.from_list(\n ['seq',\n ['mstore', ['add', placeholder, 4], method_id],\n ['mstore', placeholder, 4], placeholder],\n typ=ByteArrayType(4), location='memory', pos=getpos(expr))\n else:\n raise StructureException('Can only produce bytes32 or bytes[4] as outputs')\n\n\n@signature('bytes32', 'uint256', 'uint256', 'uint256')\ndef ecrecover(expr, args, kwargs, context):\n placeholder_node = LLLnode.from_list(\n context.new_placeholder(ByteArrayType(128)), typ=ByteArrayType(128), location='memory'\n )\n return LLLnode.from_list([\n 'seq',\n ['mstore', placeholder_node, args[0]],\n ['mstore', ['add', placeholder_node, 32], args[1]],\n ['mstore', ['add', placeholder_node, 64], args[2]],\n ['mstore', ['add', placeholder_node, 96], args[3]],\n ['pop', ['call', 3000, 1, 0, placeholder_node, 128, MemoryPositions.FREE_VAR_SPACE, 32]],\n ['mload', MemoryPositions.FREE_VAR_SPACE],\n ], typ=BaseType('address'), pos=getpos(expr))\n\n\ndef avo(arg, ind, pos):\n return unwrap_location(add_variable_offset(arg, LLLnode.from_list(ind, 'int128'), pos=pos))\n\n\n@signature('uint256[2]', 'uint256[2]')\ndef ecadd(expr, args, kwargs, context):\n placeholder_node = LLLnode.from_list(\n context.new_placeholder(ByteArrayType(128)), typ=ByteArrayType(128), location='memory'\n )\n pos = getpos(expr)\n o = LLLnode.from_list([\n 'seq',\n ['mstore', placeholder_node, avo(args[0], 0, pos)],\n ['mstore', ['add', placeholder_node, 32], avo(args[0], 1, pos)],\n ['mstore', ['add', placeholder_node, 64], avo(args[1], 0, pos)],\n ['mstore', ['add', placeholder_node, 96], avo(args[1], 1, pos)],\n ['assert', ['call', 500, 6, 0, placeholder_node, 128, placeholder_node, 64]],\n placeholder_node,\n ], typ=ListType(BaseType('uint256'), 2), pos=getpos(expr), location='memory')\n return o\n\n\n@signature('uint256[2]', 'uint256')\ndef ecmul(expr, args, kwargs, context):\n placeholder_node = LLLnode.from_list(\n context.new_placeholder(ByteArrayType(128)), typ=ByteArrayType(128), location='memory'\n )\n pos = getpos(expr)\n o = LLLnode.from_list([\n 'seq',\n ['mstore', placeholder_node, avo(args[0], 0, pos)],\n ['mstore', ['add', placeholder_node, 32], avo(args[0], 1, pos)],\n ['mstore', ['add', placeholder_node, 64], args[1]],\n ['assert', ['call', 40000, 7, 0, placeholder_node, 96, placeholder_node, 64]],\n placeholder_node,\n ], typ=ListType(BaseType('uint256'), 2), pos=pos, location='memory')\n return o\n\n\ndef _memory_element_getter(index):\n return LLLnode.from_list(\n ['mload', ['add', '_sub', ['add', 32, ['mul', 32, index]]]],\n typ=BaseType('int128'),\n )\n\n\ndef _storage_element_getter(index):\n return LLLnode.from_list(\n ['sload', ['add', ['sha3_32', '_sub'], ['add', 1, index]]],\n typ=BaseType('int128'),\n )\n\n\n@signature('bytes', 'int128', type=Optional('name_literal', 'bytes32'))\ndef extract32(expr, args, kwargs, context):\n sub, index = args\n ret_type = kwargs['type']\n # Get length and specific element\n if sub.location == \"memory\":\n lengetter = LLLnode.from_list(['mload', '_sub'], typ=BaseType('int128'))\n elementgetter = _memory_element_getter\n elif sub.location == \"storage\":\n lengetter = LLLnode.from_list(['sload', ['sha3_32', '_sub']], typ=BaseType('int128'))\n elementgetter = _storage_element_getter\n # TODO: unclosed if/elif clause. Undefined behavior if `sub.location`\n # isn't one of `memory`/`storage`\n\n # Special case: index known to be a multiple of 32\n if isinstance(index.value, int) and not index.value % 32:\n o = LLLnode.from_list(\n [\n 'with', '_sub', sub,\n elementgetter(['div', ['clamp', 0, index, ['sub', lengetter, 32]], 32])\n ],\n typ=BaseType(ret_type),\n annotation='extracting 32 bytes',\n )\n # General case\n else:\n o = LLLnode.from_list([\n 'with', '_sub', sub, [\n 'with', '_len', lengetter, [\n 'with', '_index', ['clamp', 0, index, ['sub', '_len', 32]], [\n 'with', '_mi32', ['mod', '_index', 32], [\n 'with', '_di32', ['div', '_index', 32],\n [\n 'if',\n '_mi32',\n [\n 'add',\n ['mul', elementgetter('_di32'), ['exp', 256, '_mi32']],\n [\n 'div',\n elementgetter(['add', '_di32', 1]),\n ['exp', 256, ['sub', 32, '_mi32']],\n ],\n ],\n elementgetter('_di32'),\n ],\n ],\n ],\n ],\n ],\n ], typ=BaseType(ret_type), pos=getpos(expr), annotation='extracting 32 bytes')\n if ret_type == 'int128':\n return LLLnode.from_list(\n ['clamp', ['mload', MemoryPositions.MINNUM], o, ['mload', MemoryPositions.MAXNUM]],\n typ=BaseType('int128'),\n pos=getpos(expr),\n )\n elif ret_type == 'address':\n return LLLnode.from_list(\n ['uclamplt', o, ['mload', MemoryPositions.ADDRSIZE]],\n typ=BaseType(ret_type),\n pos=getpos(expr),\n )\n else:\n return o\n\n\n@signature(('num_literal', 'int128', 'uint256', 'decimal'), 'str_literal')\ndef as_wei_value(expr, args, kwargs, context):\n # Denominations\n names_denom = {\n (b\"wei\", ): 1,\n (b\"femtoether\", b\"kwei\", b\"babbage\"): 10**3,\n (b\"picoether\", b\"mwei\", b\"lovelace\"): 10**6,\n (b\"nanoether\", b\"gwei\", b\"shannon\"): 10**9,\n (b\"microether\", b\"szabo\", ): 10**12,\n (b\"milliether\", b\"finney\", ): 10**15,\n (b\"ether\", ): 10**18,\n (b\"kether\", b\"grand\"): 10**21,\n }\n\n for names, denom in names_denom.items():\n if args[1] in names:\n denomination = denom\n break\n else:\n raise InvalidLiteralException(\n \"Invalid denomination: %s, valid denominations are: %s\" % (\n args[1],\n \",\".join(x[0].decode() for x in names_denom)\n ),\n expr.args[1]\n )\n # Compute the amount of wei and return that value\n if isinstance(args[0], (int, float)):\n expr_args_0 = expr.args[0]\n # On constant reference fetch value node of constant assignment.\n if context.constants.ast_is_constant(expr.args[0]):\n expr_args_0 = context.constants._constants_ast[expr.args[0].id]\n numstring, num, den = get_number_as_fraction(expr_args_0, context)\n if denomination % den:\n raise InvalidLiteralException(\"Too many decimal places: %s\" % numstring, expr.args[0])\n sub = num * denomination // den\n elif args[0].typ.is_literal:\n if args[0].value <= 0:\n raise InvalidLiteralException(\"Negative wei value not allowed\", expr)\n sub = ['mul', args[0].value, denomination]\n elif args[0].typ.typ == 'uint256':\n sub = ['mul', args[0], denomination]\n else:\n sub = ['div', ['mul', args[0], denomination], DECIMAL_DIVISOR]\n\n return LLLnode.from_list(\n sub,\n typ=BaseType('uint256', {'wei': 1}),\n location=None,\n pos=getpos(expr),\n )\n\n\nzero_value = LLLnode.from_list(0, typ=BaseType('uint256', {'wei': 1}))\nfalse_value = LLLnode.from_list(0, typ=BaseType('bool', is_literal=True))\n\n\n@signature(\n 'address',\n 'bytes',\n outsize='num_literal',\n gas='uint256',\n value=Optional('uint256', zero_value),\n delegate_call=Optional('bool', false_value),\n)\ndef raw_call(expr, args, kwargs, context):\n to, data = args\n gas, value, outsize, delegate_call = (\n kwargs['gas'],\n kwargs['value'],\n kwargs['outsize'],\n kwargs['delegate_call'],\n )\n if delegate_call.typ.is_literal is False:\n raise TypeMismatchException(\n 'The delegate_call parameter has to be a static/literal boolean value.'\n )\n if context.is_constant():\n raise ConstancyViolationException(\n \"Cannot make calls from %s\" % context.pp_constancy(),\n expr,\n )\n if value != zero_value:\n enforce_units(\n value.typ,\n get_keyword(expr, 'value'),\n BaseType('uint256', {'wei': 1}),\n )\n placeholder = context.new_placeholder(data.typ)\n placeholder_node = LLLnode.from_list(placeholder, typ=data.typ, location='memory')\n copier = make_byte_array_copier(placeholder_node, data, pos=getpos(expr))\n output_placeholder = context.new_placeholder(ByteArrayType(outsize))\n output_node = LLLnode.from_list(\n output_placeholder,\n typ=ByteArrayType(outsize),\n location='memory',\n )\n\n if delegate_call.value == 1:\n z = LLLnode.from_list(\n [\n 'seq',\n copier,\n [\n 'assert',\n [\n 'delegatecall',\n gas,\n to,\n ['add', placeholder_node, 32],\n ['mload', placeholder_node],\n ['add', output_node, 32],\n outsize,\n ],\n ],\n ['mstore', output_node, outsize],\n output_node,\n ],\n typ=ByteArrayType(outsize),\n location='memory',\n pos=getpos(expr),\n )\n else:\n z = LLLnode.from_list(\n [\n 'seq',\n copier,\n [\n 'assert',\n [\n 'call',\n gas,\n to,\n value,\n ['add', placeholder_node, 32],\n ['mload', placeholder_node],\n ['add', output_node, 32],\n outsize,\n ],\n ],\n ['mstore', output_node, outsize],\n output_node,\n ],\n typ=ByteArrayType(outsize), location='memory', pos=getpos(expr)\n )\n return z\n\n\n@signature('address', 'uint256')\ndef send(expr, args, kwargs, context):\n to, value = args\n if context.is_constant():\n raise ConstancyViolationException(\n \"Cannot send ether inside %s!\" % context.pp_constancy(),\n expr,\n )\n enforce_units(value.typ, expr.args[1], BaseType('uint256', {'wei': 1}))\n return LLLnode.from_list(\n ['assert', ['call', 0, to, value, 0, 0, 0, 0]],\n typ=None,\n pos=getpos(expr),\n )\n\n\n@signature('address')\ndef selfdestruct(expr, args, kwargs, context):\n if context.is_constant():\n raise ConstancyViolationException(\n \"Cannot %s inside %s!\" % (expr.func.id, context.pp_constancy()),\n expr.func,\n )\n return LLLnode.from_list(['selfdestruct', args[0]], typ=None, pos=getpos(expr))\n\n\n@signature(('uint256'))\ndef blockhash(expr, args, kwargs, contact):\n return LLLnode.from_list(\n ['blockhash', ['uclamplt', ['clampge', args[0], ['sub', ['number'], 256]], 'number']],\n typ=BaseType('bytes32'),\n pos=getpos(expr),\n )\n\n\n@signature('bytes', '*')\ndef _RLPlist(expr, args, kwargs, context):\n # Second argument must be a list of types\n if not isinstance(args[1], ast.List):\n raise TypeMismatchException(\"Expecting list of types for second argument\", args[1])\n if len(args[1].elts) == 0:\n raise TypeMismatchException(\"RLP list must have at least one item\", expr)\n if len(args[1].elts) > 32:\n raise TypeMismatchException(\"RLP list must have at most 32 items\", expr)\n # Get the output format\n _format = []\n for arg in args[1].elts:\n if isinstance(arg, ast.Name) and arg.id == \"bytes\":\n subtyp = ByteArrayType(args[0].typ.maxlen)\n else:\n subtyp = context.parse_type(arg, 'memory')\n if not isinstance(subtyp, BaseType):\n raise TypeMismatchException(\"RLP lists only accept BaseTypes and byte arrays\", arg)\n if not is_base_type(subtyp, ('int128', 'uint256', 'bytes32', 'address', 'bool')):\n raise TypeMismatchException(\"Unsupported base type: %s\" % subtyp.typ, arg)\n _format.append(subtyp)\n output_type = TupleType(_format)\n output_placeholder_type = ByteArrayType(\n (2 * len(_format) + 1 + get_size_of_type(output_type)) * 32,\n )\n output_placeholder = context.new_placeholder(output_placeholder_type)\n output_node = LLLnode.from_list(\n output_placeholder,\n typ=output_placeholder_type,\n location='memory',\n )\n # Create a decoder for each element in the tuple\n decoder = []\n for i, typ in enumerate(_format):\n # Decoder for bytes32\n if is_base_type(typ, 'bytes32'):\n decoder.append(LLLnode.from_list(\n [\n 'seq',\n [\n 'assert',\n [\n 'eq',\n [\n 'mload',\n [\n 'add',\n output_node,\n ['mload', ['add', output_node, 32 * i]],\n ],\n ],\n 32,\n ],\n ],\n [\n 'mload',\n [\n 'add',\n 32,\n [\n 'add',\n output_node,\n ['mload', ['add', output_node, 32 * i]],\n ],\n ],\n ],\n ],\n typ,\n annotation='getting and checking bytes32 item',\n ))\n # Decoder for address\n elif is_base_type(typ, 'address'):\n decoder.append(LLLnode.from_list(\n [\n 'seq',\n [\n 'assert',\n [\n 'eq',\n [\n 'mload',\n [\n 'add',\n output_node,\n ['mload', ['add', output_node, 32 * i]],\n ],\n ],\n 20,\n ]\n ],\n [\n 'mod',\n [\n 'mload',\n [\n 'add',\n 20,\n ['add', output_node, ['mload', ['add', output_node, 32 * i]]],\n ],\n ],\n ['mload', MemoryPositions.ADDRSIZE],\n ]\n ],\n typ,\n annotation='getting and checking address item',\n ))\n # Decoder for bytes\n elif isinstance(typ, ByteArrayType):\n decoder.append(LLLnode.from_list(\n [\n 'add',\n output_node,\n ['mload', ['add', output_node, 32 * i]],\n ],\n typ,\n location='memory',\n annotation='getting byte array',\n ))\n # Decoder for num and uint256\n elif is_base_type(typ, ('int128', 'uint256')):\n bytez = LLLnode.from_list(\n [\n 'add',\n output_node,\n ['mload', ['add', output_node, 32 * i]],\n ],\n typ,\n location='memory',\n annotation='getting and checking %s' % typ.typ,\n )\n decoder.append(byte_array_to_num(bytez, expr, typ.typ))\n # Decoder for bools\n elif is_base_type(typ, ('bool')):\n # This is basically a really clever way to test for a\n # length-prefixed one or zero. We take the 32 bytes starting one\n # byte *after* the start of the length declaration; this includes\n # the last 31 bytes of the length and the first byte of the value.\n # 0 corresponds to length 0, first byte 0, and 257 corresponds to\n # length 1, first byte \\x01\n decoder.append(LLLnode.from_list(\n [\n 'with', '_ans', [\n 'mload',\n [\n 'add',\n 1,\n ['add', output_node, ['mload', ['add', output_node, 32 * i]]]\n ],\n ],\n [\n 'seq',\n ['assert', ['or', ['eq', '_ans', 0], ['eq', '_ans', 257]]],\n ['div', '_ans', 257],\n ],\n ],\n typ,\n annotation='getting and checking bool',\n ))\n else:\n # Should never reach because of top level base level check.\n raise Exception(\"Type not yet supported\") # pragma: no cover\n # Copy the input data to memory\n if args[0].location == \"memory\":\n variable_pointer = args[0]\n elif args[0].location == \"storage\":\n placeholder = context.new_placeholder(args[0].typ)\n placeholder_node = LLLnode.from_list(placeholder, typ=args[0].typ, location='memory')\n copier = make_byte_array_copier(\n placeholder_node,\n LLLnode.from_list('_ptr', typ=args[0].typ, location=args[0].location),\n )\n variable_pointer = ['with', '_ptr', args[0], ['seq', copier, placeholder_node]]\n else:\n # Should never reach because of top level base level check.\n raise Exception(\"Location not yet supported\") # pragma: no cover\n # Decode the input data\n initial_setter = LLLnode.from_list(\n ['seq',\n ['with', '_sub', variable_pointer,\n ['pop', ['call',\n 1500 + 400 * len(_format) + 10 * len(args),\n LLLnode.from_list(RLP_DECODER_ADDRESS, annotation='RLP decoder'),\n 0,\n ['add', '_sub', 32],\n ['mload', '_sub'],\n output_node,\n 64 * len(_format) + 32 + 32 * get_size_of_type(output_type)]]],\n ['assert', ['eq', ['mload', output_node], 32 * len(_format) + 32]]],\n typ=None)\n # Shove the input data decoder in front of the first variable decoder\n decoder[0] = LLLnode.from_list(\n ['seq', initial_setter, decoder[0]],\n typ=decoder[0].typ,\n location=decoder[0].location,\n )\n return LLLnode.from_list(\n [\"multi\"] + decoder,\n typ=output_type,\n location='memory',\n pos=getpos(expr),\n )\n\n\n@signature('*', 'bytes')\ndef raw_log(expr, args, kwargs, context):\n if not isinstance(args[0], ast.List) or len(args[0].elts) > 4:\n raise StructureException(\"Expecting a list of 0-4 topics as first argument\", args[0])\n topics = []\n for elt in args[0].elts:\n arg = Expr.parse_value_expr(elt, context)\n if not is_base_type(arg.typ, 'bytes32'):\n raise TypeMismatchException(\"Expecting a bytes32 argument as topic\", elt)\n topics.append(arg)\n if args[1].location == \"memory\":\n return LLLnode.from_list([\n \"with\", \"_arr\", args[1], [\n \"log\" + str(len(topics)),\n [\"add\", \"_arr\", 32],\n [\"mload\", \"_arr\"],\n ] + topics\n ], typ=None, pos=getpos(expr))\n placeholder = context.new_placeholder(args[1].typ)\n placeholder_node = LLLnode.from_list(placeholder, typ=args[1].typ, location='memory')\n copier = make_byte_array_copier(\n placeholder_node,\n LLLnode.from_list('_sub', typ=args[1].typ, location=args[1].location),\n pos=getpos(expr),\n )\n return LLLnode.from_list(\n [\n \"with\", \"_sub\", args[1],\n [\n \"seq\",\n copier,\n [\n \"log\" + str(len(topics)),\n [\"add\", placeholder_node, 32],\n [\"mload\", placeholder_node],\n ] + topics\n ],\n ],\n typ=None,\n pos=getpos(expr),\n )\n\n\n@signature('uint256', 'uint256')\ndef bitwise_and(expr, args, kwargs, context):\n return LLLnode.from_list(['and', args[0], args[1]], typ=BaseType('uint256'), pos=getpos(expr))\n\n\n@signature('uint256', 'uint256')\ndef bitwise_or(expr, args, kwargs, context):\n return LLLnode.from_list(['or', args[0], args[1]], typ=BaseType('uint256'), pos=getpos(expr))\n\n\n@signature('uint256', 'uint256')\ndef bitwise_xor(expr, args, kwargs, context):\n return LLLnode.from_list(['xor', args[0], args[1]], typ=BaseType('uint256'), pos=getpos(expr))\n\n\n@signature('uint256', 'uint256', 'uint256')\ndef uint256_addmod(expr, args, kwargs, context):\n return LLLnode.from_list(\n [\n 'seq',\n ['assert', args[2]],\n ['assert', ['or', ['iszero', args[1]], ['gt', ['add', args[0], args[1]], args[0]]]],\n ['addmod', args[0], args[1], args[2]],\n ],\n typ=BaseType('uint256'),\n pos=getpos(expr),\n )\n\n\n@signature('uint256', 'uint256', 'uint256')\ndef uint256_mulmod(expr, args, kwargs, context):\n return LLLnode.from_list(\n [\n 'seq',\n ['assert', args[2]],\n ['assert', [\n 'or',\n ['iszero', args[0]],\n ['eq', ['div', ['mul', args[0], args[1]], args[0]], args[1]],\n ]],\n ['mulmod', args[0], args[1], args[2]],\n ],\n typ=BaseType('uint256'),\n pos=getpos(expr),\n )\n\n\n@signature('uint256')\ndef bitwise_not(expr, args, kwargs, context):\n return LLLnode.from_list(['not', args[0]], typ=BaseType('uint256'), pos=getpos(expr))\n\n\n@signature('uint256', 'int128')\ndef shift(expr, args, kwargs, context):\n return LLLnode.from_list(\n [\n 'with', '_v', args[0], [\n 'with', '_s', args[1], [\n # If second argument is positive, left-shift so multiply by a power of two\n # If it is negative, divide by a power of two\n # node that if the abs of the second argument >= 256, then in the EVM\n # 2**(second arg) = 0, and multiplying OR dividing by 0 gives 0\n 'if',\n ['slt', '_s', 0],\n ['div', '_v', ['exp', 2, ['sub', 0, '_s']]],\n ['mul', '_v', ['exp', 2, '_s']]\n ],\n ],\n ],\n typ=BaseType('uint256'),\n pos=getpos(expr),\n )\n\n\ndef get_create_forwarder_to_bytecode():\n from vyper.compile_lll import (\n assembly_to_evm,\n num_to_bytearray\n )\n code_a = [\n 'PUSH1', 0x33,\n 'PUSH1', 0x0c,\n 'PUSH1', 0x00,\n 'CODECOPY',\n 'PUSH1', 0x33,\n 'PUSH1', 0x00,\n 'RETURN',\n 'CALLDATASIZE',\n 'PUSH1', 0x00,\n 'PUSH1', 0x00,\n 'CALLDATACOPY',\n 'PUSH2', num_to_bytearray(0x1000),\n 'PUSH1', 0x00,\n 'CALLDATASIZE',\n 'PUSH1', 0x00,\n 'PUSH20', # [address to delegate to]\n ]\n code_b = [\n 'GAS',\n 'DELEGATECALL',\n 'PUSH1', 0x2c, # jumpdest of whole program.\n 'JUMPI',\n 'PUSH1', 0x0,\n 'DUP1',\n 'REVERT',\n 'JUMPDEST',\n 'PUSH2', num_to_bytearray(0x1000),\n 'PUSH1', 0x00,\n 'RETURN'\n ]\n return assembly_to_evm(code_a)[0] + (b'\\x00' * 20) + assembly_to_evm(code_b)[0]\n\n\n@signature('address', value=Optional('uint256', zero_value))\ndef create_forwarder_to(expr, args, kwargs, context):\n\n value = kwargs['value']\n if value != zero_value:\n enforce_units(value.typ, get_keyword(expr, 'value'),\n BaseType('uint256', {'wei': 1}))\n if context.is_constant():\n raise ConstancyViolationException(\n \"Cannot make calls from %s\" % context.pp_constancy(),\n expr,\n )\n placeholder = context.new_placeholder(ByteArrayType(96))\n\n kode = get_create_forwarder_to_bytecode()\n high = bytes_to_int(kode[:32])\n low = bytes_to_int((kode + b'\\x00' * 32)[47:79])\n\n return LLLnode.from_list(\n [\n 'seq',\n ['mstore', placeholder, high],\n ['mstore', ['add', placeholder, 27], ['mul', args[0], 2**96]],\n ['mstore', ['add', placeholder, 47], low],\n ['clamp_nonzero', ['create', value, placeholder, 96]],\n ],\n typ=BaseType('address'),\n pos=getpos(expr),\n add_gas_estimate=11000,\n )\n\n\n@signature(('int128', 'decimal', 'uint256'), ('int128', 'decimal', 'uint256'))\ndef _min(expr, args, kwargs, context):\n return minmax(expr, args, kwargs, context, True)\n\n\n@signature(('int128', 'decimal', 'uint256'), ('int128', 'decimal', 'uint256'))\ndef _max(expr, args, kwargs, context):\n return minmax(expr, args, kwargs, context, False)\n\n\ndef minmax(expr, args, kwargs, context, is_min):\n def _can_compare_with_uint256(operand):\n if operand.typ.typ == 'uint256':\n return True\n elif operand.typ.typ == 'int128' and operand.typ.is_literal and SizeLimits.in_bounds('uint256', operand.value): # noqa: E501\n return True\n return False\n\n left, right = args[0], args[1]\n if not are_units_compatible(left.typ, right.typ) and not are_units_compatible(right.typ, left.typ): # noqa: E501\n raise TypeMismatchException(\"Units must be compatible\", expr)\n if left.typ.typ == 'uint256':\n comparator = 'gt' if is_min else 'lt'\n else:\n comparator = 'sgt' if is_min else 'slt'\n if left.typ.typ == right.typ.typ:\n o = ['if', [comparator, '_l', '_r'], '_r', '_l']\n otyp = left.typ\n otyp.is_literal = False\n elif _can_compare_with_uint256(left) and _can_compare_with_uint256(right):\n o = ['if', [comparator, '_l', '_r'], '_r', '_l']\n if right.typ.typ == 'uint256':\n otyp = right.typ\n else:\n otyp = left.typ\n otyp.is_literal = False\n else:\n raise TypeMismatchException(\n \"Minmax types incompatible: %s %s\" % (left.typ.typ, right.typ.typ)\n )\n return LLLnode.from_list(\n ['with', '_l', left, ['with', '_r', right, o]],\n typ=otyp,\n pos=getpos(expr),\n )\n\n\n@signature('decimal')\ndef sqrt(expr, args, kwargs, context):\n from vyper.functions.utils import (\n generate_inline_function,\n )\n arg = args[0]\n sqrt_code = \"\"\"\nassert x >= 0.0\nz: decimal\n\nif x == 0.0:\n z = 0.0\nelse:\n z = (x + 1.0) / 2.0\n y: decimal = x\n\n for i in range(256):\n if z == y:\n break\n y = z\n z = (x / z + z) / 2.0\n \"\"\"\n\n x_type = BaseType('decimal')\n placeholder_copy = ['pass']\n # Steal current position if variable is already allocated.\n if arg.value == 'mload':\n new_var_pos = arg.args[0]\n # Other locations need to be copied.\n else:\n new_var_pos = context.new_placeholder(x_type)\n placeholder_copy = ['mstore', new_var_pos, arg]\n # Create input variables.\n variables = {\n 'x': VariableRecord(\n name='x',\n pos=new_var_pos,\n typ=x_type,\n mutable=False\n )\n }\n # Generate inline LLL.\n new_ctx, sqrt_lll = generate_inline_function(\n code=sqrt_code,\n variables=variables,\n memory_allocator=context.memory_allocator\n )\n return LLLnode.from_list(\n [\n 'seq_unchecked',\n placeholder_copy, # load x variable\n sqrt_lll,\n ['mload', new_ctx.vars['z'].pos] # unload z variable into the stack,\n ],\n typ=BaseType('decimal'),\n pos=getpos(expr),\n )\n\n\ndef _clear():\n raise ParserException(\n \"This function should never be called! `clear()` is currently handled \"\n \"differently than other functions as it self modifies its input argument \"\n \"statement. Please see `_clear()` in `stmt.py`\"\n )\n\n\ndispatch_table = {\n 'floor': floor,\n 'ceil': ceil,\n 'as_unitless_number': as_unitless_number,\n 'convert': _convert,\n 'slice': _slice,\n 'len': _len,\n 'concat': concat,\n 'sha3': _sha3,\n 'sha256': sha256,\n 'method_id': method_id,\n 'keccak256': _sha3,\n 'ecrecover': ecrecover,\n 'ecadd': ecadd,\n 'ecmul': ecmul,\n 'extract32': extract32,\n 'as_wei_value': as_wei_value,\n 'raw_call': raw_call,\n 'RLPList': _RLPlist,\n 'blockhash': blockhash,\n 'bitwise_and': bitwise_and,\n 'bitwise_or': bitwise_or,\n 'bitwise_xor': bitwise_xor,\n 'bitwise_not': bitwise_not,\n 'uint256_addmod': uint256_addmod,\n 'uint256_mulmod': uint256_mulmod,\n 'sqrt': sqrt,\n 'shift': shift,\n 'create_forwarder_to': create_forwarder_to,\n 'min': _min,\n 'max': _max,\n}\n\nstmt_dispatch_table = {\n 'clear': _clear,\n 'send': send,\n 'selfdestruct': selfdestruct,\n 'raw_call': raw_call,\n 'raw_log': raw_log,\n 'create_forwarder_to': create_forwarder_to,\n}\n\nbuilt_in_functions = [\n x for x in stmt_dispatch_table.keys()\n] + [\n x for x in dispatch_table.keys()\n]\n", "path": "vyper/functions/functions.py"}], "after_files": [{"content": "import hashlib\n\nfrom vyper import ast\nfrom vyper.exceptions import (\n ConstancyViolationException,\n InvalidLiteralException,\n ParserException,\n StructureException,\n TypeMismatchException,\n)\nfrom vyper.parser.expr import (\n Expr,\n)\nfrom vyper.parser.parser_utils import (\n LLLnode,\n add_variable_offset,\n byte_array_to_num,\n get_length,\n get_number_as_fraction,\n getpos,\n make_byte_array_copier,\n make_byte_slice_copier,\n unwrap_location,\n)\nfrom vyper.signatures.function_signature import (\n VariableRecord,\n)\nfrom vyper.types import (\n BaseType,\n ByteArrayLike,\n ByteArrayType,\n ListType,\n StringType,\n TupleType,\n are_units_compatible,\n get_size_of_type,\n is_base_type,\n)\nfrom vyper.types.convert import (\n convert,\n)\nfrom vyper.utils import (\n DECIMAL_DIVISOR,\n RLP_DECODER_ADDRESS,\n MemoryPositions,\n SizeLimits,\n bytes_to_int,\n fourbytes_to_int,\n sha3,\n)\n\nfrom .signatures import (\n Optional,\n signature,\n)\n\nSHA256_ADDRESS = 2\nSHA256_BASE_GAS = 60\nSHA256_PER_WORD_GAS = 12\n\n\ndef enforce_units(typ, obj, expected):\n if not are_units_compatible(typ, expected):\n raise TypeMismatchException(\"Invalid units\", obj)\n\n\ndef get_keyword(expr, keyword):\n for kw in expr.keywords:\n if kw.arg == keyword:\n return kw.value\n # This should never happen, as kwargs['value'] will KeyError first.\n # Leaving exception for other use cases.\n raise Exception(\"Keyword %s not found\" % keyword) # pragma: no cover\n\n\n@signature('decimal')\ndef floor(expr, args, kwargs, context):\n return LLLnode.from_list(\n [\n 'if',\n ['slt', args[0], 0],\n ['sdiv', ['sub', args[0], DECIMAL_DIVISOR - 1], DECIMAL_DIVISOR],\n ['sdiv', args[0], DECIMAL_DIVISOR]\n ],\n typ=BaseType('int128', args[0].typ.unit, args[0].typ.positional),\n pos=getpos(expr)\n )\n\n\n@signature('decimal')\ndef ceil(expr, args, kwards, context):\n return LLLnode.from_list(\n [\n 'if',\n ['slt', args[0], 0],\n ['sdiv', args[0], DECIMAL_DIVISOR],\n ['sdiv', ['add', args[0], DECIMAL_DIVISOR - 1], DECIMAL_DIVISOR]\n ],\n typ=BaseType('int128', args[0].typ.unit, args[0].typ.positional),\n pos=getpos(expr)\n )\n\n\n@signature(('uint256', 'int128', 'decimal'))\ndef as_unitless_number(expr, args, kwargs, context):\n return LLLnode(\n value=args[0].value,\n args=args[0].args,\n typ=BaseType(args[0].typ.typ, {}),\n pos=getpos(expr),\n )\n\n\ndef _convert(expr, context):\n return convert(expr, context)\n\n\n@signature(('bytes32', 'bytes', 'string'), start='int128', len='int128')\ndef _slice(expr, args, kwargs, context):\n\n sub, start, length = args[0], kwargs['start'], kwargs['len']\n if not are_units_compatible(start.typ, BaseType('int128')):\n raise TypeMismatchException(\"Type for slice start index must be a unitless number\", expr)\n # Expression representing the length of the slice\n if not are_units_compatible(length.typ, BaseType('int128')):\n raise TypeMismatchException(\"Type for slice length must be a unitless number\", expr)\n\n if is_base_type(sub.typ, 'bytes32'):\n if (start.typ.is_literal and length.typ.is_literal) and \\\n not (0 <= start.value + length.value <= 32):\n raise InvalidLiteralException(\n 'Invalid start / length values needs to be between 0 and 32.',\n expr,\n )\n sub_typ_maxlen = 32\n else:\n sub_typ_maxlen = sub.typ.maxlen\n\n # Get returntype string or bytes\n if isinstance(args[0].typ, ByteArrayType) or is_base_type(sub.typ, 'bytes32'):\n ReturnType = ByteArrayType\n else:\n ReturnType = StringType\n\n # Node representing the position of the output in memory\n np = context.new_placeholder(ReturnType(maxlen=sub_typ_maxlen + 32))\n placeholder_node = LLLnode.from_list(np, typ=sub.typ, location='memory')\n placeholder_plus_32_node = LLLnode.from_list(np + 32, typ=sub.typ, location='memory')\n # Copies over bytearray data\n if sub.location == 'storage':\n adj_sub = LLLnode.from_list(\n ['add', ['sha3_32', sub], ['add', ['div', '_start', 32], 1]],\n typ=sub.typ,\n location=sub.location,\n )\n else:\n adj_sub = LLLnode.from_list(\n ['add', sub, ['add', ['sub', '_start', ['mod', '_start', 32]], 32]],\n typ=sub.typ,\n location=sub.location,\n )\n\n if is_base_type(sub.typ, 'bytes32'):\n adj_sub = LLLnode.from_list(\n sub.args[0], typ=sub.typ, location=\"memory\"\n )\n\n copier = make_byte_slice_copier(\n placeholder_plus_32_node,\n adj_sub,\n ['add', '_length', 32],\n sub_typ_maxlen,\n pos=getpos(expr),\n )\n # New maximum length in the type of the result\n newmaxlen = length.value if not len(length.args) else sub_typ_maxlen\n if is_base_type(sub.typ, 'bytes32'):\n maxlen = 32\n else:\n maxlen = ['mload', Expr(sub, context=context).lll_node] # Retrieve length of the bytes.\n\n out = [\n 'with', '_start', start, [\n 'with', '_length', length, [\n 'with', '_opos', ['add', placeholder_node, ['mod', '_start', 32]], [\n 'seq',\n ['assert', ['le', ['add', '_start', '_length'], maxlen]],\n copier,\n ['mstore', '_opos', '_length'],\n '_opos'\n ],\n ],\n ],\n ]\n return LLLnode.from_list(out, typ=ReturnType(newmaxlen), location='memory', pos=getpos(expr))\n\n\n@signature(('bytes', 'string'))\ndef _len(expr, args, kwargs, context):\n return get_length(args[0])\n\n\ndef concat(expr, context):\n args = [Expr(arg, context).lll_node for arg in expr.args]\n if len(args) < 2:\n raise StructureException(\"Concat expects at least two arguments\", expr)\n\n prev_type = ''\n for _, (expr_arg, arg) in enumerate(zip(expr.args, args)):\n if not isinstance(arg.typ, ByteArrayLike) and not is_base_type(arg.typ, 'bytes32'):\n raise TypeMismatchException(\"Concat expects string, bytes or bytes32 objects\", expr_arg)\n\n current_type = (\n 'bytes'\n if isinstance(arg.typ, ByteArrayType) or is_base_type(arg.typ, 'bytes32')\n else 'string'\n )\n if prev_type and current_type != prev_type:\n raise TypeMismatchException(\n (\n \"Concat expects consistant use of string or byte types, \"\n \"user either bytes or string.\"\n ),\n expr_arg,\n )\n prev_type = current_type\n\n if current_type == 'string':\n ReturnType = StringType\n else:\n ReturnType = ByteArrayType\n\n # Maximum length of the output\n total_maxlen = sum([\n arg.typ.maxlen if isinstance(arg.typ, ByteArrayLike) else 32 for arg in args\n ])\n # Node representing the position of the output in memory\n placeholder = context.new_placeholder(ReturnType(total_maxlen))\n # Object representing the output\n seq = []\n # For each argument we are concatenating...\n for arg in args:\n # Start pasting into a position the starts at zero, and keeps\n # incrementing as we concatenate arguments\n placeholder_node = LLLnode.from_list(\n ['add', placeholder, '_poz'],\n typ=ReturnType(total_maxlen),\n location='memory',\n )\n placeholder_node_plus_32 = LLLnode.from_list(\n ['add', ['add', placeholder, '_poz'], 32],\n typ=ReturnType(total_maxlen),\n location='memory',\n )\n if isinstance(arg.typ, ReturnType):\n # Ignore empty strings\n if arg.typ.maxlen == 0:\n continue\n # Get the length of the current argument\n if arg.location == \"memory\":\n length = LLLnode.from_list(['mload', '_arg'], typ=BaseType('int128'))\n argstart = LLLnode.from_list(\n ['add', '_arg', 32],\n typ=arg.typ,\n location=arg.location,\n )\n elif arg.location == \"storage\":\n length = LLLnode.from_list(['sload', ['sha3_32', '_arg']], typ=BaseType('int128'))\n argstart = LLLnode.from_list(\n ['add', ['sha3_32', '_arg'], 1],\n typ=arg.typ,\n location=arg.location,\n )\n # Make a copier to copy over data from that argument\n seq.append([\n 'with', '_arg', arg, [\n 'seq',\n make_byte_slice_copier(\n placeholder_node_plus_32,\n argstart,\n length,\n arg.typ.maxlen, pos=getpos(expr),\n ),\n # Change the position to start at the correct\n # place to paste the next value\n ['set', '_poz', ['add', '_poz', length]],\n ],\n ])\n else:\n seq.append([\n 'seq',\n ['mstore', ['add', placeholder_node, 32], unwrap_location(arg)],\n ['set', '_poz', ['add', '_poz', 32]],\n ])\n # The position, after all arguments are processing, equals the total\n # length. Paste this in to make the output a proper bytearray\n seq.append(['mstore', placeholder, '_poz'])\n # Memory location of the output\n seq.append(placeholder)\n return LLLnode.from_list(\n ['with', '_poz', 0, ['seq'] + seq],\n typ=ReturnType(total_maxlen),\n location='memory',\n pos=getpos(expr),\n annotation='concat',\n )\n\n\n@signature(('bytes_literal', 'str_literal', 'bytes', 'string', 'bytes32'))\ndef _sha3(expr, args, kwargs, context):\n sub = args[0]\n # Can hash literals\n if isinstance(sub, bytes):\n return LLLnode.from_list(\n bytes_to_int(sha3(sub)),\n typ=BaseType('bytes32'),\n pos=getpos(expr)\n )\n # Can hash bytes32 objects\n if is_base_type(sub.typ, 'bytes32'):\n return LLLnode.from_list(\n [\n 'seq',\n ['mstore', MemoryPositions.FREE_VAR_SPACE, sub],\n ['sha3', MemoryPositions.FREE_VAR_SPACE, 32]\n ],\n typ=BaseType('bytes32'),\n pos=getpos(expr),\n )\n # Copy the data to an in-memory array\n if sub.location == \"memory\":\n # If we are hashing a value in memory, no need to copy it, just hash in-place\n return LLLnode.from_list(\n ['with', '_sub', sub, ['sha3', ['add', '_sub', 32], ['mload', '_sub']]],\n typ=BaseType('bytes32'),\n pos=getpos(expr),\n )\n elif sub.location == \"storage\":\n lengetter = LLLnode.from_list(['sload', ['sha3_32', '_sub']], typ=BaseType('int128'))\n else:\n # This should never happen, but just left here for future compiler-writers.\n raise Exception(\"Unsupported location: %s\" % sub.location) # pragma: no test\n placeholder = context.new_placeholder(sub.typ)\n placeholder_node = LLLnode.from_list(placeholder, typ=sub.typ, location='memory')\n copier = make_byte_array_copier(\n placeholder_node,\n LLLnode.from_list('_sub', typ=sub.typ, location=sub.location),\n )\n return LLLnode.from_list(\n [\n 'with', '_sub', sub, [\n 'seq',\n copier,\n ['sha3', ['add', placeholder, 32], lengetter]\n ],\n ],\n typ=BaseType('bytes32'),\n pos=getpos(expr)\n )\n\n\ndef _make_sha256_call(inp_start, inp_len, out_start, out_len):\n return [\n 'assert', [\n 'call',\n ['gas'], # gas\n SHA256_ADDRESS, # address\n 0, # value\n inp_start,\n inp_len,\n out_start,\n out_len\n ]\n ]\n\n\n@signature(('bytes_literal', 'str_literal', 'bytes', 'string', 'bytes32'))\ndef sha256(expr, args, kwargs, context):\n sub = args[0]\n # Literal input\n if isinstance(sub, bytes):\n return LLLnode.from_list(\n bytes_to_int(hashlib.sha256(sub).digest()),\n typ=BaseType('bytes32'),\n pos=getpos(expr)\n )\n # bytes32 input\n elif is_base_type(sub.typ, 'bytes32'):\n return LLLnode.from_list(\n [\n 'seq',\n ['mstore', MemoryPositions.FREE_VAR_SPACE, sub],\n _make_sha256_call(\n inp_start=MemoryPositions.FREE_VAR_SPACE,\n inp_len=32,\n out_start=MemoryPositions.FREE_VAR_SPACE,\n out_len=32\n ),\n ['mload', MemoryPositions.FREE_VAR_SPACE] # push value onto stack\n ],\n typ=BaseType('bytes32'),\n pos=getpos(expr),\n add_gas_estimate=SHA256_BASE_GAS + 1 * SHA256_PER_WORD_GAS\n )\n # bytearay-like input\n if sub.location == \"storage\":\n # Copy storage to memory\n placeholder = context.new_placeholder(sub.typ)\n placeholder_node = LLLnode.from_list(placeholder, typ=sub.typ, location='memory')\n copier = make_byte_array_copier(\n placeholder_node,\n LLLnode.from_list('_sub', typ=sub.typ, location=sub.location),\n )\n return LLLnode.from_list(\n [\n 'with', '_sub', sub, [\n 'seq',\n copier,\n _make_sha256_call(\n inp_start=['add', placeholder, 32],\n inp_len=['mload', placeholder],\n out_start=MemoryPositions.FREE_VAR_SPACE,\n out_len=32\n ),\n ['mload', MemoryPositions.FREE_VAR_SPACE]\n ],\n ],\n typ=BaseType('bytes32'),\n pos=getpos(expr),\n add_gas_estimate=SHA256_BASE_GAS + sub.typ.maxlen * SHA256_PER_WORD_GAS\n )\n elif sub.location == \"memory\":\n return LLLnode.from_list(\n [\n 'with', '_sub', sub, [\n 'seq',\n _make_sha256_call(\n inp_start=['add', '_sub', 32],\n inp_len=['mload', '_sub'],\n out_start=MemoryPositions.FREE_VAR_SPACE,\n out_len=32\n ),\n ['mload', MemoryPositions.FREE_VAR_SPACE]\n ]\n ],\n typ=BaseType('bytes32'),\n pos=getpos(expr),\n add_gas_estimate=SHA256_BASE_GAS + sub.typ.maxlen * SHA256_PER_WORD_GAS\n )\n else:\n # This should never happen, but just left here for future compiler-writers.\n raise Exception(\"Unsupported location: %s\" % sub.location) # pragma: no test\n\n\n@signature('str_literal', 'name_literal')\ndef method_id(expr, args, kwargs, context):\n if b' ' in args[0]:\n raise TypeMismatchException('Invalid function signature no spaces allowed.')\n method_id = fourbytes_to_int(sha3(args[0])[:4])\n if args[1] == 'bytes32':\n return LLLnode(method_id, typ=BaseType('bytes32'), pos=getpos(expr))\n elif args[1] == 'bytes[4]':\n placeholder = LLLnode.from_list(context.new_placeholder(ByteArrayType(4)))\n return LLLnode.from_list(\n ['seq',\n ['mstore', ['add', placeholder, 4], method_id],\n ['mstore', placeholder, 4], placeholder],\n typ=ByteArrayType(4), location='memory', pos=getpos(expr))\n else:\n raise StructureException('Can only produce bytes32 or bytes[4] as outputs')\n\n\n@signature('bytes32', 'uint256', 'uint256', 'uint256')\ndef ecrecover(expr, args, kwargs, context):\n placeholder_node = LLLnode.from_list(\n context.new_placeholder(ByteArrayType(128)), typ=ByteArrayType(128), location='memory'\n )\n return LLLnode.from_list([\n 'seq',\n ['mstore', placeholder_node, args[0]],\n ['mstore', ['add', placeholder_node, 32], args[1]],\n ['mstore', ['add', placeholder_node, 64], args[2]],\n ['mstore', ['add', placeholder_node, 96], args[3]],\n ['pop', ['call', 3000, 1, 0, placeholder_node, 128, MemoryPositions.FREE_VAR_SPACE, 32]],\n ['mload', MemoryPositions.FREE_VAR_SPACE],\n ], typ=BaseType('address'), pos=getpos(expr))\n\n\ndef avo(arg, ind, pos):\n return unwrap_location(add_variable_offset(arg, LLLnode.from_list(ind, 'int128'), pos=pos))\n\n\n@signature('uint256[2]', 'uint256[2]')\ndef ecadd(expr, args, kwargs, context):\n placeholder_node = LLLnode.from_list(\n context.new_placeholder(ByteArrayType(128)), typ=ByteArrayType(128), location='memory'\n )\n pos = getpos(expr)\n o = LLLnode.from_list([\n 'seq',\n ['mstore', placeholder_node, avo(args[0], 0, pos)],\n ['mstore', ['add', placeholder_node, 32], avo(args[0], 1, pos)],\n ['mstore', ['add', placeholder_node, 64], avo(args[1], 0, pos)],\n ['mstore', ['add', placeholder_node, 96], avo(args[1], 1, pos)],\n ['assert', ['call', 500, 6, 0, placeholder_node, 128, placeholder_node, 64]],\n placeholder_node,\n ], typ=ListType(BaseType('uint256'), 2), pos=getpos(expr), location='memory')\n return o\n\n\n@signature('uint256[2]', 'uint256')\ndef ecmul(expr, args, kwargs, context):\n placeholder_node = LLLnode.from_list(\n context.new_placeholder(ByteArrayType(128)), typ=ByteArrayType(128), location='memory'\n )\n pos = getpos(expr)\n o = LLLnode.from_list([\n 'seq',\n ['mstore', placeholder_node, avo(args[0], 0, pos)],\n ['mstore', ['add', placeholder_node, 32], avo(args[0], 1, pos)],\n ['mstore', ['add', placeholder_node, 64], args[1]],\n ['assert', ['call', 40000, 7, 0, placeholder_node, 96, placeholder_node, 64]],\n placeholder_node,\n ], typ=ListType(BaseType('uint256'), 2), pos=pos, location='memory')\n return o\n\n\ndef _memory_element_getter(index):\n return LLLnode.from_list(\n ['mload', ['add', '_sub', ['add', 32, ['mul', 32, index]]]],\n typ=BaseType('int128'),\n )\n\n\ndef _storage_element_getter(index):\n return LLLnode.from_list(\n ['sload', ['add', ['sha3_32', '_sub'], ['add', 1, index]]],\n typ=BaseType('int128'),\n )\n\n\n@signature('bytes', 'int128', type=Optional('name_literal', 'bytes32'))\ndef extract32(expr, args, kwargs, context):\n sub, index = args\n ret_type = kwargs['type']\n # Get length and specific element\n if sub.location == \"memory\":\n lengetter = LLLnode.from_list(['mload', '_sub'], typ=BaseType('int128'))\n elementgetter = _memory_element_getter\n elif sub.location == \"storage\":\n lengetter = LLLnode.from_list(['sload', ['sha3_32', '_sub']], typ=BaseType('int128'))\n elementgetter = _storage_element_getter\n # TODO: unclosed if/elif clause. Undefined behavior if `sub.location`\n # isn't one of `memory`/`storage`\n\n # Special case: index known to be a multiple of 32\n if isinstance(index.value, int) and not index.value % 32:\n o = LLLnode.from_list(\n [\n 'with', '_sub', sub,\n elementgetter(['div', ['clamp', 0, index, ['sub', lengetter, 32]], 32])\n ],\n typ=BaseType(ret_type),\n annotation='extracting 32 bytes',\n )\n # General case\n else:\n o = LLLnode.from_list([\n 'with', '_sub', sub, [\n 'with', '_len', lengetter, [\n 'with', '_index', ['clamp', 0, index, ['sub', '_len', 32]], [\n 'with', '_mi32', ['mod', '_index', 32], [\n 'with', '_di32', ['div', '_index', 32],\n [\n 'if',\n '_mi32',\n [\n 'add',\n ['mul', elementgetter('_di32'), ['exp', 256, '_mi32']],\n [\n 'div',\n elementgetter(['add', '_di32', 1]),\n ['exp', 256, ['sub', 32, '_mi32']],\n ],\n ],\n elementgetter('_di32'),\n ],\n ],\n ],\n ],\n ],\n ], typ=BaseType(ret_type), pos=getpos(expr), annotation='extracting 32 bytes')\n if ret_type == 'int128':\n return LLLnode.from_list(\n ['clamp', ['mload', MemoryPositions.MINNUM], o, ['mload', MemoryPositions.MAXNUM]],\n typ=BaseType('int128'),\n pos=getpos(expr),\n )\n elif ret_type == 'address':\n return LLLnode.from_list(\n ['uclamplt', o, ['mload', MemoryPositions.ADDRSIZE]],\n typ=BaseType(ret_type),\n pos=getpos(expr),\n )\n else:\n return o\n\n\n@signature(('num_literal', 'int128', 'uint256', 'decimal'), 'str_literal')\ndef as_wei_value(expr, args, kwargs, context):\n # Denominations\n names_denom = {\n (b\"wei\", ): 1,\n (b\"femtoether\", b\"kwei\", b\"babbage\"): 10**3,\n (b\"picoether\", b\"mwei\", b\"lovelace\"): 10**6,\n (b\"nanoether\", b\"gwei\", b\"shannon\"): 10**9,\n (b\"microether\", b\"szabo\", ): 10**12,\n (b\"milliether\", b\"finney\", ): 10**15,\n (b\"ether\", ): 10**18,\n (b\"kether\", b\"grand\"): 10**21,\n }\n\n for names, denom in names_denom.items():\n if args[1] in names:\n denomination = denom\n break\n else:\n raise InvalidLiteralException(\n \"Invalid denomination: %s, valid denominations are: %s\" % (\n args[1],\n \",\".join(x[0].decode() for x in names_denom)\n ),\n expr.args[1]\n )\n # Compute the amount of wei and return that value\n if isinstance(args[0], (int, float)):\n expr_args_0 = expr.args[0]\n # On constant reference fetch value node of constant assignment.\n if context.constants.ast_is_constant(expr.args[0]):\n expr_args_0 = context.constants._constants_ast[expr.args[0].id]\n numstring, num, den = get_number_as_fraction(expr_args_0, context)\n if denomination % den:\n raise InvalidLiteralException(\"Too many decimal places: %s\" % numstring, expr.args[0])\n sub = num * denomination // den\n elif args[0].typ.is_literal:\n if args[0].value <= 0:\n raise InvalidLiteralException(\"Negative wei value not allowed\", expr)\n sub = ['mul', args[0].value, denomination]\n elif args[0].typ.typ == 'uint256':\n sub = ['mul', args[0], denomination]\n else:\n sub = ['div', ['mul', args[0], denomination], DECIMAL_DIVISOR]\n\n return LLLnode.from_list(\n sub,\n typ=BaseType('uint256', {'wei': 1}),\n location=None,\n pos=getpos(expr),\n )\n\n\nzero_value = LLLnode.from_list(0, typ=BaseType('uint256', {'wei': 1}))\nfalse_value = LLLnode.from_list(0, typ=BaseType('bool', is_literal=True))\n\n\n@signature(\n 'address',\n 'bytes',\n outsize='num_literal',\n gas='uint256',\n value=Optional('uint256', zero_value),\n delegate_call=Optional('bool', false_value),\n)\ndef raw_call(expr, args, kwargs, context):\n to, data = args\n gas, value, outsize, delegate_call = (\n kwargs['gas'],\n kwargs['value'],\n kwargs['outsize'],\n kwargs['delegate_call'],\n )\n if delegate_call.typ.is_literal is False:\n raise TypeMismatchException(\n 'The delegate_call parameter has to be a static/literal boolean value.'\n )\n if context.is_constant():\n raise ConstancyViolationException(\n \"Cannot make calls from %s\" % context.pp_constancy(),\n expr,\n )\n if value != zero_value:\n enforce_units(\n value.typ,\n get_keyword(expr, 'value'),\n BaseType('uint256', {'wei': 1}),\n )\n placeholder = context.new_placeholder(data.typ)\n placeholder_node = LLLnode.from_list(placeholder, typ=data.typ, location='memory')\n copier = make_byte_array_copier(placeholder_node, data, pos=getpos(expr))\n output_placeholder = context.new_placeholder(ByteArrayType(outsize))\n output_node = LLLnode.from_list(\n output_placeholder,\n typ=ByteArrayType(outsize),\n location='memory',\n )\n\n if delegate_call.value == 1:\n z = LLLnode.from_list(\n [\n 'seq',\n copier,\n [\n 'assert',\n [\n 'delegatecall',\n gas,\n to,\n ['add', placeholder_node, 32],\n ['mload', placeholder_node],\n ['add', output_node, 32],\n outsize,\n ],\n ],\n ['mstore', output_node, outsize],\n output_node,\n ],\n typ=ByteArrayType(outsize),\n location='memory',\n pos=getpos(expr),\n )\n else:\n z = LLLnode.from_list(\n [\n 'seq',\n copier,\n [\n 'assert',\n [\n 'call',\n gas,\n to,\n value,\n ['add', placeholder_node, 32],\n ['mload', placeholder_node],\n ['add', output_node, 32],\n outsize,\n ],\n ],\n ['mstore', output_node, outsize],\n output_node,\n ],\n typ=ByteArrayType(outsize), location='memory', pos=getpos(expr)\n )\n return z\n\n\n@signature('address', 'uint256')\ndef send(expr, args, kwargs, context):\n to, value = args\n if context.is_constant():\n raise ConstancyViolationException(\n \"Cannot send ether inside %s!\" % context.pp_constancy(),\n expr,\n )\n enforce_units(value.typ, expr.args[1], BaseType('uint256', {'wei': 1}))\n return LLLnode.from_list(\n ['assert', ['call', 0, to, value, 0, 0, 0, 0]],\n typ=None,\n pos=getpos(expr),\n )\n\n\n@signature('address')\ndef selfdestruct(expr, args, kwargs, context):\n if context.is_constant():\n raise ConstancyViolationException(\n \"Cannot %s inside %s!\" % (expr.func.id, context.pp_constancy()),\n expr.func,\n )\n return LLLnode.from_list(['selfdestruct', args[0]], typ=None, pos=getpos(expr))\n\n\n@signature(('uint256'))\ndef blockhash(expr, args, kwargs, contact):\n return LLLnode.from_list(\n ['blockhash', ['uclamplt', ['clampge', args[0], ['sub', ['number'], 256]], 'number']],\n typ=BaseType('bytes32'),\n pos=getpos(expr),\n )\n\n\n@signature('bytes', '*')\ndef _RLPlist(expr, args, kwargs, context):\n # Second argument must be a list of types\n if not isinstance(args[1], ast.List):\n raise TypeMismatchException(\"Expecting list of types for second argument\", args[1])\n if len(args[1].elts) == 0:\n raise TypeMismatchException(\"RLP list must have at least one item\", expr)\n if len(args[1].elts) > 32:\n raise TypeMismatchException(\"RLP list must have at most 32 items\", expr)\n # Get the output format\n _format = []\n for arg in args[1].elts:\n if isinstance(arg, ast.Name) and arg.id == \"bytes\":\n subtyp = ByteArrayType(args[0].typ.maxlen)\n else:\n subtyp = context.parse_type(arg, 'memory')\n if not isinstance(subtyp, BaseType):\n raise TypeMismatchException(\"RLP lists only accept BaseTypes and byte arrays\", arg)\n if not is_base_type(subtyp, ('int128', 'uint256', 'bytes32', 'address', 'bool')):\n raise TypeMismatchException(\"Unsupported base type: %s\" % subtyp.typ, arg)\n _format.append(subtyp)\n output_type = TupleType(_format)\n output_placeholder_type = ByteArrayType(\n (2 * len(_format) + 1 + get_size_of_type(output_type)) * 32,\n )\n output_placeholder = context.new_placeholder(output_placeholder_type)\n output_node = LLLnode.from_list(\n output_placeholder,\n typ=output_placeholder_type,\n location='memory',\n )\n # Create a decoder for each element in the tuple\n decoder = []\n for i, typ in enumerate(_format):\n # Decoder for bytes32\n if is_base_type(typ, 'bytes32'):\n decoder.append(LLLnode.from_list(\n [\n 'seq',\n [\n 'assert',\n [\n 'eq',\n [\n 'mload',\n [\n 'add',\n output_node,\n ['mload', ['add', output_node, 32 * i]],\n ],\n ],\n 32,\n ],\n ],\n [\n 'mload',\n [\n 'add',\n 32,\n [\n 'add',\n output_node,\n ['mload', ['add', output_node, 32 * i]],\n ],\n ],\n ],\n ],\n typ,\n annotation='getting and checking bytes32 item',\n ))\n # Decoder for address\n elif is_base_type(typ, 'address'):\n decoder.append(LLLnode.from_list(\n [\n 'seq',\n [\n 'assert',\n [\n 'eq',\n [\n 'mload',\n [\n 'add',\n output_node,\n ['mload', ['add', output_node, 32 * i]],\n ],\n ],\n 20,\n ]\n ],\n [\n 'mod',\n [\n 'mload',\n [\n 'add',\n 20,\n ['add', output_node, ['mload', ['add', output_node, 32 * i]]],\n ],\n ],\n ['mload', MemoryPositions.ADDRSIZE],\n ]\n ],\n typ,\n annotation='getting and checking address item',\n ))\n # Decoder for bytes\n elif isinstance(typ, ByteArrayType):\n decoder.append(LLLnode.from_list(\n [\n 'add',\n output_node,\n ['mload', ['add', output_node, 32 * i]],\n ],\n typ,\n location='memory',\n annotation='getting byte array',\n ))\n # Decoder for num and uint256\n elif is_base_type(typ, ('int128', 'uint256')):\n bytez = LLLnode.from_list(\n [\n 'add',\n output_node,\n ['mload', ['add', output_node, 32 * i]],\n ],\n typ,\n location='memory',\n annotation='getting and checking %s' % typ.typ,\n )\n decoder.append(byte_array_to_num(bytez, expr, typ.typ))\n # Decoder for bools\n elif is_base_type(typ, ('bool')):\n # This is basically a really clever way to test for a\n # length-prefixed one or zero. We take the 32 bytes starting one\n # byte *after* the start of the length declaration; this includes\n # the last 31 bytes of the length and the first byte of the value.\n # 0 corresponds to length 0, first byte 0, and 257 corresponds to\n # length 1, first byte \\x01\n decoder.append(LLLnode.from_list(\n [\n 'with', '_ans', [\n 'mload',\n [\n 'add',\n 1,\n ['add', output_node, ['mload', ['add', output_node, 32 * i]]]\n ],\n ],\n [\n 'seq',\n ['assert', ['or', ['eq', '_ans', 0], ['eq', '_ans', 257]]],\n ['div', '_ans', 257],\n ],\n ],\n typ,\n annotation='getting and checking bool',\n ))\n else:\n # Should never reach because of top level base level check.\n raise Exception(\"Type not yet supported\") # pragma: no cover\n # Copy the input data to memory\n if args[0].location == \"memory\":\n variable_pointer = args[0]\n elif args[0].location == \"storage\":\n placeholder = context.new_placeholder(args[0].typ)\n placeholder_node = LLLnode.from_list(placeholder, typ=args[0].typ, location='memory')\n copier = make_byte_array_copier(\n placeholder_node,\n LLLnode.from_list('_ptr', typ=args[0].typ, location=args[0].location),\n )\n variable_pointer = ['with', '_ptr', args[0], ['seq', copier, placeholder_node]]\n else:\n # Should never reach because of top level base level check.\n raise Exception(\"Location not yet supported\") # pragma: no cover\n # Decode the input data\n initial_setter = LLLnode.from_list(\n ['seq',\n ['with', '_sub', variable_pointer,\n ['pop', ['call',\n 1500 + 400 * len(_format) + 10 * len(args),\n LLLnode.from_list(RLP_DECODER_ADDRESS, annotation='RLP decoder'),\n 0,\n ['add', '_sub', 32],\n ['mload', '_sub'],\n output_node,\n 64 * len(_format) + 32 + 32 * get_size_of_type(output_type)]]],\n ['assert', ['eq', ['mload', output_node], 32 * len(_format) + 32]]],\n typ=None)\n # Shove the input data decoder in front of the first variable decoder\n decoder[0] = LLLnode.from_list(\n ['seq', initial_setter, decoder[0]],\n typ=decoder[0].typ,\n location=decoder[0].location,\n )\n return LLLnode.from_list(\n [\"multi\"] + decoder,\n typ=output_type,\n location='memory',\n pos=getpos(expr),\n )\n\n\n@signature('*', 'bytes')\ndef raw_log(expr, args, kwargs, context):\n if not isinstance(args[0], ast.List) or len(args[0].elts) > 4:\n raise StructureException(\"Expecting a list of 0-4 topics as first argument\", args[0])\n topics = []\n for elt in args[0].elts:\n arg = Expr.parse_value_expr(elt, context)\n if not is_base_type(arg.typ, 'bytes32'):\n raise TypeMismatchException(\"Expecting a bytes32 argument as topic\", elt)\n topics.append(arg)\n if args[1].location == \"memory\":\n return LLLnode.from_list([\n \"with\", \"_arr\", args[1], [\n \"log\" + str(len(topics)),\n [\"add\", \"_arr\", 32],\n [\"mload\", \"_arr\"],\n ] + topics\n ], typ=None, pos=getpos(expr))\n placeholder = context.new_placeholder(args[1].typ)\n placeholder_node = LLLnode.from_list(placeholder, typ=args[1].typ, location='memory')\n copier = make_byte_array_copier(\n placeholder_node,\n LLLnode.from_list('_sub', typ=args[1].typ, location=args[1].location),\n pos=getpos(expr),\n )\n return LLLnode.from_list(\n [\n \"with\", \"_sub\", args[1],\n [\n \"seq\",\n copier,\n [\n \"log\" + str(len(topics)),\n [\"add\", placeholder_node, 32],\n [\"mload\", placeholder_node],\n ] + topics\n ],\n ],\n typ=None,\n pos=getpos(expr),\n )\n\n\n@signature('uint256', 'uint256')\ndef bitwise_and(expr, args, kwargs, context):\n return LLLnode.from_list(['and', args[0], args[1]], typ=BaseType('uint256'), pos=getpos(expr))\n\n\n@signature('uint256', 'uint256')\ndef bitwise_or(expr, args, kwargs, context):\n return LLLnode.from_list(['or', args[0], args[1]], typ=BaseType('uint256'), pos=getpos(expr))\n\n\n@signature('uint256', 'uint256')\ndef bitwise_xor(expr, args, kwargs, context):\n return LLLnode.from_list(['xor', args[0], args[1]], typ=BaseType('uint256'), pos=getpos(expr))\n\n\n@signature('uint256', 'uint256', 'uint256')\ndef uint256_addmod(expr, args, kwargs, context):\n return LLLnode.from_list(\n [\n 'seq',\n ['assert', args[2]],\n ['assert', ['or', ['iszero', args[1]], ['gt', ['add', args[0], args[1]], args[0]]]],\n ['addmod', args[0], args[1], args[2]],\n ],\n typ=BaseType('uint256'),\n pos=getpos(expr),\n )\n\n\n@signature('uint256', 'uint256', 'uint256')\ndef uint256_mulmod(expr, args, kwargs, context):\n return LLLnode.from_list(\n [\n 'seq',\n ['assert', args[2]],\n ['assert', [\n 'or',\n ['iszero', args[0]],\n ['eq', ['div', ['mul', args[0], args[1]], args[0]], args[1]],\n ]],\n ['mulmod', args[0], args[1], args[2]],\n ],\n typ=BaseType('uint256'),\n pos=getpos(expr),\n )\n\n\n@signature('uint256')\ndef bitwise_not(expr, args, kwargs, context):\n return LLLnode.from_list(['not', args[0]], typ=BaseType('uint256'), pos=getpos(expr))\n\n\n@signature('uint256', 'int128')\ndef shift(expr, args, kwargs, context):\n return LLLnode.from_list(\n [\n 'with', '_v', args[0], [\n 'with', '_s', args[1], [\n # If second argument is positive, left-shift so multiply by a power of two\n # If it is negative, divide by a power of two\n # node that if the abs of the second argument >= 256, then in the EVM\n # 2**(second arg) = 0, and multiplying OR dividing by 0 gives 0\n 'if',\n ['slt', '_s', 0],\n ['div', '_v', ['exp', 2, ['sub', 0, '_s']]],\n ['mul', '_v', ['exp', 2, '_s']]\n ],\n ],\n ],\n typ=BaseType('uint256'),\n pos=getpos(expr),\n )\n\n\ndef get_create_forwarder_to_bytecode():\n from vyper.compile_lll import (\n assembly_to_evm,\n num_to_bytearray\n )\n code_a = [\n 'PUSH1', 0x33,\n 'PUSH1', 0x0c,\n 'PUSH1', 0x00,\n 'CODECOPY',\n 'PUSH1', 0x33,\n 'PUSH1', 0x00,\n 'RETURN',\n 'CALLDATASIZE',\n 'PUSH1', 0x00,\n 'PUSH1', 0x00,\n 'CALLDATACOPY',\n 'PUSH2', num_to_bytearray(0x1000),\n 'PUSH1', 0x00,\n 'CALLDATASIZE',\n 'PUSH1', 0x00,\n 'PUSH20', # [address to delegate to]\n ]\n code_b = [\n 'GAS',\n 'DELEGATECALL',\n 'PUSH1', 0x2c, # jumpdest of whole program.\n 'JUMPI',\n 'PUSH1', 0x0,\n 'DUP1',\n 'REVERT',\n 'JUMPDEST',\n 'PUSH2', num_to_bytearray(0x1000),\n 'PUSH1', 0x00,\n 'RETURN'\n ]\n return assembly_to_evm(code_a)[0] + (b'\\x00' * 20) + assembly_to_evm(code_b)[0]\n\n\n@signature('address', value=Optional('uint256', zero_value))\ndef create_forwarder_to(expr, args, kwargs, context):\n\n value = kwargs['value']\n if value != zero_value:\n enforce_units(value.typ, get_keyword(expr, 'value'),\n BaseType('uint256', {'wei': 1}))\n if context.is_constant():\n raise ConstancyViolationException(\n \"Cannot make calls from %s\" % context.pp_constancy(),\n expr,\n )\n placeholder = context.new_placeholder(ByteArrayType(96))\n\n kode = get_create_forwarder_to_bytecode()\n high = bytes_to_int(kode[:32])\n low = bytes_to_int((kode + b'\\x00' * 32)[47:79])\n\n return LLLnode.from_list(\n [\n 'seq',\n ['mstore', placeholder, high],\n ['mstore', ['add', placeholder, 27], ['mul', args[0], 2**96]],\n ['mstore', ['add', placeholder, 47], low],\n ['clamp_nonzero', ['create', value, placeholder, 96]],\n ],\n typ=BaseType('address'),\n pos=getpos(expr),\n add_gas_estimate=11000,\n )\n\n\n@signature(('int128', 'decimal', 'uint256'), ('int128', 'decimal', 'uint256'))\ndef _min(expr, args, kwargs, context):\n return minmax(expr, args, kwargs, context, True)\n\n\n@signature(('int128', 'decimal', 'uint256'), ('int128', 'decimal', 'uint256'))\ndef _max(expr, args, kwargs, context):\n return minmax(expr, args, kwargs, context, False)\n\n\ndef minmax(expr, args, kwargs, context, is_min):\n def _can_compare_with_uint256(operand):\n if operand.typ.typ == 'uint256':\n return True\n elif operand.typ.typ == 'int128' and operand.typ.is_literal and SizeLimits.in_bounds('uint256', operand.value): # noqa: E501\n return True\n return False\n\n left, right = args[0], args[1]\n if not are_units_compatible(left.typ, right.typ) and not are_units_compatible(right.typ, left.typ): # noqa: E501\n raise TypeMismatchException(\"Units must be compatible\", expr)\n if left.typ.typ == 'uint256':\n comparator = 'gt' if is_min else 'lt'\n else:\n comparator = 'sgt' if is_min else 'slt'\n if left.typ.typ == right.typ.typ:\n o = ['if', [comparator, '_l', '_r'], '_r', '_l']\n otyp = left.typ\n otyp.is_literal = False\n elif _can_compare_with_uint256(left) and _can_compare_with_uint256(right):\n o = ['if', [comparator, '_l', '_r'], '_r', '_l']\n if right.typ.typ == 'uint256':\n otyp = right.typ\n else:\n otyp = left.typ\n otyp.is_literal = False\n else:\n raise TypeMismatchException(\n \"Minmax types incompatible: %s %s\" % (left.typ.typ, right.typ.typ)\n )\n return LLLnode.from_list(\n ['with', '_l', left, ['with', '_r', right, o]],\n typ=otyp,\n pos=getpos(expr),\n )\n\n\n@signature('decimal')\ndef sqrt(expr, args, kwargs, context):\n from vyper.functions.utils import (\n generate_inline_function,\n )\n arg = args[0]\n sqrt_code = \"\"\"\nassert x >= 0.0\nz: decimal\n\nif x == 0.0:\n z = 0.0\nelse:\n z = (x + 1.0) / 2.0\n y: decimal = x\n\n for i in range(256):\n if z == y:\n break\n y = z\n z = (x / z + z) / 2.0\n \"\"\"\n\n x_type = BaseType('decimal')\n placeholder_copy = ['pass']\n # Steal current position if variable is already allocated.\n if arg.value == 'mload':\n new_var_pos = arg.args[0]\n # Other locations need to be copied.\n else:\n new_var_pos = context.new_placeholder(x_type)\n placeholder_copy = ['mstore', new_var_pos, arg]\n # Create input variables.\n variables = {\n 'x': VariableRecord(\n name='x',\n pos=new_var_pos,\n typ=x_type,\n mutable=False\n )\n }\n # Generate inline LLL.\n new_ctx, sqrt_lll = generate_inline_function(\n code=sqrt_code,\n variables=variables,\n memory_allocator=context.memory_allocator\n )\n return LLLnode.from_list(\n [\n 'seq_unchecked',\n placeholder_copy, # load x variable\n sqrt_lll,\n ['mload', new_ctx.vars['z'].pos] # unload z variable into the stack,\n ],\n typ=BaseType('decimal'),\n pos=getpos(expr),\n )\n\n\ndef _clear():\n raise ParserException(\n \"This function should never be called! `clear()` is currently handled \"\n \"differently than other functions as it self modifies its input argument \"\n \"statement. Please see `_clear()` in `stmt.py`\"\n )\n\n\ndispatch_table = {\n 'floor': floor,\n 'ceil': ceil,\n 'as_unitless_number': as_unitless_number,\n 'convert': _convert,\n 'slice': _slice,\n 'len': _len,\n 'concat': concat,\n 'sha3': _sha3,\n 'sha256': sha256,\n 'method_id': method_id,\n 'keccak256': _sha3,\n 'ecrecover': ecrecover,\n 'ecadd': ecadd,\n 'ecmul': ecmul,\n 'extract32': extract32,\n 'as_wei_value': as_wei_value,\n 'raw_call': raw_call,\n 'RLPList': _RLPlist,\n 'blockhash': blockhash,\n 'bitwise_and': bitwise_and,\n 'bitwise_or': bitwise_or,\n 'bitwise_xor': bitwise_xor,\n 'bitwise_not': bitwise_not,\n 'uint256_addmod': uint256_addmod,\n 'uint256_mulmod': uint256_mulmod,\n 'sqrt': sqrt,\n 'shift': shift,\n 'create_forwarder_to': create_forwarder_to,\n 'min': _min,\n 'max': _max,\n}\n\nstmt_dispatch_table = {\n 'clear': _clear,\n 'send': send,\n 'selfdestruct': selfdestruct,\n 'raw_call': raw_call,\n 'raw_log': raw_log,\n 'create_forwarder_to': create_forwarder_to,\n}\n\nbuilt_in_functions = [\n x for x in stmt_dispatch_table.keys()\n] + [\n x for x in dispatch_table.keys()\n]\n", "path": "vyper/functions/functions.py"}]} |
gh_patches_debug_1552 | rasdani/github-patches | git_diff | python-discord__bot-822 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
!clean command returns malformed API request
Since we added attachments to the deleted message logs the clean command was not updated to store this.
When running a clean command the following error is returned:
```
web_1 | Bad Request: /bot/deleted-messages
web_1 | "POST /bot/deleted-messages HTTP/1.1" 400 792
bot_1 | 2020-02-25 19:32:41,081 | bot.cogs.error_handler | DEBUG | API responded with 400 for command clean all: {'deletedmessage_set': [{'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}]}.
```
This is not critical since the clean command still operates, it just does not store deleted messages like intended.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bot/cogs/moderation/modlog.py`
Content:
```
1 import asyncio
2 import difflib
3 import itertools
4 import logging
5 import typing as t
6 from datetime import datetime
7 from itertools import zip_longest
8
9 import discord
10 from dateutil.relativedelta import relativedelta
11 from deepdiff import DeepDiff
12 from discord import Colour
13 from discord.abc import GuildChannel
14 from discord.ext.commands import Cog, Context
15
16 from bot.bot import Bot
17 from bot.constants import Channels, Colours, Emojis, Event, Guild as GuildConstant, Icons, URLs
18 from bot.utils.time import humanize_delta
19
20 log = logging.getLogger(__name__)
21
22 GUILD_CHANNEL = t.Union[discord.CategoryChannel, discord.TextChannel, discord.VoiceChannel]
23
24 CHANNEL_CHANGES_UNSUPPORTED = ("permissions",)
25 CHANNEL_CHANGES_SUPPRESSED = ("_overwrites", "position")
26 MEMBER_CHANGES_SUPPRESSED = ("status", "activities", "_client_status", "nick")
27 ROLE_CHANGES_UNSUPPORTED = ("colour", "permissions")
28
29 VOICE_STATE_ATTRIBUTES = {
30 "channel.name": "Channel",
31 "self_stream": "Streaming",
32 "self_video": "Broadcasting",
33 }
34
35
36 class ModLog(Cog, name="ModLog"):
37 """Logging for server events and staff actions."""
38
39 def __init__(self, bot: Bot):
40 self.bot = bot
41 self._ignored = {event: [] for event in Event}
42
43 self._cached_deletes = []
44 self._cached_edits = []
45
46 async def upload_log(
47 self,
48 messages: t.Iterable[discord.Message],
49 actor_id: int,
50 attachments: t.Iterable[t.List[str]] = None
51 ) -> str:
52 """Upload message logs to the database and return a URL to a page for viewing the logs."""
53 if attachments is None:
54 attachments = []
55
56 response = await self.bot.api_client.post(
57 'bot/deleted-messages',
58 json={
59 'actor': actor_id,
60 'creation': datetime.utcnow().isoformat(),
61 'deletedmessage_set': [
62 {
63 'id': message.id,
64 'author': message.author.id,
65 'channel_id': message.channel.id,
66 'content': message.content,
67 'embeds': [embed.to_dict() for embed in message.embeds],
68 'attachments': attachment,
69 }
70 for message, attachment in zip_longest(messages, attachments)
71 ]
72 }
73 )
74
75 return f"{URLs.site_logs_view}/{response['id']}"
76
77 def ignore(self, event: Event, *items: int) -> None:
78 """Add event to ignored events to suppress log emission."""
79 for item in items:
80 if item not in self._ignored[event]:
81 self._ignored[event].append(item)
82
83 async def send_log_message(
84 self,
85 icon_url: t.Optional[str],
86 colour: t.Union[discord.Colour, int],
87 title: t.Optional[str],
88 text: str,
89 thumbnail: t.Optional[t.Union[str, discord.Asset]] = None,
90 channel_id: int = Channels.mod_log,
91 ping_everyone: bool = False,
92 files: t.Optional[t.List[discord.File]] = None,
93 content: t.Optional[str] = None,
94 additional_embeds: t.Optional[t.List[discord.Embed]] = None,
95 additional_embeds_msg: t.Optional[str] = None,
96 timestamp_override: t.Optional[datetime] = None,
97 footer: t.Optional[str] = None,
98 ) -> Context:
99 """Generate log embed and send to logging channel."""
100 embed = discord.Embed(description=text)
101
102 if title and icon_url:
103 embed.set_author(name=title, icon_url=icon_url)
104
105 embed.colour = colour
106 embed.timestamp = timestamp_override or datetime.utcnow()
107
108 if footer:
109 embed.set_footer(text=footer)
110
111 if thumbnail:
112 embed.set_thumbnail(url=thumbnail)
113
114 if ping_everyone:
115 if content:
116 content = f"@everyone\n{content}"
117 else:
118 content = "@everyone"
119
120 channel = self.bot.get_channel(channel_id)
121 log_message = await channel.send(content=content, embed=embed, files=files)
122
123 if additional_embeds:
124 if additional_embeds_msg:
125 await channel.send(additional_embeds_msg)
126 for additional_embed in additional_embeds:
127 await channel.send(embed=additional_embed)
128
129 return await self.bot.get_context(log_message) # Optionally return for use with antispam
130
131 @Cog.listener()
132 async def on_guild_channel_create(self, channel: GUILD_CHANNEL) -> None:
133 """Log channel create event to mod log."""
134 if channel.guild.id != GuildConstant.id:
135 return
136
137 if isinstance(channel, discord.CategoryChannel):
138 title = "Category created"
139 message = f"{channel.name} (`{channel.id}`)"
140 elif isinstance(channel, discord.VoiceChannel):
141 title = "Voice channel created"
142
143 if channel.category:
144 message = f"{channel.category}/{channel.name} (`{channel.id}`)"
145 else:
146 message = f"{channel.name} (`{channel.id}`)"
147 else:
148 title = "Text channel created"
149
150 if channel.category:
151 message = f"{channel.category}/{channel.name} (`{channel.id}`)"
152 else:
153 message = f"{channel.name} (`{channel.id}`)"
154
155 await self.send_log_message(Icons.hash_green, Colours.soft_green, title, message)
156
157 @Cog.listener()
158 async def on_guild_channel_delete(self, channel: GUILD_CHANNEL) -> None:
159 """Log channel delete event to mod log."""
160 if channel.guild.id != GuildConstant.id:
161 return
162
163 if isinstance(channel, discord.CategoryChannel):
164 title = "Category deleted"
165 elif isinstance(channel, discord.VoiceChannel):
166 title = "Voice channel deleted"
167 else:
168 title = "Text channel deleted"
169
170 if channel.category and not isinstance(channel, discord.CategoryChannel):
171 message = f"{channel.category}/{channel.name} (`{channel.id}`)"
172 else:
173 message = f"{channel.name} (`{channel.id}`)"
174
175 await self.send_log_message(
176 Icons.hash_red, Colours.soft_red,
177 title, message
178 )
179
180 @Cog.listener()
181 async def on_guild_channel_update(self, before: GUILD_CHANNEL, after: GuildChannel) -> None:
182 """Log channel update event to mod log."""
183 if before.guild.id != GuildConstant.id:
184 return
185
186 if before.id in self._ignored[Event.guild_channel_update]:
187 self._ignored[Event.guild_channel_update].remove(before.id)
188 return
189
190 diff = DeepDiff(before, after)
191 changes = []
192 done = []
193
194 diff_values = diff.get("values_changed", {})
195 diff_values.update(diff.get("type_changes", {}))
196
197 for key, value in diff_values.items():
198 if not key: # Not sure why, but it happens
199 continue
200
201 key = key[5:] # Remove "root." prefix
202
203 if "[" in key:
204 key = key.split("[", 1)[0]
205
206 if "." in key:
207 key = key.split(".", 1)[0]
208
209 if key in done or key in CHANNEL_CHANGES_SUPPRESSED:
210 continue
211
212 if key in CHANNEL_CHANGES_UNSUPPORTED:
213 changes.append(f"**{key.title()}** updated")
214 else:
215 new = value["new_value"]
216 old = value["old_value"]
217
218 changes.append(f"**{key.title()}:** `{old}` **→** `{new}`")
219
220 done.append(key)
221
222 if not changes:
223 return
224
225 message = ""
226
227 for item in sorted(changes):
228 message += f"{Emojis.bullet} {item}\n"
229
230 if after.category:
231 message = f"**{after.category}/#{after.name} (`{after.id}`)**\n{message}"
232 else:
233 message = f"**#{after.name}** (`{after.id}`)\n{message}"
234
235 await self.send_log_message(
236 Icons.hash_blurple, Colour.blurple(),
237 "Channel updated", message
238 )
239
240 @Cog.listener()
241 async def on_guild_role_create(self, role: discord.Role) -> None:
242 """Log role create event to mod log."""
243 if role.guild.id != GuildConstant.id:
244 return
245
246 await self.send_log_message(
247 Icons.crown_green, Colours.soft_green,
248 "Role created", f"`{role.id}`"
249 )
250
251 @Cog.listener()
252 async def on_guild_role_delete(self, role: discord.Role) -> None:
253 """Log role delete event to mod log."""
254 if role.guild.id != GuildConstant.id:
255 return
256
257 await self.send_log_message(
258 Icons.crown_red, Colours.soft_red,
259 "Role removed", f"{role.name} (`{role.id}`)"
260 )
261
262 @Cog.listener()
263 async def on_guild_role_update(self, before: discord.Role, after: discord.Role) -> None:
264 """Log role update event to mod log."""
265 if before.guild.id != GuildConstant.id:
266 return
267
268 diff = DeepDiff(before, after)
269 changes = []
270 done = []
271
272 diff_values = diff.get("values_changed", {})
273 diff_values.update(diff.get("type_changes", {}))
274
275 for key, value in diff_values.items():
276 if not key: # Not sure why, but it happens
277 continue
278
279 key = key[5:] # Remove "root." prefix
280
281 if "[" in key:
282 key = key.split("[", 1)[0]
283
284 if "." in key:
285 key = key.split(".", 1)[0]
286
287 if key in done or key == "color":
288 continue
289
290 if key in ROLE_CHANGES_UNSUPPORTED:
291 changes.append(f"**{key.title()}** updated")
292 else:
293 new = value["new_value"]
294 old = value["old_value"]
295
296 changes.append(f"**{key.title()}:** `{old}` **→** `{new}`")
297
298 done.append(key)
299
300 if not changes:
301 return
302
303 message = ""
304
305 for item in sorted(changes):
306 message += f"{Emojis.bullet} {item}\n"
307
308 message = f"**{after.name}** (`{after.id}`)\n{message}"
309
310 await self.send_log_message(
311 Icons.crown_blurple, Colour.blurple(),
312 "Role updated", message
313 )
314
315 @Cog.listener()
316 async def on_guild_update(self, before: discord.Guild, after: discord.Guild) -> None:
317 """Log guild update event to mod log."""
318 if before.id != GuildConstant.id:
319 return
320
321 diff = DeepDiff(before, after)
322 changes = []
323 done = []
324
325 diff_values = diff.get("values_changed", {})
326 diff_values.update(diff.get("type_changes", {}))
327
328 for key, value in diff_values.items():
329 if not key: # Not sure why, but it happens
330 continue
331
332 key = key[5:] # Remove "root." prefix
333
334 if "[" in key:
335 key = key.split("[", 1)[0]
336
337 if "." in key:
338 key = key.split(".", 1)[0]
339
340 if key in done:
341 continue
342
343 new = value["new_value"]
344 old = value["old_value"]
345
346 changes.append(f"**{key.title()}:** `{old}` **→** `{new}`")
347
348 done.append(key)
349
350 if not changes:
351 return
352
353 message = ""
354
355 for item in sorted(changes):
356 message += f"{Emojis.bullet} {item}\n"
357
358 message = f"**{after.name}** (`{after.id}`)\n{message}"
359
360 await self.send_log_message(
361 Icons.guild_update, Colour.blurple(),
362 "Guild updated", message,
363 thumbnail=after.icon_url_as(format="png")
364 )
365
366 @Cog.listener()
367 async def on_member_ban(self, guild: discord.Guild, member: discord.Member) -> None:
368 """Log ban event to user log."""
369 if guild.id != GuildConstant.id:
370 return
371
372 if member.id in self._ignored[Event.member_ban]:
373 self._ignored[Event.member_ban].remove(member.id)
374 return
375
376 await self.send_log_message(
377 Icons.user_ban, Colours.soft_red,
378 "User banned", f"{member} (`{member.id}`)",
379 thumbnail=member.avatar_url_as(static_format="png"),
380 channel_id=Channels.user_log
381 )
382
383 @Cog.listener()
384 async def on_member_join(self, member: discord.Member) -> None:
385 """Log member join event to user log."""
386 if member.guild.id != GuildConstant.id:
387 return
388
389 message = f"{member} (`{member.id}`)"
390 now = datetime.utcnow()
391 difference = abs(relativedelta(now, member.created_at))
392
393 message += "\n\n**Account age:** " + humanize_delta(difference)
394
395 if difference.days < 1 and difference.months < 1 and difference.years < 1: # New user account!
396 message = f"{Emojis.new} {message}"
397
398 await self.send_log_message(
399 Icons.sign_in, Colours.soft_green,
400 "User joined", message,
401 thumbnail=member.avatar_url_as(static_format="png"),
402 channel_id=Channels.user_log
403 )
404
405 @Cog.listener()
406 async def on_member_remove(self, member: discord.Member) -> None:
407 """Log member leave event to user log."""
408 if member.guild.id != GuildConstant.id:
409 return
410
411 if member.id in self._ignored[Event.member_remove]:
412 self._ignored[Event.member_remove].remove(member.id)
413 return
414
415 await self.send_log_message(
416 Icons.sign_out, Colours.soft_red,
417 "User left", f"{member} (`{member.id}`)",
418 thumbnail=member.avatar_url_as(static_format="png"),
419 channel_id=Channels.user_log
420 )
421
422 @Cog.listener()
423 async def on_member_unban(self, guild: discord.Guild, member: discord.User) -> None:
424 """Log member unban event to mod log."""
425 if guild.id != GuildConstant.id:
426 return
427
428 if member.id in self._ignored[Event.member_unban]:
429 self._ignored[Event.member_unban].remove(member.id)
430 return
431
432 await self.send_log_message(
433 Icons.user_unban, Colour.blurple(),
434 "User unbanned", f"{member} (`{member.id}`)",
435 thumbnail=member.avatar_url_as(static_format="png"),
436 channel_id=Channels.mod_log
437 )
438
439 @Cog.listener()
440 async def on_member_update(self, before: discord.Member, after: discord.Member) -> None:
441 """Log member update event to user log."""
442 if before.guild.id != GuildConstant.id:
443 return
444
445 if before.id in self._ignored[Event.member_update]:
446 self._ignored[Event.member_update].remove(before.id)
447 return
448
449 diff = DeepDiff(before, after)
450 changes = []
451 done = []
452
453 diff_values = {}
454
455 diff_values.update(diff.get("values_changed", {}))
456 diff_values.update(diff.get("type_changes", {}))
457 diff_values.update(diff.get("iterable_item_removed", {}))
458 diff_values.update(diff.get("iterable_item_added", {}))
459
460 diff_user = DeepDiff(before._user, after._user)
461
462 diff_values.update(diff_user.get("values_changed", {}))
463 diff_values.update(diff_user.get("type_changes", {}))
464 diff_values.update(diff_user.get("iterable_item_removed", {}))
465 diff_values.update(diff_user.get("iterable_item_added", {}))
466
467 for key, value in diff_values.items():
468 if not key: # Not sure why, but it happens
469 continue
470
471 key = key[5:] # Remove "root." prefix
472
473 if "[" in key:
474 key = key.split("[", 1)[0]
475
476 if "." in key:
477 key = key.split(".", 1)[0]
478
479 if key in done or key in MEMBER_CHANGES_SUPPRESSED:
480 continue
481
482 if key == "_roles":
483 new_roles = after.roles
484 old_roles = before.roles
485
486 for role in old_roles:
487 if role not in new_roles:
488 changes.append(f"**Role removed:** {role.name} (`{role.id}`)")
489
490 for role in new_roles:
491 if role not in old_roles:
492 changes.append(f"**Role added:** {role.name} (`{role.id}`)")
493
494 else:
495 new = value.get("new_value")
496 old = value.get("old_value")
497
498 if new and old:
499 changes.append(f"**{key.title()}:** `{old}` **→** `{new}`")
500
501 done.append(key)
502
503 if before.name != after.name:
504 changes.append(
505 f"**Username:** `{before.name}` **→** `{after.name}`"
506 )
507
508 if before.discriminator != after.discriminator:
509 changes.append(
510 f"**Discriminator:** `{before.discriminator}` **→** `{after.discriminator}`"
511 )
512
513 if before.display_name != after.display_name:
514 changes.append(
515 f"**Display name:** `{before.display_name}` **→** `{after.display_name}`"
516 )
517
518 if not changes:
519 return
520
521 message = ""
522
523 for item in sorted(changes):
524 message += f"{Emojis.bullet} {item}\n"
525
526 message = f"**{after}** (`{after.id}`)\n{message}"
527
528 await self.send_log_message(
529 Icons.user_update, Colour.blurple(),
530 "Member updated", message,
531 thumbnail=after.avatar_url_as(static_format="png"),
532 channel_id=Channels.user_log
533 )
534
535 @Cog.listener()
536 async def on_message_delete(self, message: discord.Message) -> None:
537 """Log message delete event to message change log."""
538 channel = message.channel
539 author = message.author
540
541 if message.guild.id != GuildConstant.id or channel.id in GuildConstant.modlog_blacklist:
542 return
543
544 self._cached_deletes.append(message.id)
545
546 if message.id in self._ignored[Event.message_delete]:
547 self._ignored[Event.message_delete].remove(message.id)
548 return
549
550 if author.bot:
551 return
552
553 if channel.category:
554 response = (
555 f"**Author:** {author} (`{author.id}`)\n"
556 f"**Channel:** {channel.category}/#{channel.name} (`{channel.id}`)\n"
557 f"**Message ID:** `{message.id}`\n"
558 "\n"
559 )
560 else:
561 response = (
562 f"**Author:** {author} (`{author.id}`)\n"
563 f"**Channel:** #{channel.name} (`{channel.id}`)\n"
564 f"**Message ID:** `{message.id}`\n"
565 "\n"
566 )
567
568 if message.attachments:
569 # Prepend the message metadata with the number of attachments
570 response = f"**Attachments:** {len(message.attachments)}\n" + response
571
572 # Shorten the message content if necessary
573 content = message.clean_content
574 remaining_chars = 2040 - len(response)
575
576 if len(content) > remaining_chars:
577 botlog_url = await self.upload_log(messages=[message], actor_id=message.author.id)
578 ending = f"\n\nMessage truncated, [full message here]({botlog_url})."
579 truncation_point = remaining_chars - len(ending)
580 content = f"{content[:truncation_point]}...{ending}"
581
582 response += f"{content}"
583
584 await self.send_log_message(
585 Icons.message_delete, Colours.soft_red,
586 "Message deleted",
587 response,
588 channel_id=Channels.message_log
589 )
590
591 @Cog.listener()
592 async def on_raw_message_delete(self, event: discord.RawMessageDeleteEvent) -> None:
593 """Log raw message delete event to message change log."""
594 if event.guild_id != GuildConstant.id or event.channel_id in GuildConstant.modlog_blacklist:
595 return
596
597 await asyncio.sleep(1) # Wait here in case the normal event was fired
598
599 if event.message_id in self._cached_deletes:
600 # It was in the cache and the normal event was fired, so we can just ignore it
601 self._cached_deletes.remove(event.message_id)
602 return
603
604 if event.message_id in self._ignored[Event.message_delete]:
605 self._ignored[Event.message_delete].remove(event.message_id)
606 return
607
608 channel = self.bot.get_channel(event.channel_id)
609
610 if channel.category:
611 response = (
612 f"**Channel:** {channel.category}/#{channel.name} (`{channel.id}`)\n"
613 f"**Message ID:** `{event.message_id}`\n"
614 "\n"
615 "This message was not cached, so the message content cannot be displayed."
616 )
617 else:
618 response = (
619 f"**Channel:** #{channel.name} (`{channel.id}`)\n"
620 f"**Message ID:** `{event.message_id}`\n"
621 "\n"
622 "This message was not cached, so the message content cannot be displayed."
623 )
624
625 await self.send_log_message(
626 Icons.message_delete, Colours.soft_red,
627 "Message deleted",
628 response,
629 channel_id=Channels.message_log
630 )
631
632 @Cog.listener()
633 async def on_message_edit(self, msg_before: discord.Message, msg_after: discord.Message) -> None:
634 """Log message edit event to message change log."""
635 if (
636 not msg_before.guild
637 or msg_before.guild.id != GuildConstant.id
638 or msg_before.channel.id in GuildConstant.modlog_blacklist
639 or msg_before.author.bot
640 ):
641 return
642
643 self._cached_edits.append(msg_before.id)
644
645 if msg_before.content == msg_after.content:
646 return
647
648 author = msg_before.author
649 channel = msg_before.channel
650 channel_name = f"{channel.category}/#{channel.name}" if channel.category else f"#{channel.name}"
651
652 # Getting the difference per words and group them by type - add, remove, same
653 # Note that this is intended grouping without sorting
654 diff = difflib.ndiff(msg_before.clean_content.split(), msg_after.clean_content.split())
655 diff_groups = tuple(
656 (diff_type, tuple(s[2:] for s in diff_words))
657 for diff_type, diff_words in itertools.groupby(diff, key=lambda s: s[0])
658 )
659
660 content_before: t.List[str] = []
661 content_after: t.List[str] = []
662
663 for index, (diff_type, words) in enumerate(diff_groups):
664 sub = ' '.join(words)
665 if diff_type == '-':
666 content_before.append(f"[{sub}](http://o.hi)")
667 elif diff_type == '+':
668 content_after.append(f"[{sub}](http://o.hi)")
669 elif diff_type == ' ':
670 if len(words) > 2:
671 sub = (
672 f"{words[0] if index > 0 else ''}"
673 " ... "
674 f"{words[-1] if index < len(diff_groups) - 1 else ''}"
675 )
676 content_before.append(sub)
677 content_after.append(sub)
678
679 response = (
680 f"**Author:** {author} (`{author.id}`)\n"
681 f"**Channel:** {channel_name} (`{channel.id}`)\n"
682 f"**Message ID:** `{msg_before.id}`\n"
683 "\n"
684 f"**Before**:\n{' '.join(content_before)}\n"
685 f"**After**:\n{' '.join(content_after)}\n"
686 "\n"
687 f"[Jump to message]({msg_after.jump_url})"
688 )
689
690 if msg_before.edited_at:
691 # Message was previously edited, to assist with self-bot detection, use the edited_at
692 # datetime as the baseline and create a human-readable delta between this edit event
693 # and the last time the message was edited
694 timestamp = msg_before.edited_at
695 delta = humanize_delta(relativedelta(msg_after.edited_at, msg_before.edited_at))
696 footer = f"Last edited {delta} ago"
697 else:
698 # Message was not previously edited, use the created_at datetime as the baseline, no
699 # delta calculation needed
700 timestamp = msg_before.created_at
701 footer = None
702
703 await self.send_log_message(
704 Icons.message_edit, Colour.blurple(), "Message edited", response,
705 channel_id=Channels.message_log, timestamp_override=timestamp, footer=footer
706 )
707
708 @Cog.listener()
709 async def on_raw_message_edit(self, event: discord.RawMessageUpdateEvent) -> None:
710 """Log raw message edit event to message change log."""
711 try:
712 channel = self.bot.get_channel(int(event.data["channel_id"]))
713 message = await channel.fetch_message(event.message_id)
714 except discord.NotFound: # Was deleted before we got the event
715 return
716
717 if (
718 not message.guild
719 or message.guild.id != GuildConstant.id
720 or message.channel.id in GuildConstant.modlog_blacklist
721 or message.author.bot
722 ):
723 return
724
725 await asyncio.sleep(1) # Wait here in case the normal event was fired
726
727 if event.message_id in self._cached_edits:
728 # It was in the cache and the normal event was fired, so we can just ignore it
729 self._cached_edits.remove(event.message_id)
730 return
731
732 author = message.author
733 channel = message.channel
734 channel_name = f"{channel.category}/#{channel.name}" if channel.category else f"#{channel.name}"
735
736 before_response = (
737 f"**Author:** {author} (`{author.id}`)\n"
738 f"**Channel:** {channel_name} (`{channel.id}`)\n"
739 f"**Message ID:** `{message.id}`\n"
740 "\n"
741 "This message was not cached, so the message content cannot be displayed."
742 )
743
744 after_response = (
745 f"**Author:** {author} (`{author.id}`)\n"
746 f"**Channel:** {channel_name} (`{channel.id}`)\n"
747 f"**Message ID:** `{message.id}`\n"
748 "\n"
749 f"{message.clean_content}"
750 )
751
752 await self.send_log_message(
753 Icons.message_edit, Colour.blurple(), "Message edited (Before)",
754 before_response, channel_id=Channels.message_log
755 )
756
757 await self.send_log_message(
758 Icons.message_edit, Colour.blurple(), "Message edited (After)",
759 after_response, channel_id=Channels.message_log
760 )
761
762 @Cog.listener()
763 async def on_voice_state_update(
764 self,
765 member: discord.Member,
766 before: discord.VoiceState,
767 after: discord.VoiceState
768 ) -> None:
769 """Log member voice state changes to the voice log channel."""
770 if (
771 member.guild.id != GuildConstant.id
772 or (before.channel and before.channel.id in GuildConstant.modlog_blacklist)
773 ):
774 return
775
776 if member.id in self._ignored[Event.voice_state_update]:
777 self._ignored[Event.voice_state_update].remove(member.id)
778 return
779
780 # Exclude all channel attributes except the name.
781 diff = DeepDiff(
782 before,
783 after,
784 exclude_paths=("root.session_id", "root.afk"),
785 exclude_regex_paths=r"root\.channel\.(?!name)",
786 )
787
788 # A type change seems to always take precedent over a value change. Furthermore, it will
789 # include the value change along with the type change anyway. Therefore, it's OK to
790 # "overwrite" values_changed; in practice there will never even be anything to overwrite.
791 diff_values = {**diff.get("values_changed", {}), **diff.get("type_changes", {})}
792
793 icon = Icons.voice_state_blue
794 colour = Colour.blurple()
795 changes = []
796
797 for attr, values in diff_values.items():
798 if not attr: # Not sure why, but it happens.
799 continue
800
801 old = values["old_value"]
802 new = values["new_value"]
803
804 attr = attr[5:] # Remove "root." prefix.
805 attr = VOICE_STATE_ATTRIBUTES.get(attr, attr.replace("_", " ").capitalize())
806
807 changes.append(f"**{attr}:** `{old}` **→** `{new}`")
808
809 # Set the embed icon and colour depending on which attribute changed.
810 if any(name in attr for name in ("Channel", "deaf", "mute")):
811 if new is None or new is True:
812 # Left a channel or was muted/deafened.
813 icon = Icons.voice_state_red
814 colour = Colours.soft_red
815 elif old is None or old is True:
816 # Joined a channel or was unmuted/undeafened.
817 icon = Icons.voice_state_green
818 colour = Colours.soft_green
819
820 if not changes:
821 return
822
823 message = "\n".join(f"{Emojis.bullet} {item}" for item in sorted(changes))
824 message = f"**{member}** (`{member.id}`)\n{message}"
825
826 await self.send_log_message(
827 icon_url=icon,
828 colour=colour,
829 title="Voice state updated",
830 text=message,
831 thumbnail=member.avatar_url_as(static_format="png"),
832 channel_id=Channels.voice_log
833 )
834
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bot/cogs/moderation/modlog.py b/bot/cogs/moderation/modlog.py
--- a/bot/cogs/moderation/modlog.py
+++ b/bot/cogs/moderation/modlog.py
@@ -67,7 +67,7 @@
'embeds': [embed.to_dict() for embed in message.embeds],
'attachments': attachment,
}
- for message, attachment in zip_longest(messages, attachments)
+ for message, attachment in zip_longest(messages, attachments, fillvalue=[])
]
}
)
| {"golden_diff": "diff --git a/bot/cogs/moderation/modlog.py b/bot/cogs/moderation/modlog.py\n--- a/bot/cogs/moderation/modlog.py\n+++ b/bot/cogs/moderation/modlog.py\n@@ -67,7 +67,7 @@\n 'embeds': [embed.to_dict() for embed in message.embeds],\n 'attachments': attachment,\n }\n- for message, attachment in zip_longest(messages, attachments)\n+ for message, attachment in zip_longest(messages, attachments, fillvalue=[])\n ]\n }\n )\n", "issue": "!clean command returns malformed API request\nSince we added attachments to the deleted message logs the clean command was not updated to store this.\r\n\r\nWhen running a clean command the following error is returned:\r\n```\r\nweb_1 | Bad Request: /bot/deleted-messages\r\nweb_1 | \"POST /bot/deleted-messages HTTP/1.1\" 400 792\r\nbot_1 | 2020-02-25 19:32:41,081 | bot.cogs.error_handler | DEBUG | API responded with 400 for command clean all: {'deletedmessage_set': [{'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}]}.\r\n```\r\n\r\nThis is not critical since the clean command still operates, it just does not store deleted messages like intended.\n", "before_files": [{"content": "import asyncio\nimport difflib\nimport itertools\nimport logging\nimport typing as t\nfrom datetime import datetime\nfrom itertools import zip_longest\n\nimport discord\nfrom dateutil.relativedelta import relativedelta\nfrom deepdiff import DeepDiff\nfrom discord import Colour\nfrom discord.abc import GuildChannel\nfrom discord.ext.commands import Cog, Context\n\nfrom bot.bot import Bot\nfrom bot.constants import Channels, Colours, Emojis, Event, Guild as GuildConstant, Icons, URLs\nfrom bot.utils.time import humanize_delta\n\nlog = logging.getLogger(__name__)\n\nGUILD_CHANNEL = t.Union[discord.CategoryChannel, discord.TextChannel, discord.VoiceChannel]\n\nCHANNEL_CHANGES_UNSUPPORTED = (\"permissions\",)\nCHANNEL_CHANGES_SUPPRESSED = (\"_overwrites\", \"position\")\nMEMBER_CHANGES_SUPPRESSED = (\"status\", \"activities\", \"_client_status\", \"nick\")\nROLE_CHANGES_UNSUPPORTED = (\"colour\", \"permissions\")\n\nVOICE_STATE_ATTRIBUTES = {\n \"channel.name\": \"Channel\",\n \"self_stream\": \"Streaming\",\n \"self_video\": \"Broadcasting\",\n}\n\n\nclass ModLog(Cog, name=\"ModLog\"):\n \"\"\"Logging for server events and staff actions.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self._ignored = {event: [] for event in Event}\n\n self._cached_deletes = []\n self._cached_edits = []\n\n async def upload_log(\n self,\n messages: t.Iterable[discord.Message],\n actor_id: int,\n attachments: t.Iterable[t.List[str]] = None\n ) -> str:\n \"\"\"Upload message logs to the database and return a URL to a page for viewing the logs.\"\"\"\n if attachments is None:\n attachments = []\n\n response = await self.bot.api_client.post(\n 'bot/deleted-messages',\n json={\n 'actor': actor_id,\n 'creation': datetime.utcnow().isoformat(),\n 'deletedmessage_set': [\n {\n 'id': message.id,\n 'author': message.author.id,\n 'channel_id': message.channel.id,\n 'content': message.content,\n 'embeds': [embed.to_dict() for embed in message.embeds],\n 'attachments': attachment,\n }\n for message, attachment in zip_longest(messages, attachments)\n ]\n }\n )\n\n return f\"{URLs.site_logs_view}/{response['id']}\"\n\n def ignore(self, event: Event, *items: int) -> None:\n \"\"\"Add event to ignored events to suppress log emission.\"\"\"\n for item in items:\n if item not in self._ignored[event]:\n self._ignored[event].append(item)\n\n async def send_log_message(\n self,\n icon_url: t.Optional[str],\n colour: t.Union[discord.Colour, int],\n title: t.Optional[str],\n text: str,\n thumbnail: t.Optional[t.Union[str, discord.Asset]] = None,\n channel_id: int = Channels.mod_log,\n ping_everyone: bool = False,\n files: t.Optional[t.List[discord.File]] = None,\n content: t.Optional[str] = None,\n additional_embeds: t.Optional[t.List[discord.Embed]] = None,\n additional_embeds_msg: t.Optional[str] = None,\n timestamp_override: t.Optional[datetime] = None,\n footer: t.Optional[str] = None,\n ) -> Context:\n \"\"\"Generate log embed and send to logging channel.\"\"\"\n embed = discord.Embed(description=text)\n\n if title and icon_url:\n embed.set_author(name=title, icon_url=icon_url)\n\n embed.colour = colour\n embed.timestamp = timestamp_override or datetime.utcnow()\n\n if footer:\n embed.set_footer(text=footer)\n\n if thumbnail:\n embed.set_thumbnail(url=thumbnail)\n\n if ping_everyone:\n if content:\n content = f\"@everyone\\n{content}\"\n else:\n content = \"@everyone\"\n\n channel = self.bot.get_channel(channel_id)\n log_message = await channel.send(content=content, embed=embed, files=files)\n\n if additional_embeds:\n if additional_embeds_msg:\n await channel.send(additional_embeds_msg)\n for additional_embed in additional_embeds:\n await channel.send(embed=additional_embed)\n\n return await self.bot.get_context(log_message) # Optionally return for use with antispam\n\n @Cog.listener()\n async def on_guild_channel_create(self, channel: GUILD_CHANNEL) -> None:\n \"\"\"Log channel create event to mod log.\"\"\"\n if channel.guild.id != GuildConstant.id:\n return\n\n if isinstance(channel, discord.CategoryChannel):\n title = \"Category created\"\n message = f\"{channel.name} (`{channel.id}`)\"\n elif isinstance(channel, discord.VoiceChannel):\n title = \"Voice channel created\"\n\n if channel.category:\n message = f\"{channel.category}/{channel.name} (`{channel.id}`)\"\n else:\n message = f\"{channel.name} (`{channel.id}`)\"\n else:\n title = \"Text channel created\"\n\n if channel.category:\n message = f\"{channel.category}/{channel.name} (`{channel.id}`)\"\n else:\n message = f\"{channel.name} (`{channel.id}`)\"\n\n await self.send_log_message(Icons.hash_green, Colours.soft_green, title, message)\n\n @Cog.listener()\n async def on_guild_channel_delete(self, channel: GUILD_CHANNEL) -> None:\n \"\"\"Log channel delete event to mod log.\"\"\"\n if channel.guild.id != GuildConstant.id:\n return\n\n if isinstance(channel, discord.CategoryChannel):\n title = \"Category deleted\"\n elif isinstance(channel, discord.VoiceChannel):\n title = \"Voice channel deleted\"\n else:\n title = \"Text channel deleted\"\n\n if channel.category and not isinstance(channel, discord.CategoryChannel):\n message = f\"{channel.category}/{channel.name} (`{channel.id}`)\"\n else:\n message = f\"{channel.name} (`{channel.id}`)\"\n\n await self.send_log_message(\n Icons.hash_red, Colours.soft_red,\n title, message\n )\n\n @Cog.listener()\n async def on_guild_channel_update(self, before: GUILD_CHANNEL, after: GuildChannel) -> None:\n \"\"\"Log channel update event to mod log.\"\"\"\n if before.guild.id != GuildConstant.id:\n return\n\n if before.id in self._ignored[Event.guild_channel_update]:\n self._ignored[Event.guild_channel_update].remove(before.id)\n return\n\n diff = DeepDiff(before, after)\n changes = []\n done = []\n\n diff_values = diff.get(\"values_changed\", {})\n diff_values.update(diff.get(\"type_changes\", {}))\n\n for key, value in diff_values.items():\n if not key: # Not sure why, but it happens\n continue\n\n key = key[5:] # Remove \"root.\" prefix\n\n if \"[\" in key:\n key = key.split(\"[\", 1)[0]\n\n if \".\" in key:\n key = key.split(\".\", 1)[0]\n\n if key in done or key in CHANNEL_CHANGES_SUPPRESSED:\n continue\n\n if key in CHANNEL_CHANGES_UNSUPPORTED:\n changes.append(f\"**{key.title()}** updated\")\n else:\n new = value[\"new_value\"]\n old = value[\"old_value\"]\n\n changes.append(f\"**{key.title()}:** `{old}` **\u2192** `{new}`\")\n\n done.append(key)\n\n if not changes:\n return\n\n message = \"\"\n\n for item in sorted(changes):\n message += f\"{Emojis.bullet} {item}\\n\"\n\n if after.category:\n message = f\"**{after.category}/#{after.name} (`{after.id}`)**\\n{message}\"\n else:\n message = f\"**#{after.name}** (`{after.id}`)\\n{message}\"\n\n await self.send_log_message(\n Icons.hash_blurple, Colour.blurple(),\n \"Channel updated\", message\n )\n\n @Cog.listener()\n async def on_guild_role_create(self, role: discord.Role) -> None:\n \"\"\"Log role create event to mod log.\"\"\"\n if role.guild.id != GuildConstant.id:\n return\n\n await self.send_log_message(\n Icons.crown_green, Colours.soft_green,\n \"Role created\", f\"`{role.id}`\"\n )\n\n @Cog.listener()\n async def on_guild_role_delete(self, role: discord.Role) -> None:\n \"\"\"Log role delete event to mod log.\"\"\"\n if role.guild.id != GuildConstant.id:\n return\n\n await self.send_log_message(\n Icons.crown_red, Colours.soft_red,\n \"Role removed\", f\"{role.name} (`{role.id}`)\"\n )\n\n @Cog.listener()\n async def on_guild_role_update(self, before: discord.Role, after: discord.Role) -> None:\n \"\"\"Log role update event to mod log.\"\"\"\n if before.guild.id != GuildConstant.id:\n return\n\n diff = DeepDiff(before, after)\n changes = []\n done = []\n\n diff_values = diff.get(\"values_changed\", {})\n diff_values.update(diff.get(\"type_changes\", {}))\n\n for key, value in diff_values.items():\n if not key: # Not sure why, but it happens\n continue\n\n key = key[5:] # Remove \"root.\" prefix\n\n if \"[\" in key:\n key = key.split(\"[\", 1)[0]\n\n if \".\" in key:\n key = key.split(\".\", 1)[0]\n\n if key in done or key == \"color\":\n continue\n\n if key in ROLE_CHANGES_UNSUPPORTED:\n changes.append(f\"**{key.title()}** updated\")\n else:\n new = value[\"new_value\"]\n old = value[\"old_value\"]\n\n changes.append(f\"**{key.title()}:** `{old}` **\u2192** `{new}`\")\n\n done.append(key)\n\n if not changes:\n return\n\n message = \"\"\n\n for item in sorted(changes):\n message += f\"{Emojis.bullet} {item}\\n\"\n\n message = f\"**{after.name}** (`{after.id}`)\\n{message}\"\n\n await self.send_log_message(\n Icons.crown_blurple, Colour.blurple(),\n \"Role updated\", message\n )\n\n @Cog.listener()\n async def on_guild_update(self, before: discord.Guild, after: discord.Guild) -> None:\n \"\"\"Log guild update event to mod log.\"\"\"\n if before.id != GuildConstant.id:\n return\n\n diff = DeepDiff(before, after)\n changes = []\n done = []\n\n diff_values = diff.get(\"values_changed\", {})\n diff_values.update(diff.get(\"type_changes\", {}))\n\n for key, value in diff_values.items():\n if not key: # Not sure why, but it happens\n continue\n\n key = key[5:] # Remove \"root.\" prefix\n\n if \"[\" in key:\n key = key.split(\"[\", 1)[0]\n\n if \".\" in key:\n key = key.split(\".\", 1)[0]\n\n if key in done:\n continue\n\n new = value[\"new_value\"]\n old = value[\"old_value\"]\n\n changes.append(f\"**{key.title()}:** `{old}` **\u2192** `{new}`\")\n\n done.append(key)\n\n if not changes:\n return\n\n message = \"\"\n\n for item in sorted(changes):\n message += f\"{Emojis.bullet} {item}\\n\"\n\n message = f\"**{after.name}** (`{after.id}`)\\n{message}\"\n\n await self.send_log_message(\n Icons.guild_update, Colour.blurple(),\n \"Guild updated\", message,\n thumbnail=after.icon_url_as(format=\"png\")\n )\n\n @Cog.listener()\n async def on_member_ban(self, guild: discord.Guild, member: discord.Member) -> None:\n \"\"\"Log ban event to user log.\"\"\"\n if guild.id != GuildConstant.id:\n return\n\n if member.id in self._ignored[Event.member_ban]:\n self._ignored[Event.member_ban].remove(member.id)\n return\n\n await self.send_log_message(\n Icons.user_ban, Colours.soft_red,\n \"User banned\", f\"{member} (`{member.id}`)\",\n thumbnail=member.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.user_log\n )\n\n @Cog.listener()\n async def on_member_join(self, member: discord.Member) -> None:\n \"\"\"Log member join event to user log.\"\"\"\n if member.guild.id != GuildConstant.id:\n return\n\n message = f\"{member} (`{member.id}`)\"\n now = datetime.utcnow()\n difference = abs(relativedelta(now, member.created_at))\n\n message += \"\\n\\n**Account age:** \" + humanize_delta(difference)\n\n if difference.days < 1 and difference.months < 1 and difference.years < 1: # New user account!\n message = f\"{Emojis.new} {message}\"\n\n await self.send_log_message(\n Icons.sign_in, Colours.soft_green,\n \"User joined\", message,\n thumbnail=member.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.user_log\n )\n\n @Cog.listener()\n async def on_member_remove(self, member: discord.Member) -> None:\n \"\"\"Log member leave event to user log.\"\"\"\n if member.guild.id != GuildConstant.id:\n return\n\n if member.id in self._ignored[Event.member_remove]:\n self._ignored[Event.member_remove].remove(member.id)\n return\n\n await self.send_log_message(\n Icons.sign_out, Colours.soft_red,\n \"User left\", f\"{member} (`{member.id}`)\",\n thumbnail=member.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.user_log\n )\n\n @Cog.listener()\n async def on_member_unban(self, guild: discord.Guild, member: discord.User) -> None:\n \"\"\"Log member unban event to mod log.\"\"\"\n if guild.id != GuildConstant.id:\n return\n\n if member.id in self._ignored[Event.member_unban]:\n self._ignored[Event.member_unban].remove(member.id)\n return\n\n await self.send_log_message(\n Icons.user_unban, Colour.blurple(),\n \"User unbanned\", f\"{member} (`{member.id}`)\",\n thumbnail=member.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.mod_log\n )\n\n @Cog.listener()\n async def on_member_update(self, before: discord.Member, after: discord.Member) -> None:\n \"\"\"Log member update event to user log.\"\"\"\n if before.guild.id != GuildConstant.id:\n return\n\n if before.id in self._ignored[Event.member_update]:\n self._ignored[Event.member_update].remove(before.id)\n return\n\n diff = DeepDiff(before, after)\n changes = []\n done = []\n\n diff_values = {}\n\n diff_values.update(diff.get(\"values_changed\", {}))\n diff_values.update(diff.get(\"type_changes\", {}))\n diff_values.update(diff.get(\"iterable_item_removed\", {}))\n diff_values.update(diff.get(\"iterable_item_added\", {}))\n\n diff_user = DeepDiff(before._user, after._user)\n\n diff_values.update(diff_user.get(\"values_changed\", {}))\n diff_values.update(diff_user.get(\"type_changes\", {}))\n diff_values.update(diff_user.get(\"iterable_item_removed\", {}))\n diff_values.update(diff_user.get(\"iterable_item_added\", {}))\n\n for key, value in diff_values.items():\n if not key: # Not sure why, but it happens\n continue\n\n key = key[5:] # Remove \"root.\" prefix\n\n if \"[\" in key:\n key = key.split(\"[\", 1)[0]\n\n if \".\" in key:\n key = key.split(\".\", 1)[0]\n\n if key in done or key in MEMBER_CHANGES_SUPPRESSED:\n continue\n\n if key == \"_roles\":\n new_roles = after.roles\n old_roles = before.roles\n\n for role in old_roles:\n if role not in new_roles:\n changes.append(f\"**Role removed:** {role.name} (`{role.id}`)\")\n\n for role in new_roles:\n if role not in old_roles:\n changes.append(f\"**Role added:** {role.name} (`{role.id}`)\")\n\n else:\n new = value.get(\"new_value\")\n old = value.get(\"old_value\")\n\n if new and old:\n changes.append(f\"**{key.title()}:** `{old}` **\u2192** `{new}`\")\n\n done.append(key)\n\n if before.name != after.name:\n changes.append(\n f\"**Username:** `{before.name}` **\u2192** `{after.name}`\"\n )\n\n if before.discriminator != after.discriminator:\n changes.append(\n f\"**Discriminator:** `{before.discriminator}` **\u2192** `{after.discriminator}`\"\n )\n\n if before.display_name != after.display_name:\n changes.append(\n f\"**Display name:** `{before.display_name}` **\u2192** `{after.display_name}`\"\n )\n\n if not changes:\n return\n\n message = \"\"\n\n for item in sorted(changes):\n message += f\"{Emojis.bullet} {item}\\n\"\n\n message = f\"**{after}** (`{after.id}`)\\n{message}\"\n\n await self.send_log_message(\n Icons.user_update, Colour.blurple(),\n \"Member updated\", message,\n thumbnail=after.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.user_log\n )\n\n @Cog.listener()\n async def on_message_delete(self, message: discord.Message) -> None:\n \"\"\"Log message delete event to message change log.\"\"\"\n channel = message.channel\n author = message.author\n\n if message.guild.id != GuildConstant.id or channel.id in GuildConstant.modlog_blacklist:\n return\n\n self._cached_deletes.append(message.id)\n\n if message.id in self._ignored[Event.message_delete]:\n self._ignored[Event.message_delete].remove(message.id)\n return\n\n if author.bot:\n return\n\n if channel.category:\n response = (\n f\"**Author:** {author} (`{author.id}`)\\n\"\n f\"**Channel:** {channel.category}/#{channel.name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{message.id}`\\n\"\n \"\\n\"\n )\n else:\n response = (\n f\"**Author:** {author} (`{author.id}`)\\n\"\n f\"**Channel:** #{channel.name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{message.id}`\\n\"\n \"\\n\"\n )\n\n if message.attachments:\n # Prepend the message metadata with the number of attachments\n response = f\"**Attachments:** {len(message.attachments)}\\n\" + response\n\n # Shorten the message content if necessary\n content = message.clean_content\n remaining_chars = 2040 - len(response)\n\n if len(content) > remaining_chars:\n botlog_url = await self.upload_log(messages=[message], actor_id=message.author.id)\n ending = f\"\\n\\nMessage truncated, [full message here]({botlog_url}).\"\n truncation_point = remaining_chars - len(ending)\n content = f\"{content[:truncation_point]}...{ending}\"\n\n response += f\"{content}\"\n\n await self.send_log_message(\n Icons.message_delete, Colours.soft_red,\n \"Message deleted\",\n response,\n channel_id=Channels.message_log\n )\n\n @Cog.listener()\n async def on_raw_message_delete(self, event: discord.RawMessageDeleteEvent) -> None:\n \"\"\"Log raw message delete event to message change log.\"\"\"\n if event.guild_id != GuildConstant.id or event.channel_id in GuildConstant.modlog_blacklist:\n return\n\n await asyncio.sleep(1) # Wait here in case the normal event was fired\n\n if event.message_id in self._cached_deletes:\n # It was in the cache and the normal event was fired, so we can just ignore it\n self._cached_deletes.remove(event.message_id)\n return\n\n if event.message_id in self._ignored[Event.message_delete]:\n self._ignored[Event.message_delete].remove(event.message_id)\n return\n\n channel = self.bot.get_channel(event.channel_id)\n\n if channel.category:\n response = (\n f\"**Channel:** {channel.category}/#{channel.name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{event.message_id}`\\n\"\n \"\\n\"\n \"This message was not cached, so the message content cannot be displayed.\"\n )\n else:\n response = (\n f\"**Channel:** #{channel.name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{event.message_id}`\\n\"\n \"\\n\"\n \"This message was not cached, so the message content cannot be displayed.\"\n )\n\n await self.send_log_message(\n Icons.message_delete, Colours.soft_red,\n \"Message deleted\",\n response,\n channel_id=Channels.message_log\n )\n\n @Cog.listener()\n async def on_message_edit(self, msg_before: discord.Message, msg_after: discord.Message) -> None:\n \"\"\"Log message edit event to message change log.\"\"\"\n if (\n not msg_before.guild\n or msg_before.guild.id != GuildConstant.id\n or msg_before.channel.id in GuildConstant.modlog_blacklist\n or msg_before.author.bot\n ):\n return\n\n self._cached_edits.append(msg_before.id)\n\n if msg_before.content == msg_after.content:\n return\n\n author = msg_before.author\n channel = msg_before.channel\n channel_name = f\"{channel.category}/#{channel.name}\" if channel.category else f\"#{channel.name}\"\n\n # Getting the difference per words and group them by type - add, remove, same\n # Note that this is intended grouping without sorting\n diff = difflib.ndiff(msg_before.clean_content.split(), msg_after.clean_content.split())\n diff_groups = tuple(\n (diff_type, tuple(s[2:] for s in diff_words))\n for diff_type, diff_words in itertools.groupby(diff, key=lambda s: s[0])\n )\n\n content_before: t.List[str] = []\n content_after: t.List[str] = []\n\n for index, (diff_type, words) in enumerate(diff_groups):\n sub = ' '.join(words)\n if diff_type == '-':\n content_before.append(f\"[{sub}](http://o.hi)\")\n elif diff_type == '+':\n content_after.append(f\"[{sub}](http://o.hi)\")\n elif diff_type == ' ':\n if len(words) > 2:\n sub = (\n f\"{words[0] if index > 0 else ''}\"\n \" ... \"\n f\"{words[-1] if index < len(diff_groups) - 1 else ''}\"\n )\n content_before.append(sub)\n content_after.append(sub)\n\n response = (\n f\"**Author:** {author} (`{author.id}`)\\n\"\n f\"**Channel:** {channel_name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{msg_before.id}`\\n\"\n \"\\n\"\n f\"**Before**:\\n{' '.join(content_before)}\\n\"\n f\"**After**:\\n{' '.join(content_after)}\\n\"\n \"\\n\"\n f\"[Jump to message]({msg_after.jump_url})\"\n )\n\n if msg_before.edited_at:\n # Message was previously edited, to assist with self-bot detection, use the edited_at\n # datetime as the baseline and create a human-readable delta between this edit event\n # and the last time the message was edited\n timestamp = msg_before.edited_at\n delta = humanize_delta(relativedelta(msg_after.edited_at, msg_before.edited_at))\n footer = f\"Last edited {delta} ago\"\n else:\n # Message was not previously edited, use the created_at datetime as the baseline, no\n # delta calculation needed\n timestamp = msg_before.created_at\n footer = None\n\n await self.send_log_message(\n Icons.message_edit, Colour.blurple(), \"Message edited\", response,\n channel_id=Channels.message_log, timestamp_override=timestamp, footer=footer\n )\n\n @Cog.listener()\n async def on_raw_message_edit(self, event: discord.RawMessageUpdateEvent) -> None:\n \"\"\"Log raw message edit event to message change log.\"\"\"\n try:\n channel = self.bot.get_channel(int(event.data[\"channel_id\"]))\n message = await channel.fetch_message(event.message_id)\n except discord.NotFound: # Was deleted before we got the event\n return\n\n if (\n not message.guild\n or message.guild.id != GuildConstant.id\n or message.channel.id in GuildConstant.modlog_blacklist\n or message.author.bot\n ):\n return\n\n await asyncio.sleep(1) # Wait here in case the normal event was fired\n\n if event.message_id in self._cached_edits:\n # It was in the cache and the normal event was fired, so we can just ignore it\n self._cached_edits.remove(event.message_id)\n return\n\n author = message.author\n channel = message.channel\n channel_name = f\"{channel.category}/#{channel.name}\" if channel.category else f\"#{channel.name}\"\n\n before_response = (\n f\"**Author:** {author} (`{author.id}`)\\n\"\n f\"**Channel:** {channel_name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{message.id}`\\n\"\n \"\\n\"\n \"This message was not cached, so the message content cannot be displayed.\"\n )\n\n after_response = (\n f\"**Author:** {author} (`{author.id}`)\\n\"\n f\"**Channel:** {channel_name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{message.id}`\\n\"\n \"\\n\"\n f\"{message.clean_content}\"\n )\n\n await self.send_log_message(\n Icons.message_edit, Colour.blurple(), \"Message edited (Before)\",\n before_response, channel_id=Channels.message_log\n )\n\n await self.send_log_message(\n Icons.message_edit, Colour.blurple(), \"Message edited (After)\",\n after_response, channel_id=Channels.message_log\n )\n\n @Cog.listener()\n async def on_voice_state_update(\n self,\n member: discord.Member,\n before: discord.VoiceState,\n after: discord.VoiceState\n ) -> None:\n \"\"\"Log member voice state changes to the voice log channel.\"\"\"\n if (\n member.guild.id != GuildConstant.id\n or (before.channel and before.channel.id in GuildConstant.modlog_blacklist)\n ):\n return\n\n if member.id in self._ignored[Event.voice_state_update]:\n self._ignored[Event.voice_state_update].remove(member.id)\n return\n\n # Exclude all channel attributes except the name.\n diff = DeepDiff(\n before,\n after,\n exclude_paths=(\"root.session_id\", \"root.afk\"),\n exclude_regex_paths=r\"root\\.channel\\.(?!name)\",\n )\n\n # A type change seems to always take precedent over a value change. Furthermore, it will\n # include the value change along with the type change anyway. Therefore, it's OK to\n # \"overwrite\" values_changed; in practice there will never even be anything to overwrite.\n diff_values = {**diff.get(\"values_changed\", {}), **diff.get(\"type_changes\", {})}\n\n icon = Icons.voice_state_blue\n colour = Colour.blurple()\n changes = []\n\n for attr, values in diff_values.items():\n if not attr: # Not sure why, but it happens.\n continue\n\n old = values[\"old_value\"]\n new = values[\"new_value\"]\n\n attr = attr[5:] # Remove \"root.\" prefix.\n attr = VOICE_STATE_ATTRIBUTES.get(attr, attr.replace(\"_\", \" \").capitalize())\n\n changes.append(f\"**{attr}:** `{old}` **\u2192** `{new}`\")\n\n # Set the embed icon and colour depending on which attribute changed.\n if any(name in attr for name in (\"Channel\", \"deaf\", \"mute\")):\n if new is None or new is True:\n # Left a channel or was muted/deafened.\n icon = Icons.voice_state_red\n colour = Colours.soft_red\n elif old is None or old is True:\n # Joined a channel or was unmuted/undeafened.\n icon = Icons.voice_state_green\n colour = Colours.soft_green\n\n if not changes:\n return\n\n message = \"\\n\".join(f\"{Emojis.bullet} {item}\" for item in sorted(changes))\n message = f\"**{member}** (`{member.id}`)\\n{message}\"\n\n await self.send_log_message(\n icon_url=icon,\n colour=colour,\n title=\"Voice state updated\",\n text=message,\n thumbnail=member.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.voice_log\n )\n", "path": "bot/cogs/moderation/modlog.py"}], "after_files": [{"content": "import asyncio\nimport difflib\nimport itertools\nimport logging\nimport typing as t\nfrom datetime import datetime\nfrom itertools import zip_longest\n\nimport discord\nfrom dateutil.relativedelta import relativedelta\nfrom deepdiff import DeepDiff\nfrom discord import Colour\nfrom discord.abc import GuildChannel\nfrom discord.ext.commands import Cog, Context\n\nfrom bot.bot import Bot\nfrom bot.constants import Channels, Colours, Emojis, Event, Guild as GuildConstant, Icons, URLs\nfrom bot.utils.time import humanize_delta\n\nlog = logging.getLogger(__name__)\n\nGUILD_CHANNEL = t.Union[discord.CategoryChannel, discord.TextChannel, discord.VoiceChannel]\n\nCHANNEL_CHANGES_UNSUPPORTED = (\"permissions\",)\nCHANNEL_CHANGES_SUPPRESSED = (\"_overwrites\", \"position\")\nMEMBER_CHANGES_SUPPRESSED = (\"status\", \"activities\", \"_client_status\", \"nick\")\nROLE_CHANGES_UNSUPPORTED = (\"colour\", \"permissions\")\n\nVOICE_STATE_ATTRIBUTES = {\n \"channel.name\": \"Channel\",\n \"self_stream\": \"Streaming\",\n \"self_video\": \"Broadcasting\",\n}\n\n\nclass ModLog(Cog, name=\"ModLog\"):\n \"\"\"Logging for server events and staff actions.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self._ignored = {event: [] for event in Event}\n\n self._cached_deletes = []\n self._cached_edits = []\n\n async def upload_log(\n self,\n messages: t.Iterable[discord.Message],\n actor_id: int,\n attachments: t.Iterable[t.List[str]] = None\n ) -> str:\n \"\"\"Upload message logs to the database and return a URL to a page for viewing the logs.\"\"\"\n if attachments is None:\n attachments = []\n\n response = await self.bot.api_client.post(\n 'bot/deleted-messages',\n json={\n 'actor': actor_id,\n 'creation': datetime.utcnow().isoformat(),\n 'deletedmessage_set': [\n {\n 'id': message.id,\n 'author': message.author.id,\n 'channel_id': message.channel.id,\n 'content': message.content,\n 'embeds': [embed.to_dict() for embed in message.embeds],\n 'attachments': attachment,\n }\n for message, attachment in zip_longest(messages, attachments, fillvalue=[])\n ]\n }\n )\n\n return f\"{URLs.site_logs_view}/{response['id']}\"\n\n def ignore(self, event: Event, *items: int) -> None:\n \"\"\"Add event to ignored events to suppress log emission.\"\"\"\n for item in items:\n if item not in self._ignored[event]:\n self._ignored[event].append(item)\n\n async def send_log_message(\n self,\n icon_url: t.Optional[str],\n colour: t.Union[discord.Colour, int],\n title: t.Optional[str],\n text: str,\n thumbnail: t.Optional[t.Union[str, discord.Asset]] = None,\n channel_id: int = Channels.mod_log,\n ping_everyone: bool = False,\n files: t.Optional[t.List[discord.File]] = None,\n content: t.Optional[str] = None,\n additional_embeds: t.Optional[t.List[discord.Embed]] = None,\n additional_embeds_msg: t.Optional[str] = None,\n timestamp_override: t.Optional[datetime] = None,\n footer: t.Optional[str] = None,\n ) -> Context:\n \"\"\"Generate log embed and send to logging channel.\"\"\"\n embed = discord.Embed(description=text)\n\n if title and icon_url:\n embed.set_author(name=title, icon_url=icon_url)\n\n embed.colour = colour\n embed.timestamp = timestamp_override or datetime.utcnow()\n\n if footer:\n embed.set_footer(text=footer)\n\n if thumbnail:\n embed.set_thumbnail(url=thumbnail)\n\n if ping_everyone:\n if content:\n content = f\"@everyone\\n{content}\"\n else:\n content = \"@everyone\"\n\n channel = self.bot.get_channel(channel_id)\n log_message = await channel.send(content=content, embed=embed, files=files)\n\n if additional_embeds:\n if additional_embeds_msg:\n await channel.send(additional_embeds_msg)\n for additional_embed in additional_embeds:\n await channel.send(embed=additional_embed)\n\n return await self.bot.get_context(log_message) # Optionally return for use with antispam\n\n @Cog.listener()\n async def on_guild_channel_create(self, channel: GUILD_CHANNEL) -> None:\n \"\"\"Log channel create event to mod log.\"\"\"\n if channel.guild.id != GuildConstant.id:\n return\n\n if isinstance(channel, discord.CategoryChannel):\n title = \"Category created\"\n message = f\"{channel.name} (`{channel.id}`)\"\n elif isinstance(channel, discord.VoiceChannel):\n title = \"Voice channel created\"\n\n if channel.category:\n message = f\"{channel.category}/{channel.name} (`{channel.id}`)\"\n else:\n message = f\"{channel.name} (`{channel.id}`)\"\n else:\n title = \"Text channel created\"\n\n if channel.category:\n message = f\"{channel.category}/{channel.name} (`{channel.id}`)\"\n else:\n message = f\"{channel.name} (`{channel.id}`)\"\n\n await self.send_log_message(Icons.hash_green, Colours.soft_green, title, message)\n\n @Cog.listener()\n async def on_guild_channel_delete(self, channel: GUILD_CHANNEL) -> None:\n \"\"\"Log channel delete event to mod log.\"\"\"\n if channel.guild.id != GuildConstant.id:\n return\n\n if isinstance(channel, discord.CategoryChannel):\n title = \"Category deleted\"\n elif isinstance(channel, discord.VoiceChannel):\n title = \"Voice channel deleted\"\n else:\n title = \"Text channel deleted\"\n\n if channel.category and not isinstance(channel, discord.CategoryChannel):\n message = f\"{channel.category}/{channel.name} (`{channel.id}`)\"\n else:\n message = f\"{channel.name} (`{channel.id}`)\"\n\n await self.send_log_message(\n Icons.hash_red, Colours.soft_red,\n title, message\n )\n\n @Cog.listener()\n async def on_guild_channel_update(self, before: GUILD_CHANNEL, after: GuildChannel) -> None:\n \"\"\"Log channel update event to mod log.\"\"\"\n if before.guild.id != GuildConstant.id:\n return\n\n if before.id in self._ignored[Event.guild_channel_update]:\n self._ignored[Event.guild_channel_update].remove(before.id)\n return\n\n diff = DeepDiff(before, after)\n changes = []\n done = []\n\n diff_values = diff.get(\"values_changed\", {})\n diff_values.update(diff.get(\"type_changes\", {}))\n\n for key, value in diff_values.items():\n if not key: # Not sure why, but it happens\n continue\n\n key = key[5:] # Remove \"root.\" prefix\n\n if \"[\" in key:\n key = key.split(\"[\", 1)[0]\n\n if \".\" in key:\n key = key.split(\".\", 1)[0]\n\n if key in done or key in CHANNEL_CHANGES_SUPPRESSED:\n continue\n\n if key in CHANNEL_CHANGES_UNSUPPORTED:\n changes.append(f\"**{key.title()}** updated\")\n else:\n new = value[\"new_value\"]\n old = value[\"old_value\"]\n\n changes.append(f\"**{key.title()}:** `{old}` **\u2192** `{new}`\")\n\n done.append(key)\n\n if not changes:\n return\n\n message = \"\"\n\n for item in sorted(changes):\n message += f\"{Emojis.bullet} {item}\\n\"\n\n if after.category:\n message = f\"**{after.category}/#{after.name} (`{after.id}`)**\\n{message}\"\n else:\n message = f\"**#{after.name}** (`{after.id}`)\\n{message}\"\n\n await self.send_log_message(\n Icons.hash_blurple, Colour.blurple(),\n \"Channel updated\", message\n )\n\n @Cog.listener()\n async def on_guild_role_create(self, role: discord.Role) -> None:\n \"\"\"Log role create event to mod log.\"\"\"\n if role.guild.id != GuildConstant.id:\n return\n\n await self.send_log_message(\n Icons.crown_green, Colours.soft_green,\n \"Role created\", f\"`{role.id}`\"\n )\n\n @Cog.listener()\n async def on_guild_role_delete(self, role: discord.Role) -> None:\n \"\"\"Log role delete event to mod log.\"\"\"\n if role.guild.id != GuildConstant.id:\n return\n\n await self.send_log_message(\n Icons.crown_red, Colours.soft_red,\n \"Role removed\", f\"{role.name} (`{role.id}`)\"\n )\n\n @Cog.listener()\n async def on_guild_role_update(self, before: discord.Role, after: discord.Role) -> None:\n \"\"\"Log role update event to mod log.\"\"\"\n if before.guild.id != GuildConstant.id:\n return\n\n diff = DeepDiff(before, after)\n changes = []\n done = []\n\n diff_values = diff.get(\"values_changed\", {})\n diff_values.update(diff.get(\"type_changes\", {}))\n\n for key, value in diff_values.items():\n if not key: # Not sure why, but it happens\n continue\n\n key = key[5:] # Remove \"root.\" prefix\n\n if \"[\" in key:\n key = key.split(\"[\", 1)[0]\n\n if \".\" in key:\n key = key.split(\".\", 1)[0]\n\n if key in done or key == \"color\":\n continue\n\n if key in ROLE_CHANGES_UNSUPPORTED:\n changes.append(f\"**{key.title()}** updated\")\n else:\n new = value[\"new_value\"]\n old = value[\"old_value\"]\n\n changes.append(f\"**{key.title()}:** `{old}` **\u2192** `{new}`\")\n\n done.append(key)\n\n if not changes:\n return\n\n message = \"\"\n\n for item in sorted(changes):\n message += f\"{Emojis.bullet} {item}\\n\"\n\n message = f\"**{after.name}** (`{after.id}`)\\n{message}\"\n\n await self.send_log_message(\n Icons.crown_blurple, Colour.blurple(),\n \"Role updated\", message\n )\n\n @Cog.listener()\n async def on_guild_update(self, before: discord.Guild, after: discord.Guild) -> None:\n \"\"\"Log guild update event to mod log.\"\"\"\n if before.id != GuildConstant.id:\n return\n\n diff = DeepDiff(before, after)\n changes = []\n done = []\n\n diff_values = diff.get(\"values_changed\", {})\n diff_values.update(diff.get(\"type_changes\", {}))\n\n for key, value in diff_values.items():\n if not key: # Not sure why, but it happens\n continue\n\n key = key[5:] # Remove \"root.\" prefix\n\n if \"[\" in key:\n key = key.split(\"[\", 1)[0]\n\n if \".\" in key:\n key = key.split(\".\", 1)[0]\n\n if key in done:\n continue\n\n new = value[\"new_value\"]\n old = value[\"old_value\"]\n\n changes.append(f\"**{key.title()}:** `{old}` **\u2192** `{new}`\")\n\n done.append(key)\n\n if not changes:\n return\n\n message = \"\"\n\n for item in sorted(changes):\n message += f\"{Emojis.bullet} {item}\\n\"\n\n message = f\"**{after.name}** (`{after.id}`)\\n{message}\"\n\n await self.send_log_message(\n Icons.guild_update, Colour.blurple(),\n \"Guild updated\", message,\n thumbnail=after.icon_url_as(format=\"png\")\n )\n\n @Cog.listener()\n async def on_member_ban(self, guild: discord.Guild, member: discord.Member) -> None:\n \"\"\"Log ban event to user log.\"\"\"\n if guild.id != GuildConstant.id:\n return\n\n if member.id in self._ignored[Event.member_ban]:\n self._ignored[Event.member_ban].remove(member.id)\n return\n\n await self.send_log_message(\n Icons.user_ban, Colours.soft_red,\n \"User banned\", f\"{member} (`{member.id}`)\",\n thumbnail=member.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.user_log\n )\n\n @Cog.listener()\n async def on_member_join(self, member: discord.Member) -> None:\n \"\"\"Log member join event to user log.\"\"\"\n if member.guild.id != GuildConstant.id:\n return\n\n message = f\"{member} (`{member.id}`)\"\n now = datetime.utcnow()\n difference = abs(relativedelta(now, member.created_at))\n\n message += \"\\n\\n**Account age:** \" + humanize_delta(difference)\n\n if difference.days < 1 and difference.months < 1 and difference.years < 1: # New user account!\n message = f\"{Emojis.new} {message}\"\n\n await self.send_log_message(\n Icons.sign_in, Colours.soft_green,\n \"User joined\", message,\n thumbnail=member.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.user_log\n )\n\n @Cog.listener()\n async def on_member_remove(self, member: discord.Member) -> None:\n \"\"\"Log member leave event to user log.\"\"\"\n if member.guild.id != GuildConstant.id:\n return\n\n if member.id in self._ignored[Event.member_remove]:\n self._ignored[Event.member_remove].remove(member.id)\n return\n\n await self.send_log_message(\n Icons.sign_out, Colours.soft_red,\n \"User left\", f\"{member} (`{member.id}`)\",\n thumbnail=member.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.user_log\n )\n\n @Cog.listener()\n async def on_member_unban(self, guild: discord.Guild, member: discord.User) -> None:\n \"\"\"Log member unban event to mod log.\"\"\"\n if guild.id != GuildConstant.id:\n return\n\n if member.id in self._ignored[Event.member_unban]:\n self._ignored[Event.member_unban].remove(member.id)\n return\n\n await self.send_log_message(\n Icons.user_unban, Colour.blurple(),\n \"User unbanned\", f\"{member} (`{member.id}`)\",\n thumbnail=member.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.mod_log\n )\n\n @Cog.listener()\n async def on_member_update(self, before: discord.Member, after: discord.Member) -> None:\n \"\"\"Log member update event to user log.\"\"\"\n if before.guild.id != GuildConstant.id:\n return\n\n if before.id in self._ignored[Event.member_update]:\n self._ignored[Event.member_update].remove(before.id)\n return\n\n diff = DeepDiff(before, after)\n changes = []\n done = []\n\n diff_values = {}\n\n diff_values.update(diff.get(\"values_changed\", {}))\n diff_values.update(diff.get(\"type_changes\", {}))\n diff_values.update(diff.get(\"iterable_item_removed\", {}))\n diff_values.update(diff.get(\"iterable_item_added\", {}))\n\n diff_user = DeepDiff(before._user, after._user)\n\n diff_values.update(diff_user.get(\"values_changed\", {}))\n diff_values.update(diff_user.get(\"type_changes\", {}))\n diff_values.update(diff_user.get(\"iterable_item_removed\", {}))\n diff_values.update(diff_user.get(\"iterable_item_added\", {}))\n\n for key, value in diff_values.items():\n if not key: # Not sure why, but it happens\n continue\n\n key = key[5:] # Remove \"root.\" prefix\n\n if \"[\" in key:\n key = key.split(\"[\", 1)[0]\n\n if \".\" in key:\n key = key.split(\".\", 1)[0]\n\n if key in done or key in MEMBER_CHANGES_SUPPRESSED:\n continue\n\n if key == \"_roles\":\n new_roles = after.roles\n old_roles = before.roles\n\n for role in old_roles:\n if role not in new_roles:\n changes.append(f\"**Role removed:** {role.name} (`{role.id}`)\")\n\n for role in new_roles:\n if role not in old_roles:\n changes.append(f\"**Role added:** {role.name} (`{role.id}`)\")\n\n else:\n new = value.get(\"new_value\")\n old = value.get(\"old_value\")\n\n if new and old:\n changes.append(f\"**{key.title()}:** `{old}` **\u2192** `{new}`\")\n\n done.append(key)\n\n if before.name != after.name:\n changes.append(\n f\"**Username:** `{before.name}` **\u2192** `{after.name}`\"\n )\n\n if before.discriminator != after.discriminator:\n changes.append(\n f\"**Discriminator:** `{before.discriminator}` **\u2192** `{after.discriminator}`\"\n )\n\n if before.display_name != after.display_name:\n changes.append(\n f\"**Display name:** `{before.display_name}` **\u2192** `{after.display_name}`\"\n )\n\n if not changes:\n return\n\n message = \"\"\n\n for item in sorted(changes):\n message += f\"{Emojis.bullet} {item}\\n\"\n\n message = f\"**{after}** (`{after.id}`)\\n{message}\"\n\n await self.send_log_message(\n Icons.user_update, Colour.blurple(),\n \"Member updated\", message,\n thumbnail=after.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.user_log\n )\n\n @Cog.listener()\n async def on_message_delete(self, message: discord.Message) -> None:\n \"\"\"Log message delete event to message change log.\"\"\"\n channel = message.channel\n author = message.author\n\n if message.guild.id != GuildConstant.id or channel.id in GuildConstant.modlog_blacklist:\n return\n\n self._cached_deletes.append(message.id)\n\n if message.id in self._ignored[Event.message_delete]:\n self._ignored[Event.message_delete].remove(message.id)\n return\n\n if author.bot:\n return\n\n if channel.category:\n response = (\n f\"**Author:** {author} (`{author.id}`)\\n\"\n f\"**Channel:** {channel.category}/#{channel.name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{message.id}`\\n\"\n \"\\n\"\n )\n else:\n response = (\n f\"**Author:** {author} (`{author.id}`)\\n\"\n f\"**Channel:** #{channel.name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{message.id}`\\n\"\n \"\\n\"\n )\n\n if message.attachments:\n # Prepend the message metadata with the number of attachments\n response = f\"**Attachments:** {len(message.attachments)}\\n\" + response\n\n # Shorten the message content if necessary\n content = message.clean_content\n remaining_chars = 2040 - len(response)\n\n if len(content) > remaining_chars:\n botlog_url = await self.upload_log(messages=[message], actor_id=message.author.id)\n ending = f\"\\n\\nMessage truncated, [full message here]({botlog_url}).\"\n truncation_point = remaining_chars - len(ending)\n content = f\"{content[:truncation_point]}...{ending}\"\n\n response += f\"{content}\"\n\n await self.send_log_message(\n Icons.message_delete, Colours.soft_red,\n \"Message deleted\",\n response,\n channel_id=Channels.message_log\n )\n\n @Cog.listener()\n async def on_raw_message_delete(self, event: discord.RawMessageDeleteEvent) -> None:\n \"\"\"Log raw message delete event to message change log.\"\"\"\n if event.guild_id != GuildConstant.id or event.channel_id in GuildConstant.modlog_blacklist:\n return\n\n await asyncio.sleep(1) # Wait here in case the normal event was fired\n\n if event.message_id in self._cached_deletes:\n # It was in the cache and the normal event was fired, so we can just ignore it\n self._cached_deletes.remove(event.message_id)\n return\n\n if event.message_id in self._ignored[Event.message_delete]:\n self._ignored[Event.message_delete].remove(event.message_id)\n return\n\n channel = self.bot.get_channel(event.channel_id)\n\n if channel.category:\n response = (\n f\"**Channel:** {channel.category}/#{channel.name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{event.message_id}`\\n\"\n \"\\n\"\n \"This message was not cached, so the message content cannot be displayed.\"\n )\n else:\n response = (\n f\"**Channel:** #{channel.name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{event.message_id}`\\n\"\n \"\\n\"\n \"This message was not cached, so the message content cannot be displayed.\"\n )\n\n await self.send_log_message(\n Icons.message_delete, Colours.soft_red,\n \"Message deleted\",\n response,\n channel_id=Channels.message_log\n )\n\n @Cog.listener()\n async def on_message_edit(self, msg_before: discord.Message, msg_after: discord.Message) -> None:\n \"\"\"Log message edit event to message change log.\"\"\"\n if (\n not msg_before.guild\n or msg_before.guild.id != GuildConstant.id\n or msg_before.channel.id in GuildConstant.modlog_blacklist\n or msg_before.author.bot\n ):\n return\n\n self._cached_edits.append(msg_before.id)\n\n if msg_before.content == msg_after.content:\n return\n\n author = msg_before.author\n channel = msg_before.channel\n channel_name = f\"{channel.category}/#{channel.name}\" if channel.category else f\"#{channel.name}\"\n\n # Getting the difference per words and group them by type - add, remove, same\n # Note that this is intended grouping without sorting\n diff = difflib.ndiff(msg_before.clean_content.split(), msg_after.clean_content.split())\n diff_groups = tuple(\n (diff_type, tuple(s[2:] for s in diff_words))\n for diff_type, diff_words in itertools.groupby(diff, key=lambda s: s[0])\n )\n\n content_before: t.List[str] = []\n content_after: t.List[str] = []\n\n for index, (diff_type, words) in enumerate(diff_groups):\n sub = ' '.join(words)\n if diff_type == '-':\n content_before.append(f\"[{sub}](http://o.hi)\")\n elif diff_type == '+':\n content_after.append(f\"[{sub}](http://o.hi)\")\n elif diff_type == ' ':\n if len(words) > 2:\n sub = (\n f\"{words[0] if index > 0 else ''}\"\n \" ... \"\n f\"{words[-1] if index < len(diff_groups) - 1 else ''}\"\n )\n content_before.append(sub)\n content_after.append(sub)\n\n response = (\n f\"**Author:** {author} (`{author.id}`)\\n\"\n f\"**Channel:** {channel_name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{msg_before.id}`\\n\"\n \"\\n\"\n f\"**Before**:\\n{' '.join(content_before)}\\n\"\n f\"**After**:\\n{' '.join(content_after)}\\n\"\n \"\\n\"\n f\"[Jump to message]({msg_after.jump_url})\"\n )\n\n if msg_before.edited_at:\n # Message was previously edited, to assist with self-bot detection, use the edited_at\n # datetime as the baseline and create a human-readable delta between this edit event\n # and the last time the message was edited\n timestamp = msg_before.edited_at\n delta = humanize_delta(relativedelta(msg_after.edited_at, msg_before.edited_at))\n footer = f\"Last edited {delta} ago\"\n else:\n # Message was not previously edited, use the created_at datetime as the baseline, no\n # delta calculation needed\n timestamp = msg_before.created_at\n footer = None\n\n await self.send_log_message(\n Icons.message_edit, Colour.blurple(), \"Message edited\", response,\n channel_id=Channels.message_log, timestamp_override=timestamp, footer=footer\n )\n\n @Cog.listener()\n async def on_raw_message_edit(self, event: discord.RawMessageUpdateEvent) -> None:\n \"\"\"Log raw message edit event to message change log.\"\"\"\n try:\n channel = self.bot.get_channel(int(event.data[\"channel_id\"]))\n message = await channel.fetch_message(event.message_id)\n except discord.NotFound: # Was deleted before we got the event\n return\n\n if (\n not message.guild\n or message.guild.id != GuildConstant.id\n or message.channel.id in GuildConstant.modlog_blacklist\n or message.author.bot\n ):\n return\n\n await asyncio.sleep(1) # Wait here in case the normal event was fired\n\n if event.message_id in self._cached_edits:\n # It was in the cache and the normal event was fired, so we can just ignore it\n self._cached_edits.remove(event.message_id)\n return\n\n author = message.author\n channel = message.channel\n channel_name = f\"{channel.category}/#{channel.name}\" if channel.category else f\"#{channel.name}\"\n\n before_response = (\n f\"**Author:** {author} (`{author.id}`)\\n\"\n f\"**Channel:** {channel_name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{message.id}`\\n\"\n \"\\n\"\n \"This message was not cached, so the message content cannot be displayed.\"\n )\n\n after_response = (\n f\"**Author:** {author} (`{author.id}`)\\n\"\n f\"**Channel:** {channel_name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{message.id}`\\n\"\n \"\\n\"\n f\"{message.clean_content}\"\n )\n\n await self.send_log_message(\n Icons.message_edit, Colour.blurple(), \"Message edited (Before)\",\n before_response, channel_id=Channels.message_log\n )\n\n await self.send_log_message(\n Icons.message_edit, Colour.blurple(), \"Message edited (After)\",\n after_response, channel_id=Channels.message_log\n )\n\n @Cog.listener()\n async def on_voice_state_update(\n self,\n member: discord.Member,\n before: discord.VoiceState,\n after: discord.VoiceState\n ) -> None:\n \"\"\"Log member voice state changes to the voice log channel.\"\"\"\n if (\n member.guild.id != GuildConstant.id\n or (before.channel and before.channel.id in GuildConstant.modlog_blacklist)\n ):\n return\n\n if member.id in self._ignored[Event.voice_state_update]:\n self._ignored[Event.voice_state_update].remove(member.id)\n return\n\n # Exclude all channel attributes except the name.\n diff = DeepDiff(\n before,\n after,\n exclude_paths=(\"root.session_id\", \"root.afk\"),\n exclude_regex_paths=r\"root\\.channel\\.(?!name)\",\n )\n\n # A type change seems to always take precedent over a value change. Furthermore, it will\n # include the value change along with the type change anyway. Therefore, it's OK to\n # \"overwrite\" values_changed; in practice there will never even be anything to overwrite.\n diff_values = {**diff.get(\"values_changed\", {}), **diff.get(\"type_changes\", {})}\n\n icon = Icons.voice_state_blue\n colour = Colour.blurple()\n changes = []\n\n for attr, values in diff_values.items():\n if not attr: # Not sure why, but it happens.\n continue\n\n old = values[\"old_value\"]\n new = values[\"new_value\"]\n\n attr = attr[5:] # Remove \"root.\" prefix.\n attr = VOICE_STATE_ATTRIBUTES.get(attr, attr.replace(\"_\", \" \").capitalize())\n\n changes.append(f\"**{attr}:** `{old}` **\u2192** `{new}`\")\n\n # Set the embed icon and colour depending on which attribute changed.\n if any(name in attr for name in (\"Channel\", \"deaf\", \"mute\")):\n if new is None or new is True:\n # Left a channel or was muted/deafened.\n icon = Icons.voice_state_red\n colour = Colours.soft_red\n elif old is None or old is True:\n # Joined a channel or was unmuted/undeafened.\n icon = Icons.voice_state_green\n colour = Colours.soft_green\n\n if not changes:\n return\n\n message = \"\\n\".join(f\"{Emojis.bullet} {item}\" for item in sorted(changes))\n message = f\"**{member}** (`{member.id}`)\\n{message}\"\n\n await self.send_log_message(\n icon_url=icon,\n colour=colour,\n title=\"Voice state updated\",\n text=message,\n thumbnail=member.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.voice_log\n )\n", "path": "bot/cogs/moderation/modlog.py"}]} |
gh_patches_debug_1553 | rasdani/github-patches | git_diff | beetbox__beets-3905 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Lyrics plugin crashing on tekstowo.pl
I just found it crashing while searching tekstowo.pl (I'm on https://github.com/beetbox/beets/commit/afc072801c4a254e6b4114d89133d6bebc3e34b9).
These are the relevant lines of error.
```
return html.find("div", class_="song-text").get_text()
AttributeError: 'NoneType' object has no attribute 'get_text'
```
I also printed the html to the console, but I am unsure why the error is appearing. The div with the class song-text seems to exist and I've never worked on beets before. Could it have something to do with there being two divs with the class?
I uploaded the html, but had to do it with txt extension due to GitHub limitations.
[temp.txt](https://github.com/beetbox/beets/files/6292601/temp.txt)
_Originally posted by @njelich in https://github.com/beetbox/beets/issues/3903#issuecomment-817359091_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `beetsplug/lyrics.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This file is part of beets.
3 # Copyright 2016, Adrian Sampson.
4 #
5 # Permission is hereby granted, free of charge, to any person obtaining
6 # a copy of this software and associated documentation files (the
7 # "Software"), to deal in the Software without restriction, including
8 # without limitation the rights to use, copy, modify, merge, publish,
9 # distribute, sublicense, and/or sell copies of the Software, and to
10 # permit persons to whom the Software is furnished to do so, subject to
11 # the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be
14 # included in all copies or substantial portions of the Software.
15
16 """Fetches, embeds, and displays lyrics.
17 """
18
19 from __future__ import absolute_import, division, print_function
20
21 import difflib
22 import errno
23 import itertools
24 import json
25 import struct
26 import os.path
27 import re
28 import requests
29 import unicodedata
30 from unidecode import unidecode
31 import warnings
32 import six
33 from six.moves import urllib
34
35 try:
36 from bs4 import SoupStrainer, BeautifulSoup
37 HAS_BEAUTIFUL_SOUP = True
38 except ImportError:
39 HAS_BEAUTIFUL_SOUP = False
40
41 try:
42 import langdetect
43 HAS_LANGDETECT = True
44 except ImportError:
45 HAS_LANGDETECT = False
46
47 try:
48 # PY3: HTMLParseError was removed in 3.5 as strict mode
49 # was deprecated in 3.3.
50 # https://docs.python.org/3.3/library/html.parser.html
51 from six.moves.html_parser import HTMLParseError
52 except ImportError:
53 class HTMLParseError(Exception):
54 pass
55
56 from beets import plugins
57 from beets import ui
58 import beets
59
60 DIV_RE = re.compile(r'<(/?)div>?', re.I)
61 COMMENT_RE = re.compile(r'<!--.*-->', re.S)
62 TAG_RE = re.compile(r'<[^>]*>')
63 BREAK_RE = re.compile(r'\n?\s*<br([\s|/][^>]*)*>\s*\n?', re.I)
64 URL_CHARACTERS = {
65 u'\u2018': u"'",
66 u'\u2019': u"'",
67 u'\u201c': u'"',
68 u'\u201d': u'"',
69 u'\u2010': u'-',
70 u'\u2011': u'-',
71 u'\u2012': u'-',
72 u'\u2013': u'-',
73 u'\u2014': u'-',
74 u'\u2015': u'-',
75 u'\u2016': u'-',
76 u'\u2026': u'...',
77 }
78 USER_AGENT = 'beets/{}'.format(beets.__version__)
79
80 # The content for the base index.rst generated in ReST mode.
81 REST_INDEX_TEMPLATE = u'''Lyrics
82 ======
83
84 * :ref:`Song index <genindex>`
85 * :ref:`search`
86
87 Artist index:
88
89 .. toctree::
90 :maxdepth: 1
91 :glob:
92
93 artists/*
94 '''
95
96 # The content for the base conf.py generated.
97 REST_CONF_TEMPLATE = u'''# -*- coding: utf-8 -*-
98 master_doc = 'index'
99 project = u'Lyrics'
100 copyright = u'none'
101 author = u'Various Authors'
102 latex_documents = [
103 (master_doc, 'Lyrics.tex', project,
104 author, 'manual'),
105 ]
106 epub_title = project
107 epub_author = author
108 epub_publisher = author
109 epub_copyright = copyright
110 epub_exclude_files = ['search.html']
111 epub_tocdepth = 1
112 epub_tocdup = False
113 '''
114
115
116 # Utilities.
117
118 def unichar(i):
119 try:
120 return six.unichr(i)
121 except ValueError:
122 return struct.pack('i', i).decode('utf-32')
123
124
125 def unescape(text):
126 """Resolve &#xxx; HTML entities (and some others)."""
127 if isinstance(text, bytes):
128 text = text.decode('utf-8', 'ignore')
129 out = text.replace(u' ', u' ')
130
131 def replchar(m):
132 num = m.group(1)
133 return unichar(int(num))
134 out = re.sub(u"&#(\\d+);", replchar, out)
135 return out
136
137
138 def extract_text_between(html, start_marker, end_marker):
139 try:
140 _, html = html.split(start_marker, 1)
141 html, _ = html.split(end_marker, 1)
142 except ValueError:
143 return u''
144 return html
145
146
147 def search_pairs(item):
148 """Yield a pairs of artists and titles to search for.
149
150 The first item in the pair is the name of the artist, the second
151 item is a list of song names.
152
153 In addition to the artist and title obtained from the `item` the
154 method tries to strip extra information like paranthesized suffixes
155 and featured artists from the strings and add them as candidates.
156 The artist sort name is added as a fallback candidate to help in
157 cases where artist name includes special characters or is in a
158 non-latin script.
159 The method also tries to split multiple titles separated with `/`.
160 """
161 def generate_alternatives(string, patterns):
162 """Generate string alternatives by extracting first matching group for
163 each given pattern.
164 """
165 alternatives = [string]
166 for pattern in patterns:
167 match = re.search(pattern, string, re.IGNORECASE)
168 if match:
169 alternatives.append(match.group(1))
170 return alternatives
171
172 title, artist, artist_sort = item.title, item.artist, item.artist_sort
173
174 patterns = [
175 # Remove any featuring artists from the artists name
176 r"(.*?) {0}".format(plugins.feat_tokens())]
177 artists = generate_alternatives(artist, patterns)
178 # Use the artist_sort as fallback only if it differs from artist to avoid
179 # repeated remote requests with the same search terms
180 if artist != artist_sort:
181 artists.append(artist_sort)
182
183 patterns = [
184 # Remove a parenthesized suffix from a title string. Common
185 # examples include (live), (remix), and (acoustic).
186 r"(.+?)\s+[(].*[)]$",
187 # Remove any featuring artists from the title
188 r"(.*?) {0}".format(plugins.feat_tokens(for_artist=False)),
189 # Remove part of title after colon ':' for songs with subtitles
190 r"(.+?)\s*:.*"]
191 titles = generate_alternatives(title, patterns)
192
193 # Check for a dual song (e.g. Pink Floyd - Speak to Me / Breathe)
194 # and each of them.
195 multi_titles = []
196 for title in titles:
197 multi_titles.append([title])
198 if '/' in title:
199 multi_titles.append([x.strip() for x in title.split('/')])
200
201 return itertools.product(artists, multi_titles)
202
203
204 def slug(text):
205 """Make a URL-safe, human-readable version of the given text
206
207 This will do the following:
208
209 1. decode unicode characters into ASCII
210 2. shift everything to lowercase
211 3. strip whitespace
212 4. replace other non-word characters with dashes
213 5. strip extra dashes
214
215 This somewhat duplicates the :func:`Google.slugify` function but
216 slugify is not as generic as this one, which can be reused
217 elsewhere.
218 """
219 return re.sub(r'\W+', '-', unidecode(text).lower().strip()).strip('-')
220
221
222 class Backend(object):
223 def __init__(self, config, log):
224 self._log = log
225
226 @staticmethod
227 def _encode(s):
228 """Encode the string for inclusion in a URL"""
229 if isinstance(s, six.text_type):
230 for char, repl in URL_CHARACTERS.items():
231 s = s.replace(char, repl)
232 s = s.encode('utf-8', 'ignore')
233 return urllib.parse.quote(s)
234
235 def build_url(self, artist, title):
236 return self.URL_PATTERN % (self._encode(artist.title()),
237 self._encode(title.title()))
238
239 def fetch_url(self, url):
240 """Retrieve the content at a given URL, or return None if the source
241 is unreachable.
242 """
243 try:
244 # Disable the InsecureRequestWarning that comes from using
245 # `verify=false`.
246 # https://github.com/kennethreitz/requests/issues/2214
247 # We're not overly worried about the NSA MITMing our lyrics scraper
248 with warnings.catch_warnings():
249 warnings.simplefilter('ignore')
250 r = requests.get(url, verify=False, headers={
251 'User-Agent': USER_AGENT,
252 })
253 except requests.RequestException as exc:
254 self._log.debug(u'lyrics request failed: {0}', exc)
255 return
256 if r.status_code == requests.codes.ok:
257 return r.text
258 else:
259 self._log.debug(u'failed to fetch: {0} ({1})', url, r.status_code)
260
261 def fetch(self, artist, title):
262 raise NotImplementedError()
263
264
265 class MusiXmatch(Backend):
266 REPLACEMENTS = {
267 r'\s+': '-',
268 '<': 'Less_Than',
269 '>': 'Greater_Than',
270 '#': 'Number_',
271 r'[\[\{]': '(',
272 r'[\]\}]': ')',
273 }
274
275 URL_PATTERN = 'https://www.musixmatch.com/lyrics/%s/%s'
276
277 @classmethod
278 def _encode(cls, s):
279 for old, new in cls.REPLACEMENTS.items():
280 s = re.sub(old, new, s)
281
282 return super(MusiXmatch, cls)._encode(s)
283
284 def fetch(self, artist, title):
285 url = self.build_url(artist, title)
286
287 html = self.fetch_url(url)
288 if not html:
289 return
290 if "We detected that your IP is blocked" in html:
291 self._log.warning(u'we are blocked at MusixMatch: url %s failed'
292 % url)
293 return
294 html_parts = html.split('<p class="mxm-lyrics__content')
295 # Sometimes lyrics come in 2 or more parts
296 lyrics_parts = []
297 for html_part in html_parts:
298 lyrics_parts.append(extract_text_between(html_part, '>', '</p>'))
299 lyrics = '\n'.join(lyrics_parts)
300 lyrics = lyrics.strip(',"').replace('\\n', '\n')
301 # another odd case: sometimes only that string remains, for
302 # missing songs. this seems to happen after being blocked
303 # above, when filling in the CAPTCHA.
304 if "Instant lyrics for all your music." in lyrics:
305 return
306 # sometimes there are non-existent lyrics with some content
307 if 'Lyrics | Musixmatch' in lyrics:
308 return
309 return lyrics
310
311
312 class Genius(Backend):
313 """Fetch lyrics from Genius via genius-api.
314
315 Simply adapted from
316 bigishdata.com/2016/09/27/getting-song-lyrics-from-geniuss-api-scraping/
317 """
318
319 base_url = "https://api.genius.com"
320
321 def __init__(self, config, log):
322 super(Genius, self).__init__(config, log)
323 self.api_key = config['genius_api_key'].as_str()
324 self.headers = {
325 'Authorization': "Bearer %s" % self.api_key,
326 'User-Agent': USER_AGENT,
327 }
328
329 def fetch(self, artist, title):
330 """Fetch lyrics from genius.com
331
332 Because genius doesn't allow accesssing lyrics via the api,
333 we first query the api for a url matching our artist & title,
334 then attempt to scrape that url for the lyrics.
335 """
336 json = self._search(artist, title)
337 if not json:
338 self._log.debug(u'Genius API request returned invalid JSON')
339 return None
340
341 # find a matching artist in the json
342 for hit in json["response"]["hits"]:
343 hit_artist = hit["result"]["primary_artist"]["name"]
344
345 if slug(hit_artist) == slug(artist):
346 return self._scrape_lyrics_from_html(
347 self.fetch_url(hit["result"]["url"]))
348
349 self._log.debug(u'Genius failed to find a matching artist for \'{0}\'',
350 artist)
351
352 def _search(self, artist, title):
353 """Searches the genius api for a given artist and title
354
355 https://docs.genius.com/#search-h2
356
357 :returns: json response
358 """
359 search_url = self.base_url + "/search"
360 data = {'q': title + " " + artist.lower()}
361 try:
362 response = requests.get(
363 search_url, data=data, headers=self.headers)
364 except requests.RequestException as exc:
365 self._log.debug(u'Genius API request failed: {0}', exc)
366 return None
367
368 try:
369 return response.json()
370 except ValueError:
371 return None
372
373 def _scrape_lyrics_from_html(self, html):
374 """Scrape lyrics from a given genius.com html"""
375
376 html = BeautifulSoup(html, "html.parser")
377
378 # Remove script tags that they put in the middle of the lyrics.
379 [h.extract() for h in html('script')]
380
381 # Most of the time, the page contains a div with class="lyrics" where
382 # all of the lyrics can be found already correctly formatted
383 # Sometimes, though, it packages the lyrics into separate divs, most
384 # likely for easier ad placement
385 lyrics_div = html.find("div", class_="lyrics")
386 if not lyrics_div:
387 self._log.debug(u'Received unusual song page html')
388 verse_div = html.find("div",
389 class_=re.compile("Lyrics__Container"))
390 if not verse_div:
391 if html.find("div",
392 class_=re.compile("LyricsPlaceholder__Message"),
393 string="This song is an instrumental"):
394 self._log.debug('Detected instrumental')
395 return "[Instrumental]"
396 else:
397 self._log.debug("Couldn't scrape page using known layouts")
398 return None
399
400 lyrics_div = verse_div.parent
401 for br in lyrics_div.find_all("br"):
402 br.replace_with("\n")
403 ads = lyrics_div.find_all("div",
404 class_=re.compile("InreadAd__Container"))
405 for ad in ads:
406 ad.replace_with("\n")
407
408 return lyrics_div.get_text()
409
410
411 class Tekstowo(Backend):
412 # Fetch lyrics from Tekstowo.pl.
413
414 BASE_URL = 'http://www.tekstowo.pl'
415 URL_PATTERN = BASE_URL + '/wyszukaj.html?search-title=%s&search-artist=%s'
416
417 def fetch(self, artist, title):
418 url = self.build_url(title, artist)
419 search_results = self.fetch_url(url)
420 song_page_url = self.parse_search_results(search_results)
421
422 if not song_page_url:
423 return None
424
425 song_page_html = self.fetch_url(song_page_url)
426 return self.extract_lyrics(song_page_html)
427
428 def parse_search_results(self, html):
429 if not HAS_BEAUTIFUL_SOUP:
430 return None
431
432 html = _scrape_strip_cruft(html)
433 html = _scrape_merge_paragraphs(html)
434
435 try:
436 html = BeautifulSoup(html, "html.parser")
437 except HTMLParseError:
438 return None
439
440 song_row = html.find("div", class_="content"). \
441 find_all("div", class_="box-przeboje")[0]
442
443 if not song_row:
444 return None
445
446 href = song_row.find('a').get('href')
447 return self.BASE_URL + href
448
449 def extract_lyrics(self, html):
450 html = _scrape_strip_cruft(html)
451 html = _scrape_merge_paragraphs(html)
452
453 try:
454 html = BeautifulSoup(html, "html.parser")
455 except HTMLParseError:
456 return None
457
458 return html.find("div", class_="song-text").get_text()
459
460
461 def remove_credits(text):
462 """Remove first/last line of text if it contains the word 'lyrics'
463 eg 'Lyrics by songsdatabase.com'
464 """
465 textlines = text.split('\n')
466 credits = None
467 for i in (0, -1):
468 if textlines and 'lyrics' in textlines[i].lower():
469 credits = textlines.pop(i)
470 if credits:
471 text = '\n'.join(textlines)
472 return text
473
474
475 def _scrape_strip_cruft(html, plain_text_out=False):
476 """Clean up HTML
477 """
478 html = unescape(html)
479
480 html = html.replace('\r', '\n') # Normalize EOL.
481 html = re.sub(r' +', ' ', html) # Whitespaces collapse.
482 html = BREAK_RE.sub('\n', html) # <br> eats up surrounding '\n'.
483 html = re.sub(r'(?s)<(script).*?</\1>', '', html) # Strip script tags.
484 html = re.sub(u'\u2005', " ", html) # replace unicode with regular space
485
486 if plain_text_out: # Strip remaining HTML tags
487 html = COMMENT_RE.sub('', html)
488 html = TAG_RE.sub('', html)
489
490 html = '\n'.join([x.strip() for x in html.strip().split('\n')])
491 html = re.sub(r'\n{3,}', r'\n\n', html)
492 return html
493
494
495 def _scrape_merge_paragraphs(html):
496 html = re.sub(r'</p>\s*<p(\s*[^>]*)>', '\n', html)
497 return re.sub(r'<div .*>\s*</div>', '\n', html)
498
499
500 def scrape_lyrics_from_html(html):
501 """Scrape lyrics from a URL. If no lyrics can be found, return None
502 instead.
503 """
504 if not HAS_BEAUTIFUL_SOUP:
505 return None
506
507 if not html:
508 return None
509
510 def is_text_notcode(text):
511 length = len(text)
512 return (length > 20 and
513 text.count(' ') > length / 25 and
514 (text.find('{') == -1 or text.find(';') == -1))
515 html = _scrape_strip_cruft(html)
516 html = _scrape_merge_paragraphs(html)
517
518 # extract all long text blocks that are not code
519 try:
520 soup = BeautifulSoup(html, "html.parser",
521 parse_only=SoupStrainer(text=is_text_notcode))
522 except HTMLParseError:
523 return None
524
525 # Get the longest text element (if any).
526 strings = sorted(soup.stripped_strings, key=len, reverse=True)
527 if strings:
528 return strings[0]
529 else:
530 return None
531
532
533 class Google(Backend):
534 """Fetch lyrics from Google search results."""
535
536 def __init__(self, config, log):
537 super(Google, self).__init__(config, log)
538 self.api_key = config['google_API_key'].as_str()
539 self.engine_id = config['google_engine_ID'].as_str()
540
541 def is_lyrics(self, text, artist=None):
542 """Determine whether the text seems to be valid lyrics.
543 """
544 if not text:
545 return False
546 bad_triggers_occ = []
547 nb_lines = text.count('\n')
548 if nb_lines <= 1:
549 self._log.debug(u"Ignoring too short lyrics '{0}'", text)
550 return False
551 elif nb_lines < 5:
552 bad_triggers_occ.append('too_short')
553 else:
554 # Lyrics look legit, remove credits to avoid being penalized
555 # further down
556 text = remove_credits(text)
557
558 bad_triggers = ['lyrics', 'copyright', 'property', 'links']
559 if artist:
560 bad_triggers += [artist]
561
562 for item in bad_triggers:
563 bad_triggers_occ += [item] * len(re.findall(r'\W%s\W' % item,
564 text, re.I))
565
566 if bad_triggers_occ:
567 self._log.debug(u'Bad triggers detected: {0}', bad_triggers_occ)
568 return len(bad_triggers_occ) < 2
569
570 def slugify(self, text):
571 """Normalize a string and remove non-alphanumeric characters.
572 """
573 text = re.sub(r"[-'_\s]", '_', text)
574 text = re.sub(r"_+", '_', text).strip('_')
575 pat = r"([^,\(]*)\((.*?)\)" # Remove content within parentheses
576 text = re.sub(pat, r'\g<1>', text).strip()
577 try:
578 text = unicodedata.normalize('NFKD', text).encode('ascii',
579 'ignore')
580 text = six.text_type(re.sub(r'[-\s]+', ' ', text.decode('utf-8')))
581 except UnicodeDecodeError:
582 self._log.exception(u"Failing to normalize '{0}'", text)
583 return text
584
585 BY_TRANS = ['by', 'par', 'de', 'von']
586 LYRICS_TRANS = ['lyrics', 'paroles', 'letras', 'liedtexte']
587
588 def is_page_candidate(self, url_link, url_title, title, artist):
589 """Return True if the URL title makes it a good candidate to be a
590 page that contains lyrics of title by artist.
591 """
592 title = self.slugify(title.lower())
593 artist = self.slugify(artist.lower())
594 sitename = re.search(u"//([^/]+)/.*",
595 self.slugify(url_link.lower())).group(1)
596 url_title = self.slugify(url_title.lower())
597
598 # Check if URL title contains song title (exact match)
599 if url_title.find(title) != -1:
600 return True
601
602 # or try extracting song title from URL title and check if
603 # they are close enough
604 tokens = [by + '_' + artist for by in self.BY_TRANS] + \
605 [artist, sitename, sitename.replace('www.', '')] + \
606 self.LYRICS_TRANS
607 tokens = [re.escape(t) for t in tokens]
608 song_title = re.sub(u'(%s)' % u'|'.join(tokens), u'', url_title)
609
610 song_title = song_title.strip('_|')
611 typo_ratio = .9
612 ratio = difflib.SequenceMatcher(None, song_title, title).ratio()
613 return ratio >= typo_ratio
614
615 def fetch(self, artist, title):
616 query = u"%s %s" % (artist, title)
617 url = u'https://www.googleapis.com/customsearch/v1?key=%s&cx=%s&q=%s' \
618 % (self.api_key, self.engine_id,
619 urllib.parse.quote(query.encode('utf-8')))
620
621 data = self.fetch_url(url)
622 if not data:
623 self._log.debug(u'google backend returned no data')
624 return None
625 try:
626 data = json.loads(data)
627 except ValueError as exc:
628 self._log.debug(u'google backend returned malformed JSON: {}', exc)
629 if 'error' in data:
630 reason = data['error']['errors'][0]['reason']
631 self._log.debug(u'google backend error: {0}', reason)
632 return None
633
634 if 'items' in data.keys():
635 for item in data['items']:
636 url_link = item['link']
637 url_title = item.get('title', u'')
638 if not self.is_page_candidate(url_link, url_title,
639 title, artist):
640 continue
641 html = self.fetch_url(url_link)
642 lyrics = scrape_lyrics_from_html(html)
643 if not lyrics:
644 continue
645
646 if self.is_lyrics(lyrics, artist):
647 self._log.debug(u'got lyrics from {0}',
648 item['displayLink'])
649 return lyrics
650
651
652 class LyricsPlugin(plugins.BeetsPlugin):
653 SOURCES = ['google', 'musixmatch', 'genius', 'tekstowo']
654 BS_SOURCES = ['google', 'genius', 'tekstowo']
655 SOURCE_BACKENDS = {
656 'google': Google,
657 'musixmatch': MusiXmatch,
658 'genius': Genius,
659 'tekstowo': Tekstowo,
660 }
661
662 def __init__(self):
663 super(LyricsPlugin, self).__init__()
664 self.import_stages = [self.imported]
665 self.config.add({
666 'auto': True,
667 'bing_client_secret': None,
668 'bing_lang_from': [],
669 'bing_lang_to': None,
670 'google_API_key': None,
671 'google_engine_ID': u'009217259823014548361:lndtuqkycfu',
672 'genius_api_key':
673 "Ryq93pUGm8bM6eUWwD_M3NOFFDAtp2yEE7W"
674 "76V-uFL5jks5dNvcGCdarqFjDhP9c",
675 'fallback': None,
676 'force': False,
677 'local': False,
678 'sources': self.SOURCES,
679 })
680 self.config['bing_client_secret'].redact = True
681 self.config['google_API_key'].redact = True
682 self.config['google_engine_ID'].redact = True
683 self.config['genius_api_key'].redact = True
684
685 # State information for the ReST writer.
686 # First, the current artist we're writing.
687 self.artist = u'Unknown artist'
688 # The current album: False means no album yet.
689 self.album = False
690 # The current rest file content. None means the file is not
691 # open yet.
692 self.rest = None
693
694 available_sources = list(self.SOURCES)
695 sources = plugins.sanitize_choices(
696 self.config['sources'].as_str_seq(), available_sources)
697
698 if not HAS_BEAUTIFUL_SOUP:
699 sources = self.sanitize_bs_sources(sources)
700
701 if 'google' in sources:
702 if not self.config['google_API_key'].get():
703 # We log a *debug* message here because the default
704 # configuration includes `google`. This way, the source
705 # is silent by default but can be enabled just by
706 # setting an API key.
707 self._log.debug(u'Disabling google source: '
708 u'no API key configured.')
709 sources.remove('google')
710
711 self.config['bing_lang_from'] = [
712 x.lower() for x in self.config['bing_lang_from'].as_str_seq()]
713 self.bing_auth_token = None
714
715 if not HAS_LANGDETECT and self.config['bing_client_secret'].get():
716 self._log.warning(u'To use bing translations, you need to '
717 u'install the langdetect module. See the '
718 u'documentation for further details.')
719
720 self.backends = [self.SOURCE_BACKENDS[source](self.config, self._log)
721 for source in sources]
722
723 def sanitize_bs_sources(self, sources):
724 for source in self.BS_SOURCES:
725 if source in sources:
726 self._log.debug(u'To use the %s lyrics source, you must '
727 u'install the beautifulsoup4 module. See '
728 u'the documentation for further details.'
729 % source)
730 sources.remove(source)
731
732 return sources
733
734 def get_bing_access_token(self):
735 params = {
736 'client_id': 'beets',
737 'client_secret': self.config['bing_client_secret'],
738 'scope': "https://api.microsofttranslator.com",
739 'grant_type': 'client_credentials',
740 }
741
742 oauth_url = 'https://datamarket.accesscontrol.windows.net/v2/OAuth2-13'
743 oauth_token = json.loads(requests.post(
744 oauth_url,
745 data=urllib.parse.urlencode(params)).content)
746 if 'access_token' in oauth_token:
747 return "Bearer " + oauth_token['access_token']
748 else:
749 self._log.warning(u'Could not get Bing Translate API access token.'
750 u' Check your "bing_client_secret" password')
751
752 def commands(self):
753 cmd = ui.Subcommand('lyrics', help='fetch song lyrics')
754 cmd.parser.add_option(
755 u'-p', u'--print', dest='printlyr',
756 action='store_true', default=False,
757 help=u'print lyrics to console',
758 )
759 cmd.parser.add_option(
760 u'-r', u'--write-rest', dest='writerest',
761 action='store', default=None, metavar='dir',
762 help=u'write lyrics to given directory as ReST files',
763 )
764 cmd.parser.add_option(
765 u'-f', u'--force', dest='force_refetch',
766 action='store_true', default=False,
767 help=u'always re-download lyrics',
768 )
769 cmd.parser.add_option(
770 u'-l', u'--local', dest='local_only',
771 action='store_true', default=False,
772 help=u'do not fetch missing lyrics',
773 )
774
775 def func(lib, opts, args):
776 # The "write to files" option corresponds to the
777 # import_write config value.
778 write = ui.should_write()
779 if opts.writerest:
780 self.writerest_indexes(opts.writerest)
781 items = lib.items(ui.decargs(args))
782 for item in items:
783 if not opts.local_only and not self.config['local']:
784 self.fetch_item_lyrics(
785 lib, item, write,
786 opts.force_refetch or self.config['force'],
787 )
788 if item.lyrics:
789 if opts.printlyr:
790 ui.print_(item.lyrics)
791 if opts.writerest:
792 self.appendrest(opts.writerest, item)
793 if opts.writerest and items:
794 # flush last artist & write to ReST
795 self.writerest(opts.writerest)
796 ui.print_(u'ReST files generated. to build, use one of:')
797 ui.print_(u' sphinx-build -b html %s _build/html'
798 % opts.writerest)
799 ui.print_(u' sphinx-build -b epub %s _build/epub'
800 % opts.writerest)
801 ui.print_((u' sphinx-build -b latex %s _build/latex '
802 u'&& make -C _build/latex all-pdf')
803 % opts.writerest)
804 cmd.func = func
805 return [cmd]
806
807 def appendrest(self, directory, item):
808 """Append the item to an ReST file
809
810 This will keep state (in the `rest` variable) in order to avoid
811 writing continuously to the same files.
812 """
813
814 if slug(self.artist) != slug(item.albumartist):
815 # Write current file and start a new one ~ item.albumartist
816 self.writerest(directory)
817 self.artist = item.albumartist.strip()
818 self.rest = u"%s\n%s\n\n.. contents::\n :local:\n\n" \
819 % (self.artist,
820 u'=' * len(self.artist))
821
822 if self.album != item.album:
823 tmpalbum = self.album = item.album.strip()
824 if self.album == '':
825 tmpalbum = u'Unknown album'
826 self.rest += u"%s\n%s\n\n" % (tmpalbum, u'-' * len(tmpalbum))
827 title_str = u":index:`%s`" % item.title.strip()
828 block = u'| ' + item.lyrics.replace(u'\n', u'\n| ')
829 self.rest += u"%s\n%s\n\n%s\n\n" % (title_str,
830 u'~' * len(title_str),
831 block)
832
833 def writerest(self, directory):
834 """Write self.rest to a ReST file
835 """
836 if self.rest is not None and self.artist is not None:
837 path = os.path.join(directory, 'artists',
838 slug(self.artist) + u'.rst')
839 with open(path, 'wb') as output:
840 output.write(self.rest.encode('utf-8'))
841
842 def writerest_indexes(self, directory):
843 """Write conf.py and index.rst files necessary for Sphinx
844
845 We write minimal configurations that are necessary for Sphinx
846 to operate. We do not overwrite existing files so that
847 customizations are respected."""
848 try:
849 os.makedirs(os.path.join(directory, 'artists'))
850 except OSError as e:
851 if e.errno == errno.EEXIST:
852 pass
853 else:
854 raise
855 indexfile = os.path.join(directory, 'index.rst')
856 if not os.path.exists(indexfile):
857 with open(indexfile, 'w') as output:
858 output.write(REST_INDEX_TEMPLATE)
859 conffile = os.path.join(directory, 'conf.py')
860 if not os.path.exists(conffile):
861 with open(conffile, 'w') as output:
862 output.write(REST_CONF_TEMPLATE)
863
864 def imported(self, session, task):
865 """Import hook for fetching lyrics automatically.
866 """
867 if self.config['auto']:
868 for item in task.imported_items():
869 self.fetch_item_lyrics(session.lib, item,
870 False, self.config['force'])
871
872 def fetch_item_lyrics(self, lib, item, write, force):
873 """Fetch and store lyrics for a single item. If ``write``, then the
874 lyrics will also be written to the file itself.
875 """
876 # Skip if the item already has lyrics.
877 if not force and item.lyrics:
878 self._log.info(u'lyrics already present: {0}', item)
879 return
880
881 lyrics = None
882 for artist, titles in search_pairs(item):
883 lyrics = [self.get_lyrics(artist, title) for title in titles]
884 if any(lyrics):
885 break
886
887 lyrics = u"\n\n---\n\n".join([l for l in lyrics if l])
888
889 if lyrics:
890 self._log.info(u'fetched lyrics: {0}', item)
891 if HAS_LANGDETECT and self.config['bing_client_secret'].get():
892 lang_from = langdetect.detect(lyrics)
893 if self.config['bing_lang_to'].get() != lang_from and (
894 not self.config['bing_lang_from'] or (
895 lang_from in self.config[
896 'bing_lang_from'].as_str_seq())):
897 lyrics = self.append_translation(
898 lyrics, self.config['bing_lang_to'])
899 else:
900 self._log.info(u'lyrics not found: {0}', item)
901 fallback = self.config['fallback'].get()
902 if fallback:
903 lyrics = fallback
904 else:
905 return
906 item.lyrics = lyrics
907 if write:
908 item.try_write()
909 item.store()
910
911 def get_lyrics(self, artist, title):
912 """Fetch lyrics, trying each source in turn. Return a string or
913 None if no lyrics were found.
914 """
915 for backend in self.backends:
916 lyrics = backend.fetch(artist, title)
917 if lyrics:
918 self._log.debug(u'got lyrics from backend: {0}',
919 backend.__class__.__name__)
920 return _scrape_strip_cruft(lyrics, True)
921
922 def append_translation(self, text, to_lang):
923 from xml.etree import ElementTree
924
925 if not self.bing_auth_token:
926 self.bing_auth_token = self.get_bing_access_token()
927 if self.bing_auth_token:
928 # Extract unique lines to limit API request size per song
929 text_lines = set(text.split('\n'))
930 url = ('https://api.microsofttranslator.com/v2/Http.svc/'
931 'Translate?text=%s&to=%s' % ('|'.join(text_lines), to_lang))
932 r = requests.get(url,
933 headers={"Authorization ": self.bing_auth_token})
934 if r.status_code != 200:
935 self._log.debug('translation API error {}: {}', r.status_code,
936 r.text)
937 if 'token has expired' in r.text:
938 self.bing_auth_token = None
939 return self.append_translation(text, to_lang)
940 return text
941 lines_translated = ElementTree.fromstring(
942 r.text.encode('utf-8')).text
943 # Use a translation mapping dict to build resulting lyrics
944 translations = dict(zip(text_lines, lines_translated.split('|')))
945 result = ''
946 for line in text.split('\n'):
947 result += '%s / %s\n' % (line, translations[line])
948 return result
949
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/beetsplug/lyrics.py b/beetsplug/lyrics.py
--- a/beetsplug/lyrics.py
+++ b/beetsplug/lyrics.py
@@ -437,8 +437,14 @@
except HTMLParseError:
return None
- song_row = html.find("div", class_="content"). \
- find_all("div", class_="box-przeboje")[0]
+ song_rows = html.find("div", class_="content"). \
+ find("div", class_="card"). \
+ find_all("div", class_="box-przeboje")
+
+ if not song_rows:
+ return None
+
+ song_row = song_rows[0]
if not song_row:
return None
| {"golden_diff": "diff --git a/beetsplug/lyrics.py b/beetsplug/lyrics.py\n--- a/beetsplug/lyrics.py\n+++ b/beetsplug/lyrics.py\n@@ -437,8 +437,14 @@\n except HTMLParseError:\n return None\n \n- song_row = html.find(\"div\", class_=\"content\"). \\\n- find_all(\"div\", class_=\"box-przeboje\")[0]\n+ song_rows = html.find(\"div\", class_=\"content\"). \\\n+ find(\"div\", class_=\"card\"). \\\n+ find_all(\"div\", class_=\"box-przeboje\")\n+\n+ if not song_rows:\n+ return None\n+\n+ song_row = song_rows[0]\n \n if not song_row:\n return None\n", "issue": "Lyrics plugin crashing on tekstowo.pl\nI just found it crashing while searching tekstowo.pl (I'm on https://github.com/beetbox/beets/commit/afc072801c4a254e6b4114d89133d6bebc3e34b9).\r\n\r\nThese are the relevant lines of error.\r\n\r\n```\r\n return html.find(\"div\", class_=\"song-text\").get_text()\r\nAttributeError: 'NoneType' object has no attribute 'get_text'\r\n```\r\n\r\nI also printed the html to the console, but I am unsure why the error is appearing. The div with the class song-text seems to exist and I've never worked on beets before. Could it have something to do with there being two divs with the class?\r\n\r\nI uploaded the html, but had to do it with txt extension due to GitHub limitations.\r\n\r\n[temp.txt](https://github.com/beetbox/beets/files/6292601/temp.txt)\r\n\r\n_Originally posted by @njelich in https://github.com/beetbox/beets/issues/3903#issuecomment-817359091_\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Fetches, embeds, and displays lyrics.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport difflib\nimport errno\nimport itertools\nimport json\nimport struct\nimport os.path\nimport re\nimport requests\nimport unicodedata\nfrom unidecode import unidecode\nimport warnings\nimport six\nfrom six.moves import urllib\n\ntry:\n from bs4 import SoupStrainer, BeautifulSoup\n HAS_BEAUTIFUL_SOUP = True\nexcept ImportError:\n HAS_BEAUTIFUL_SOUP = False\n\ntry:\n import langdetect\n HAS_LANGDETECT = True\nexcept ImportError:\n HAS_LANGDETECT = False\n\ntry:\n # PY3: HTMLParseError was removed in 3.5 as strict mode\n # was deprecated in 3.3.\n # https://docs.python.org/3.3/library/html.parser.html\n from six.moves.html_parser import HTMLParseError\nexcept ImportError:\n class HTMLParseError(Exception):\n pass\n\nfrom beets import plugins\nfrom beets import ui\nimport beets\n\nDIV_RE = re.compile(r'<(/?)div>?', re.I)\nCOMMENT_RE = re.compile(r'<!--.*-->', re.S)\nTAG_RE = re.compile(r'<[^>]*>')\nBREAK_RE = re.compile(r'\\n?\\s*<br([\\s|/][^>]*)*>\\s*\\n?', re.I)\nURL_CHARACTERS = {\n u'\\u2018': u\"'\",\n u'\\u2019': u\"'\",\n u'\\u201c': u'\"',\n u'\\u201d': u'\"',\n u'\\u2010': u'-',\n u'\\u2011': u'-',\n u'\\u2012': u'-',\n u'\\u2013': u'-',\n u'\\u2014': u'-',\n u'\\u2015': u'-',\n u'\\u2016': u'-',\n u'\\u2026': u'...',\n}\nUSER_AGENT = 'beets/{}'.format(beets.__version__)\n\n# The content for the base index.rst generated in ReST mode.\nREST_INDEX_TEMPLATE = u'''Lyrics\n======\n\n* :ref:`Song index <genindex>`\n* :ref:`search`\n\nArtist index:\n\n.. toctree::\n :maxdepth: 1\n :glob:\n\n artists/*\n'''\n\n# The content for the base conf.py generated.\nREST_CONF_TEMPLATE = u'''# -*- coding: utf-8 -*-\nmaster_doc = 'index'\nproject = u'Lyrics'\ncopyright = u'none'\nauthor = u'Various Authors'\nlatex_documents = [\n (master_doc, 'Lyrics.tex', project,\n author, 'manual'),\n]\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\nepub_exclude_files = ['search.html']\nepub_tocdepth = 1\nepub_tocdup = False\n'''\n\n\n# Utilities.\n\ndef unichar(i):\n try:\n return six.unichr(i)\n except ValueError:\n return struct.pack('i', i).decode('utf-32')\n\n\ndef unescape(text):\n \"\"\"Resolve &#xxx; HTML entities (and some others).\"\"\"\n if isinstance(text, bytes):\n text = text.decode('utf-8', 'ignore')\n out = text.replace(u' ', u' ')\n\n def replchar(m):\n num = m.group(1)\n return unichar(int(num))\n out = re.sub(u\"&#(\\\\d+);\", replchar, out)\n return out\n\n\ndef extract_text_between(html, start_marker, end_marker):\n try:\n _, html = html.split(start_marker, 1)\n html, _ = html.split(end_marker, 1)\n except ValueError:\n return u''\n return html\n\n\ndef search_pairs(item):\n \"\"\"Yield a pairs of artists and titles to search for.\n\n The first item in the pair is the name of the artist, the second\n item is a list of song names.\n\n In addition to the artist and title obtained from the `item` the\n method tries to strip extra information like paranthesized suffixes\n and featured artists from the strings and add them as candidates.\n The artist sort name is added as a fallback candidate to help in\n cases where artist name includes special characters or is in a\n non-latin script.\n The method also tries to split multiple titles separated with `/`.\n \"\"\"\n def generate_alternatives(string, patterns):\n \"\"\"Generate string alternatives by extracting first matching group for\n each given pattern.\n \"\"\"\n alternatives = [string]\n for pattern in patterns:\n match = re.search(pattern, string, re.IGNORECASE)\n if match:\n alternatives.append(match.group(1))\n return alternatives\n\n title, artist, artist_sort = item.title, item.artist, item.artist_sort\n\n patterns = [\n # Remove any featuring artists from the artists name\n r\"(.*?) {0}\".format(plugins.feat_tokens())]\n artists = generate_alternatives(artist, patterns)\n # Use the artist_sort as fallback only if it differs from artist to avoid\n # repeated remote requests with the same search terms\n if artist != artist_sort:\n artists.append(artist_sort)\n\n patterns = [\n # Remove a parenthesized suffix from a title string. Common\n # examples include (live), (remix), and (acoustic).\n r\"(.+?)\\s+[(].*[)]$\",\n # Remove any featuring artists from the title\n r\"(.*?) {0}\".format(plugins.feat_tokens(for_artist=False)),\n # Remove part of title after colon ':' for songs with subtitles\n r\"(.+?)\\s*:.*\"]\n titles = generate_alternatives(title, patterns)\n\n # Check for a dual song (e.g. Pink Floyd - Speak to Me / Breathe)\n # and each of them.\n multi_titles = []\n for title in titles:\n multi_titles.append([title])\n if '/' in title:\n multi_titles.append([x.strip() for x in title.split('/')])\n\n return itertools.product(artists, multi_titles)\n\n\ndef slug(text):\n \"\"\"Make a URL-safe, human-readable version of the given text\n\n This will do the following:\n\n 1. decode unicode characters into ASCII\n 2. shift everything to lowercase\n 3. strip whitespace\n 4. replace other non-word characters with dashes\n 5. strip extra dashes\n\n This somewhat duplicates the :func:`Google.slugify` function but\n slugify is not as generic as this one, which can be reused\n elsewhere.\n \"\"\"\n return re.sub(r'\\W+', '-', unidecode(text).lower().strip()).strip('-')\n\n\nclass Backend(object):\n def __init__(self, config, log):\n self._log = log\n\n @staticmethod\n def _encode(s):\n \"\"\"Encode the string for inclusion in a URL\"\"\"\n if isinstance(s, six.text_type):\n for char, repl in URL_CHARACTERS.items():\n s = s.replace(char, repl)\n s = s.encode('utf-8', 'ignore')\n return urllib.parse.quote(s)\n\n def build_url(self, artist, title):\n return self.URL_PATTERN % (self._encode(artist.title()),\n self._encode(title.title()))\n\n def fetch_url(self, url):\n \"\"\"Retrieve the content at a given URL, or return None if the source\n is unreachable.\n \"\"\"\n try:\n # Disable the InsecureRequestWarning that comes from using\n # `verify=false`.\n # https://github.com/kennethreitz/requests/issues/2214\n # We're not overly worried about the NSA MITMing our lyrics scraper\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n r = requests.get(url, verify=False, headers={\n 'User-Agent': USER_AGENT,\n })\n except requests.RequestException as exc:\n self._log.debug(u'lyrics request failed: {0}', exc)\n return\n if r.status_code == requests.codes.ok:\n return r.text\n else:\n self._log.debug(u'failed to fetch: {0} ({1})', url, r.status_code)\n\n def fetch(self, artist, title):\n raise NotImplementedError()\n\n\nclass MusiXmatch(Backend):\n REPLACEMENTS = {\n r'\\s+': '-',\n '<': 'Less_Than',\n '>': 'Greater_Than',\n '#': 'Number_',\n r'[\\[\\{]': '(',\n r'[\\]\\}]': ')',\n }\n\n URL_PATTERN = 'https://www.musixmatch.com/lyrics/%s/%s'\n\n @classmethod\n def _encode(cls, s):\n for old, new in cls.REPLACEMENTS.items():\n s = re.sub(old, new, s)\n\n return super(MusiXmatch, cls)._encode(s)\n\n def fetch(self, artist, title):\n url = self.build_url(artist, title)\n\n html = self.fetch_url(url)\n if not html:\n return\n if \"We detected that your IP is blocked\" in html:\n self._log.warning(u'we are blocked at MusixMatch: url %s failed'\n % url)\n return\n html_parts = html.split('<p class=\"mxm-lyrics__content')\n # Sometimes lyrics come in 2 or more parts\n lyrics_parts = []\n for html_part in html_parts:\n lyrics_parts.append(extract_text_between(html_part, '>', '</p>'))\n lyrics = '\\n'.join(lyrics_parts)\n lyrics = lyrics.strip(',\"').replace('\\\\n', '\\n')\n # another odd case: sometimes only that string remains, for\n # missing songs. this seems to happen after being blocked\n # above, when filling in the CAPTCHA.\n if \"Instant lyrics for all your music.\" in lyrics:\n return\n # sometimes there are non-existent lyrics with some content\n if 'Lyrics | Musixmatch' in lyrics:\n return\n return lyrics\n\n\nclass Genius(Backend):\n \"\"\"Fetch lyrics from Genius via genius-api.\n\n Simply adapted from\n bigishdata.com/2016/09/27/getting-song-lyrics-from-geniuss-api-scraping/\n \"\"\"\n\n base_url = \"https://api.genius.com\"\n\n def __init__(self, config, log):\n super(Genius, self).__init__(config, log)\n self.api_key = config['genius_api_key'].as_str()\n self.headers = {\n 'Authorization': \"Bearer %s\" % self.api_key,\n 'User-Agent': USER_AGENT,\n }\n\n def fetch(self, artist, title):\n \"\"\"Fetch lyrics from genius.com\n\n Because genius doesn't allow accesssing lyrics via the api,\n we first query the api for a url matching our artist & title,\n then attempt to scrape that url for the lyrics.\n \"\"\"\n json = self._search(artist, title)\n if not json:\n self._log.debug(u'Genius API request returned invalid JSON')\n return None\n\n # find a matching artist in the json\n for hit in json[\"response\"][\"hits\"]:\n hit_artist = hit[\"result\"][\"primary_artist\"][\"name\"]\n\n if slug(hit_artist) == slug(artist):\n return self._scrape_lyrics_from_html(\n self.fetch_url(hit[\"result\"][\"url\"]))\n\n self._log.debug(u'Genius failed to find a matching artist for \\'{0}\\'',\n artist)\n\n def _search(self, artist, title):\n \"\"\"Searches the genius api for a given artist and title\n\n https://docs.genius.com/#search-h2\n\n :returns: json response\n \"\"\"\n search_url = self.base_url + \"/search\"\n data = {'q': title + \" \" + artist.lower()}\n try:\n response = requests.get(\n search_url, data=data, headers=self.headers)\n except requests.RequestException as exc:\n self._log.debug(u'Genius API request failed: {0}', exc)\n return None\n\n try:\n return response.json()\n except ValueError:\n return None\n\n def _scrape_lyrics_from_html(self, html):\n \"\"\"Scrape lyrics from a given genius.com html\"\"\"\n\n html = BeautifulSoup(html, \"html.parser\")\n\n # Remove script tags that they put in the middle of the lyrics.\n [h.extract() for h in html('script')]\n\n # Most of the time, the page contains a div with class=\"lyrics\" where\n # all of the lyrics can be found already correctly formatted\n # Sometimes, though, it packages the lyrics into separate divs, most\n # likely for easier ad placement\n lyrics_div = html.find(\"div\", class_=\"lyrics\")\n if not lyrics_div:\n self._log.debug(u'Received unusual song page html')\n verse_div = html.find(\"div\",\n class_=re.compile(\"Lyrics__Container\"))\n if not verse_div:\n if html.find(\"div\",\n class_=re.compile(\"LyricsPlaceholder__Message\"),\n string=\"This song is an instrumental\"):\n self._log.debug('Detected instrumental')\n return \"[Instrumental]\"\n else:\n self._log.debug(\"Couldn't scrape page using known layouts\")\n return None\n\n lyrics_div = verse_div.parent\n for br in lyrics_div.find_all(\"br\"):\n br.replace_with(\"\\n\")\n ads = lyrics_div.find_all(\"div\",\n class_=re.compile(\"InreadAd__Container\"))\n for ad in ads:\n ad.replace_with(\"\\n\")\n\n return lyrics_div.get_text()\n\n\nclass Tekstowo(Backend):\n # Fetch lyrics from Tekstowo.pl.\n\n BASE_URL = 'http://www.tekstowo.pl'\n URL_PATTERN = BASE_URL + '/wyszukaj.html?search-title=%s&search-artist=%s'\n\n def fetch(self, artist, title):\n url = self.build_url(title, artist)\n search_results = self.fetch_url(url)\n song_page_url = self.parse_search_results(search_results)\n\n if not song_page_url:\n return None\n\n song_page_html = self.fetch_url(song_page_url)\n return self.extract_lyrics(song_page_html)\n\n def parse_search_results(self, html):\n if not HAS_BEAUTIFUL_SOUP:\n return None\n\n html = _scrape_strip_cruft(html)\n html = _scrape_merge_paragraphs(html)\n\n try:\n html = BeautifulSoup(html, \"html.parser\")\n except HTMLParseError:\n return None\n\n song_row = html.find(\"div\", class_=\"content\"). \\\n find_all(\"div\", class_=\"box-przeboje\")[0]\n\n if not song_row:\n return None\n\n href = song_row.find('a').get('href')\n return self.BASE_URL + href\n\n def extract_lyrics(self, html):\n html = _scrape_strip_cruft(html)\n html = _scrape_merge_paragraphs(html)\n\n try:\n html = BeautifulSoup(html, \"html.parser\")\n except HTMLParseError:\n return None\n\n return html.find(\"div\", class_=\"song-text\").get_text()\n\n\ndef remove_credits(text):\n \"\"\"Remove first/last line of text if it contains the word 'lyrics'\n eg 'Lyrics by songsdatabase.com'\n \"\"\"\n textlines = text.split('\\n')\n credits = None\n for i in (0, -1):\n if textlines and 'lyrics' in textlines[i].lower():\n credits = textlines.pop(i)\n if credits:\n text = '\\n'.join(textlines)\n return text\n\n\ndef _scrape_strip_cruft(html, plain_text_out=False):\n \"\"\"Clean up HTML\n \"\"\"\n html = unescape(html)\n\n html = html.replace('\\r', '\\n') # Normalize EOL.\n html = re.sub(r' +', ' ', html) # Whitespaces collapse.\n html = BREAK_RE.sub('\\n', html) # <br> eats up surrounding '\\n'.\n html = re.sub(r'(?s)<(script).*?</\\1>', '', html) # Strip script tags.\n html = re.sub(u'\\u2005', \" \", html) # replace unicode with regular space\n\n if plain_text_out: # Strip remaining HTML tags\n html = COMMENT_RE.sub('', html)\n html = TAG_RE.sub('', html)\n\n html = '\\n'.join([x.strip() for x in html.strip().split('\\n')])\n html = re.sub(r'\\n{3,}', r'\\n\\n', html)\n return html\n\n\ndef _scrape_merge_paragraphs(html):\n html = re.sub(r'</p>\\s*<p(\\s*[^>]*)>', '\\n', html)\n return re.sub(r'<div .*>\\s*</div>', '\\n', html)\n\n\ndef scrape_lyrics_from_html(html):\n \"\"\"Scrape lyrics from a URL. If no lyrics can be found, return None\n instead.\n \"\"\"\n if not HAS_BEAUTIFUL_SOUP:\n return None\n\n if not html:\n return None\n\n def is_text_notcode(text):\n length = len(text)\n return (length > 20 and\n text.count(' ') > length / 25 and\n (text.find('{') == -1 or text.find(';') == -1))\n html = _scrape_strip_cruft(html)\n html = _scrape_merge_paragraphs(html)\n\n # extract all long text blocks that are not code\n try:\n soup = BeautifulSoup(html, \"html.parser\",\n parse_only=SoupStrainer(text=is_text_notcode))\n except HTMLParseError:\n return None\n\n # Get the longest text element (if any).\n strings = sorted(soup.stripped_strings, key=len, reverse=True)\n if strings:\n return strings[0]\n else:\n return None\n\n\nclass Google(Backend):\n \"\"\"Fetch lyrics from Google search results.\"\"\"\n\n def __init__(self, config, log):\n super(Google, self).__init__(config, log)\n self.api_key = config['google_API_key'].as_str()\n self.engine_id = config['google_engine_ID'].as_str()\n\n def is_lyrics(self, text, artist=None):\n \"\"\"Determine whether the text seems to be valid lyrics.\n \"\"\"\n if not text:\n return False\n bad_triggers_occ = []\n nb_lines = text.count('\\n')\n if nb_lines <= 1:\n self._log.debug(u\"Ignoring too short lyrics '{0}'\", text)\n return False\n elif nb_lines < 5:\n bad_triggers_occ.append('too_short')\n else:\n # Lyrics look legit, remove credits to avoid being penalized\n # further down\n text = remove_credits(text)\n\n bad_triggers = ['lyrics', 'copyright', 'property', 'links']\n if artist:\n bad_triggers += [artist]\n\n for item in bad_triggers:\n bad_triggers_occ += [item] * len(re.findall(r'\\W%s\\W' % item,\n text, re.I))\n\n if bad_triggers_occ:\n self._log.debug(u'Bad triggers detected: {0}', bad_triggers_occ)\n return len(bad_triggers_occ) < 2\n\n def slugify(self, text):\n \"\"\"Normalize a string and remove non-alphanumeric characters.\n \"\"\"\n text = re.sub(r\"[-'_\\s]\", '_', text)\n text = re.sub(r\"_+\", '_', text).strip('_')\n pat = r\"([^,\\(]*)\\((.*?)\\)\" # Remove content within parentheses\n text = re.sub(pat, r'\\g<1>', text).strip()\n try:\n text = unicodedata.normalize('NFKD', text).encode('ascii',\n 'ignore')\n text = six.text_type(re.sub(r'[-\\s]+', ' ', text.decode('utf-8')))\n except UnicodeDecodeError:\n self._log.exception(u\"Failing to normalize '{0}'\", text)\n return text\n\n BY_TRANS = ['by', 'par', 'de', 'von']\n LYRICS_TRANS = ['lyrics', 'paroles', 'letras', 'liedtexte']\n\n def is_page_candidate(self, url_link, url_title, title, artist):\n \"\"\"Return True if the URL title makes it a good candidate to be a\n page that contains lyrics of title by artist.\n \"\"\"\n title = self.slugify(title.lower())\n artist = self.slugify(artist.lower())\n sitename = re.search(u\"//([^/]+)/.*\",\n self.slugify(url_link.lower())).group(1)\n url_title = self.slugify(url_title.lower())\n\n # Check if URL title contains song title (exact match)\n if url_title.find(title) != -1:\n return True\n\n # or try extracting song title from URL title and check if\n # they are close enough\n tokens = [by + '_' + artist for by in self.BY_TRANS] + \\\n [artist, sitename, sitename.replace('www.', '')] + \\\n self.LYRICS_TRANS\n tokens = [re.escape(t) for t in tokens]\n song_title = re.sub(u'(%s)' % u'|'.join(tokens), u'', url_title)\n\n song_title = song_title.strip('_|')\n typo_ratio = .9\n ratio = difflib.SequenceMatcher(None, song_title, title).ratio()\n return ratio >= typo_ratio\n\n def fetch(self, artist, title):\n query = u\"%s %s\" % (artist, title)\n url = u'https://www.googleapis.com/customsearch/v1?key=%s&cx=%s&q=%s' \\\n % (self.api_key, self.engine_id,\n urllib.parse.quote(query.encode('utf-8')))\n\n data = self.fetch_url(url)\n if not data:\n self._log.debug(u'google backend returned no data')\n return None\n try:\n data = json.loads(data)\n except ValueError as exc:\n self._log.debug(u'google backend returned malformed JSON: {}', exc)\n if 'error' in data:\n reason = data['error']['errors'][0]['reason']\n self._log.debug(u'google backend error: {0}', reason)\n return None\n\n if 'items' in data.keys():\n for item in data['items']:\n url_link = item['link']\n url_title = item.get('title', u'')\n if not self.is_page_candidate(url_link, url_title,\n title, artist):\n continue\n html = self.fetch_url(url_link)\n lyrics = scrape_lyrics_from_html(html)\n if not lyrics:\n continue\n\n if self.is_lyrics(lyrics, artist):\n self._log.debug(u'got lyrics from {0}',\n item['displayLink'])\n return lyrics\n\n\nclass LyricsPlugin(plugins.BeetsPlugin):\n SOURCES = ['google', 'musixmatch', 'genius', 'tekstowo']\n BS_SOURCES = ['google', 'genius', 'tekstowo']\n SOURCE_BACKENDS = {\n 'google': Google,\n 'musixmatch': MusiXmatch,\n 'genius': Genius,\n 'tekstowo': Tekstowo,\n }\n\n def __init__(self):\n super(LyricsPlugin, self).__init__()\n self.import_stages = [self.imported]\n self.config.add({\n 'auto': True,\n 'bing_client_secret': None,\n 'bing_lang_from': [],\n 'bing_lang_to': None,\n 'google_API_key': None,\n 'google_engine_ID': u'009217259823014548361:lndtuqkycfu',\n 'genius_api_key':\n \"Ryq93pUGm8bM6eUWwD_M3NOFFDAtp2yEE7W\"\n \"76V-uFL5jks5dNvcGCdarqFjDhP9c\",\n 'fallback': None,\n 'force': False,\n 'local': False,\n 'sources': self.SOURCES,\n })\n self.config['bing_client_secret'].redact = True\n self.config['google_API_key'].redact = True\n self.config['google_engine_ID'].redact = True\n self.config['genius_api_key'].redact = True\n\n # State information for the ReST writer.\n # First, the current artist we're writing.\n self.artist = u'Unknown artist'\n # The current album: False means no album yet.\n self.album = False\n # The current rest file content. None means the file is not\n # open yet.\n self.rest = None\n\n available_sources = list(self.SOURCES)\n sources = plugins.sanitize_choices(\n self.config['sources'].as_str_seq(), available_sources)\n\n if not HAS_BEAUTIFUL_SOUP:\n sources = self.sanitize_bs_sources(sources)\n\n if 'google' in sources:\n if not self.config['google_API_key'].get():\n # We log a *debug* message here because the default\n # configuration includes `google`. This way, the source\n # is silent by default but can be enabled just by\n # setting an API key.\n self._log.debug(u'Disabling google source: '\n u'no API key configured.')\n sources.remove('google')\n\n self.config['bing_lang_from'] = [\n x.lower() for x in self.config['bing_lang_from'].as_str_seq()]\n self.bing_auth_token = None\n\n if not HAS_LANGDETECT and self.config['bing_client_secret'].get():\n self._log.warning(u'To use bing translations, you need to '\n u'install the langdetect module. See the '\n u'documentation for further details.')\n\n self.backends = [self.SOURCE_BACKENDS[source](self.config, self._log)\n for source in sources]\n\n def sanitize_bs_sources(self, sources):\n for source in self.BS_SOURCES:\n if source in sources:\n self._log.debug(u'To use the %s lyrics source, you must '\n u'install the beautifulsoup4 module. See '\n u'the documentation for further details.'\n % source)\n sources.remove(source)\n\n return sources\n\n def get_bing_access_token(self):\n params = {\n 'client_id': 'beets',\n 'client_secret': self.config['bing_client_secret'],\n 'scope': \"https://api.microsofttranslator.com\",\n 'grant_type': 'client_credentials',\n }\n\n oauth_url = 'https://datamarket.accesscontrol.windows.net/v2/OAuth2-13'\n oauth_token = json.loads(requests.post(\n oauth_url,\n data=urllib.parse.urlencode(params)).content)\n if 'access_token' in oauth_token:\n return \"Bearer \" + oauth_token['access_token']\n else:\n self._log.warning(u'Could not get Bing Translate API access token.'\n u' Check your \"bing_client_secret\" password')\n\n def commands(self):\n cmd = ui.Subcommand('lyrics', help='fetch song lyrics')\n cmd.parser.add_option(\n u'-p', u'--print', dest='printlyr',\n action='store_true', default=False,\n help=u'print lyrics to console',\n )\n cmd.parser.add_option(\n u'-r', u'--write-rest', dest='writerest',\n action='store', default=None, metavar='dir',\n help=u'write lyrics to given directory as ReST files',\n )\n cmd.parser.add_option(\n u'-f', u'--force', dest='force_refetch',\n action='store_true', default=False,\n help=u'always re-download lyrics',\n )\n cmd.parser.add_option(\n u'-l', u'--local', dest='local_only',\n action='store_true', default=False,\n help=u'do not fetch missing lyrics',\n )\n\n def func(lib, opts, args):\n # The \"write to files\" option corresponds to the\n # import_write config value.\n write = ui.should_write()\n if opts.writerest:\n self.writerest_indexes(opts.writerest)\n items = lib.items(ui.decargs(args))\n for item in items:\n if not opts.local_only and not self.config['local']:\n self.fetch_item_lyrics(\n lib, item, write,\n opts.force_refetch or self.config['force'],\n )\n if item.lyrics:\n if opts.printlyr:\n ui.print_(item.lyrics)\n if opts.writerest:\n self.appendrest(opts.writerest, item)\n if opts.writerest and items:\n # flush last artist & write to ReST\n self.writerest(opts.writerest)\n ui.print_(u'ReST files generated. to build, use one of:')\n ui.print_(u' sphinx-build -b html %s _build/html'\n % opts.writerest)\n ui.print_(u' sphinx-build -b epub %s _build/epub'\n % opts.writerest)\n ui.print_((u' sphinx-build -b latex %s _build/latex '\n u'&& make -C _build/latex all-pdf')\n % opts.writerest)\n cmd.func = func\n return [cmd]\n\n def appendrest(self, directory, item):\n \"\"\"Append the item to an ReST file\n\n This will keep state (in the `rest` variable) in order to avoid\n writing continuously to the same files.\n \"\"\"\n\n if slug(self.artist) != slug(item.albumartist):\n # Write current file and start a new one ~ item.albumartist\n self.writerest(directory)\n self.artist = item.albumartist.strip()\n self.rest = u\"%s\\n%s\\n\\n.. contents::\\n :local:\\n\\n\" \\\n % (self.artist,\n u'=' * len(self.artist))\n\n if self.album != item.album:\n tmpalbum = self.album = item.album.strip()\n if self.album == '':\n tmpalbum = u'Unknown album'\n self.rest += u\"%s\\n%s\\n\\n\" % (tmpalbum, u'-' * len(tmpalbum))\n title_str = u\":index:`%s`\" % item.title.strip()\n block = u'| ' + item.lyrics.replace(u'\\n', u'\\n| ')\n self.rest += u\"%s\\n%s\\n\\n%s\\n\\n\" % (title_str,\n u'~' * len(title_str),\n block)\n\n def writerest(self, directory):\n \"\"\"Write self.rest to a ReST file\n \"\"\"\n if self.rest is not None and self.artist is not None:\n path = os.path.join(directory, 'artists',\n slug(self.artist) + u'.rst')\n with open(path, 'wb') as output:\n output.write(self.rest.encode('utf-8'))\n\n def writerest_indexes(self, directory):\n \"\"\"Write conf.py and index.rst files necessary for Sphinx\n\n We write minimal configurations that are necessary for Sphinx\n to operate. We do not overwrite existing files so that\n customizations are respected.\"\"\"\n try:\n os.makedirs(os.path.join(directory, 'artists'))\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n indexfile = os.path.join(directory, 'index.rst')\n if not os.path.exists(indexfile):\n with open(indexfile, 'w') as output:\n output.write(REST_INDEX_TEMPLATE)\n conffile = os.path.join(directory, 'conf.py')\n if not os.path.exists(conffile):\n with open(conffile, 'w') as output:\n output.write(REST_CONF_TEMPLATE)\n\n def imported(self, session, task):\n \"\"\"Import hook for fetching lyrics automatically.\n \"\"\"\n if self.config['auto']:\n for item in task.imported_items():\n self.fetch_item_lyrics(session.lib, item,\n False, self.config['force'])\n\n def fetch_item_lyrics(self, lib, item, write, force):\n \"\"\"Fetch and store lyrics for a single item. If ``write``, then the\n lyrics will also be written to the file itself.\n \"\"\"\n # Skip if the item already has lyrics.\n if not force and item.lyrics:\n self._log.info(u'lyrics already present: {0}', item)\n return\n\n lyrics = None\n for artist, titles in search_pairs(item):\n lyrics = [self.get_lyrics(artist, title) for title in titles]\n if any(lyrics):\n break\n\n lyrics = u\"\\n\\n---\\n\\n\".join([l for l in lyrics if l])\n\n if lyrics:\n self._log.info(u'fetched lyrics: {0}', item)\n if HAS_LANGDETECT and self.config['bing_client_secret'].get():\n lang_from = langdetect.detect(lyrics)\n if self.config['bing_lang_to'].get() != lang_from and (\n not self.config['bing_lang_from'] or (\n lang_from in self.config[\n 'bing_lang_from'].as_str_seq())):\n lyrics = self.append_translation(\n lyrics, self.config['bing_lang_to'])\n else:\n self._log.info(u'lyrics not found: {0}', item)\n fallback = self.config['fallback'].get()\n if fallback:\n lyrics = fallback\n else:\n return\n item.lyrics = lyrics\n if write:\n item.try_write()\n item.store()\n\n def get_lyrics(self, artist, title):\n \"\"\"Fetch lyrics, trying each source in turn. Return a string or\n None if no lyrics were found.\n \"\"\"\n for backend in self.backends:\n lyrics = backend.fetch(artist, title)\n if lyrics:\n self._log.debug(u'got lyrics from backend: {0}',\n backend.__class__.__name__)\n return _scrape_strip_cruft(lyrics, True)\n\n def append_translation(self, text, to_lang):\n from xml.etree import ElementTree\n\n if not self.bing_auth_token:\n self.bing_auth_token = self.get_bing_access_token()\n if self.bing_auth_token:\n # Extract unique lines to limit API request size per song\n text_lines = set(text.split('\\n'))\n url = ('https://api.microsofttranslator.com/v2/Http.svc/'\n 'Translate?text=%s&to=%s' % ('|'.join(text_lines), to_lang))\n r = requests.get(url,\n headers={\"Authorization \": self.bing_auth_token})\n if r.status_code != 200:\n self._log.debug('translation API error {}: {}', r.status_code,\n r.text)\n if 'token has expired' in r.text:\n self.bing_auth_token = None\n return self.append_translation(text, to_lang)\n return text\n lines_translated = ElementTree.fromstring(\n r.text.encode('utf-8')).text\n # Use a translation mapping dict to build resulting lyrics\n translations = dict(zip(text_lines, lines_translated.split('|')))\n result = ''\n for line in text.split('\\n'):\n result += '%s / %s\\n' % (line, translations[line])\n return result\n", "path": "beetsplug/lyrics.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Fetches, embeds, and displays lyrics.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport difflib\nimport errno\nimport itertools\nimport json\nimport struct\nimport os.path\nimport re\nimport requests\nimport unicodedata\nfrom unidecode import unidecode\nimport warnings\nimport six\nfrom six.moves import urllib\n\ntry:\n from bs4 import SoupStrainer, BeautifulSoup\n HAS_BEAUTIFUL_SOUP = True\nexcept ImportError:\n HAS_BEAUTIFUL_SOUP = False\n\ntry:\n import langdetect\n HAS_LANGDETECT = True\nexcept ImportError:\n HAS_LANGDETECT = False\n\ntry:\n # PY3: HTMLParseError was removed in 3.5 as strict mode\n # was deprecated in 3.3.\n # https://docs.python.org/3.3/library/html.parser.html\n from six.moves.html_parser import HTMLParseError\nexcept ImportError:\n class HTMLParseError(Exception):\n pass\n\nfrom beets import plugins\nfrom beets import ui\nimport beets\n\nDIV_RE = re.compile(r'<(/?)div>?', re.I)\nCOMMENT_RE = re.compile(r'<!--.*-->', re.S)\nTAG_RE = re.compile(r'<[^>]*>')\nBREAK_RE = re.compile(r'\\n?\\s*<br([\\s|/][^>]*)*>\\s*\\n?', re.I)\nURL_CHARACTERS = {\n u'\\u2018': u\"'\",\n u'\\u2019': u\"'\",\n u'\\u201c': u'\"',\n u'\\u201d': u'\"',\n u'\\u2010': u'-',\n u'\\u2011': u'-',\n u'\\u2012': u'-',\n u'\\u2013': u'-',\n u'\\u2014': u'-',\n u'\\u2015': u'-',\n u'\\u2016': u'-',\n u'\\u2026': u'...',\n}\nUSER_AGENT = 'beets/{}'.format(beets.__version__)\n\n# The content for the base index.rst generated in ReST mode.\nREST_INDEX_TEMPLATE = u'''Lyrics\n======\n\n* :ref:`Song index <genindex>`\n* :ref:`search`\n\nArtist index:\n\n.. toctree::\n :maxdepth: 1\n :glob:\n\n artists/*\n'''\n\n# The content for the base conf.py generated.\nREST_CONF_TEMPLATE = u'''# -*- coding: utf-8 -*-\nmaster_doc = 'index'\nproject = u'Lyrics'\ncopyright = u'none'\nauthor = u'Various Authors'\nlatex_documents = [\n (master_doc, 'Lyrics.tex', project,\n author, 'manual'),\n]\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\nepub_exclude_files = ['search.html']\nepub_tocdepth = 1\nepub_tocdup = False\n'''\n\n\n# Utilities.\n\ndef unichar(i):\n try:\n return six.unichr(i)\n except ValueError:\n return struct.pack('i', i).decode('utf-32')\n\n\ndef unescape(text):\n \"\"\"Resolve &#xxx; HTML entities (and some others).\"\"\"\n if isinstance(text, bytes):\n text = text.decode('utf-8', 'ignore')\n out = text.replace(u' ', u' ')\n\n def replchar(m):\n num = m.group(1)\n return unichar(int(num))\n out = re.sub(u\"&#(\\\\d+);\", replchar, out)\n return out\n\n\ndef extract_text_between(html, start_marker, end_marker):\n try:\n _, html = html.split(start_marker, 1)\n html, _ = html.split(end_marker, 1)\n except ValueError:\n return u''\n return html\n\n\ndef search_pairs(item):\n \"\"\"Yield a pairs of artists and titles to search for.\n\n The first item in the pair is the name of the artist, the second\n item is a list of song names.\n\n In addition to the artist and title obtained from the `item` the\n method tries to strip extra information like paranthesized suffixes\n and featured artists from the strings and add them as candidates.\n The artist sort name is added as a fallback candidate to help in\n cases where artist name includes special characters or is in a\n non-latin script.\n The method also tries to split multiple titles separated with `/`.\n \"\"\"\n def generate_alternatives(string, patterns):\n \"\"\"Generate string alternatives by extracting first matching group for\n each given pattern.\n \"\"\"\n alternatives = [string]\n for pattern in patterns:\n match = re.search(pattern, string, re.IGNORECASE)\n if match:\n alternatives.append(match.group(1))\n return alternatives\n\n title, artist, artist_sort = item.title, item.artist, item.artist_sort\n\n patterns = [\n # Remove any featuring artists from the artists name\n r\"(.*?) {0}\".format(plugins.feat_tokens())]\n artists = generate_alternatives(artist, patterns)\n # Use the artist_sort as fallback only if it differs from artist to avoid\n # repeated remote requests with the same search terms\n if artist != artist_sort:\n artists.append(artist_sort)\n\n patterns = [\n # Remove a parenthesized suffix from a title string. Common\n # examples include (live), (remix), and (acoustic).\n r\"(.+?)\\s+[(].*[)]$\",\n # Remove any featuring artists from the title\n r\"(.*?) {0}\".format(plugins.feat_tokens(for_artist=False)),\n # Remove part of title after colon ':' for songs with subtitles\n r\"(.+?)\\s*:.*\"]\n titles = generate_alternatives(title, patterns)\n\n # Check for a dual song (e.g. Pink Floyd - Speak to Me / Breathe)\n # and each of them.\n multi_titles = []\n for title in titles:\n multi_titles.append([title])\n if '/' in title:\n multi_titles.append([x.strip() for x in title.split('/')])\n\n return itertools.product(artists, multi_titles)\n\n\ndef slug(text):\n \"\"\"Make a URL-safe, human-readable version of the given text\n\n This will do the following:\n\n 1. decode unicode characters into ASCII\n 2. shift everything to lowercase\n 3. strip whitespace\n 4. replace other non-word characters with dashes\n 5. strip extra dashes\n\n This somewhat duplicates the :func:`Google.slugify` function but\n slugify is not as generic as this one, which can be reused\n elsewhere.\n \"\"\"\n return re.sub(r'\\W+', '-', unidecode(text).lower().strip()).strip('-')\n\n\nclass Backend(object):\n def __init__(self, config, log):\n self._log = log\n\n @staticmethod\n def _encode(s):\n \"\"\"Encode the string for inclusion in a URL\"\"\"\n if isinstance(s, six.text_type):\n for char, repl in URL_CHARACTERS.items():\n s = s.replace(char, repl)\n s = s.encode('utf-8', 'ignore')\n return urllib.parse.quote(s)\n\n def build_url(self, artist, title):\n return self.URL_PATTERN % (self._encode(artist.title()),\n self._encode(title.title()))\n\n def fetch_url(self, url):\n \"\"\"Retrieve the content at a given URL, or return None if the source\n is unreachable.\n \"\"\"\n try:\n # Disable the InsecureRequestWarning that comes from using\n # `verify=false`.\n # https://github.com/kennethreitz/requests/issues/2214\n # We're not overly worried about the NSA MITMing our lyrics scraper\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n r = requests.get(url, verify=False, headers={\n 'User-Agent': USER_AGENT,\n })\n except requests.RequestException as exc:\n self._log.debug(u'lyrics request failed: {0}', exc)\n return\n if r.status_code == requests.codes.ok:\n return r.text\n else:\n self._log.debug(u'failed to fetch: {0} ({1})', url, r.status_code)\n\n def fetch(self, artist, title):\n raise NotImplementedError()\n\n\nclass MusiXmatch(Backend):\n REPLACEMENTS = {\n r'\\s+': '-',\n '<': 'Less_Than',\n '>': 'Greater_Than',\n '#': 'Number_',\n r'[\\[\\{]': '(',\n r'[\\]\\}]': ')',\n }\n\n URL_PATTERN = 'https://www.musixmatch.com/lyrics/%s/%s'\n\n @classmethod\n def _encode(cls, s):\n for old, new in cls.REPLACEMENTS.items():\n s = re.sub(old, new, s)\n\n return super(MusiXmatch, cls)._encode(s)\n\n def fetch(self, artist, title):\n url = self.build_url(artist, title)\n\n html = self.fetch_url(url)\n if not html:\n return\n if \"We detected that your IP is blocked\" in html:\n self._log.warning(u'we are blocked at MusixMatch: url %s failed'\n % url)\n return\n html_parts = html.split('<p class=\"mxm-lyrics__content')\n # Sometimes lyrics come in 2 or more parts\n lyrics_parts = []\n for html_part in html_parts:\n lyrics_parts.append(extract_text_between(html_part, '>', '</p>'))\n lyrics = '\\n'.join(lyrics_parts)\n lyrics = lyrics.strip(',\"').replace('\\\\n', '\\n')\n # another odd case: sometimes only that string remains, for\n # missing songs. this seems to happen after being blocked\n # above, when filling in the CAPTCHA.\n if \"Instant lyrics for all your music.\" in lyrics:\n return\n # sometimes there are non-existent lyrics with some content\n if 'Lyrics | Musixmatch' in lyrics:\n return\n return lyrics\n\n\nclass Genius(Backend):\n \"\"\"Fetch lyrics from Genius via genius-api.\n\n Simply adapted from\n bigishdata.com/2016/09/27/getting-song-lyrics-from-geniuss-api-scraping/\n \"\"\"\n\n base_url = \"https://api.genius.com\"\n\n def __init__(self, config, log):\n super(Genius, self).__init__(config, log)\n self.api_key = config['genius_api_key'].as_str()\n self.headers = {\n 'Authorization': \"Bearer %s\" % self.api_key,\n 'User-Agent': USER_AGENT,\n }\n\n def fetch(self, artist, title):\n \"\"\"Fetch lyrics from genius.com\n\n Because genius doesn't allow accesssing lyrics via the api,\n we first query the api for a url matching our artist & title,\n then attempt to scrape that url for the lyrics.\n \"\"\"\n json = self._search(artist, title)\n if not json:\n self._log.debug(u'Genius API request returned invalid JSON')\n return None\n\n # find a matching artist in the json\n for hit in json[\"response\"][\"hits\"]:\n hit_artist = hit[\"result\"][\"primary_artist\"][\"name\"]\n\n if slug(hit_artist) == slug(artist):\n return self._scrape_lyrics_from_html(\n self.fetch_url(hit[\"result\"][\"url\"]))\n\n self._log.debug(u'Genius failed to find a matching artist for \\'{0}\\'',\n artist)\n\n def _search(self, artist, title):\n \"\"\"Searches the genius api for a given artist and title\n\n https://docs.genius.com/#search-h2\n\n :returns: json response\n \"\"\"\n search_url = self.base_url + \"/search\"\n data = {'q': title + \" \" + artist.lower()}\n try:\n response = requests.get(\n search_url, data=data, headers=self.headers)\n except requests.RequestException as exc:\n self._log.debug(u'Genius API request failed: {0}', exc)\n return None\n\n try:\n return response.json()\n except ValueError:\n return None\n\n def _scrape_lyrics_from_html(self, html):\n \"\"\"Scrape lyrics from a given genius.com html\"\"\"\n\n html = BeautifulSoup(html, \"html.parser\")\n\n # Remove script tags that they put in the middle of the lyrics.\n [h.extract() for h in html('script')]\n\n # Most of the time, the page contains a div with class=\"lyrics\" where\n # all of the lyrics can be found already correctly formatted\n # Sometimes, though, it packages the lyrics into separate divs, most\n # likely for easier ad placement\n lyrics_div = html.find(\"div\", class_=\"lyrics\")\n if not lyrics_div:\n self._log.debug(u'Received unusual song page html')\n verse_div = html.find(\"div\",\n class_=re.compile(\"Lyrics__Container\"))\n if not verse_div:\n if html.find(\"div\",\n class_=re.compile(\"LyricsPlaceholder__Message\"),\n string=\"This song is an instrumental\"):\n self._log.debug('Detected instrumental')\n return \"[Instrumental]\"\n else:\n self._log.debug(\"Couldn't scrape page using known layouts\")\n return None\n\n lyrics_div = verse_div.parent\n for br in lyrics_div.find_all(\"br\"):\n br.replace_with(\"\\n\")\n ads = lyrics_div.find_all(\"div\",\n class_=re.compile(\"InreadAd__Container\"))\n for ad in ads:\n ad.replace_with(\"\\n\")\n\n return lyrics_div.get_text()\n\n\nclass Tekstowo(Backend):\n # Fetch lyrics from Tekstowo.pl.\n\n BASE_URL = 'http://www.tekstowo.pl'\n URL_PATTERN = BASE_URL + '/wyszukaj.html?search-title=%s&search-artist=%s'\n\n def fetch(self, artist, title):\n url = self.build_url(title, artist)\n search_results = self.fetch_url(url)\n song_page_url = self.parse_search_results(search_results)\n\n if not song_page_url:\n return None\n\n song_page_html = self.fetch_url(song_page_url)\n return self.extract_lyrics(song_page_html)\n\n def parse_search_results(self, html):\n if not HAS_BEAUTIFUL_SOUP:\n return None\n\n html = _scrape_strip_cruft(html)\n html = _scrape_merge_paragraphs(html)\n\n try:\n html = BeautifulSoup(html, \"html.parser\")\n except HTMLParseError:\n return None\n\n song_rows = html.find(\"div\", class_=\"content\"). \\\n find(\"div\", class_=\"card\"). \\\n find_all(\"div\", class_=\"box-przeboje\")\n\n if not song_rows:\n return None\n\n song_row = song_rows[0]\n\n if not song_row:\n return None\n\n href = song_row.find('a').get('href')\n return self.BASE_URL + href\n\n def extract_lyrics(self, html):\n html = _scrape_strip_cruft(html)\n html = _scrape_merge_paragraphs(html)\n\n try:\n html = BeautifulSoup(html, \"html.parser\")\n except HTMLParseError:\n return None\n\n return html.find(\"div\", class_=\"song-text\").get_text()\n\n\ndef remove_credits(text):\n \"\"\"Remove first/last line of text if it contains the word 'lyrics'\n eg 'Lyrics by songsdatabase.com'\n \"\"\"\n textlines = text.split('\\n')\n credits = None\n for i in (0, -1):\n if textlines and 'lyrics' in textlines[i].lower():\n credits = textlines.pop(i)\n if credits:\n text = '\\n'.join(textlines)\n return text\n\n\ndef _scrape_strip_cruft(html, plain_text_out=False):\n \"\"\"Clean up HTML\n \"\"\"\n html = unescape(html)\n\n html = html.replace('\\r', '\\n') # Normalize EOL.\n html = re.sub(r' +', ' ', html) # Whitespaces collapse.\n html = BREAK_RE.sub('\\n', html) # <br> eats up surrounding '\\n'.\n html = re.sub(r'(?s)<(script).*?</\\1>', '', html) # Strip script tags.\n html = re.sub(u'\\u2005', \" \", html) # replace unicode with regular space\n\n if plain_text_out: # Strip remaining HTML tags\n html = COMMENT_RE.sub('', html)\n html = TAG_RE.sub('', html)\n\n html = '\\n'.join([x.strip() for x in html.strip().split('\\n')])\n html = re.sub(r'\\n{3,}', r'\\n\\n', html)\n return html\n\n\ndef _scrape_merge_paragraphs(html):\n html = re.sub(r'</p>\\s*<p(\\s*[^>]*)>', '\\n', html)\n return re.sub(r'<div .*>\\s*</div>', '\\n', html)\n\n\ndef scrape_lyrics_from_html(html):\n \"\"\"Scrape lyrics from a URL. If no lyrics can be found, return None\n instead.\n \"\"\"\n if not HAS_BEAUTIFUL_SOUP:\n return None\n\n if not html:\n return None\n\n def is_text_notcode(text):\n length = len(text)\n return (length > 20 and\n text.count(' ') > length / 25 and\n (text.find('{') == -1 or text.find(';') == -1))\n html = _scrape_strip_cruft(html)\n html = _scrape_merge_paragraphs(html)\n\n # extract all long text blocks that are not code\n try:\n soup = BeautifulSoup(html, \"html.parser\",\n parse_only=SoupStrainer(text=is_text_notcode))\n except HTMLParseError:\n return None\n\n # Get the longest text element (if any).\n strings = sorted(soup.stripped_strings, key=len, reverse=True)\n if strings:\n return strings[0]\n else:\n return None\n\n\nclass Google(Backend):\n \"\"\"Fetch lyrics from Google search results.\"\"\"\n\n def __init__(self, config, log):\n super(Google, self).__init__(config, log)\n self.api_key = config['google_API_key'].as_str()\n self.engine_id = config['google_engine_ID'].as_str()\n\n def is_lyrics(self, text, artist=None):\n \"\"\"Determine whether the text seems to be valid lyrics.\n \"\"\"\n if not text:\n return False\n bad_triggers_occ = []\n nb_lines = text.count('\\n')\n if nb_lines <= 1:\n self._log.debug(u\"Ignoring too short lyrics '{0}'\", text)\n return False\n elif nb_lines < 5:\n bad_triggers_occ.append('too_short')\n else:\n # Lyrics look legit, remove credits to avoid being penalized\n # further down\n text = remove_credits(text)\n\n bad_triggers = ['lyrics', 'copyright', 'property', 'links']\n if artist:\n bad_triggers += [artist]\n\n for item in bad_triggers:\n bad_triggers_occ += [item] * len(re.findall(r'\\W%s\\W' % item,\n text, re.I))\n\n if bad_triggers_occ:\n self._log.debug(u'Bad triggers detected: {0}', bad_triggers_occ)\n return len(bad_triggers_occ) < 2\n\n def slugify(self, text):\n \"\"\"Normalize a string and remove non-alphanumeric characters.\n \"\"\"\n text = re.sub(r\"[-'_\\s]\", '_', text)\n text = re.sub(r\"_+\", '_', text).strip('_')\n pat = r\"([^,\\(]*)\\((.*?)\\)\" # Remove content within parentheses\n text = re.sub(pat, r'\\g<1>', text).strip()\n try:\n text = unicodedata.normalize('NFKD', text).encode('ascii',\n 'ignore')\n text = six.text_type(re.sub(r'[-\\s]+', ' ', text.decode('utf-8')))\n except UnicodeDecodeError:\n self._log.exception(u\"Failing to normalize '{0}'\", text)\n return text\n\n BY_TRANS = ['by', 'par', 'de', 'von']\n LYRICS_TRANS = ['lyrics', 'paroles', 'letras', 'liedtexte']\n\n def is_page_candidate(self, url_link, url_title, title, artist):\n \"\"\"Return True if the URL title makes it a good candidate to be a\n page that contains lyrics of title by artist.\n \"\"\"\n title = self.slugify(title.lower())\n artist = self.slugify(artist.lower())\n sitename = re.search(u\"//([^/]+)/.*\",\n self.slugify(url_link.lower())).group(1)\n url_title = self.slugify(url_title.lower())\n\n # Check if URL title contains song title (exact match)\n if url_title.find(title) != -1:\n return True\n\n # or try extracting song title from URL title and check if\n # they are close enough\n tokens = [by + '_' + artist for by in self.BY_TRANS] + \\\n [artist, sitename, sitename.replace('www.', '')] + \\\n self.LYRICS_TRANS\n tokens = [re.escape(t) for t in tokens]\n song_title = re.sub(u'(%s)' % u'|'.join(tokens), u'', url_title)\n\n song_title = song_title.strip('_|')\n typo_ratio = .9\n ratio = difflib.SequenceMatcher(None, song_title, title).ratio()\n return ratio >= typo_ratio\n\n def fetch(self, artist, title):\n query = u\"%s %s\" % (artist, title)\n url = u'https://www.googleapis.com/customsearch/v1?key=%s&cx=%s&q=%s' \\\n % (self.api_key, self.engine_id,\n urllib.parse.quote(query.encode('utf-8')))\n\n data = self.fetch_url(url)\n if not data:\n self._log.debug(u'google backend returned no data')\n return None\n try:\n data = json.loads(data)\n except ValueError as exc:\n self._log.debug(u'google backend returned malformed JSON: {}', exc)\n if 'error' in data:\n reason = data['error']['errors'][0]['reason']\n self._log.debug(u'google backend error: {0}', reason)\n return None\n\n if 'items' in data.keys():\n for item in data['items']:\n url_link = item['link']\n url_title = item.get('title', u'')\n if not self.is_page_candidate(url_link, url_title,\n title, artist):\n continue\n html = self.fetch_url(url_link)\n lyrics = scrape_lyrics_from_html(html)\n if not lyrics:\n continue\n\n if self.is_lyrics(lyrics, artist):\n self._log.debug(u'got lyrics from {0}',\n item['displayLink'])\n return lyrics\n\n\nclass LyricsPlugin(plugins.BeetsPlugin):\n SOURCES = ['google', 'musixmatch', 'genius', 'tekstowo']\n BS_SOURCES = ['google', 'genius', 'tekstowo']\n SOURCE_BACKENDS = {\n 'google': Google,\n 'musixmatch': MusiXmatch,\n 'genius': Genius,\n 'tekstowo': Tekstowo,\n }\n\n def __init__(self):\n super(LyricsPlugin, self).__init__()\n self.import_stages = [self.imported]\n self.config.add({\n 'auto': True,\n 'bing_client_secret': None,\n 'bing_lang_from': [],\n 'bing_lang_to': None,\n 'google_API_key': None,\n 'google_engine_ID': u'009217259823014548361:lndtuqkycfu',\n 'genius_api_key':\n \"Ryq93pUGm8bM6eUWwD_M3NOFFDAtp2yEE7W\"\n \"76V-uFL5jks5dNvcGCdarqFjDhP9c\",\n 'fallback': None,\n 'force': False,\n 'local': False,\n 'sources': self.SOURCES,\n })\n self.config['bing_client_secret'].redact = True\n self.config['google_API_key'].redact = True\n self.config['google_engine_ID'].redact = True\n self.config['genius_api_key'].redact = True\n\n # State information for the ReST writer.\n # First, the current artist we're writing.\n self.artist = u'Unknown artist'\n # The current album: False means no album yet.\n self.album = False\n # The current rest file content. None means the file is not\n # open yet.\n self.rest = None\n\n available_sources = list(self.SOURCES)\n sources = plugins.sanitize_choices(\n self.config['sources'].as_str_seq(), available_sources)\n\n if not HAS_BEAUTIFUL_SOUP:\n sources = self.sanitize_bs_sources(sources)\n\n if 'google' in sources:\n if not self.config['google_API_key'].get():\n # We log a *debug* message here because the default\n # configuration includes `google`. This way, the source\n # is silent by default but can be enabled just by\n # setting an API key.\n self._log.debug(u'Disabling google source: '\n u'no API key configured.')\n sources.remove('google')\n\n self.config['bing_lang_from'] = [\n x.lower() for x in self.config['bing_lang_from'].as_str_seq()]\n self.bing_auth_token = None\n\n if not HAS_LANGDETECT and self.config['bing_client_secret'].get():\n self._log.warning(u'To use bing translations, you need to '\n u'install the langdetect module. See the '\n u'documentation for further details.')\n\n self.backends = [self.SOURCE_BACKENDS[source](self.config, self._log)\n for source in sources]\n\n def sanitize_bs_sources(self, sources):\n for source in self.BS_SOURCES:\n if source in sources:\n self._log.debug(u'To use the %s lyrics source, you must '\n u'install the beautifulsoup4 module. See '\n u'the documentation for further details.'\n % source)\n sources.remove(source)\n\n return sources\n\n def get_bing_access_token(self):\n params = {\n 'client_id': 'beets',\n 'client_secret': self.config['bing_client_secret'],\n 'scope': \"https://api.microsofttranslator.com\",\n 'grant_type': 'client_credentials',\n }\n\n oauth_url = 'https://datamarket.accesscontrol.windows.net/v2/OAuth2-13'\n oauth_token = json.loads(requests.post(\n oauth_url,\n data=urllib.parse.urlencode(params)).content)\n if 'access_token' in oauth_token:\n return \"Bearer \" + oauth_token['access_token']\n else:\n self._log.warning(u'Could not get Bing Translate API access token.'\n u' Check your \"bing_client_secret\" password')\n\n def commands(self):\n cmd = ui.Subcommand('lyrics', help='fetch song lyrics')\n cmd.parser.add_option(\n u'-p', u'--print', dest='printlyr',\n action='store_true', default=False,\n help=u'print lyrics to console',\n )\n cmd.parser.add_option(\n u'-r', u'--write-rest', dest='writerest',\n action='store', default=None, metavar='dir',\n help=u'write lyrics to given directory as ReST files',\n )\n cmd.parser.add_option(\n u'-f', u'--force', dest='force_refetch',\n action='store_true', default=False,\n help=u'always re-download lyrics',\n )\n cmd.parser.add_option(\n u'-l', u'--local', dest='local_only',\n action='store_true', default=False,\n help=u'do not fetch missing lyrics',\n )\n\n def func(lib, opts, args):\n # The \"write to files\" option corresponds to the\n # import_write config value.\n write = ui.should_write()\n if opts.writerest:\n self.writerest_indexes(opts.writerest)\n items = lib.items(ui.decargs(args))\n for item in items:\n if not opts.local_only and not self.config['local']:\n self.fetch_item_lyrics(\n lib, item, write,\n opts.force_refetch or self.config['force'],\n )\n if item.lyrics:\n if opts.printlyr:\n ui.print_(item.lyrics)\n if opts.writerest:\n self.appendrest(opts.writerest, item)\n if opts.writerest and items:\n # flush last artist & write to ReST\n self.writerest(opts.writerest)\n ui.print_(u'ReST files generated. to build, use one of:')\n ui.print_(u' sphinx-build -b html %s _build/html'\n % opts.writerest)\n ui.print_(u' sphinx-build -b epub %s _build/epub'\n % opts.writerest)\n ui.print_((u' sphinx-build -b latex %s _build/latex '\n u'&& make -C _build/latex all-pdf')\n % opts.writerest)\n cmd.func = func\n return [cmd]\n\n def appendrest(self, directory, item):\n \"\"\"Append the item to an ReST file\n\n This will keep state (in the `rest` variable) in order to avoid\n writing continuously to the same files.\n \"\"\"\n\n if slug(self.artist) != slug(item.albumartist):\n # Write current file and start a new one ~ item.albumartist\n self.writerest(directory)\n self.artist = item.albumartist.strip()\n self.rest = u\"%s\\n%s\\n\\n.. contents::\\n :local:\\n\\n\" \\\n % (self.artist,\n u'=' * len(self.artist))\n\n if self.album != item.album:\n tmpalbum = self.album = item.album.strip()\n if self.album == '':\n tmpalbum = u'Unknown album'\n self.rest += u\"%s\\n%s\\n\\n\" % (tmpalbum, u'-' * len(tmpalbum))\n title_str = u\":index:`%s`\" % item.title.strip()\n block = u'| ' + item.lyrics.replace(u'\\n', u'\\n| ')\n self.rest += u\"%s\\n%s\\n\\n%s\\n\\n\" % (title_str,\n u'~' * len(title_str),\n block)\n\n def writerest(self, directory):\n \"\"\"Write self.rest to a ReST file\n \"\"\"\n if self.rest is not None and self.artist is not None:\n path = os.path.join(directory, 'artists',\n slug(self.artist) + u'.rst')\n with open(path, 'wb') as output:\n output.write(self.rest.encode('utf-8'))\n\n def writerest_indexes(self, directory):\n \"\"\"Write conf.py and index.rst files necessary for Sphinx\n\n We write minimal configurations that are necessary for Sphinx\n to operate. We do not overwrite existing files so that\n customizations are respected.\"\"\"\n try:\n os.makedirs(os.path.join(directory, 'artists'))\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n indexfile = os.path.join(directory, 'index.rst')\n if not os.path.exists(indexfile):\n with open(indexfile, 'w') as output:\n output.write(REST_INDEX_TEMPLATE)\n conffile = os.path.join(directory, 'conf.py')\n if not os.path.exists(conffile):\n with open(conffile, 'w') as output:\n output.write(REST_CONF_TEMPLATE)\n\n def imported(self, session, task):\n \"\"\"Import hook for fetching lyrics automatically.\n \"\"\"\n if self.config['auto']:\n for item in task.imported_items():\n self.fetch_item_lyrics(session.lib, item,\n False, self.config['force'])\n\n def fetch_item_lyrics(self, lib, item, write, force):\n \"\"\"Fetch and store lyrics for a single item. If ``write``, then the\n lyrics will also be written to the file itself.\n \"\"\"\n # Skip if the item already has lyrics.\n if not force and item.lyrics:\n self._log.info(u'lyrics already present: {0}', item)\n return\n\n lyrics = None\n for artist, titles in search_pairs(item):\n lyrics = [self.get_lyrics(artist, title) for title in titles]\n if any(lyrics):\n break\n\n lyrics = u\"\\n\\n---\\n\\n\".join([l for l in lyrics if l])\n\n if lyrics:\n self._log.info(u'fetched lyrics: {0}', item)\n if HAS_LANGDETECT and self.config['bing_client_secret'].get():\n lang_from = langdetect.detect(lyrics)\n if self.config['bing_lang_to'].get() != lang_from and (\n not self.config['bing_lang_from'] or (\n lang_from in self.config[\n 'bing_lang_from'].as_str_seq())):\n lyrics = self.append_translation(\n lyrics, self.config['bing_lang_to'])\n else:\n self._log.info(u'lyrics not found: {0}', item)\n fallback = self.config['fallback'].get()\n if fallback:\n lyrics = fallback\n else:\n return\n item.lyrics = lyrics\n if write:\n item.try_write()\n item.store()\n\n def get_lyrics(self, artist, title):\n \"\"\"Fetch lyrics, trying each source in turn. Return a string or\n None if no lyrics were found.\n \"\"\"\n for backend in self.backends:\n lyrics = backend.fetch(artist, title)\n if lyrics:\n self._log.debug(u'got lyrics from backend: {0}',\n backend.__class__.__name__)\n return _scrape_strip_cruft(lyrics, True)\n\n def append_translation(self, text, to_lang):\n from xml.etree import ElementTree\n\n if not self.bing_auth_token:\n self.bing_auth_token = self.get_bing_access_token()\n if self.bing_auth_token:\n # Extract unique lines to limit API request size per song\n text_lines = set(text.split('\\n'))\n url = ('https://api.microsofttranslator.com/v2/Http.svc/'\n 'Translate?text=%s&to=%s' % ('|'.join(text_lines), to_lang))\n r = requests.get(url,\n headers={\"Authorization \": self.bing_auth_token})\n if r.status_code != 200:\n self._log.debug('translation API error {}: {}', r.status_code,\n r.text)\n if 'token has expired' in r.text:\n self.bing_auth_token = None\n return self.append_translation(text, to_lang)\n return text\n lines_translated = ElementTree.fromstring(\n r.text.encode('utf-8')).text\n # Use a translation mapping dict to build resulting lyrics\n translations = dict(zip(text_lines, lines_translated.split('|')))\n result = ''\n for line in text.split('\\n'):\n result += '%s / %s\\n' % (line, translations[line])\n return result\n", "path": "beetsplug/lyrics.py"}]} |
gh_patches_debug_1554 | rasdani/github-patches | git_diff | getredash__redash-3323 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Exporting to Excel file fails when one of the columns is a dictionary
<!--
We use GitHub only for bug reports 🐛
Anything else should be posted to https://discuss.redash.io 👫
🚨For support, help & questions use https://discuss.redash.io/c/support
💡For feature requests & ideas use https://discuss.redash.io/c/feature-requests
**Found a security vulnerability?** Please email [email protected] to report any security vulnerabilities. We will acknowledge receipt of your vulnerability and strive to send you regular updates about our progress. If you're curious about the status of your disclosure please feel free to email us again. If you want to encrypt your disclosure email, you can use this PGP key.
-->
### Issue Summary
get error when exporting query results to excel file:

environment:ec2 on ecs
### Steps to Reproduce
1. create a new query
2. excute the query,save it ,then download it as excel file
Any other info e.g. Why do you consider this to be a bug? What did you expect to happen instead?
### Technical details:
* Redash Version:6.0.0+b8537
* Browser/OS:chrome
* How did you install Redash:run redash by ecs on aws
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/models/__init__.py`
Content:
```
1 import cStringIO
2 import csv
3 import datetime
4 import calendar
5 import functools
6 import hashlib
7 import itertools
8 import logging
9 import time
10 import pytz
11 from functools import reduce
12
13 import xlsxwriter
14 from six import python_2_unicode_compatible, text_type
15 from sqlalchemy import distinct, or_, and_, UniqueConstraint
16 from sqlalchemy.dialects import postgresql
17 from sqlalchemy.event import listens_for
18 from sqlalchemy.ext.hybrid import hybrid_property
19 from sqlalchemy.orm import backref, contains_eager, joinedload, subqueryload, load_only
20 from sqlalchemy.orm.exc import NoResultFound # noqa: F401
21 from sqlalchemy import func
22 from sqlalchemy_utils import generic_relationship
23 from sqlalchemy_utils.types import TSVectorType
24 from sqlalchemy_utils.models import generic_repr
25
26 from redash import redis_connection, utils
27 from redash.destinations import (get_configuration_schema_for_destination_type,
28 get_destination)
29 from redash.metrics import database # noqa: F401
30 from redash.query_runner import (get_configuration_schema_for_query_runner_type,
31 get_query_runner)
32 from redash.utils import generate_token, json_dumps, json_loads
33 from redash.utils.configuration import ConfigurationContainer
34
35 from .base import db, gfk_type, Column, GFKBase, SearchBaseQuery
36 from .changes import ChangeTrackingMixin, Change # noqa
37 from .mixins import BelongsToOrgMixin, TimestampMixin
38 from .organizations import Organization
39 from .types import Configuration, MutableDict, MutableList, PseudoJSON
40 from .users import (AccessPermission, AnonymousUser, ApiUser, Group, User) # noqa
41
42 logger = logging.getLogger(__name__)
43
44
45 class ScheduledQueriesExecutions(object):
46 KEY_NAME = 'sq:executed_at'
47
48 def __init__(self):
49 self.executions = {}
50
51 def refresh(self):
52 self.executions = redis_connection.hgetall(self.KEY_NAME)
53
54 def update(self, query_id):
55 redis_connection.hmset(self.KEY_NAME, {
56 query_id: time.time()
57 })
58
59 def get(self, query_id):
60 timestamp = self.executions.get(str(query_id))
61 if timestamp:
62 timestamp = utils.dt_from_timestamp(timestamp)
63
64 return timestamp
65
66
67 scheduled_queries_executions = ScheduledQueriesExecutions()
68
69
70 @python_2_unicode_compatible
71 @generic_repr('id', 'name', 'type', 'org_id', 'created_at')
72 class DataSource(BelongsToOrgMixin, db.Model):
73 id = Column(db.Integer, primary_key=True)
74 org_id = Column(db.Integer, db.ForeignKey('organizations.id'))
75 org = db.relationship(Organization, backref="data_sources")
76
77 name = Column(db.String(255))
78 type = Column(db.String(255))
79 options = Column(ConfigurationContainer.as_mutable(Configuration))
80 queue_name = Column(db.String(255), default="queries")
81 scheduled_queue_name = Column(db.String(255), default="scheduled_queries")
82 created_at = Column(db.DateTime(True), default=db.func.now())
83
84 data_source_groups = db.relationship("DataSourceGroup", back_populates="data_source",
85 cascade="all")
86 __tablename__ = 'data_sources'
87 __table_args__ = (db.Index('data_sources_org_id_name', 'org_id', 'name'),)
88
89 def __eq__(self, other):
90 return self.id == other.id
91
92 def to_dict(self, all=False, with_permissions_for=None):
93 d = {
94 'id': self.id,
95 'name': self.name,
96 'type': self.type,
97 'syntax': self.query_runner.syntax,
98 'paused': self.paused,
99 'pause_reason': self.pause_reason
100 }
101
102 if all:
103 schema = get_configuration_schema_for_query_runner_type(self.type)
104 self.options.set_schema(schema)
105 d['options'] = self.options.to_dict(mask_secrets=True)
106 d['queue_name'] = self.queue_name
107 d['scheduled_queue_name'] = self.scheduled_queue_name
108 d['groups'] = self.groups
109
110 if with_permissions_for is not None:
111 d['view_only'] = db.session.query(DataSourceGroup.view_only).filter(
112 DataSourceGroup.group == with_permissions_for,
113 DataSourceGroup.data_source == self).one()[0]
114
115 return d
116
117 def __str__(self):
118 return text_type(self.name)
119
120 @classmethod
121 def create_with_group(cls, *args, **kwargs):
122 data_source = cls(*args, **kwargs)
123 data_source_group = DataSourceGroup(
124 data_source=data_source,
125 group=data_source.org.default_group)
126 db.session.add_all([data_source, data_source_group])
127 return data_source
128
129 @classmethod
130 def all(cls, org, group_ids=None):
131 data_sources = cls.query.filter(cls.org == org).order_by(cls.id.asc())
132
133 if group_ids:
134 data_sources = data_sources.join(DataSourceGroup).filter(
135 DataSourceGroup.group_id.in_(group_ids))
136
137 return data_sources.distinct()
138
139 @classmethod
140 def get_by_id(cls, _id):
141 return cls.query.filter(cls.id == _id).one()
142
143 def delete(self):
144 Query.query.filter(Query.data_source == self).update(dict(data_source_id=None, latest_query_data_id=None))
145 QueryResult.query.filter(QueryResult.data_source == self).delete()
146 res = db.session.delete(self)
147 db.session.commit()
148 return res
149
150 def get_schema(self, refresh=False):
151 key = "data_source:schema:{}".format(self.id)
152
153 cache = None
154 if not refresh:
155 cache = redis_connection.get(key)
156
157 if cache is None:
158 query_runner = self.query_runner
159 schema = sorted(query_runner.get_schema(get_stats=refresh), key=lambda t: t['name'])
160
161 redis_connection.set(key, json_dumps(schema))
162 else:
163 schema = json_loads(cache)
164
165 return schema
166
167 def _pause_key(self):
168 return 'ds:{}:pause'.format(self.id)
169
170 @property
171 def paused(self):
172 return redis_connection.exists(self._pause_key())
173
174 @property
175 def pause_reason(self):
176 return redis_connection.get(self._pause_key())
177
178 def pause(self, reason=None):
179 redis_connection.set(self._pause_key(), reason or '')
180
181 def resume(self):
182 redis_connection.delete(self._pause_key())
183
184 def add_group(self, group, view_only=False):
185 dsg = DataSourceGroup(group=group, data_source=self, view_only=view_only)
186 db.session.add(dsg)
187 return dsg
188
189 def remove_group(self, group):
190 DataSourceGroup.query.filter(
191 DataSourceGroup.group == group,
192 DataSourceGroup.data_source == self
193 ).delete()
194 db.session.commit()
195
196 def update_group_permission(self, group, view_only):
197 dsg = DataSourceGroup.query.filter(
198 DataSourceGroup.group == group,
199 DataSourceGroup.data_source == self).one()
200 dsg.view_only = view_only
201 db.session.add(dsg)
202 return dsg
203
204 @property
205 def query_runner(self):
206 return get_query_runner(self.type, self.options)
207
208 @classmethod
209 def get_by_name(cls, name):
210 return cls.query.filter(cls.name == name).one()
211
212 # XXX examine call sites to see if a regular SQLA collection would work better
213 @property
214 def groups(self):
215 groups = DataSourceGroup.query.filter(
216 DataSourceGroup.data_source == self
217 )
218 return dict(map(lambda g: (g.group_id, g.view_only), groups))
219
220
221 @generic_repr('id', 'data_source_id', 'group_id', 'view_only')
222 class DataSourceGroup(db.Model):
223 # XXX drop id, use datasource/group as PK
224 id = Column(db.Integer, primary_key=True)
225 data_source_id = Column(db.Integer, db.ForeignKey("data_sources.id"))
226 data_source = db.relationship(DataSource, back_populates="data_source_groups")
227 group_id = Column(db.Integer, db.ForeignKey("groups.id"))
228 group = db.relationship(Group, back_populates="data_sources")
229 view_only = Column(db.Boolean, default=False)
230
231 __tablename__ = "data_source_groups"
232
233
234 @python_2_unicode_compatible
235 @generic_repr('id', 'org_id', 'data_source_id', 'query_hash', 'runtime', 'retrieved_at')
236 class QueryResult(db.Model, BelongsToOrgMixin):
237 id = Column(db.Integer, primary_key=True)
238 org_id = Column(db.Integer, db.ForeignKey('organizations.id'))
239 org = db.relationship(Organization)
240 data_source_id = Column(db.Integer, db.ForeignKey("data_sources.id"))
241 data_source = db.relationship(DataSource, backref=backref('query_results'))
242 query_hash = Column(db.String(32), index=True)
243 query_text = Column('query', db.Text)
244 data = Column(db.Text)
245 runtime = Column(postgresql.DOUBLE_PRECISION)
246 retrieved_at = Column(db.DateTime(True))
247
248 __tablename__ = 'query_results'
249
250 def __str__(self):
251 return u"%d | %s | %s" % (self.id, self.query_hash, self.retrieved_at)
252
253 def to_dict(self):
254 return {
255 'id': self.id,
256 'query_hash': self.query_hash,
257 'query': self.query_text,
258 'data': json_loads(self.data),
259 'data_source_id': self.data_source_id,
260 'runtime': self.runtime,
261 'retrieved_at': self.retrieved_at
262 }
263
264 @classmethod
265 def unused(cls, days=7):
266 age_threshold = datetime.datetime.now() - datetime.timedelta(days=days)
267 return (
268 cls.query.filter(
269 Query.id.is_(None),
270 cls.retrieved_at < age_threshold
271 )
272 .outerjoin(Query)
273 ).options(load_only('id'))
274
275 @classmethod
276 def get_latest(cls, data_source, query, max_age=0):
277 query_hash = utils.gen_query_hash(query)
278
279 if max_age == -1:
280 query = cls.query.filter(
281 cls.query_hash == query_hash,
282 cls.data_source == data_source
283 )
284 else:
285 query = cls.query.filter(
286 cls.query_hash == query_hash,
287 cls.data_source == data_source,
288 (
289 db.func.timezone('utc', cls.retrieved_at) +
290 datetime.timedelta(seconds=max_age) >=
291 db.func.timezone('utc', db.func.now())
292 )
293 )
294
295 return query.order_by(cls.retrieved_at.desc()).first()
296
297 @classmethod
298 def store_result(cls, org, data_source, query_hash, query, data, run_time, retrieved_at):
299 query_result = cls(org_id=org,
300 query_hash=query_hash,
301 query_text=query,
302 runtime=run_time,
303 data_source=data_source,
304 retrieved_at=retrieved_at,
305 data=data)
306 db.session.add(query_result)
307 logging.info("Inserted query (%s) data; id=%s", query_hash, query_result.id)
308 # TODO: Investigate how big an impact this select-before-update makes.
309 queries = Query.query.filter(
310 Query.query_hash == query_hash,
311 Query.data_source == data_source
312 )
313 for q in queries:
314 q.latest_query_data = query_result
315 # don't auto-update the updated_at timestamp
316 q.skip_updated_at = True
317 db.session.add(q)
318 query_ids = [q.id for q in queries]
319 logging.info("Updated %s queries with result (%s).", len(query_ids), query_hash)
320
321 return query_result, query_ids
322
323 @property
324 def groups(self):
325 return self.data_source.groups
326
327 def make_csv_content(self):
328 s = cStringIO.StringIO()
329
330 query_data = json_loads(self.data)
331 writer = csv.DictWriter(s, extrasaction="ignore", fieldnames=[col['name'] for col in query_data['columns']])
332 writer.writer = utils.UnicodeWriter(s)
333 writer.writeheader()
334 for row in query_data['rows']:
335 writer.writerow(row)
336
337 return s.getvalue()
338
339 def make_excel_content(self):
340 s = cStringIO.StringIO()
341
342 query_data = json_loads(self.data)
343 book = xlsxwriter.Workbook(s, {'constant_memory': True})
344 sheet = book.add_worksheet("result")
345
346 column_names = []
347 for (c, col) in enumerate(query_data['columns']):
348 sheet.write(0, c, col['name'])
349 column_names.append(col['name'])
350
351 for (r, row) in enumerate(query_data['rows']):
352 for (c, name) in enumerate(column_names):
353 v = row.get(name)
354 if isinstance(v, list):
355 v = str(v).encode('utf-8')
356 sheet.write(r + 1, c, v)
357
358 book.close()
359
360 return s.getvalue()
361
362
363 def should_schedule_next(previous_iteration, now, interval, time=None, day_of_week=None, failures=0):
364 # if time exists then interval > 23 hours (82800s)
365 # if day_of_week exists then interval > 6 days (518400s)
366 if (time is None):
367 ttl = int(interval)
368 next_iteration = previous_iteration + datetime.timedelta(seconds=ttl)
369 else:
370 hour, minute = time.split(':')
371 hour, minute = int(hour), int(minute)
372
373 # The following logic is needed for cases like the following:
374 # - The query scheduled to run at 23:59.
375 # - The scheduler wakes up at 00:01.
376 # - Using naive implementation of comparing timestamps, it will skip the execution.
377 normalized_previous_iteration = previous_iteration.replace(hour=hour, minute=minute)
378
379 if normalized_previous_iteration > previous_iteration:
380 previous_iteration = normalized_previous_iteration - datetime.timedelta(days=1)
381
382 days_delay = int(interval) / 60 / 60 / 24
383
384 days_to_add = 0
385 if (day_of_week is not None):
386 days_to_add = list(calendar.day_name).index(day_of_week) - normalized_previous_iteration.weekday()
387
388 next_iteration = (previous_iteration + datetime.timedelta(days=days_delay) +
389 datetime.timedelta(days=days_to_add)).replace(hour=hour, minute=minute)
390 if failures:
391 next_iteration += datetime.timedelta(minutes=2**failures)
392 return now > next_iteration
393
394
395 @python_2_unicode_compatible
396 @gfk_type
397 @generic_repr('id', 'name', 'query_hash', 'version', 'user_id', 'org_id',
398 'data_source_id', 'query_hash', 'last_modified_by_id',
399 'is_archived', 'is_draft', 'schedule', 'schedule_failures')
400 class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
401 id = Column(db.Integer, primary_key=True)
402 version = Column(db.Integer, default=1)
403 org_id = Column(db.Integer, db.ForeignKey('organizations.id'))
404 org = db.relationship(Organization, backref="queries")
405 data_source_id = Column(db.Integer, db.ForeignKey("data_sources.id"), nullable=True)
406 data_source = db.relationship(DataSource, backref='queries')
407 latest_query_data_id = Column(db.Integer, db.ForeignKey("query_results.id"), nullable=True)
408 latest_query_data = db.relationship(QueryResult)
409 name = Column(db.String(255))
410 description = Column(db.String(4096), nullable=True)
411 query_text = Column("query", db.Text)
412 query_hash = Column(db.String(32))
413 api_key = Column(db.String(40), default=lambda: generate_token(40))
414 user_id = Column(db.Integer, db.ForeignKey("users.id"))
415 user = db.relationship(User, foreign_keys=[user_id])
416 last_modified_by_id = Column(db.Integer, db.ForeignKey('users.id'), nullable=True)
417 last_modified_by = db.relationship(User, backref="modified_queries",
418 foreign_keys=[last_modified_by_id])
419 is_archived = Column(db.Boolean, default=False, index=True)
420 is_draft = Column(db.Boolean, default=True, index=True)
421 schedule = Column(MutableDict.as_mutable(PseudoJSON), nullable=True)
422 schedule_failures = Column(db.Integer, default=0)
423 visualizations = db.relationship("Visualization", cascade="all, delete-orphan")
424 options = Column(MutableDict.as_mutable(PseudoJSON), default={})
425 search_vector = Column(TSVectorType('id', 'name', 'description', 'query',
426 weights={'name': 'A',
427 'id': 'B',
428 'description': 'C',
429 'query': 'D'}),
430 nullable=True)
431 tags = Column('tags', MutableList.as_mutable(postgresql.ARRAY(db.Unicode)), nullable=True)
432
433 query_class = SearchBaseQuery
434 __tablename__ = 'queries'
435 __mapper_args__ = {
436 "version_id_col": version,
437 'version_id_generator': False
438 }
439
440 def __str__(self):
441 return text_type(self.id)
442
443 def archive(self, user=None):
444 db.session.add(self)
445 self.is_archived = True
446 self.schedule = None
447
448 for vis in self.visualizations:
449 for w in vis.widgets:
450 db.session.delete(w)
451
452 for a in self.alerts:
453 db.session.delete(a)
454
455 if user:
456 self.record_changes(user)
457
458 @classmethod
459 def create(cls, **kwargs):
460 query = cls(**kwargs)
461 db.session.add(Visualization(query_rel=query,
462 name="Table",
463 description='',
464 type="TABLE",
465 options="{}"))
466 return query
467
468 @classmethod
469 def all_queries(cls, group_ids, user_id=None, drafts=False):
470 query_ids = (
471 db.session
472 .query(distinct(cls.id))
473 .join(
474 DataSourceGroup,
475 Query.data_source_id == DataSourceGroup.data_source_id
476 )
477 .filter(Query.is_archived == False)
478 .filter(DataSourceGroup.group_id.in_(group_ids))
479 )
480 q = (
481 cls
482 .query
483 .options(
484 joinedload(Query.user),
485 joinedload(
486 Query.latest_query_data
487 ).load_only(
488 'runtime',
489 'retrieved_at',
490 )
491 )
492 .filter(cls.id.in_(query_ids))
493 # Adding outer joins to be able to order by relationship
494 .outerjoin(User, User.id == Query.user_id)
495 .outerjoin(
496 QueryResult,
497 QueryResult.id == Query.latest_query_data_id
498 )
499 .options(
500 contains_eager(Query.user),
501 contains_eager(Query.latest_query_data),
502 )
503 .order_by(Query.created_at.desc())
504 )
505
506 if not drafts:
507 q = q.filter(
508 or_(
509 Query.is_draft == False,
510 Query.user_id == user_id
511 )
512 )
513 return q
514
515 @classmethod
516 def favorites(cls, user, base_query=None):
517 if base_query is None:
518 base_query = cls.all_queries(user.group_ids, user.id, drafts=True)
519 return base_query.join((
520 Favorite,
521 and_(
522 Favorite.object_type == u'Query',
523 Favorite.object_id == Query.id
524 )
525 )).filter(Favorite.user_id == user.id)
526
527 @classmethod
528 def all_tags(cls, user, include_drafts=False):
529 queries = cls.all_queries(
530 group_ids=user.group_ids,
531 user_id=user.id,
532 drafts=include_drafts,
533 )
534
535 tag_column = func.unnest(cls.tags).label('tag')
536 usage_count = func.count(1).label('usage_count')
537
538 query = (
539 db.session
540 .query(tag_column, usage_count)
541 .group_by(tag_column)
542 .filter(Query.id.in_(queries.options(load_only('id'))))
543 .order_by(usage_count.desc())
544 )
545 return query
546
547 @classmethod
548 def by_user(cls, user):
549 return cls.all_queries(user.group_ids, user.id).filter(Query.user == user)
550
551 @classmethod
552 def outdated_queries(cls):
553 queries = (Query.query
554 .options(joinedload(Query.latest_query_data).load_only('retrieved_at'))
555 .filter(Query.schedule.isnot(None))
556 .order_by(Query.id))
557
558 now = utils.utcnow()
559 outdated_queries = {}
560 scheduled_queries_executions.refresh()
561
562 for query in queries:
563 schedule_until = pytz.utc.localize(datetime.datetime.strptime(
564 query.schedule['until'], '%Y-%m-%d')) if query.schedule['until'] else None
565 if (query.schedule['interval'] == None or (
566 schedule_until != None and (
567 schedule_until <= now))):
568 continue
569
570 if query.latest_query_data:
571 retrieved_at = query.latest_query_data.retrieved_at
572 else:
573 retrieved_at = now
574
575 retrieved_at = scheduled_queries_executions.get(query.id) or retrieved_at
576
577 if should_schedule_next(retrieved_at, now, query.schedule['interval'], query.schedule['time'],
578 query.schedule['day_of_week'], query.schedule_failures):
579 key = "{}:{}".format(query.query_hash, query.data_source_id)
580 outdated_queries[key] = query
581
582 return outdated_queries.values()
583
584 @classmethod
585 def search(cls, term, group_ids, user_id=None, include_drafts=False, limit=None):
586 all_queries = cls.all_queries(group_ids, user_id=user_id, drafts=include_drafts)
587 # sort the result using the weight as defined in the search vector column
588 return all_queries.search(term, sort=True).limit(limit)
589
590 @classmethod
591 def search_by_user(cls, term, user, limit=None):
592 return cls.by_user(user).search(term, sort=True).limit(limit)
593
594 @classmethod
595 def recent(cls, group_ids, user_id=None, limit=20):
596 query = (cls.query
597 .filter(Event.created_at > (db.func.current_date() - 7))
598 .join(Event, Query.id == Event.object_id.cast(db.Integer))
599 .join(DataSourceGroup, Query.data_source_id == DataSourceGroup.data_source_id)
600 .filter(
601 Event.action.in_(['edit', 'execute', 'edit_name',
602 'edit_description', 'view_source']),
603 Event.object_id != None,
604 Event.object_type == 'query',
605 DataSourceGroup.group_id.in_(group_ids),
606 or_(Query.is_draft == False, Query.user_id == user_id),
607 Query.is_archived == False)
608 .group_by(Event.object_id, Query.id)
609 .order_by(db.desc(db.func.count(0))))
610
611 if user_id:
612 query = query.filter(Event.user_id == user_id)
613
614 query = query.limit(limit)
615
616 return query
617
618 @classmethod
619 def get_by_id(cls, _id):
620 return cls.query.filter(cls.id == _id).one()
621
622 def fork(self, user):
623 forked_list = ['org', 'data_source', 'latest_query_data', 'description',
624 'query_text', 'query_hash', 'options']
625 kwargs = {a: getattr(self, a) for a in forked_list}
626 forked_query = Query.create(name=u'Copy of (#{}) {}'.format(self.id, self.name),
627 user=user, **kwargs)
628
629 for v in self.visualizations:
630 if v.type == 'TABLE':
631 continue
632 forked_v = v.copy()
633 forked_v['query_rel'] = forked_query
634 forked_query.visualizations.append(Visualization(**forked_v))
635 db.session.add(forked_query)
636 return forked_query
637
638 @property
639 def runtime(self):
640 return self.latest_query_data.runtime
641
642 @property
643 def retrieved_at(self):
644 return self.latest_query_data.retrieved_at
645
646 @property
647 def groups(self):
648 if self.data_source is None:
649 return {}
650
651 return self.data_source.groups
652
653 @hybrid_property
654 def lowercase_name(self):
655 "Optional property useful for sorting purposes."
656 return self.name.lower()
657
658 @lowercase_name.expression
659 def lowercase_name(cls):
660 "The SQLAlchemy expression for the property above."
661 return func.lower(cls.name)
662
663
664 @listens_for(Query.query_text, 'set')
665 def gen_query_hash(target, val, oldval, initiator):
666 target.query_hash = utils.gen_query_hash(val)
667 target.schedule_failures = 0
668
669
670 @listens_for(Query.user_id, 'set')
671 def query_last_modified_by(target, val, oldval, initiator):
672 target.last_modified_by_id = val
673
674
675 @generic_repr('id', 'object_type', 'object_id', 'user_id', 'org_id')
676 class Favorite(TimestampMixin, db.Model):
677 id = Column(db.Integer, primary_key=True)
678 org_id = Column(db.Integer, db.ForeignKey("organizations.id"))
679
680 object_type = Column(db.Unicode(255))
681 object_id = Column(db.Integer)
682 object = generic_relationship(object_type, object_id)
683
684 user_id = Column(db.Integer, db.ForeignKey("users.id"))
685 user = db.relationship(User, backref='favorites')
686
687 __tablename__ = "favorites"
688 __table_args__ = (
689 UniqueConstraint("object_type", "object_id", "user_id", name="unique_favorite"),
690 )
691
692 @classmethod
693 def is_favorite(cls, user, object):
694 return cls.query.filter(cls.object == object, cls.user_id == user).count() > 0
695
696 @classmethod
697 def are_favorites(cls, user, objects):
698 objects = list(objects)
699 if not objects:
700 return []
701
702 object_type = text_type(objects[0].__class__.__name__)
703 return map(lambda fav: fav.object_id, cls.query.filter(cls.object_id.in_(map(lambda o: o.id, objects)), cls.object_type == object_type, cls.user_id == user))
704
705
706 @generic_repr('id', 'name', 'query_id', 'user_id', 'state', 'last_triggered_at', 'rearm')
707 class Alert(TimestampMixin, BelongsToOrgMixin, db.Model):
708 UNKNOWN_STATE = 'unknown'
709 OK_STATE = 'ok'
710 TRIGGERED_STATE = 'triggered'
711
712 id = Column(db.Integer, primary_key=True)
713 name = Column(db.String(255))
714 query_id = Column(db.Integer, db.ForeignKey("queries.id"))
715 query_rel = db.relationship(Query, backref=backref('alerts', cascade="all"))
716 user_id = Column(db.Integer, db.ForeignKey("users.id"))
717 user = db.relationship(User, backref='alerts')
718 options = Column(MutableDict.as_mutable(PseudoJSON))
719 state = Column(db.String(255), default=UNKNOWN_STATE)
720 subscriptions = db.relationship("AlertSubscription", cascade="all, delete-orphan")
721 last_triggered_at = Column(db.DateTime(True), nullable=True)
722 rearm = Column(db.Integer, nullable=True)
723
724 __tablename__ = 'alerts'
725
726 @classmethod
727 def all(cls, group_ids):
728 return (
729 cls.query
730 .options(
731 joinedload(Alert.user),
732 joinedload(Alert.query_rel),
733 )
734 .join(Query)
735 .join(
736 DataSourceGroup,
737 DataSourceGroup.data_source_id == Query.data_source_id
738 )
739 .filter(DataSourceGroup.group_id.in_(group_ids))
740 )
741
742 @classmethod
743 def get_by_id_and_org(cls, object_id, org):
744 return super(Alert, cls).get_by_id_and_org(object_id, org, Query)
745
746 def evaluate(self):
747 data = json_loads(self.query_rel.latest_query_data.data)
748
749 if data['rows'] and self.options['column'] in data['rows'][0]:
750 value = data['rows'][0][self.options['column']]
751 op = self.options['op']
752
753 if op == 'greater than' and value > self.options['value']:
754 new_state = self.TRIGGERED_STATE
755 elif op == 'less than' and value < self.options['value']:
756 new_state = self.TRIGGERED_STATE
757 elif op == 'equals' and value == self.options['value']:
758 new_state = self.TRIGGERED_STATE
759 else:
760 new_state = self.OK_STATE
761 else:
762 new_state = self.UNKNOWN_STATE
763
764 return new_state
765
766 def subscribers(self):
767 return User.query.join(AlertSubscription).filter(AlertSubscription.alert == self)
768
769 @property
770 def groups(self):
771 return self.query_rel.groups
772
773
774 def generate_slug(ctx):
775 slug = utils.slugify(ctx.current_parameters['name'])
776 tries = 1
777 while Dashboard.query.filter(Dashboard.slug == slug).first() is not None:
778 slug = utils.slugify(ctx.current_parameters['name']) + "_" + str(tries)
779 tries += 1
780 return slug
781
782
783 @python_2_unicode_compatible
784 @gfk_type
785 @generic_repr('id', 'name', 'slug', 'user_id', 'org_id', 'version', 'is_archived', 'is_draft')
786 class Dashboard(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
787 id = Column(db.Integer, primary_key=True)
788 version = Column(db.Integer)
789 org_id = Column(db.Integer, db.ForeignKey("organizations.id"))
790 org = db.relationship(Organization, backref="dashboards")
791 slug = Column(db.String(140), index=True, default=generate_slug)
792 name = Column(db.String(100))
793 user_id = Column(db.Integer, db.ForeignKey("users.id"))
794 user = db.relationship(User)
795 # layout is no longer used, but kept so we know how to render old dashboards.
796 layout = Column(db.Text)
797 dashboard_filters_enabled = Column(db.Boolean, default=False)
798 is_archived = Column(db.Boolean, default=False, index=True)
799 is_draft = Column(db.Boolean, default=True, index=True)
800 widgets = db.relationship('Widget', backref='dashboard', lazy='dynamic')
801 tags = Column('tags', MutableList.as_mutable(postgresql.ARRAY(db.Unicode)), nullable=True)
802
803 __tablename__ = 'dashboards'
804 __mapper_args__ = {
805 "version_id_col": version
806 }
807
808 def __str__(self):
809 return u"%s=%s" % (self.id, self.name)
810
811 @classmethod
812 def all(cls, org, group_ids, user_id):
813 query = (
814 Dashboard.query
815 .options(
816 subqueryload(Dashboard.user).load_only('_profile_image_url', 'name'),
817 )
818 .outerjoin(Widget)
819 .outerjoin(Visualization)
820 .outerjoin(Query)
821 .outerjoin(DataSourceGroup, Query.data_source_id == DataSourceGroup.data_source_id)
822 .filter(
823 Dashboard.is_archived == False,
824 (DataSourceGroup.group_id.in_(group_ids) |
825 (Dashboard.user_id == user_id) |
826 ((Widget.dashboard != None) & (Widget.visualization == None))),
827 Dashboard.org == org)
828 .distinct())
829
830 query = query.filter(or_(Dashboard.user_id == user_id, Dashboard.is_draft == False))
831
832 return query
833
834 @classmethod
835 def search(cls, org, groups_ids, user_id, search_term):
836 # TODO: switch to FTS
837 return cls.all(org, groups_ids, user_id).filter(cls.name.ilike(u'%{}%'.format(search_term)))
838
839 @classmethod
840 def all_tags(cls, org, user):
841 dashboards = cls.all(org, user.group_ids, user.id)
842
843 tag_column = func.unnest(cls.tags).label('tag')
844 usage_count = func.count(1).label('usage_count')
845
846 query = (
847 db.session
848 .query(tag_column, usage_count)
849 .group_by(tag_column)
850 .filter(Dashboard.id.in_(dashboards.options(load_only('id'))))
851 .order_by(usage_count.desc())
852 )
853 return query
854
855 @classmethod
856 def favorites(cls, user, base_query=None):
857 if base_query is None:
858 base_query = cls.all(user.org, user.group_ids, user.id)
859 return base_query.join(
860 (
861 Favorite,
862 and_(
863 Favorite.object_type == u'Dashboard',
864 Favorite.object_id == Dashboard.id
865 )
866 )
867 ).filter(Favorite.user_id == user.id)
868
869 @classmethod
870 def get_by_slug_and_org(cls, slug, org):
871 return cls.query.filter(cls.slug == slug, cls.org == org).one()
872
873 @hybrid_property
874 def lowercase_name(self):
875 "Optional property useful for sorting purposes."
876 return self.name.lower()
877
878 @lowercase_name.expression
879 def lowercase_name(cls):
880 "The SQLAlchemy expression for the property above."
881 return func.lower(cls.name)
882
883
884 @python_2_unicode_compatible
885 @generic_repr('id', 'name', 'type', 'query_id')
886 class Visualization(TimestampMixin, BelongsToOrgMixin, db.Model):
887 id = Column(db.Integer, primary_key=True)
888 type = Column(db.String(100))
889 query_id = Column(db.Integer, db.ForeignKey("queries.id"))
890 # query_rel and not query, because db.Model already has query defined.
891 query_rel = db.relationship(Query, back_populates='visualizations')
892 name = Column(db.String(255))
893 description = Column(db.String(4096), nullable=True)
894 options = Column(db.Text)
895
896 __tablename__ = 'visualizations'
897
898 def __str__(self):
899 return u"%s %s" % (self.id, self.type)
900
901 @classmethod
902 def get_by_id_and_org(cls, object_id, org):
903 return super(Visualization, cls).get_by_id_and_org(object_id, org, Query)
904
905 def copy(self):
906 return {
907 'type': self.type,
908 'name': self.name,
909 'description': self.description,
910 'options': self.options
911 }
912
913
914 @python_2_unicode_compatible
915 @generic_repr('id', 'visualization_id', 'dashboard_id')
916 class Widget(TimestampMixin, BelongsToOrgMixin, db.Model):
917 id = Column(db.Integer, primary_key=True)
918 visualization_id = Column(db.Integer, db.ForeignKey('visualizations.id'), nullable=True)
919 visualization = db.relationship(Visualization, backref='widgets')
920 text = Column(db.Text, nullable=True)
921 width = Column(db.Integer)
922 options = Column(db.Text)
923 dashboard_id = Column(db.Integer, db.ForeignKey("dashboards.id"), index=True)
924
925 __tablename__ = 'widgets'
926
927 def __str__(self):
928 return u"%s" % self.id
929
930 @classmethod
931 def get_by_id_and_org(cls, object_id, org):
932 return super(Widget, cls).get_by_id_and_org(object_id, org, Dashboard)
933
934
935 @python_2_unicode_compatible
936 @generic_repr('id', 'object_type', 'object_id', 'action', 'user_id', 'org_id', 'created_at')
937 class Event(db.Model):
938 id = Column(db.Integer, primary_key=True)
939 org_id = Column(db.Integer, db.ForeignKey("organizations.id"))
940 org = db.relationship(Organization, back_populates="events")
941 user_id = Column(db.Integer, db.ForeignKey("users.id"), nullable=True)
942 user = db.relationship(User, backref="events")
943 action = Column(db.String(255))
944 object_type = Column(db.String(255))
945 object_id = Column(db.String(255), nullable=True)
946 additional_properties = Column(MutableDict.as_mutable(PseudoJSON), nullable=True, default={})
947 created_at = Column(db.DateTime(True), default=db.func.now())
948
949 __tablename__ = 'events'
950
951 def __str__(self):
952 return u"%s,%s,%s,%s" % (self.user_id, self.action, self.object_type, self.object_id)
953
954 def to_dict(self):
955 return {
956 'org_id': self.org_id,
957 'user_id': self.user_id,
958 'action': self.action,
959 'object_type': self.object_type,
960 'object_id': self.object_id,
961 'additional_properties': self.additional_properties,
962 'created_at': self.created_at.isoformat()
963 }
964
965 @classmethod
966 def record(cls, event):
967 org_id = event.pop('org_id')
968 user_id = event.pop('user_id', None)
969 action = event.pop('action')
970 object_type = event.pop('object_type')
971 object_id = event.pop('object_id', None)
972
973 created_at = datetime.datetime.utcfromtimestamp(event.pop('timestamp'))
974
975 event = cls(org_id=org_id, user_id=user_id, action=action,
976 object_type=object_type, object_id=object_id,
977 additional_properties=event,
978 created_at=created_at)
979 db.session.add(event)
980 return event
981
982
983 @generic_repr('id', 'created_by_id', 'org_id', 'active')
984 class ApiKey(TimestampMixin, GFKBase, db.Model):
985 id = Column(db.Integer, primary_key=True)
986 org_id = Column(db.Integer, db.ForeignKey("organizations.id"))
987 org = db.relationship(Organization)
988 api_key = Column(db.String(255), index=True, default=lambda: generate_token(40))
989 active = Column(db.Boolean, default=True)
990 # 'object' provided by GFKBase
991 created_by_id = Column(db.Integer, db.ForeignKey("users.id"), nullable=True)
992 created_by = db.relationship(User)
993
994 __tablename__ = 'api_keys'
995 __table_args__ = (
996 db.Index('api_keys_object_type_object_id', 'object_type', 'object_id'),
997 )
998
999 @classmethod
1000 def get_by_api_key(cls, api_key):
1001 return cls.query.filter(cls.api_key == api_key, cls.active == True).one()
1002
1003 @classmethod
1004 def get_by_object(cls, object):
1005 return cls.query.filter(
1006 cls.object_type == object.__class__.__tablename__,
1007 cls.object_id == object.id,
1008 cls.active == True
1009 ).first()
1010
1011 @classmethod
1012 def create_for_object(cls, object, user):
1013 k = cls(org=user.org, object=object, created_by=user)
1014 db.session.add(k)
1015 return k
1016
1017
1018 @python_2_unicode_compatible
1019 @generic_repr('id', 'name', 'type', 'user_id', 'org_id', 'created_at')
1020 class NotificationDestination(BelongsToOrgMixin, db.Model):
1021 id = Column(db.Integer, primary_key=True)
1022 org_id = Column(db.Integer, db.ForeignKey("organizations.id"))
1023 org = db.relationship(Organization, backref="notification_destinations")
1024 user_id = Column(db.Integer, db.ForeignKey("users.id"))
1025 user = db.relationship(User, backref="notification_destinations")
1026 name = Column(db.String(255))
1027 type = Column(db.String(255))
1028 options = Column(ConfigurationContainer.as_mutable(Configuration))
1029 created_at = Column(db.DateTime(True), default=db.func.now())
1030
1031 __tablename__ = 'notification_destinations'
1032 __table_args__ = (
1033 db.Index(
1034 'notification_destinations_org_id_name', 'org_id', 'name', unique=True
1035 ),
1036 )
1037
1038 def __str__(self):
1039 return text_type(self.name)
1040
1041 def to_dict(self, all=False):
1042 d = {
1043 'id': self.id,
1044 'name': self.name,
1045 'type': self.type,
1046 'icon': self.destination.icon()
1047 }
1048
1049 if all:
1050 schema = get_configuration_schema_for_destination_type(self.type)
1051 self.options.set_schema(schema)
1052 d['options'] = self.options.to_dict(mask_secrets=True)
1053
1054 return d
1055
1056 @property
1057 def destination(self):
1058 return get_destination(self.type, self.options)
1059
1060 @classmethod
1061 def all(cls, org):
1062 notification_destinations = cls.query.filter(cls.org == org).order_by(cls.id.asc())
1063
1064 return notification_destinations
1065
1066 def notify(self, alert, query, user, new_state, app, host):
1067 schema = get_configuration_schema_for_destination_type(self.type)
1068 self.options.set_schema(schema)
1069 return self.destination.notify(alert, query, user, new_state,
1070 app, host, self.options)
1071
1072
1073 @generic_repr('id', 'user_id', 'destination_id', 'alert_id')
1074 class AlertSubscription(TimestampMixin, db.Model):
1075 id = Column(db.Integer, primary_key=True)
1076 user_id = Column(db.Integer, db.ForeignKey("users.id"))
1077 user = db.relationship(User)
1078 destination_id = Column(db.Integer,
1079 db.ForeignKey("notification_destinations.id"),
1080 nullable=True)
1081 destination = db.relationship(NotificationDestination)
1082 alert_id = Column(db.Integer, db.ForeignKey("alerts.id"))
1083 alert = db.relationship(Alert, back_populates="subscriptions")
1084
1085 __tablename__ = 'alert_subscriptions'
1086 __table_args__ = (
1087 db.Index(
1088 'alert_subscriptions_destination_id_alert_id',
1089 'destination_id', 'alert_id', unique=True
1090 ),
1091 )
1092
1093 def to_dict(self):
1094 d = {
1095 'id': self.id,
1096 'user': self.user.to_dict(),
1097 'alert_id': self.alert_id
1098 }
1099
1100 if self.destination:
1101 d['destination'] = self.destination.to_dict()
1102
1103 return d
1104
1105 @classmethod
1106 def all(cls, alert_id):
1107 return AlertSubscription.query.join(User).filter(AlertSubscription.alert_id == alert_id)
1108
1109 def notify(self, alert, query, user, new_state, app, host):
1110 if self.destination:
1111 return self.destination.notify(alert, query, user, new_state,
1112 app, host)
1113 else:
1114 # User email subscription, so create an email destination object
1115 config = {'addresses': self.user.email}
1116 schema = get_configuration_schema_for_destination_type('email')
1117 options = ConfigurationContainer(config, schema)
1118 destination = get_destination('email', options)
1119 return destination.notify(alert, query, user, new_state, app, host, options)
1120
1121
1122 @generic_repr('id', 'trigger', 'user_id', 'org_id')
1123 class QuerySnippet(TimestampMixin, db.Model, BelongsToOrgMixin):
1124 id = Column(db.Integer, primary_key=True)
1125 org_id = Column(db.Integer, db.ForeignKey("organizations.id"))
1126 org = db.relationship(Organization, backref="query_snippets")
1127 trigger = Column(db.String(255), unique=True)
1128 description = Column(db.Text)
1129 user_id = Column(db.Integer, db.ForeignKey("users.id"))
1130 user = db.relationship(User, backref="query_snippets")
1131 snippet = Column(db.Text)
1132
1133 __tablename__ = 'query_snippets'
1134
1135 @classmethod
1136 def all(cls, org):
1137 return cls.query.filter(cls.org == org)
1138
1139 def to_dict(self):
1140 d = {
1141 'id': self.id,
1142 'trigger': self.trigger,
1143 'description': self.description,
1144 'snippet': self.snippet,
1145 'user': self.user.to_dict(),
1146 'updated_at': self.updated_at,
1147 'created_at': self.created_at
1148 }
1149
1150 return d
1151
1152
1153 def init_db():
1154 default_org = Organization(name="Default", slug='default', settings={})
1155 admin_group = Group(name='admin', permissions=['admin', 'super_admin'], org=default_org, type=Group.BUILTIN_GROUP)
1156 default_group = Group(name='default', permissions=Group.DEFAULT_PERMISSIONS, org=default_org, type=Group.BUILTIN_GROUP)
1157
1158 db.session.add_all([default_org, admin_group, default_group])
1159 # XXX remove after fixing User.group_ids
1160 db.session.commit()
1161 return default_org, admin_group, default_group
1162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redash/models/__init__.py b/redash/models/__init__.py
--- a/redash/models/__init__.py
+++ b/redash/models/__init__.py
@@ -351,7 +351,7 @@
for (r, row) in enumerate(query_data['rows']):
for (c, name) in enumerate(column_names):
v = row.get(name)
- if isinstance(v, list):
+ if isinstance(v, list) or isinstance(v, dict):
v = str(v).encode('utf-8')
sheet.write(r + 1, c, v)
| {"golden_diff": "diff --git a/redash/models/__init__.py b/redash/models/__init__.py\n--- a/redash/models/__init__.py\n+++ b/redash/models/__init__.py\n@@ -351,7 +351,7 @@\n for (r, row) in enumerate(query_data['rows']):\n for (c, name) in enumerate(column_names):\n v = row.get(name)\n- if isinstance(v, list):\n+ if isinstance(v, list) or isinstance(v, dict):\n v = str(v).encode('utf-8')\n sheet.write(r + 1, c, v)\n", "issue": "Exporting to Excel file fails when one of the columns is a dictionary\n<!--\r\n\r\nWe use GitHub only for bug reports \ud83d\udc1b\r\n\r\nAnything else should be posted to https://discuss.redash.io \ud83d\udc6b\r\n\r\n\ud83d\udea8For support, help & questions use https://discuss.redash.io/c/support\r\n\ud83d\udca1For feature requests & ideas use https://discuss.redash.io/c/feature-requests\r\n\r\n**Found a security vulnerability?** Please email [email protected] to report any security vulnerabilities. We will acknowledge receipt of your vulnerability and strive to send you regular updates about our progress. If you're curious about the status of your disclosure please feel free to email us again. If you want to encrypt your disclosure email, you can use this PGP key.\r\n\r\n-->\r\n\r\n### Issue Summary\r\n\r\nget error when exporting query results to excel file:\r\n\r\nenvironment:ec2 on ecs\r\n\r\n\r\n### Steps to Reproduce\r\n\r\n1. create a new query\r\n2. excute the query,save it ,then download it as excel file\r\n\r\nAny other info e.g. Why do you consider this to be a bug? What did you expect to happen instead?\r\n\r\n### Technical details:\r\n\r\n* Redash Version:6.0.0+b8537\r\n* Browser/OS:chrome\r\n* How did you install Redash:run redash by ecs on aws \r\n\n", "before_files": [{"content": "import cStringIO\nimport csv\nimport datetime\nimport calendar\nimport functools\nimport hashlib\nimport itertools\nimport logging\nimport time\nimport pytz\nfrom functools import reduce\n\nimport xlsxwriter\nfrom six import python_2_unicode_compatible, text_type\nfrom sqlalchemy import distinct, or_, and_, UniqueConstraint\nfrom sqlalchemy.dialects import postgresql\nfrom sqlalchemy.event import listens_for\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy.orm import backref, contains_eager, joinedload, subqueryload, load_only\nfrom sqlalchemy.orm.exc import NoResultFound # noqa: F401\nfrom sqlalchemy import func\nfrom sqlalchemy_utils import generic_relationship\nfrom sqlalchemy_utils.types import TSVectorType\nfrom sqlalchemy_utils.models import generic_repr\n\nfrom redash import redis_connection, utils\nfrom redash.destinations import (get_configuration_schema_for_destination_type,\n get_destination)\nfrom redash.metrics import database # noqa: F401\nfrom redash.query_runner import (get_configuration_schema_for_query_runner_type,\n get_query_runner)\nfrom redash.utils import generate_token, json_dumps, json_loads\nfrom redash.utils.configuration import ConfigurationContainer\n\nfrom .base import db, gfk_type, Column, GFKBase, SearchBaseQuery\nfrom .changes import ChangeTrackingMixin, Change # noqa\nfrom .mixins import BelongsToOrgMixin, TimestampMixin\nfrom .organizations import Organization\nfrom .types import Configuration, MutableDict, MutableList, PseudoJSON\nfrom .users import (AccessPermission, AnonymousUser, ApiUser, Group, User) # noqa\n\nlogger = logging.getLogger(__name__)\n\n\nclass ScheduledQueriesExecutions(object):\n KEY_NAME = 'sq:executed_at'\n\n def __init__(self):\n self.executions = {}\n\n def refresh(self):\n self.executions = redis_connection.hgetall(self.KEY_NAME)\n\n def update(self, query_id):\n redis_connection.hmset(self.KEY_NAME, {\n query_id: time.time()\n })\n\n def get(self, query_id):\n timestamp = self.executions.get(str(query_id))\n if timestamp:\n timestamp = utils.dt_from_timestamp(timestamp)\n\n return timestamp\n\n\nscheduled_queries_executions = ScheduledQueriesExecutions()\n\n\n@python_2_unicode_compatible\n@generic_repr('id', 'name', 'type', 'org_id', 'created_at')\nclass DataSource(BelongsToOrgMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey('organizations.id'))\n org = db.relationship(Organization, backref=\"data_sources\")\n\n name = Column(db.String(255))\n type = Column(db.String(255))\n options = Column(ConfigurationContainer.as_mutable(Configuration))\n queue_name = Column(db.String(255), default=\"queries\")\n scheduled_queue_name = Column(db.String(255), default=\"scheduled_queries\")\n created_at = Column(db.DateTime(True), default=db.func.now())\n\n data_source_groups = db.relationship(\"DataSourceGroup\", back_populates=\"data_source\",\n cascade=\"all\")\n __tablename__ = 'data_sources'\n __table_args__ = (db.Index('data_sources_org_id_name', 'org_id', 'name'),)\n\n def __eq__(self, other):\n return self.id == other.id\n\n def to_dict(self, all=False, with_permissions_for=None):\n d = {\n 'id': self.id,\n 'name': self.name,\n 'type': self.type,\n 'syntax': self.query_runner.syntax,\n 'paused': self.paused,\n 'pause_reason': self.pause_reason\n }\n\n if all:\n schema = get_configuration_schema_for_query_runner_type(self.type)\n self.options.set_schema(schema)\n d['options'] = self.options.to_dict(mask_secrets=True)\n d['queue_name'] = self.queue_name\n d['scheduled_queue_name'] = self.scheduled_queue_name\n d['groups'] = self.groups\n\n if with_permissions_for is not None:\n d['view_only'] = db.session.query(DataSourceGroup.view_only).filter(\n DataSourceGroup.group == with_permissions_for,\n DataSourceGroup.data_source == self).one()[0]\n\n return d\n\n def __str__(self):\n return text_type(self.name)\n\n @classmethod\n def create_with_group(cls, *args, **kwargs):\n data_source = cls(*args, **kwargs)\n data_source_group = DataSourceGroup(\n data_source=data_source,\n group=data_source.org.default_group)\n db.session.add_all([data_source, data_source_group])\n return data_source\n\n @classmethod\n def all(cls, org, group_ids=None):\n data_sources = cls.query.filter(cls.org == org).order_by(cls.id.asc())\n\n if group_ids:\n data_sources = data_sources.join(DataSourceGroup).filter(\n DataSourceGroup.group_id.in_(group_ids))\n\n return data_sources.distinct()\n\n @classmethod\n def get_by_id(cls, _id):\n return cls.query.filter(cls.id == _id).one()\n\n def delete(self):\n Query.query.filter(Query.data_source == self).update(dict(data_source_id=None, latest_query_data_id=None))\n QueryResult.query.filter(QueryResult.data_source == self).delete()\n res = db.session.delete(self)\n db.session.commit()\n return res\n\n def get_schema(self, refresh=False):\n key = \"data_source:schema:{}\".format(self.id)\n\n cache = None\n if not refresh:\n cache = redis_connection.get(key)\n\n if cache is None:\n query_runner = self.query_runner\n schema = sorted(query_runner.get_schema(get_stats=refresh), key=lambda t: t['name'])\n\n redis_connection.set(key, json_dumps(schema))\n else:\n schema = json_loads(cache)\n\n return schema\n\n def _pause_key(self):\n return 'ds:{}:pause'.format(self.id)\n\n @property\n def paused(self):\n return redis_connection.exists(self._pause_key())\n\n @property\n def pause_reason(self):\n return redis_connection.get(self._pause_key())\n\n def pause(self, reason=None):\n redis_connection.set(self._pause_key(), reason or '')\n\n def resume(self):\n redis_connection.delete(self._pause_key())\n\n def add_group(self, group, view_only=False):\n dsg = DataSourceGroup(group=group, data_source=self, view_only=view_only)\n db.session.add(dsg)\n return dsg\n\n def remove_group(self, group):\n DataSourceGroup.query.filter(\n DataSourceGroup.group == group,\n DataSourceGroup.data_source == self\n ).delete()\n db.session.commit()\n\n def update_group_permission(self, group, view_only):\n dsg = DataSourceGroup.query.filter(\n DataSourceGroup.group == group,\n DataSourceGroup.data_source == self).one()\n dsg.view_only = view_only\n db.session.add(dsg)\n return dsg\n\n @property\n def query_runner(self):\n return get_query_runner(self.type, self.options)\n\n @classmethod\n def get_by_name(cls, name):\n return cls.query.filter(cls.name == name).one()\n\n # XXX examine call sites to see if a regular SQLA collection would work better\n @property\n def groups(self):\n groups = DataSourceGroup.query.filter(\n DataSourceGroup.data_source == self\n )\n return dict(map(lambda g: (g.group_id, g.view_only), groups))\n\n\n@generic_repr('id', 'data_source_id', 'group_id', 'view_only')\nclass DataSourceGroup(db.Model):\n # XXX drop id, use datasource/group as PK\n id = Column(db.Integer, primary_key=True)\n data_source_id = Column(db.Integer, db.ForeignKey(\"data_sources.id\"))\n data_source = db.relationship(DataSource, back_populates=\"data_source_groups\")\n group_id = Column(db.Integer, db.ForeignKey(\"groups.id\"))\n group = db.relationship(Group, back_populates=\"data_sources\")\n view_only = Column(db.Boolean, default=False)\n\n __tablename__ = \"data_source_groups\"\n\n\n@python_2_unicode_compatible\n@generic_repr('id', 'org_id', 'data_source_id', 'query_hash', 'runtime', 'retrieved_at')\nclass QueryResult(db.Model, BelongsToOrgMixin):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey('organizations.id'))\n org = db.relationship(Organization)\n data_source_id = Column(db.Integer, db.ForeignKey(\"data_sources.id\"))\n data_source = db.relationship(DataSource, backref=backref('query_results'))\n query_hash = Column(db.String(32), index=True)\n query_text = Column('query', db.Text)\n data = Column(db.Text)\n runtime = Column(postgresql.DOUBLE_PRECISION)\n retrieved_at = Column(db.DateTime(True))\n\n __tablename__ = 'query_results'\n\n def __str__(self):\n return u\"%d | %s | %s\" % (self.id, self.query_hash, self.retrieved_at)\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'query_hash': self.query_hash,\n 'query': self.query_text,\n 'data': json_loads(self.data),\n 'data_source_id': self.data_source_id,\n 'runtime': self.runtime,\n 'retrieved_at': self.retrieved_at\n }\n\n @classmethod\n def unused(cls, days=7):\n age_threshold = datetime.datetime.now() - datetime.timedelta(days=days)\n return (\n cls.query.filter(\n Query.id.is_(None),\n cls.retrieved_at < age_threshold\n )\n .outerjoin(Query)\n ).options(load_only('id'))\n\n @classmethod\n def get_latest(cls, data_source, query, max_age=0):\n query_hash = utils.gen_query_hash(query)\n\n if max_age == -1:\n query = cls.query.filter(\n cls.query_hash == query_hash,\n cls.data_source == data_source\n )\n else:\n query = cls.query.filter(\n cls.query_hash == query_hash,\n cls.data_source == data_source,\n (\n db.func.timezone('utc', cls.retrieved_at) +\n datetime.timedelta(seconds=max_age) >=\n db.func.timezone('utc', db.func.now())\n )\n )\n\n return query.order_by(cls.retrieved_at.desc()).first()\n\n @classmethod\n def store_result(cls, org, data_source, query_hash, query, data, run_time, retrieved_at):\n query_result = cls(org_id=org,\n query_hash=query_hash,\n query_text=query,\n runtime=run_time,\n data_source=data_source,\n retrieved_at=retrieved_at,\n data=data)\n db.session.add(query_result)\n logging.info(\"Inserted query (%s) data; id=%s\", query_hash, query_result.id)\n # TODO: Investigate how big an impact this select-before-update makes.\n queries = Query.query.filter(\n Query.query_hash == query_hash,\n Query.data_source == data_source\n )\n for q in queries:\n q.latest_query_data = query_result\n # don't auto-update the updated_at timestamp\n q.skip_updated_at = True\n db.session.add(q)\n query_ids = [q.id for q in queries]\n logging.info(\"Updated %s queries with result (%s).\", len(query_ids), query_hash)\n\n return query_result, query_ids\n\n @property\n def groups(self):\n return self.data_source.groups\n\n def make_csv_content(self):\n s = cStringIO.StringIO()\n\n query_data = json_loads(self.data)\n writer = csv.DictWriter(s, extrasaction=\"ignore\", fieldnames=[col['name'] for col in query_data['columns']])\n writer.writer = utils.UnicodeWriter(s)\n writer.writeheader()\n for row in query_data['rows']:\n writer.writerow(row)\n\n return s.getvalue()\n\n def make_excel_content(self):\n s = cStringIO.StringIO()\n\n query_data = json_loads(self.data)\n book = xlsxwriter.Workbook(s, {'constant_memory': True})\n sheet = book.add_worksheet(\"result\")\n\n column_names = []\n for (c, col) in enumerate(query_data['columns']):\n sheet.write(0, c, col['name'])\n column_names.append(col['name'])\n\n for (r, row) in enumerate(query_data['rows']):\n for (c, name) in enumerate(column_names):\n v = row.get(name)\n if isinstance(v, list):\n v = str(v).encode('utf-8')\n sheet.write(r + 1, c, v)\n\n book.close()\n\n return s.getvalue()\n\n\ndef should_schedule_next(previous_iteration, now, interval, time=None, day_of_week=None, failures=0):\n # if time exists then interval > 23 hours (82800s)\n # if day_of_week exists then interval > 6 days (518400s)\n if (time is None):\n ttl = int(interval)\n next_iteration = previous_iteration + datetime.timedelta(seconds=ttl)\n else:\n hour, minute = time.split(':')\n hour, minute = int(hour), int(minute)\n\n # The following logic is needed for cases like the following:\n # - The query scheduled to run at 23:59.\n # - The scheduler wakes up at 00:01.\n # - Using naive implementation of comparing timestamps, it will skip the execution.\n normalized_previous_iteration = previous_iteration.replace(hour=hour, minute=minute)\n\n if normalized_previous_iteration > previous_iteration:\n previous_iteration = normalized_previous_iteration - datetime.timedelta(days=1)\n\n days_delay = int(interval) / 60 / 60 / 24\n\n days_to_add = 0\n if (day_of_week is not None):\n days_to_add = list(calendar.day_name).index(day_of_week) - normalized_previous_iteration.weekday()\n\n next_iteration = (previous_iteration + datetime.timedelta(days=days_delay) +\n datetime.timedelta(days=days_to_add)).replace(hour=hour, minute=minute)\n if failures:\n next_iteration += datetime.timedelta(minutes=2**failures)\n return now > next_iteration\n\n\n@python_2_unicode_compatible\n@gfk_type\n@generic_repr('id', 'name', 'query_hash', 'version', 'user_id', 'org_id',\n 'data_source_id', 'query_hash', 'last_modified_by_id',\n 'is_archived', 'is_draft', 'schedule', 'schedule_failures')\nclass Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n version = Column(db.Integer, default=1)\n org_id = Column(db.Integer, db.ForeignKey('organizations.id'))\n org = db.relationship(Organization, backref=\"queries\")\n data_source_id = Column(db.Integer, db.ForeignKey(\"data_sources.id\"), nullable=True)\n data_source = db.relationship(DataSource, backref='queries')\n latest_query_data_id = Column(db.Integer, db.ForeignKey(\"query_results.id\"), nullable=True)\n latest_query_data = db.relationship(QueryResult)\n name = Column(db.String(255))\n description = Column(db.String(4096), nullable=True)\n query_text = Column(\"query\", db.Text)\n query_hash = Column(db.String(32))\n api_key = Column(db.String(40), default=lambda: generate_token(40))\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User, foreign_keys=[user_id])\n last_modified_by_id = Column(db.Integer, db.ForeignKey('users.id'), nullable=True)\n last_modified_by = db.relationship(User, backref=\"modified_queries\",\n foreign_keys=[last_modified_by_id])\n is_archived = Column(db.Boolean, default=False, index=True)\n is_draft = Column(db.Boolean, default=True, index=True)\n schedule = Column(MutableDict.as_mutable(PseudoJSON), nullable=True)\n schedule_failures = Column(db.Integer, default=0)\n visualizations = db.relationship(\"Visualization\", cascade=\"all, delete-orphan\")\n options = Column(MutableDict.as_mutable(PseudoJSON), default={})\n search_vector = Column(TSVectorType('id', 'name', 'description', 'query',\n weights={'name': 'A',\n 'id': 'B',\n 'description': 'C',\n 'query': 'D'}),\n nullable=True)\n tags = Column('tags', MutableList.as_mutable(postgresql.ARRAY(db.Unicode)), nullable=True)\n\n query_class = SearchBaseQuery\n __tablename__ = 'queries'\n __mapper_args__ = {\n \"version_id_col\": version,\n 'version_id_generator': False\n }\n\n def __str__(self):\n return text_type(self.id)\n\n def archive(self, user=None):\n db.session.add(self)\n self.is_archived = True\n self.schedule = None\n\n for vis in self.visualizations:\n for w in vis.widgets:\n db.session.delete(w)\n\n for a in self.alerts:\n db.session.delete(a)\n\n if user:\n self.record_changes(user)\n\n @classmethod\n def create(cls, **kwargs):\n query = cls(**kwargs)\n db.session.add(Visualization(query_rel=query,\n name=\"Table\",\n description='',\n type=\"TABLE\",\n options=\"{}\"))\n return query\n\n @classmethod\n def all_queries(cls, group_ids, user_id=None, drafts=False):\n query_ids = (\n db.session\n .query(distinct(cls.id))\n .join(\n DataSourceGroup,\n Query.data_source_id == DataSourceGroup.data_source_id\n )\n .filter(Query.is_archived == False)\n .filter(DataSourceGroup.group_id.in_(group_ids))\n )\n q = (\n cls\n .query\n .options(\n joinedload(Query.user),\n joinedload(\n Query.latest_query_data\n ).load_only(\n 'runtime',\n 'retrieved_at',\n )\n )\n .filter(cls.id.in_(query_ids))\n # Adding outer joins to be able to order by relationship\n .outerjoin(User, User.id == Query.user_id)\n .outerjoin(\n QueryResult,\n QueryResult.id == Query.latest_query_data_id\n )\n .options(\n contains_eager(Query.user),\n contains_eager(Query.latest_query_data),\n )\n .order_by(Query.created_at.desc())\n )\n\n if not drafts:\n q = q.filter(\n or_(\n Query.is_draft == False,\n Query.user_id == user_id\n )\n )\n return q\n\n @classmethod\n def favorites(cls, user, base_query=None):\n if base_query is None:\n base_query = cls.all_queries(user.group_ids, user.id, drafts=True)\n return base_query.join((\n Favorite,\n and_(\n Favorite.object_type == u'Query',\n Favorite.object_id == Query.id\n )\n )).filter(Favorite.user_id == user.id)\n\n @classmethod\n def all_tags(cls, user, include_drafts=False):\n queries = cls.all_queries(\n group_ids=user.group_ids,\n user_id=user.id,\n drafts=include_drafts,\n )\n\n tag_column = func.unnest(cls.tags).label('tag')\n usage_count = func.count(1).label('usage_count')\n\n query = (\n db.session\n .query(tag_column, usage_count)\n .group_by(tag_column)\n .filter(Query.id.in_(queries.options(load_only('id'))))\n .order_by(usage_count.desc())\n )\n return query\n\n @classmethod\n def by_user(cls, user):\n return cls.all_queries(user.group_ids, user.id).filter(Query.user == user)\n\n @classmethod\n def outdated_queries(cls):\n queries = (Query.query\n .options(joinedload(Query.latest_query_data).load_only('retrieved_at'))\n .filter(Query.schedule.isnot(None))\n .order_by(Query.id))\n \n now = utils.utcnow()\n outdated_queries = {}\n scheduled_queries_executions.refresh()\n\n for query in queries:\n schedule_until = pytz.utc.localize(datetime.datetime.strptime(\n query.schedule['until'], '%Y-%m-%d')) if query.schedule['until'] else None\n if (query.schedule['interval'] == None or (\n schedule_until != None and (\n schedule_until <= now))):\n continue\n\n if query.latest_query_data:\n retrieved_at = query.latest_query_data.retrieved_at\n else:\n retrieved_at = now\n\n retrieved_at = scheduled_queries_executions.get(query.id) or retrieved_at\n\n if should_schedule_next(retrieved_at, now, query.schedule['interval'], query.schedule['time'],\n query.schedule['day_of_week'], query.schedule_failures):\n key = \"{}:{}\".format(query.query_hash, query.data_source_id)\n outdated_queries[key] = query\n\n return outdated_queries.values()\n\n @classmethod\n def search(cls, term, group_ids, user_id=None, include_drafts=False, limit=None):\n all_queries = cls.all_queries(group_ids, user_id=user_id, drafts=include_drafts)\n # sort the result using the weight as defined in the search vector column\n return all_queries.search(term, sort=True).limit(limit)\n\n @classmethod\n def search_by_user(cls, term, user, limit=None):\n return cls.by_user(user).search(term, sort=True).limit(limit)\n\n @classmethod\n def recent(cls, group_ids, user_id=None, limit=20):\n query = (cls.query\n .filter(Event.created_at > (db.func.current_date() - 7))\n .join(Event, Query.id == Event.object_id.cast(db.Integer))\n .join(DataSourceGroup, Query.data_source_id == DataSourceGroup.data_source_id)\n .filter(\n Event.action.in_(['edit', 'execute', 'edit_name',\n 'edit_description', 'view_source']),\n Event.object_id != None,\n Event.object_type == 'query',\n DataSourceGroup.group_id.in_(group_ids),\n or_(Query.is_draft == False, Query.user_id == user_id),\n Query.is_archived == False)\n .group_by(Event.object_id, Query.id)\n .order_by(db.desc(db.func.count(0))))\n\n if user_id:\n query = query.filter(Event.user_id == user_id)\n\n query = query.limit(limit)\n\n return query\n\n @classmethod\n def get_by_id(cls, _id):\n return cls.query.filter(cls.id == _id).one()\n\n def fork(self, user):\n forked_list = ['org', 'data_source', 'latest_query_data', 'description',\n 'query_text', 'query_hash', 'options']\n kwargs = {a: getattr(self, a) for a in forked_list}\n forked_query = Query.create(name=u'Copy of (#{}) {}'.format(self.id, self.name),\n user=user, **kwargs)\n\n for v in self.visualizations:\n if v.type == 'TABLE':\n continue\n forked_v = v.copy()\n forked_v['query_rel'] = forked_query\n forked_query.visualizations.append(Visualization(**forked_v))\n db.session.add(forked_query)\n return forked_query\n\n @property\n def runtime(self):\n return self.latest_query_data.runtime\n\n @property\n def retrieved_at(self):\n return self.latest_query_data.retrieved_at\n\n @property\n def groups(self):\n if self.data_source is None:\n return {}\n\n return self.data_source.groups\n\n @hybrid_property\n def lowercase_name(self):\n \"Optional property useful for sorting purposes.\"\n return self.name.lower()\n\n @lowercase_name.expression\n def lowercase_name(cls):\n \"The SQLAlchemy expression for the property above.\"\n return func.lower(cls.name)\n\n\n@listens_for(Query.query_text, 'set')\ndef gen_query_hash(target, val, oldval, initiator):\n target.query_hash = utils.gen_query_hash(val)\n target.schedule_failures = 0\n\n\n@listens_for(Query.user_id, 'set')\ndef query_last_modified_by(target, val, oldval, initiator):\n target.last_modified_by_id = val\n\n\n@generic_repr('id', 'object_type', 'object_id', 'user_id', 'org_id')\nclass Favorite(TimestampMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey(\"organizations.id\"))\n\n object_type = Column(db.Unicode(255))\n object_id = Column(db.Integer)\n object = generic_relationship(object_type, object_id)\n\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User, backref='favorites')\n\n __tablename__ = \"favorites\"\n __table_args__ = (\n UniqueConstraint(\"object_type\", \"object_id\", \"user_id\", name=\"unique_favorite\"),\n )\n\n @classmethod\n def is_favorite(cls, user, object):\n return cls.query.filter(cls.object == object, cls.user_id == user).count() > 0\n\n @classmethod\n def are_favorites(cls, user, objects):\n objects = list(objects)\n if not objects:\n return []\n\n object_type = text_type(objects[0].__class__.__name__)\n return map(lambda fav: fav.object_id, cls.query.filter(cls.object_id.in_(map(lambda o: o.id, objects)), cls.object_type == object_type, cls.user_id == user))\n\n\n@generic_repr('id', 'name', 'query_id', 'user_id', 'state', 'last_triggered_at', 'rearm')\nclass Alert(TimestampMixin, BelongsToOrgMixin, db.Model):\n UNKNOWN_STATE = 'unknown'\n OK_STATE = 'ok'\n TRIGGERED_STATE = 'triggered'\n\n id = Column(db.Integer, primary_key=True)\n name = Column(db.String(255))\n query_id = Column(db.Integer, db.ForeignKey(\"queries.id\"))\n query_rel = db.relationship(Query, backref=backref('alerts', cascade=\"all\"))\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User, backref='alerts')\n options = Column(MutableDict.as_mutable(PseudoJSON))\n state = Column(db.String(255), default=UNKNOWN_STATE)\n subscriptions = db.relationship(\"AlertSubscription\", cascade=\"all, delete-orphan\")\n last_triggered_at = Column(db.DateTime(True), nullable=True)\n rearm = Column(db.Integer, nullable=True)\n\n __tablename__ = 'alerts'\n\n @classmethod\n def all(cls, group_ids):\n return (\n cls.query\n .options(\n joinedload(Alert.user),\n joinedload(Alert.query_rel),\n )\n .join(Query)\n .join(\n DataSourceGroup,\n DataSourceGroup.data_source_id == Query.data_source_id\n )\n .filter(DataSourceGroup.group_id.in_(group_ids))\n )\n\n @classmethod\n def get_by_id_and_org(cls, object_id, org):\n return super(Alert, cls).get_by_id_and_org(object_id, org, Query)\n\n def evaluate(self):\n data = json_loads(self.query_rel.latest_query_data.data)\n\n if data['rows'] and self.options['column'] in data['rows'][0]:\n value = data['rows'][0][self.options['column']]\n op = self.options['op']\n\n if op == 'greater than' and value > self.options['value']:\n new_state = self.TRIGGERED_STATE\n elif op == 'less than' and value < self.options['value']:\n new_state = self.TRIGGERED_STATE\n elif op == 'equals' and value == self.options['value']:\n new_state = self.TRIGGERED_STATE\n else:\n new_state = self.OK_STATE\n else:\n new_state = self.UNKNOWN_STATE\n\n return new_state\n\n def subscribers(self):\n return User.query.join(AlertSubscription).filter(AlertSubscription.alert == self)\n\n @property\n def groups(self):\n return self.query_rel.groups\n\n\ndef generate_slug(ctx):\n slug = utils.slugify(ctx.current_parameters['name'])\n tries = 1\n while Dashboard.query.filter(Dashboard.slug == slug).first() is not None:\n slug = utils.slugify(ctx.current_parameters['name']) + \"_\" + str(tries)\n tries += 1\n return slug\n\n\n@python_2_unicode_compatible\n@gfk_type\n@generic_repr('id', 'name', 'slug', 'user_id', 'org_id', 'version', 'is_archived', 'is_draft')\nclass Dashboard(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n version = Column(db.Integer)\n org_id = Column(db.Integer, db.ForeignKey(\"organizations.id\"))\n org = db.relationship(Organization, backref=\"dashboards\")\n slug = Column(db.String(140), index=True, default=generate_slug)\n name = Column(db.String(100))\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User)\n # layout is no longer used, but kept so we know how to render old dashboards.\n layout = Column(db.Text)\n dashboard_filters_enabled = Column(db.Boolean, default=False)\n is_archived = Column(db.Boolean, default=False, index=True)\n is_draft = Column(db.Boolean, default=True, index=True)\n widgets = db.relationship('Widget', backref='dashboard', lazy='dynamic')\n tags = Column('tags', MutableList.as_mutable(postgresql.ARRAY(db.Unicode)), nullable=True)\n\n __tablename__ = 'dashboards'\n __mapper_args__ = {\n \"version_id_col\": version\n }\n\n def __str__(self):\n return u\"%s=%s\" % (self.id, self.name)\n\n @classmethod\n def all(cls, org, group_ids, user_id):\n query = (\n Dashboard.query\n .options(\n subqueryload(Dashboard.user).load_only('_profile_image_url', 'name'),\n )\n .outerjoin(Widget)\n .outerjoin(Visualization)\n .outerjoin(Query)\n .outerjoin(DataSourceGroup, Query.data_source_id == DataSourceGroup.data_source_id)\n .filter(\n Dashboard.is_archived == False,\n (DataSourceGroup.group_id.in_(group_ids) |\n (Dashboard.user_id == user_id) |\n ((Widget.dashboard != None) & (Widget.visualization == None))),\n Dashboard.org == org)\n .distinct())\n\n query = query.filter(or_(Dashboard.user_id == user_id, Dashboard.is_draft == False))\n\n return query\n\n @classmethod\n def search(cls, org, groups_ids, user_id, search_term):\n # TODO: switch to FTS\n return cls.all(org, groups_ids, user_id).filter(cls.name.ilike(u'%{}%'.format(search_term)))\n\n @classmethod\n def all_tags(cls, org, user):\n dashboards = cls.all(org, user.group_ids, user.id)\n\n tag_column = func.unnest(cls.tags).label('tag')\n usage_count = func.count(1).label('usage_count')\n\n query = (\n db.session\n .query(tag_column, usage_count)\n .group_by(tag_column)\n .filter(Dashboard.id.in_(dashboards.options(load_only('id'))))\n .order_by(usage_count.desc())\n )\n return query\n\n @classmethod\n def favorites(cls, user, base_query=None):\n if base_query is None:\n base_query = cls.all(user.org, user.group_ids, user.id)\n return base_query.join(\n (\n Favorite,\n and_(\n Favorite.object_type == u'Dashboard',\n Favorite.object_id == Dashboard.id\n )\n )\n ).filter(Favorite.user_id == user.id)\n\n @classmethod\n def get_by_slug_and_org(cls, slug, org):\n return cls.query.filter(cls.slug == slug, cls.org == org).one()\n\n @hybrid_property\n def lowercase_name(self):\n \"Optional property useful for sorting purposes.\"\n return self.name.lower()\n\n @lowercase_name.expression\n def lowercase_name(cls):\n \"The SQLAlchemy expression for the property above.\"\n return func.lower(cls.name)\n\n\n@python_2_unicode_compatible\n@generic_repr('id', 'name', 'type', 'query_id')\nclass Visualization(TimestampMixin, BelongsToOrgMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n type = Column(db.String(100))\n query_id = Column(db.Integer, db.ForeignKey(\"queries.id\"))\n # query_rel and not query, because db.Model already has query defined.\n query_rel = db.relationship(Query, back_populates='visualizations')\n name = Column(db.String(255))\n description = Column(db.String(4096), nullable=True)\n options = Column(db.Text)\n\n __tablename__ = 'visualizations'\n\n def __str__(self):\n return u\"%s %s\" % (self.id, self.type)\n\n @classmethod\n def get_by_id_and_org(cls, object_id, org):\n return super(Visualization, cls).get_by_id_and_org(object_id, org, Query)\n\n def copy(self):\n return {\n 'type': self.type,\n 'name': self.name,\n 'description': self.description,\n 'options': self.options\n }\n\n\n@python_2_unicode_compatible\n@generic_repr('id', 'visualization_id', 'dashboard_id')\nclass Widget(TimestampMixin, BelongsToOrgMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n visualization_id = Column(db.Integer, db.ForeignKey('visualizations.id'), nullable=True)\n visualization = db.relationship(Visualization, backref='widgets')\n text = Column(db.Text, nullable=True)\n width = Column(db.Integer)\n options = Column(db.Text)\n dashboard_id = Column(db.Integer, db.ForeignKey(\"dashboards.id\"), index=True)\n\n __tablename__ = 'widgets'\n\n def __str__(self):\n return u\"%s\" % self.id\n\n @classmethod\n def get_by_id_and_org(cls, object_id, org):\n return super(Widget, cls).get_by_id_and_org(object_id, org, Dashboard)\n\n\n@python_2_unicode_compatible\n@generic_repr('id', 'object_type', 'object_id', 'action', 'user_id', 'org_id', 'created_at')\nclass Event(db.Model):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey(\"organizations.id\"))\n org = db.relationship(Organization, back_populates=\"events\")\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"), nullable=True)\n user = db.relationship(User, backref=\"events\")\n action = Column(db.String(255))\n object_type = Column(db.String(255))\n object_id = Column(db.String(255), nullable=True)\n additional_properties = Column(MutableDict.as_mutable(PseudoJSON), nullable=True, default={})\n created_at = Column(db.DateTime(True), default=db.func.now())\n\n __tablename__ = 'events'\n\n def __str__(self):\n return u\"%s,%s,%s,%s\" % (self.user_id, self.action, self.object_type, self.object_id)\n\n def to_dict(self):\n return {\n 'org_id': self.org_id,\n 'user_id': self.user_id,\n 'action': self.action,\n 'object_type': self.object_type,\n 'object_id': self.object_id,\n 'additional_properties': self.additional_properties,\n 'created_at': self.created_at.isoformat()\n }\n\n @classmethod\n def record(cls, event):\n org_id = event.pop('org_id')\n user_id = event.pop('user_id', None)\n action = event.pop('action')\n object_type = event.pop('object_type')\n object_id = event.pop('object_id', None)\n\n created_at = datetime.datetime.utcfromtimestamp(event.pop('timestamp'))\n\n event = cls(org_id=org_id, user_id=user_id, action=action,\n object_type=object_type, object_id=object_id,\n additional_properties=event,\n created_at=created_at)\n db.session.add(event)\n return event\n\n\n@generic_repr('id', 'created_by_id', 'org_id', 'active')\nclass ApiKey(TimestampMixin, GFKBase, db.Model):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey(\"organizations.id\"))\n org = db.relationship(Organization)\n api_key = Column(db.String(255), index=True, default=lambda: generate_token(40))\n active = Column(db.Boolean, default=True)\n # 'object' provided by GFKBase\n created_by_id = Column(db.Integer, db.ForeignKey(\"users.id\"), nullable=True)\n created_by = db.relationship(User)\n\n __tablename__ = 'api_keys'\n __table_args__ = (\n db.Index('api_keys_object_type_object_id', 'object_type', 'object_id'),\n )\n\n @classmethod\n def get_by_api_key(cls, api_key):\n return cls.query.filter(cls.api_key == api_key, cls.active == True).one()\n\n @classmethod\n def get_by_object(cls, object):\n return cls.query.filter(\n cls.object_type == object.__class__.__tablename__,\n cls.object_id == object.id,\n cls.active == True\n ).first()\n\n @classmethod\n def create_for_object(cls, object, user):\n k = cls(org=user.org, object=object, created_by=user)\n db.session.add(k)\n return k\n\n\n@python_2_unicode_compatible\n@generic_repr('id', 'name', 'type', 'user_id', 'org_id', 'created_at')\nclass NotificationDestination(BelongsToOrgMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey(\"organizations.id\"))\n org = db.relationship(Organization, backref=\"notification_destinations\")\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User, backref=\"notification_destinations\")\n name = Column(db.String(255))\n type = Column(db.String(255))\n options = Column(ConfigurationContainer.as_mutable(Configuration))\n created_at = Column(db.DateTime(True), default=db.func.now())\n\n __tablename__ = 'notification_destinations'\n __table_args__ = (\n db.Index(\n 'notification_destinations_org_id_name', 'org_id', 'name', unique=True\n ),\n )\n\n def __str__(self):\n return text_type(self.name)\n\n def to_dict(self, all=False):\n d = {\n 'id': self.id,\n 'name': self.name,\n 'type': self.type,\n 'icon': self.destination.icon()\n }\n\n if all:\n schema = get_configuration_schema_for_destination_type(self.type)\n self.options.set_schema(schema)\n d['options'] = self.options.to_dict(mask_secrets=True)\n\n return d\n\n @property\n def destination(self):\n return get_destination(self.type, self.options)\n\n @classmethod\n def all(cls, org):\n notification_destinations = cls.query.filter(cls.org == org).order_by(cls.id.asc())\n\n return notification_destinations\n\n def notify(self, alert, query, user, new_state, app, host):\n schema = get_configuration_schema_for_destination_type(self.type)\n self.options.set_schema(schema)\n return self.destination.notify(alert, query, user, new_state,\n app, host, self.options)\n\n\n@generic_repr('id', 'user_id', 'destination_id', 'alert_id')\nclass AlertSubscription(TimestampMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User)\n destination_id = Column(db.Integer,\n db.ForeignKey(\"notification_destinations.id\"),\n nullable=True)\n destination = db.relationship(NotificationDestination)\n alert_id = Column(db.Integer, db.ForeignKey(\"alerts.id\"))\n alert = db.relationship(Alert, back_populates=\"subscriptions\")\n\n __tablename__ = 'alert_subscriptions'\n __table_args__ = (\n db.Index(\n 'alert_subscriptions_destination_id_alert_id',\n 'destination_id', 'alert_id', unique=True\n ),\n )\n\n def to_dict(self):\n d = {\n 'id': self.id,\n 'user': self.user.to_dict(),\n 'alert_id': self.alert_id\n }\n\n if self.destination:\n d['destination'] = self.destination.to_dict()\n\n return d\n\n @classmethod\n def all(cls, alert_id):\n return AlertSubscription.query.join(User).filter(AlertSubscription.alert_id == alert_id)\n\n def notify(self, alert, query, user, new_state, app, host):\n if self.destination:\n return self.destination.notify(alert, query, user, new_state,\n app, host)\n else:\n # User email subscription, so create an email destination object\n config = {'addresses': self.user.email}\n schema = get_configuration_schema_for_destination_type('email')\n options = ConfigurationContainer(config, schema)\n destination = get_destination('email', options)\n return destination.notify(alert, query, user, new_state, app, host, options)\n\n\n@generic_repr('id', 'trigger', 'user_id', 'org_id')\nclass QuerySnippet(TimestampMixin, db.Model, BelongsToOrgMixin):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey(\"organizations.id\"))\n org = db.relationship(Organization, backref=\"query_snippets\")\n trigger = Column(db.String(255), unique=True)\n description = Column(db.Text)\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User, backref=\"query_snippets\")\n snippet = Column(db.Text)\n\n __tablename__ = 'query_snippets'\n\n @classmethod\n def all(cls, org):\n return cls.query.filter(cls.org == org)\n\n def to_dict(self):\n d = {\n 'id': self.id,\n 'trigger': self.trigger,\n 'description': self.description,\n 'snippet': self.snippet,\n 'user': self.user.to_dict(),\n 'updated_at': self.updated_at,\n 'created_at': self.created_at\n }\n\n return d\n\n\ndef init_db():\n default_org = Organization(name=\"Default\", slug='default', settings={})\n admin_group = Group(name='admin', permissions=['admin', 'super_admin'], org=default_org, type=Group.BUILTIN_GROUP)\n default_group = Group(name='default', permissions=Group.DEFAULT_PERMISSIONS, org=default_org, type=Group.BUILTIN_GROUP)\n\n db.session.add_all([default_org, admin_group, default_group])\n # XXX remove after fixing User.group_ids\n db.session.commit()\n return default_org, admin_group, default_group\n", "path": "redash/models/__init__.py"}], "after_files": [{"content": "import cStringIO\nimport csv\nimport datetime\nimport calendar\nimport functools\nimport hashlib\nimport itertools\nimport logging\nimport time\nimport pytz\nfrom functools import reduce\n\nimport xlsxwriter\nfrom six import python_2_unicode_compatible, text_type\nfrom sqlalchemy import distinct, or_, and_, UniqueConstraint\nfrom sqlalchemy.dialects import postgresql\nfrom sqlalchemy.event import listens_for\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy.orm import backref, contains_eager, joinedload, subqueryload, load_only\nfrom sqlalchemy.orm.exc import NoResultFound # noqa: F401\nfrom sqlalchemy import func\nfrom sqlalchemy_utils import generic_relationship\nfrom sqlalchemy_utils.types import TSVectorType\nfrom sqlalchemy_utils.models import generic_repr\n\nfrom redash import redis_connection, utils\nfrom redash.destinations import (get_configuration_schema_for_destination_type,\n get_destination)\nfrom redash.metrics import database # noqa: F401\nfrom redash.query_runner import (get_configuration_schema_for_query_runner_type,\n get_query_runner)\nfrom redash.utils import generate_token, json_dumps, json_loads\nfrom redash.utils.configuration import ConfigurationContainer\n\nfrom .base import db, gfk_type, Column, GFKBase, SearchBaseQuery\nfrom .changes import ChangeTrackingMixin, Change # noqa\nfrom .mixins import BelongsToOrgMixin, TimestampMixin\nfrom .organizations import Organization\nfrom .types import Configuration, MutableDict, MutableList, PseudoJSON\nfrom .users import (AccessPermission, AnonymousUser, ApiUser, Group, User) # noqa\n\nlogger = logging.getLogger(__name__)\n\n\nclass ScheduledQueriesExecutions(object):\n KEY_NAME = 'sq:executed_at'\n\n def __init__(self):\n self.executions = {}\n\n def refresh(self):\n self.executions = redis_connection.hgetall(self.KEY_NAME)\n\n def update(self, query_id):\n redis_connection.hmset(self.KEY_NAME, {\n query_id: time.time()\n })\n\n def get(self, query_id):\n timestamp = self.executions.get(str(query_id))\n if timestamp:\n timestamp = utils.dt_from_timestamp(timestamp)\n\n return timestamp\n\n\nscheduled_queries_executions = ScheduledQueriesExecutions()\n\n\n@python_2_unicode_compatible\n@generic_repr('id', 'name', 'type', 'org_id', 'created_at')\nclass DataSource(BelongsToOrgMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey('organizations.id'))\n org = db.relationship(Organization, backref=\"data_sources\")\n\n name = Column(db.String(255))\n type = Column(db.String(255))\n options = Column(ConfigurationContainer.as_mutable(Configuration))\n queue_name = Column(db.String(255), default=\"queries\")\n scheduled_queue_name = Column(db.String(255), default=\"scheduled_queries\")\n created_at = Column(db.DateTime(True), default=db.func.now())\n\n data_source_groups = db.relationship(\"DataSourceGroup\", back_populates=\"data_source\",\n cascade=\"all\")\n __tablename__ = 'data_sources'\n __table_args__ = (db.Index('data_sources_org_id_name', 'org_id', 'name'),)\n\n def __eq__(self, other):\n return self.id == other.id\n\n def to_dict(self, all=False, with_permissions_for=None):\n d = {\n 'id': self.id,\n 'name': self.name,\n 'type': self.type,\n 'syntax': self.query_runner.syntax,\n 'paused': self.paused,\n 'pause_reason': self.pause_reason\n }\n\n if all:\n schema = get_configuration_schema_for_query_runner_type(self.type)\n self.options.set_schema(schema)\n d['options'] = self.options.to_dict(mask_secrets=True)\n d['queue_name'] = self.queue_name\n d['scheduled_queue_name'] = self.scheduled_queue_name\n d['groups'] = self.groups\n\n if with_permissions_for is not None:\n d['view_only'] = db.session.query(DataSourceGroup.view_only).filter(\n DataSourceGroup.group == with_permissions_for,\n DataSourceGroup.data_source == self).one()[0]\n\n return d\n\n def __str__(self):\n return text_type(self.name)\n\n @classmethod\n def create_with_group(cls, *args, **kwargs):\n data_source = cls(*args, **kwargs)\n data_source_group = DataSourceGroup(\n data_source=data_source,\n group=data_source.org.default_group)\n db.session.add_all([data_source, data_source_group])\n return data_source\n\n @classmethod\n def all(cls, org, group_ids=None):\n data_sources = cls.query.filter(cls.org == org).order_by(cls.id.asc())\n\n if group_ids:\n data_sources = data_sources.join(DataSourceGroup).filter(\n DataSourceGroup.group_id.in_(group_ids))\n\n return data_sources.distinct()\n\n @classmethod\n def get_by_id(cls, _id):\n return cls.query.filter(cls.id == _id).one()\n\n def delete(self):\n Query.query.filter(Query.data_source == self).update(dict(data_source_id=None, latest_query_data_id=None))\n QueryResult.query.filter(QueryResult.data_source == self).delete()\n res = db.session.delete(self)\n db.session.commit()\n return res\n\n def get_schema(self, refresh=False):\n key = \"data_source:schema:{}\".format(self.id)\n\n cache = None\n if not refresh:\n cache = redis_connection.get(key)\n\n if cache is None:\n query_runner = self.query_runner\n schema = sorted(query_runner.get_schema(get_stats=refresh), key=lambda t: t['name'])\n\n redis_connection.set(key, json_dumps(schema))\n else:\n schema = json_loads(cache)\n\n return schema\n\n def _pause_key(self):\n return 'ds:{}:pause'.format(self.id)\n\n @property\n def paused(self):\n return redis_connection.exists(self._pause_key())\n\n @property\n def pause_reason(self):\n return redis_connection.get(self._pause_key())\n\n def pause(self, reason=None):\n redis_connection.set(self._pause_key(), reason or '')\n\n def resume(self):\n redis_connection.delete(self._pause_key())\n\n def add_group(self, group, view_only=False):\n dsg = DataSourceGroup(group=group, data_source=self, view_only=view_only)\n db.session.add(dsg)\n return dsg\n\n def remove_group(self, group):\n DataSourceGroup.query.filter(\n DataSourceGroup.group == group,\n DataSourceGroup.data_source == self\n ).delete()\n db.session.commit()\n\n def update_group_permission(self, group, view_only):\n dsg = DataSourceGroup.query.filter(\n DataSourceGroup.group == group,\n DataSourceGroup.data_source == self).one()\n dsg.view_only = view_only\n db.session.add(dsg)\n return dsg\n\n @property\n def query_runner(self):\n return get_query_runner(self.type, self.options)\n\n @classmethod\n def get_by_name(cls, name):\n return cls.query.filter(cls.name == name).one()\n\n # XXX examine call sites to see if a regular SQLA collection would work better\n @property\n def groups(self):\n groups = DataSourceGroup.query.filter(\n DataSourceGroup.data_source == self\n )\n return dict(map(lambda g: (g.group_id, g.view_only), groups))\n\n\n@generic_repr('id', 'data_source_id', 'group_id', 'view_only')\nclass DataSourceGroup(db.Model):\n # XXX drop id, use datasource/group as PK\n id = Column(db.Integer, primary_key=True)\n data_source_id = Column(db.Integer, db.ForeignKey(\"data_sources.id\"))\n data_source = db.relationship(DataSource, back_populates=\"data_source_groups\")\n group_id = Column(db.Integer, db.ForeignKey(\"groups.id\"))\n group = db.relationship(Group, back_populates=\"data_sources\")\n view_only = Column(db.Boolean, default=False)\n\n __tablename__ = \"data_source_groups\"\n\n\n@python_2_unicode_compatible\n@generic_repr('id', 'org_id', 'data_source_id', 'query_hash', 'runtime', 'retrieved_at')\nclass QueryResult(db.Model, BelongsToOrgMixin):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey('organizations.id'))\n org = db.relationship(Organization)\n data_source_id = Column(db.Integer, db.ForeignKey(\"data_sources.id\"))\n data_source = db.relationship(DataSource, backref=backref('query_results'))\n query_hash = Column(db.String(32), index=True)\n query_text = Column('query', db.Text)\n data = Column(db.Text)\n runtime = Column(postgresql.DOUBLE_PRECISION)\n retrieved_at = Column(db.DateTime(True))\n\n __tablename__ = 'query_results'\n\n def __str__(self):\n return u\"%d | %s | %s\" % (self.id, self.query_hash, self.retrieved_at)\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'query_hash': self.query_hash,\n 'query': self.query_text,\n 'data': json_loads(self.data),\n 'data_source_id': self.data_source_id,\n 'runtime': self.runtime,\n 'retrieved_at': self.retrieved_at\n }\n\n @classmethod\n def unused(cls, days=7):\n age_threshold = datetime.datetime.now() - datetime.timedelta(days=days)\n return (\n cls.query.filter(\n Query.id.is_(None),\n cls.retrieved_at < age_threshold\n )\n .outerjoin(Query)\n ).options(load_only('id'))\n\n @classmethod\n def get_latest(cls, data_source, query, max_age=0):\n query_hash = utils.gen_query_hash(query)\n\n if max_age == -1:\n query = cls.query.filter(\n cls.query_hash == query_hash,\n cls.data_source == data_source\n )\n else:\n query = cls.query.filter(\n cls.query_hash == query_hash,\n cls.data_source == data_source,\n (\n db.func.timezone('utc', cls.retrieved_at) +\n datetime.timedelta(seconds=max_age) >=\n db.func.timezone('utc', db.func.now())\n )\n )\n\n return query.order_by(cls.retrieved_at.desc()).first()\n\n @classmethod\n def store_result(cls, org, data_source, query_hash, query, data, run_time, retrieved_at):\n query_result = cls(org_id=org,\n query_hash=query_hash,\n query_text=query,\n runtime=run_time,\n data_source=data_source,\n retrieved_at=retrieved_at,\n data=data)\n db.session.add(query_result)\n logging.info(\"Inserted query (%s) data; id=%s\", query_hash, query_result.id)\n # TODO: Investigate how big an impact this select-before-update makes.\n queries = Query.query.filter(\n Query.query_hash == query_hash,\n Query.data_source == data_source\n )\n for q in queries:\n q.latest_query_data = query_result\n # don't auto-update the updated_at timestamp\n q.skip_updated_at = True\n db.session.add(q)\n query_ids = [q.id for q in queries]\n logging.info(\"Updated %s queries with result (%s).\", len(query_ids), query_hash)\n\n return query_result, query_ids\n\n @property\n def groups(self):\n return self.data_source.groups\n\n def make_csv_content(self):\n s = cStringIO.StringIO()\n\n query_data = json_loads(self.data)\n writer = csv.DictWriter(s, extrasaction=\"ignore\", fieldnames=[col['name'] for col in query_data['columns']])\n writer.writer = utils.UnicodeWriter(s)\n writer.writeheader()\n for row in query_data['rows']:\n writer.writerow(row)\n\n return s.getvalue()\n\n def make_excel_content(self):\n s = cStringIO.StringIO()\n\n query_data = json_loads(self.data)\n book = xlsxwriter.Workbook(s, {'constant_memory': True})\n sheet = book.add_worksheet(\"result\")\n\n column_names = []\n for (c, col) in enumerate(query_data['columns']):\n sheet.write(0, c, col['name'])\n column_names.append(col['name'])\n\n for (r, row) in enumerate(query_data['rows']):\n for (c, name) in enumerate(column_names):\n v = row.get(name)\n if isinstance(v, list) or isinstance(v, dict):\n v = str(v).encode('utf-8')\n sheet.write(r + 1, c, v)\n\n book.close()\n\n return s.getvalue()\n\n\ndef should_schedule_next(previous_iteration, now, interval, time=None, day_of_week=None, failures=0):\n # if time exists then interval > 23 hours (82800s)\n # if day_of_week exists then interval > 6 days (518400s)\n if (time is None):\n ttl = int(interval)\n next_iteration = previous_iteration + datetime.timedelta(seconds=ttl)\n else:\n hour, minute = time.split(':')\n hour, minute = int(hour), int(minute)\n\n # The following logic is needed for cases like the following:\n # - The query scheduled to run at 23:59.\n # - The scheduler wakes up at 00:01.\n # - Using naive implementation of comparing timestamps, it will skip the execution.\n normalized_previous_iteration = previous_iteration.replace(hour=hour, minute=minute)\n\n if normalized_previous_iteration > previous_iteration:\n previous_iteration = normalized_previous_iteration - datetime.timedelta(days=1)\n\n days_delay = int(interval) / 60 / 60 / 24\n\n days_to_add = 0\n if (day_of_week is not None):\n days_to_add = list(calendar.day_name).index(day_of_week) - normalized_previous_iteration.weekday()\n\n next_iteration = (previous_iteration + datetime.timedelta(days=days_delay) +\n datetime.timedelta(days=days_to_add)).replace(hour=hour, minute=minute)\n if failures:\n next_iteration += datetime.timedelta(minutes=2**failures)\n return now > next_iteration\n\n\n@python_2_unicode_compatible\n@gfk_type\n@generic_repr('id', 'name', 'query_hash', 'version', 'user_id', 'org_id',\n 'data_source_id', 'query_hash', 'last_modified_by_id',\n 'is_archived', 'is_draft', 'schedule', 'schedule_failures')\nclass Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n version = Column(db.Integer, default=1)\n org_id = Column(db.Integer, db.ForeignKey('organizations.id'))\n org = db.relationship(Organization, backref=\"queries\")\n data_source_id = Column(db.Integer, db.ForeignKey(\"data_sources.id\"), nullable=True)\n data_source = db.relationship(DataSource, backref='queries')\n latest_query_data_id = Column(db.Integer, db.ForeignKey(\"query_results.id\"), nullable=True)\n latest_query_data = db.relationship(QueryResult)\n name = Column(db.String(255))\n description = Column(db.String(4096), nullable=True)\n query_text = Column(\"query\", db.Text)\n query_hash = Column(db.String(32))\n api_key = Column(db.String(40), default=lambda: generate_token(40))\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User, foreign_keys=[user_id])\n last_modified_by_id = Column(db.Integer, db.ForeignKey('users.id'), nullable=True)\n last_modified_by = db.relationship(User, backref=\"modified_queries\",\n foreign_keys=[last_modified_by_id])\n is_archived = Column(db.Boolean, default=False, index=True)\n is_draft = Column(db.Boolean, default=True, index=True)\n schedule = Column(MutableDict.as_mutable(PseudoJSON), nullable=True)\n schedule_failures = Column(db.Integer, default=0)\n visualizations = db.relationship(\"Visualization\", cascade=\"all, delete-orphan\")\n options = Column(MutableDict.as_mutable(PseudoJSON), default={})\n search_vector = Column(TSVectorType('id', 'name', 'description', 'query',\n weights={'name': 'A',\n 'id': 'B',\n 'description': 'C',\n 'query': 'D'}),\n nullable=True)\n tags = Column('tags', MutableList.as_mutable(postgresql.ARRAY(db.Unicode)), nullable=True)\n\n query_class = SearchBaseQuery\n __tablename__ = 'queries'\n __mapper_args__ = {\n \"version_id_col\": version,\n 'version_id_generator': False\n }\n\n def __str__(self):\n return text_type(self.id)\n\n def archive(self, user=None):\n db.session.add(self)\n self.is_archived = True\n self.schedule = None\n\n for vis in self.visualizations:\n for w in vis.widgets:\n db.session.delete(w)\n\n for a in self.alerts:\n db.session.delete(a)\n\n if user:\n self.record_changes(user)\n\n @classmethod\n def create(cls, **kwargs):\n query = cls(**kwargs)\n db.session.add(Visualization(query_rel=query,\n name=\"Table\",\n description='',\n type=\"TABLE\",\n options=\"{}\"))\n return query\n\n @classmethod\n def all_queries(cls, group_ids, user_id=None, drafts=False):\n query_ids = (\n db.session\n .query(distinct(cls.id))\n .join(\n DataSourceGroup,\n Query.data_source_id == DataSourceGroup.data_source_id\n )\n .filter(Query.is_archived == False)\n .filter(DataSourceGroup.group_id.in_(group_ids))\n )\n q = (\n cls\n .query\n .options(\n joinedload(Query.user),\n joinedload(\n Query.latest_query_data\n ).load_only(\n 'runtime',\n 'retrieved_at',\n )\n )\n .filter(cls.id.in_(query_ids))\n # Adding outer joins to be able to order by relationship\n .outerjoin(User, User.id == Query.user_id)\n .outerjoin(\n QueryResult,\n QueryResult.id == Query.latest_query_data_id\n )\n .options(\n contains_eager(Query.user),\n contains_eager(Query.latest_query_data),\n )\n .order_by(Query.created_at.desc())\n )\n\n if not drafts:\n q = q.filter(\n or_(\n Query.is_draft == False,\n Query.user_id == user_id\n )\n )\n return q\n\n @classmethod\n def favorites(cls, user, base_query=None):\n if base_query is None:\n base_query = cls.all_queries(user.group_ids, user.id, drafts=True)\n return base_query.join((\n Favorite,\n and_(\n Favorite.object_type == u'Query',\n Favorite.object_id == Query.id\n )\n )).filter(Favorite.user_id == user.id)\n\n @classmethod\n def all_tags(cls, user, include_drafts=False):\n queries = cls.all_queries(\n group_ids=user.group_ids,\n user_id=user.id,\n drafts=include_drafts,\n )\n\n tag_column = func.unnest(cls.tags).label('tag')\n usage_count = func.count(1).label('usage_count')\n\n query = (\n db.session\n .query(tag_column, usage_count)\n .group_by(tag_column)\n .filter(Query.id.in_(queries.options(load_only('id'))))\n .order_by(usage_count.desc())\n )\n return query\n\n @classmethod\n def by_user(cls, user):\n return cls.all_queries(user.group_ids, user.id).filter(Query.user == user)\n\n @classmethod\n def outdated_queries(cls):\n queries = (Query.query\n .options(joinedload(Query.latest_query_data).load_only('retrieved_at'))\n .filter(Query.schedule.isnot(None))\n .order_by(Query.id))\n \n now = utils.utcnow()\n outdated_queries = {}\n scheduled_queries_executions.refresh()\n\n for query in queries:\n schedule_until = pytz.utc.localize(datetime.datetime.strptime(\n query.schedule['until'], '%Y-%m-%d')) if query.schedule['until'] else None\n if (query.schedule['interval'] == None or (\n schedule_until != None and (\n schedule_until <= now))):\n continue\n\n if query.latest_query_data:\n retrieved_at = query.latest_query_data.retrieved_at\n else:\n retrieved_at = now\n\n retrieved_at = scheduled_queries_executions.get(query.id) or retrieved_at\n\n if should_schedule_next(retrieved_at, now, query.schedule['interval'], query.schedule['time'],\n query.schedule['day_of_week'], query.schedule_failures):\n key = \"{}:{}\".format(query.query_hash, query.data_source_id)\n outdated_queries[key] = query\n\n return outdated_queries.values()\n\n @classmethod\n def search(cls, term, group_ids, user_id=None, include_drafts=False, limit=None):\n all_queries = cls.all_queries(group_ids, user_id=user_id, drafts=include_drafts)\n # sort the result using the weight as defined in the search vector column\n return all_queries.search(term, sort=True).limit(limit)\n\n @classmethod\n def search_by_user(cls, term, user, limit=None):\n return cls.by_user(user).search(term, sort=True).limit(limit)\n\n @classmethod\n def recent(cls, group_ids, user_id=None, limit=20):\n query = (cls.query\n .filter(Event.created_at > (db.func.current_date() - 7))\n .join(Event, Query.id == Event.object_id.cast(db.Integer))\n .join(DataSourceGroup, Query.data_source_id == DataSourceGroup.data_source_id)\n .filter(\n Event.action.in_(['edit', 'execute', 'edit_name',\n 'edit_description', 'view_source']),\n Event.object_id != None,\n Event.object_type == 'query',\n DataSourceGroup.group_id.in_(group_ids),\n or_(Query.is_draft == False, Query.user_id == user_id),\n Query.is_archived == False)\n .group_by(Event.object_id, Query.id)\n .order_by(db.desc(db.func.count(0))))\n\n if user_id:\n query = query.filter(Event.user_id == user_id)\n\n query = query.limit(limit)\n\n return query\n\n @classmethod\n def get_by_id(cls, _id):\n return cls.query.filter(cls.id == _id).one()\n\n def fork(self, user):\n forked_list = ['org', 'data_source', 'latest_query_data', 'description',\n 'query_text', 'query_hash', 'options']\n kwargs = {a: getattr(self, a) for a in forked_list}\n forked_query = Query.create(name=u'Copy of (#{}) {}'.format(self.id, self.name),\n user=user, **kwargs)\n\n for v in self.visualizations:\n if v.type == 'TABLE':\n continue\n forked_v = v.copy()\n forked_v['query_rel'] = forked_query\n forked_query.visualizations.append(Visualization(**forked_v))\n db.session.add(forked_query)\n return forked_query\n\n @property\n def runtime(self):\n return self.latest_query_data.runtime\n\n @property\n def retrieved_at(self):\n return self.latest_query_data.retrieved_at\n\n @property\n def groups(self):\n if self.data_source is None:\n return {}\n\n return self.data_source.groups\n\n @hybrid_property\n def lowercase_name(self):\n \"Optional property useful for sorting purposes.\"\n return self.name.lower()\n\n @lowercase_name.expression\n def lowercase_name(cls):\n \"The SQLAlchemy expression for the property above.\"\n return func.lower(cls.name)\n\n\n@listens_for(Query.query_text, 'set')\ndef gen_query_hash(target, val, oldval, initiator):\n target.query_hash = utils.gen_query_hash(val)\n target.schedule_failures = 0\n\n\n@listens_for(Query.user_id, 'set')\ndef query_last_modified_by(target, val, oldval, initiator):\n target.last_modified_by_id = val\n\n\n@generic_repr('id', 'object_type', 'object_id', 'user_id', 'org_id')\nclass Favorite(TimestampMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey(\"organizations.id\"))\n\n object_type = Column(db.Unicode(255))\n object_id = Column(db.Integer)\n object = generic_relationship(object_type, object_id)\n\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User, backref='favorites')\n\n __tablename__ = \"favorites\"\n __table_args__ = (\n UniqueConstraint(\"object_type\", \"object_id\", \"user_id\", name=\"unique_favorite\"),\n )\n\n @classmethod\n def is_favorite(cls, user, object):\n return cls.query.filter(cls.object == object, cls.user_id == user).count() > 0\n\n @classmethod\n def are_favorites(cls, user, objects):\n objects = list(objects)\n if not objects:\n return []\n\n object_type = text_type(objects[0].__class__.__name__)\n return map(lambda fav: fav.object_id, cls.query.filter(cls.object_id.in_(map(lambda o: o.id, objects)), cls.object_type == object_type, cls.user_id == user))\n\n\n@generic_repr('id', 'name', 'query_id', 'user_id', 'state', 'last_triggered_at', 'rearm')\nclass Alert(TimestampMixin, BelongsToOrgMixin, db.Model):\n UNKNOWN_STATE = 'unknown'\n OK_STATE = 'ok'\n TRIGGERED_STATE = 'triggered'\n\n id = Column(db.Integer, primary_key=True)\n name = Column(db.String(255))\n query_id = Column(db.Integer, db.ForeignKey(\"queries.id\"))\n query_rel = db.relationship(Query, backref=backref('alerts', cascade=\"all\"))\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User, backref='alerts')\n options = Column(MutableDict.as_mutable(PseudoJSON))\n state = Column(db.String(255), default=UNKNOWN_STATE)\n subscriptions = db.relationship(\"AlertSubscription\", cascade=\"all, delete-orphan\")\n last_triggered_at = Column(db.DateTime(True), nullable=True)\n rearm = Column(db.Integer, nullable=True)\n\n __tablename__ = 'alerts'\n\n @classmethod\n def all(cls, group_ids):\n return (\n cls.query\n .options(\n joinedload(Alert.user),\n joinedload(Alert.query_rel),\n )\n .join(Query)\n .join(\n DataSourceGroup,\n DataSourceGroup.data_source_id == Query.data_source_id\n )\n .filter(DataSourceGroup.group_id.in_(group_ids))\n )\n\n @classmethod\n def get_by_id_and_org(cls, object_id, org):\n return super(Alert, cls).get_by_id_and_org(object_id, org, Query)\n\n def evaluate(self):\n data = json_loads(self.query_rel.latest_query_data.data)\n\n if data['rows'] and self.options['column'] in data['rows'][0]:\n value = data['rows'][0][self.options['column']]\n op = self.options['op']\n\n if op == 'greater than' and value > self.options['value']:\n new_state = self.TRIGGERED_STATE\n elif op == 'less than' and value < self.options['value']:\n new_state = self.TRIGGERED_STATE\n elif op == 'equals' and value == self.options['value']:\n new_state = self.TRIGGERED_STATE\n else:\n new_state = self.OK_STATE\n else:\n new_state = self.UNKNOWN_STATE\n\n return new_state\n\n def subscribers(self):\n return User.query.join(AlertSubscription).filter(AlertSubscription.alert == self)\n\n @property\n def groups(self):\n return self.query_rel.groups\n\n\ndef generate_slug(ctx):\n slug = utils.slugify(ctx.current_parameters['name'])\n tries = 1\n while Dashboard.query.filter(Dashboard.slug == slug).first() is not None:\n slug = utils.slugify(ctx.current_parameters['name']) + \"_\" + str(tries)\n tries += 1\n return slug\n\n\n@python_2_unicode_compatible\n@gfk_type\n@generic_repr('id', 'name', 'slug', 'user_id', 'org_id', 'version', 'is_archived', 'is_draft')\nclass Dashboard(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n version = Column(db.Integer)\n org_id = Column(db.Integer, db.ForeignKey(\"organizations.id\"))\n org = db.relationship(Organization, backref=\"dashboards\")\n slug = Column(db.String(140), index=True, default=generate_slug)\n name = Column(db.String(100))\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User)\n # layout is no longer used, but kept so we know how to render old dashboards.\n layout = Column(db.Text)\n dashboard_filters_enabled = Column(db.Boolean, default=False)\n is_archived = Column(db.Boolean, default=False, index=True)\n is_draft = Column(db.Boolean, default=True, index=True)\n widgets = db.relationship('Widget', backref='dashboard', lazy='dynamic')\n tags = Column('tags', MutableList.as_mutable(postgresql.ARRAY(db.Unicode)), nullable=True)\n\n __tablename__ = 'dashboards'\n __mapper_args__ = {\n \"version_id_col\": version\n }\n\n def __str__(self):\n return u\"%s=%s\" % (self.id, self.name)\n\n @classmethod\n def all(cls, org, group_ids, user_id):\n query = (\n Dashboard.query\n .options(\n subqueryload(Dashboard.user).load_only('_profile_image_url', 'name'),\n )\n .outerjoin(Widget)\n .outerjoin(Visualization)\n .outerjoin(Query)\n .outerjoin(DataSourceGroup, Query.data_source_id == DataSourceGroup.data_source_id)\n .filter(\n Dashboard.is_archived == False,\n (DataSourceGroup.group_id.in_(group_ids) |\n (Dashboard.user_id == user_id) |\n ((Widget.dashboard != None) & (Widget.visualization == None))),\n Dashboard.org == org)\n .distinct())\n\n query = query.filter(or_(Dashboard.user_id == user_id, Dashboard.is_draft == False))\n\n return query\n\n @classmethod\n def search(cls, org, groups_ids, user_id, search_term):\n # TODO: switch to FTS\n return cls.all(org, groups_ids, user_id).filter(cls.name.ilike(u'%{}%'.format(search_term)))\n\n @classmethod\n def all_tags(cls, org, user):\n dashboards = cls.all(org, user.group_ids, user.id)\n\n tag_column = func.unnest(cls.tags).label('tag')\n usage_count = func.count(1).label('usage_count')\n\n query = (\n db.session\n .query(tag_column, usage_count)\n .group_by(tag_column)\n .filter(Dashboard.id.in_(dashboards.options(load_only('id'))))\n .order_by(usage_count.desc())\n )\n return query\n\n @classmethod\n def favorites(cls, user, base_query=None):\n if base_query is None:\n base_query = cls.all(user.org, user.group_ids, user.id)\n return base_query.join(\n (\n Favorite,\n and_(\n Favorite.object_type == u'Dashboard',\n Favorite.object_id == Dashboard.id\n )\n )\n ).filter(Favorite.user_id == user.id)\n\n @classmethod\n def get_by_slug_and_org(cls, slug, org):\n return cls.query.filter(cls.slug == slug, cls.org == org).one()\n\n @hybrid_property\n def lowercase_name(self):\n \"Optional property useful for sorting purposes.\"\n return self.name.lower()\n\n @lowercase_name.expression\n def lowercase_name(cls):\n \"The SQLAlchemy expression for the property above.\"\n return func.lower(cls.name)\n\n\n@python_2_unicode_compatible\n@generic_repr('id', 'name', 'type', 'query_id')\nclass Visualization(TimestampMixin, BelongsToOrgMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n type = Column(db.String(100))\n query_id = Column(db.Integer, db.ForeignKey(\"queries.id\"))\n # query_rel and not query, because db.Model already has query defined.\n query_rel = db.relationship(Query, back_populates='visualizations')\n name = Column(db.String(255))\n description = Column(db.String(4096), nullable=True)\n options = Column(db.Text)\n\n __tablename__ = 'visualizations'\n\n def __str__(self):\n return u\"%s %s\" % (self.id, self.type)\n\n @classmethod\n def get_by_id_and_org(cls, object_id, org):\n return super(Visualization, cls).get_by_id_and_org(object_id, org, Query)\n\n def copy(self):\n return {\n 'type': self.type,\n 'name': self.name,\n 'description': self.description,\n 'options': self.options\n }\n\n\n@python_2_unicode_compatible\n@generic_repr('id', 'visualization_id', 'dashboard_id')\nclass Widget(TimestampMixin, BelongsToOrgMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n visualization_id = Column(db.Integer, db.ForeignKey('visualizations.id'), nullable=True)\n visualization = db.relationship(Visualization, backref='widgets')\n text = Column(db.Text, nullable=True)\n width = Column(db.Integer)\n options = Column(db.Text)\n dashboard_id = Column(db.Integer, db.ForeignKey(\"dashboards.id\"), index=True)\n\n __tablename__ = 'widgets'\n\n def __str__(self):\n return u\"%s\" % self.id\n\n @classmethod\n def get_by_id_and_org(cls, object_id, org):\n return super(Widget, cls).get_by_id_and_org(object_id, org, Dashboard)\n\n\n@python_2_unicode_compatible\n@generic_repr('id', 'object_type', 'object_id', 'action', 'user_id', 'org_id', 'created_at')\nclass Event(db.Model):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey(\"organizations.id\"))\n org = db.relationship(Organization, back_populates=\"events\")\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"), nullable=True)\n user = db.relationship(User, backref=\"events\")\n action = Column(db.String(255))\n object_type = Column(db.String(255))\n object_id = Column(db.String(255), nullable=True)\n additional_properties = Column(MutableDict.as_mutable(PseudoJSON), nullable=True, default={})\n created_at = Column(db.DateTime(True), default=db.func.now())\n\n __tablename__ = 'events'\n\n def __str__(self):\n return u\"%s,%s,%s,%s\" % (self.user_id, self.action, self.object_type, self.object_id)\n\n def to_dict(self):\n return {\n 'org_id': self.org_id,\n 'user_id': self.user_id,\n 'action': self.action,\n 'object_type': self.object_type,\n 'object_id': self.object_id,\n 'additional_properties': self.additional_properties,\n 'created_at': self.created_at.isoformat()\n }\n\n @classmethod\n def record(cls, event):\n org_id = event.pop('org_id')\n user_id = event.pop('user_id', None)\n action = event.pop('action')\n object_type = event.pop('object_type')\n object_id = event.pop('object_id', None)\n\n created_at = datetime.datetime.utcfromtimestamp(event.pop('timestamp'))\n\n event = cls(org_id=org_id, user_id=user_id, action=action,\n object_type=object_type, object_id=object_id,\n additional_properties=event,\n created_at=created_at)\n db.session.add(event)\n return event\n\n\n@generic_repr('id', 'created_by_id', 'org_id', 'active')\nclass ApiKey(TimestampMixin, GFKBase, db.Model):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey(\"organizations.id\"))\n org = db.relationship(Organization)\n api_key = Column(db.String(255), index=True, default=lambda: generate_token(40))\n active = Column(db.Boolean, default=True)\n # 'object' provided by GFKBase\n created_by_id = Column(db.Integer, db.ForeignKey(\"users.id\"), nullable=True)\n created_by = db.relationship(User)\n\n __tablename__ = 'api_keys'\n __table_args__ = (\n db.Index('api_keys_object_type_object_id', 'object_type', 'object_id'),\n )\n\n @classmethod\n def get_by_api_key(cls, api_key):\n return cls.query.filter(cls.api_key == api_key, cls.active == True).one()\n\n @classmethod\n def get_by_object(cls, object):\n return cls.query.filter(\n cls.object_type == object.__class__.__tablename__,\n cls.object_id == object.id,\n cls.active == True\n ).first()\n\n @classmethod\n def create_for_object(cls, object, user):\n k = cls(org=user.org, object=object, created_by=user)\n db.session.add(k)\n return k\n\n\n@python_2_unicode_compatible\n@generic_repr('id', 'name', 'type', 'user_id', 'org_id', 'created_at')\nclass NotificationDestination(BelongsToOrgMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey(\"organizations.id\"))\n org = db.relationship(Organization, backref=\"notification_destinations\")\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User, backref=\"notification_destinations\")\n name = Column(db.String(255))\n type = Column(db.String(255))\n options = Column(ConfigurationContainer.as_mutable(Configuration))\n created_at = Column(db.DateTime(True), default=db.func.now())\n\n __tablename__ = 'notification_destinations'\n __table_args__ = (\n db.Index(\n 'notification_destinations_org_id_name', 'org_id', 'name', unique=True\n ),\n )\n\n def __str__(self):\n return text_type(self.name)\n\n def to_dict(self, all=False):\n d = {\n 'id': self.id,\n 'name': self.name,\n 'type': self.type,\n 'icon': self.destination.icon()\n }\n\n if all:\n schema = get_configuration_schema_for_destination_type(self.type)\n self.options.set_schema(schema)\n d['options'] = self.options.to_dict(mask_secrets=True)\n\n return d\n\n @property\n def destination(self):\n return get_destination(self.type, self.options)\n\n @classmethod\n def all(cls, org):\n notification_destinations = cls.query.filter(cls.org == org).order_by(cls.id.asc())\n\n return notification_destinations\n\n def notify(self, alert, query, user, new_state, app, host):\n schema = get_configuration_schema_for_destination_type(self.type)\n self.options.set_schema(schema)\n return self.destination.notify(alert, query, user, new_state,\n app, host, self.options)\n\n\n@generic_repr('id', 'user_id', 'destination_id', 'alert_id')\nclass AlertSubscription(TimestampMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User)\n destination_id = Column(db.Integer,\n db.ForeignKey(\"notification_destinations.id\"),\n nullable=True)\n destination = db.relationship(NotificationDestination)\n alert_id = Column(db.Integer, db.ForeignKey(\"alerts.id\"))\n alert = db.relationship(Alert, back_populates=\"subscriptions\")\n\n __tablename__ = 'alert_subscriptions'\n __table_args__ = (\n db.Index(\n 'alert_subscriptions_destination_id_alert_id',\n 'destination_id', 'alert_id', unique=True\n ),\n )\n\n def to_dict(self):\n d = {\n 'id': self.id,\n 'user': self.user.to_dict(),\n 'alert_id': self.alert_id\n }\n\n if self.destination:\n d['destination'] = self.destination.to_dict()\n\n return d\n\n @classmethod\n def all(cls, alert_id):\n return AlertSubscription.query.join(User).filter(AlertSubscription.alert_id == alert_id)\n\n def notify(self, alert, query, user, new_state, app, host):\n if self.destination:\n return self.destination.notify(alert, query, user, new_state,\n app, host)\n else:\n # User email subscription, so create an email destination object\n config = {'addresses': self.user.email}\n schema = get_configuration_schema_for_destination_type('email')\n options = ConfigurationContainer(config, schema)\n destination = get_destination('email', options)\n return destination.notify(alert, query, user, new_state, app, host, options)\n\n\n@generic_repr('id', 'trigger', 'user_id', 'org_id')\nclass QuerySnippet(TimestampMixin, db.Model, BelongsToOrgMixin):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey(\"organizations.id\"))\n org = db.relationship(Organization, backref=\"query_snippets\")\n trigger = Column(db.String(255), unique=True)\n description = Column(db.Text)\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User, backref=\"query_snippets\")\n snippet = Column(db.Text)\n\n __tablename__ = 'query_snippets'\n\n @classmethod\n def all(cls, org):\n return cls.query.filter(cls.org == org)\n\n def to_dict(self):\n d = {\n 'id': self.id,\n 'trigger': self.trigger,\n 'description': self.description,\n 'snippet': self.snippet,\n 'user': self.user.to_dict(),\n 'updated_at': self.updated_at,\n 'created_at': self.created_at\n }\n\n return d\n\n\ndef init_db():\n default_org = Organization(name=\"Default\", slug='default', settings={})\n admin_group = Group(name='admin', permissions=['admin', 'super_admin'], org=default_org, type=Group.BUILTIN_GROUP)\n default_group = Group(name='default', permissions=Group.DEFAULT_PERMISSIONS, org=default_org, type=Group.BUILTIN_GROUP)\n\n db.session.add_all([default_org, admin_group, default_group])\n # XXX remove after fixing User.group_ids\n db.session.commit()\n return default_org, admin_group, default_group\n", "path": "redash/models/__init__.py"}]} |
gh_patches_debug_1555 | rasdani/github-patches | git_diff | jschneier__django-storages-762 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dropbox base path / should be an empty string
Using `django-storages` `1.7.2`, a base path (`'/`) is provided, the Dropbox API is rejecting it:
```
File "/usr/local/lib/python3.7/site-packages/dbbackup/storage.py", line 78, in list_directory
return self.storage.listdir(path)[1]
File "/usr/local/lib/python3.7/site-packages/storages/backends/dropbox.py", line 99, in listdir
metadata = self.client.files_list_folder(full_path)
File "/usr/local/lib/python3.7/site-packages/dropbox/base.py", line 1744, in files_list_folder
None,
File "/usr/local/lib/python3.7/site-packages/dropbox/dropbox.py", line 274, in request
timeout=timeout)
File "/usr/local/lib/python3.7/site-packages/dropbox/dropbox.py", line 365, in request_json_string_with_retry
timeout=timeout)
File "/usr/local/lib/python3.7/site-packages/dropbox/dropbox.py", line 456, in request_json_string
raise BadInputError(request_id, r.text)
dropbox.exceptions.BadInputError: BadInputError('XXXXXXXXXXXXXXXXXXXX', 'Error in call to API function "files/list_folder": request body: path: Specify the root folder as an empty string rather than as "/".')
```
As the error says it should be an empty string rather than as '/'.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `storages/backends/dropbox.py`
Content:
```
1 # Dropbox storage class for Django pluggable storage system.
2 # Author: Anthony Monthe <[email protected]>
3 # License: BSD
4 #
5 # Usage:
6 #
7 # Add below to settings.py:
8 # DROPBOX_OAUTH2_TOKEN = 'YourOauthToken'
9 # DROPBOX_ROOT_PATH = '/dir/'
10
11 from __future__ import absolute_import
12
13 from io import BytesIO
14 from shutil import copyfileobj
15 from tempfile import SpooledTemporaryFile
16
17 from django.core.exceptions import ImproperlyConfigured
18 from django.core.files.base import File
19 from django.core.files.storage import Storage
20 from django.utils._os import safe_join
21 from django.utils.deconstruct import deconstructible
22 from dropbox import Dropbox
23 from dropbox.exceptions import ApiError
24 from dropbox.files import CommitInfo, FolderMetadata, UploadSessionCursor
25
26 from storages.utils import setting
27
28 _DEFAULT_TIMEOUT = 100
29
30
31 class DropBoxStorageException(Exception):
32 pass
33
34
35 class DropBoxFile(File):
36 def __init__(self, name, storage):
37 self.name = name
38 self._storage = storage
39 self._file = None
40
41 def _get_file(self):
42 if self._file is None:
43 self._file = SpooledTemporaryFile()
44 # As dropbox==9.3.0, the client returns a tuple
45 # (dropbox.files.FileMetadata, requests.models.Response)
46 file_metadata, response = \
47 self._storage.client.files_download(self.name)
48 if response.status_code == 200:
49 with BytesIO(response.content) as file_content:
50 copyfileobj(file_content, self._file)
51 else:
52 # JIC the exception isn't catched by the dropbox client
53 raise DropBoxStorageException(
54 "Dropbox server returned a {} response when accessing {}"
55 .format(response.status_code, self.name)
56 )
57 self._file.seek(0)
58 return self._file
59
60 def _set_file(self, value):
61 self._file = value
62
63 file = property(_get_file, _set_file)
64
65
66 @deconstructible
67 class DropBoxStorage(Storage):
68 """DropBox Storage class for Django pluggable storage system."""
69
70 CHUNK_SIZE = 4 * 1024 * 1024
71
72 def __init__(self, oauth2_access_token=None, root_path=None, timeout=None):
73 oauth2_access_token = oauth2_access_token or setting('DROPBOX_OAUTH2_TOKEN')
74 if oauth2_access_token is None:
75 raise ImproperlyConfigured("You must configure an auth token at"
76 "'settings.DROPBOX_OAUTH2_TOKEN'.")
77
78 self.root_path = root_path or setting('DROPBOX_ROOT_PATH', '/')
79 timeout = timeout or setting('DROPBOX_TIMEOUT', _DEFAULT_TIMEOUT)
80 self.client = Dropbox(oauth2_access_token, timeout=timeout)
81
82 def _full_path(self, name):
83 if name == '/':
84 name = ''
85 return safe_join(self.root_path, name).replace('\\', '/')
86
87 def delete(self, name):
88 self.client.files_delete(self._full_path(name))
89
90 def exists(self, name):
91 try:
92 return bool(self.client.files_get_metadata(self._full_path(name)))
93 except ApiError:
94 return False
95
96 def listdir(self, path):
97 directories, files = [], []
98 full_path = self._full_path(path)
99 metadata = self.client.files_list_folder(full_path)
100 for entry in metadata.entries:
101 if isinstance(entry, FolderMetadata):
102 directories.append(entry.name)
103 else:
104 files.append(entry.name)
105 return directories, files
106
107 def size(self, name):
108 metadata = self.client.files_get_metadata(self._full_path(name))
109 return metadata.size
110
111 def modified_time(self, name):
112 metadata = self.client.files_get_metadata(self._full_path(name))
113 return metadata.server_modified
114
115 def accessed_time(self, name):
116 metadata = self.client.files_get_metadata(self._full_path(name))
117 return metadata.client_modified
118
119 def url(self, name):
120 media = self.client.files_get_temporary_link(self._full_path(name))
121 return media.link
122
123 def _open(self, name, mode='rb'):
124 remote_file = DropBoxFile(self._full_path(name), self)
125 return remote_file
126
127 def _save(self, name, content):
128 content.open()
129 if content.size <= self.CHUNK_SIZE:
130 self.client.files_upload(content.read(), self._full_path(name))
131 else:
132 self._chunked_upload(content, self._full_path(name))
133 content.close()
134 return name
135
136 def _chunked_upload(self, content, dest_path):
137 upload_session = self.client.files_upload_session_start(
138 content.read(self.CHUNK_SIZE)
139 )
140 cursor = UploadSessionCursor(
141 session_id=upload_session.session_id,
142 offset=content.tell()
143 )
144 commit = CommitInfo(path=dest_path)
145
146 while content.tell() < content.size:
147 if (content.size - content.tell()) <= self.CHUNK_SIZE:
148 self.client.files_upload_session_finish(
149 content.read(self.CHUNK_SIZE), cursor, commit
150 )
151 else:
152 self.client.files_upload_session_append_v2(
153 content.read(self.CHUNK_SIZE), cursor
154 )
155 cursor.offset = content.tell()
156
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/storages/backends/dropbox.py b/storages/backends/dropbox.py
--- a/storages/backends/dropbox.py
+++ b/storages/backends/dropbox.py
@@ -96,6 +96,10 @@
def listdir(self, path):
directories, files = [], []
full_path = self._full_path(path)
+
+ if full_path == '/':
+ full_path = ''
+
metadata = self.client.files_list_folder(full_path)
for entry in metadata.entries:
if isinstance(entry, FolderMetadata):
| {"golden_diff": "diff --git a/storages/backends/dropbox.py b/storages/backends/dropbox.py\n--- a/storages/backends/dropbox.py\n+++ b/storages/backends/dropbox.py\n@@ -96,6 +96,10 @@\n def listdir(self, path):\n directories, files = [], []\n full_path = self._full_path(path)\n+\n+ if full_path == '/':\n+ full_path = ''\n+\n metadata = self.client.files_list_folder(full_path)\n for entry in metadata.entries:\n if isinstance(entry, FolderMetadata):\n", "issue": "Dropbox base path / should be an empty string\nUsing `django-storages` `1.7.2`, a base path (`'/`) is provided, the Dropbox API is rejecting it:\r\n\r\n```\r\n File \"/usr/local/lib/python3.7/site-packages/dbbackup/storage.py\", line 78, in list_directory\r\n return self.storage.listdir(path)[1]\r\n File \"/usr/local/lib/python3.7/site-packages/storages/backends/dropbox.py\", line 99, in listdir\r\n metadata = self.client.files_list_folder(full_path)\r\n File \"/usr/local/lib/python3.7/site-packages/dropbox/base.py\", line 1744, in files_list_folder\r\n None,\r\n File \"/usr/local/lib/python3.7/site-packages/dropbox/dropbox.py\", line 274, in request\r\n timeout=timeout)\r\n File \"/usr/local/lib/python3.7/site-packages/dropbox/dropbox.py\", line 365, in request_json_string_with_retry\r\n timeout=timeout)\r\n File \"/usr/local/lib/python3.7/site-packages/dropbox/dropbox.py\", line 456, in request_json_string\r\n raise BadInputError(request_id, r.text)\r\ndropbox.exceptions.BadInputError: BadInputError('XXXXXXXXXXXXXXXXXXXX', 'Error in call to API function \"files/list_folder\": request body: path: Specify the root folder as an empty string rather than as \"/\".')\r\n```\r\n\r\nAs the error says it should be an empty string rather than as '/'.\n", "before_files": [{"content": "# Dropbox storage class for Django pluggable storage system.\n# Author: Anthony Monthe <[email protected]>\n# License: BSD\n#\n# Usage:\n#\n# Add below to settings.py:\n# DROPBOX_OAUTH2_TOKEN = 'YourOauthToken'\n# DROPBOX_ROOT_PATH = '/dir/'\n\nfrom __future__ import absolute_import\n\nfrom io import BytesIO\nfrom shutil import copyfileobj\nfrom tempfile import SpooledTemporaryFile\n\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.core.files.base import File\nfrom django.core.files.storage import Storage\nfrom django.utils._os import safe_join\nfrom django.utils.deconstruct import deconstructible\nfrom dropbox import Dropbox\nfrom dropbox.exceptions import ApiError\nfrom dropbox.files import CommitInfo, FolderMetadata, UploadSessionCursor\n\nfrom storages.utils import setting\n\n_DEFAULT_TIMEOUT = 100\n\n\nclass DropBoxStorageException(Exception):\n pass\n\n\nclass DropBoxFile(File):\n def __init__(self, name, storage):\n self.name = name\n self._storage = storage\n self._file = None\n\n def _get_file(self):\n if self._file is None:\n self._file = SpooledTemporaryFile()\n # As dropbox==9.3.0, the client returns a tuple\n # (dropbox.files.FileMetadata, requests.models.Response)\n file_metadata, response = \\\n self._storage.client.files_download(self.name)\n if response.status_code == 200:\n with BytesIO(response.content) as file_content:\n copyfileobj(file_content, self._file)\n else:\n # JIC the exception isn't catched by the dropbox client\n raise DropBoxStorageException(\n \"Dropbox server returned a {} response when accessing {}\"\n .format(response.status_code, self.name)\n )\n self._file.seek(0)\n return self._file\n\n def _set_file(self, value):\n self._file = value\n\n file = property(_get_file, _set_file)\n\n\n@deconstructible\nclass DropBoxStorage(Storage):\n \"\"\"DropBox Storage class for Django pluggable storage system.\"\"\"\n\n CHUNK_SIZE = 4 * 1024 * 1024\n\n def __init__(self, oauth2_access_token=None, root_path=None, timeout=None):\n oauth2_access_token = oauth2_access_token or setting('DROPBOX_OAUTH2_TOKEN')\n if oauth2_access_token is None:\n raise ImproperlyConfigured(\"You must configure an auth token at\"\n \"'settings.DROPBOX_OAUTH2_TOKEN'.\")\n\n self.root_path = root_path or setting('DROPBOX_ROOT_PATH', '/')\n timeout = timeout or setting('DROPBOX_TIMEOUT', _DEFAULT_TIMEOUT)\n self.client = Dropbox(oauth2_access_token, timeout=timeout)\n\n def _full_path(self, name):\n if name == '/':\n name = ''\n return safe_join(self.root_path, name).replace('\\\\', '/')\n\n def delete(self, name):\n self.client.files_delete(self._full_path(name))\n\n def exists(self, name):\n try:\n return bool(self.client.files_get_metadata(self._full_path(name)))\n except ApiError:\n return False\n\n def listdir(self, path):\n directories, files = [], []\n full_path = self._full_path(path)\n metadata = self.client.files_list_folder(full_path)\n for entry in metadata.entries:\n if isinstance(entry, FolderMetadata):\n directories.append(entry.name)\n else:\n files.append(entry.name)\n return directories, files\n\n def size(self, name):\n metadata = self.client.files_get_metadata(self._full_path(name))\n return metadata.size\n\n def modified_time(self, name):\n metadata = self.client.files_get_metadata(self._full_path(name))\n return metadata.server_modified\n\n def accessed_time(self, name):\n metadata = self.client.files_get_metadata(self._full_path(name))\n return metadata.client_modified\n\n def url(self, name):\n media = self.client.files_get_temporary_link(self._full_path(name))\n return media.link\n\n def _open(self, name, mode='rb'):\n remote_file = DropBoxFile(self._full_path(name), self)\n return remote_file\n\n def _save(self, name, content):\n content.open()\n if content.size <= self.CHUNK_SIZE:\n self.client.files_upload(content.read(), self._full_path(name))\n else:\n self._chunked_upload(content, self._full_path(name))\n content.close()\n return name\n\n def _chunked_upload(self, content, dest_path):\n upload_session = self.client.files_upload_session_start(\n content.read(self.CHUNK_SIZE)\n )\n cursor = UploadSessionCursor(\n session_id=upload_session.session_id,\n offset=content.tell()\n )\n commit = CommitInfo(path=dest_path)\n\n while content.tell() < content.size:\n if (content.size - content.tell()) <= self.CHUNK_SIZE:\n self.client.files_upload_session_finish(\n content.read(self.CHUNK_SIZE), cursor, commit\n )\n else:\n self.client.files_upload_session_append_v2(\n content.read(self.CHUNK_SIZE), cursor\n )\n cursor.offset = content.tell()\n", "path": "storages/backends/dropbox.py"}], "after_files": [{"content": "# Dropbox storage class for Django pluggable storage system.\n# Author: Anthony Monthe <[email protected]>\n# License: BSD\n#\n# Usage:\n#\n# Add below to settings.py:\n# DROPBOX_OAUTH2_TOKEN = 'YourOauthToken'\n# DROPBOX_ROOT_PATH = '/dir/'\n\nfrom __future__ import absolute_import\n\nfrom io import BytesIO\nfrom shutil import copyfileobj\nfrom tempfile import SpooledTemporaryFile\n\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.core.files.base import File\nfrom django.core.files.storage import Storage\nfrom django.utils._os import safe_join\nfrom django.utils.deconstruct import deconstructible\nfrom dropbox import Dropbox\nfrom dropbox.exceptions import ApiError\nfrom dropbox.files import CommitInfo, FolderMetadata, UploadSessionCursor\n\nfrom storages.utils import setting\n\n_DEFAULT_TIMEOUT = 100\n\n\nclass DropBoxStorageException(Exception):\n pass\n\n\nclass DropBoxFile(File):\n def __init__(self, name, storage):\n self.name = name\n self._storage = storage\n self._file = None\n\n def _get_file(self):\n if self._file is None:\n self._file = SpooledTemporaryFile()\n # As dropbox==9.3.0, the client returns a tuple\n # (dropbox.files.FileMetadata, requests.models.Response)\n file_metadata, response = \\\n self._storage.client.files_download(self.name)\n if response.status_code == 200:\n with BytesIO(response.content) as file_content:\n copyfileobj(file_content, self._file)\n else:\n # JIC the exception isn't catched by the dropbox client\n raise DropBoxStorageException(\n \"Dropbox server returned a {} response when accessing {}\"\n .format(response.status_code, self.name)\n )\n self._file.seek(0)\n return self._file\n\n def _set_file(self, value):\n self._file = value\n\n file = property(_get_file, _set_file)\n\n\n@deconstructible\nclass DropBoxStorage(Storage):\n \"\"\"DropBox Storage class for Django pluggable storage system.\"\"\"\n\n CHUNK_SIZE = 4 * 1024 * 1024\n\n def __init__(self, oauth2_access_token=None, root_path=None, timeout=None):\n oauth2_access_token = oauth2_access_token or setting('DROPBOX_OAUTH2_TOKEN')\n if oauth2_access_token is None:\n raise ImproperlyConfigured(\"You must configure an auth token at\"\n \"'settings.DROPBOX_OAUTH2_TOKEN'.\")\n\n self.root_path = root_path or setting('DROPBOX_ROOT_PATH', '/')\n timeout = timeout or setting('DROPBOX_TIMEOUT', _DEFAULT_TIMEOUT)\n self.client = Dropbox(oauth2_access_token, timeout=timeout)\n\n def _full_path(self, name):\n if name == '/':\n name = ''\n return safe_join(self.root_path, name).replace('\\\\', '/')\n\n def delete(self, name):\n self.client.files_delete(self._full_path(name))\n\n def exists(self, name):\n try:\n return bool(self.client.files_get_metadata(self._full_path(name)))\n except ApiError:\n return False\n\n def listdir(self, path):\n directories, files = [], []\n full_path = self._full_path(path)\n\n if full_path == '/':\n full_path = ''\n\n metadata = self.client.files_list_folder(full_path)\n for entry in metadata.entries:\n if isinstance(entry, FolderMetadata):\n directories.append(entry.name)\n else:\n files.append(entry.name)\n return directories, files\n\n def size(self, name):\n metadata = self.client.files_get_metadata(self._full_path(name))\n return metadata.size\n\n def modified_time(self, name):\n metadata = self.client.files_get_metadata(self._full_path(name))\n return metadata.server_modified\n\n def accessed_time(self, name):\n metadata = self.client.files_get_metadata(self._full_path(name))\n return metadata.client_modified\n\n def url(self, name):\n media = self.client.files_get_temporary_link(self._full_path(name))\n return media.link\n\n def _open(self, name, mode='rb'):\n remote_file = DropBoxFile(self._full_path(name), self)\n return remote_file\n\n def _save(self, name, content):\n content.open()\n if content.size <= self.CHUNK_SIZE:\n self.client.files_upload(content.read(), self._full_path(name))\n else:\n self._chunked_upload(content, self._full_path(name))\n content.close()\n return name\n\n def _chunked_upload(self, content, dest_path):\n upload_session = self.client.files_upload_session_start(\n content.read(self.CHUNK_SIZE)\n )\n cursor = UploadSessionCursor(\n session_id=upload_session.session_id,\n offset=content.tell()\n )\n commit = CommitInfo(path=dest_path)\n\n while content.tell() < content.size:\n if (content.size - content.tell()) <= self.CHUNK_SIZE:\n self.client.files_upload_session_finish(\n content.read(self.CHUNK_SIZE), cursor, commit\n )\n else:\n self.client.files_upload_session_append_v2(\n content.read(self.CHUNK_SIZE), cursor\n )\n cursor.offset = content.tell()\n", "path": "storages/backends/dropbox.py"}]} |
gh_patches_debug_1556 | rasdani/github-patches | git_diff | getnikola__nikola-2593 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Archives (CREATE_MONTHLY_ARCHIVE = True) think all months are "None"
https://irclogs.getnikola.com/2015/
To reproduce locally: get https://github.com/getnikola/irclogs-site create any random files in `posts/*.rst`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/plugins/task/archive.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2016 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """Classify the posts in archives."""
28
29 import os
30 import nikola.utils
31 import datetime
32 from nikola.plugin_categories import Taxonomy
33
34
35 class Archive(Taxonomy):
36 """Classify the post archives."""
37
38 name = "classify_archive"
39
40 classification_name = "archive"
41 overview_page_variable_name = "archive"
42 more_than_one_classifications_per_post = False
43 has_hierarchy = True
44 include_posts_from_subhierarchies = True
45 include_posts_into_hierarchy_root = True
46 subcategories_list_template = "list.tmpl"
47 generate_atom_feeds_for_post_lists = False
48 template_for_classification_overview = None
49 always_disable_rss = True
50 apply_to_posts = True
51 apply_to_pages = False
52 minimum_post_count_per_classification_in_overview = 1
53 omit_empty_classifications = False
54 also_create_classifications_from_other_languages = False
55
56 def set_site(self, site):
57 """Set Nikola site."""
58 # Sanity checks
59 if (site.config['CREATE_MONTHLY_ARCHIVE'] and site.config['CREATE_SINGLE_ARCHIVE']) and not site.config['CREATE_FULL_ARCHIVES']:
60 raise Exception('Cannot create monthly and single archives at the same time.')
61 # Finish setup
62 self.show_list_as_subcategories_list = not site.config['CREATE_FULL_ARCHIVES']
63 self.show_list_as_index = site.config['ARCHIVES_ARE_INDEXES']
64 self.template_for_single_list = "archiveindex.tmpl" if site.config['ARCHIVES_ARE_INDEXES'] else "list_post.tmpl"
65 # Determine maximum hierarchy height
66 if site.config['CREATE_DAILY_ARCHIVE'] or site.config['CREATE_FULL_ARCHIVES']:
67 self.max_levels = 3
68 elif site.config['CREATE_MONTHLY_ARCHIVE']:
69 self.max_levels = 2
70 elif site.config['CREATE_SINGLE_ARCHIVE']:
71 self.max_levels = 0
72 else:
73 self.max_levels = 1
74 return super(Archive, self).set_site(site)
75
76 def get_implicit_classifications(self, lang):
77 """Return a list of classification strings which should always appear in posts_per_classification."""
78 return ['']
79
80 def classify(self, post, lang):
81 """Classify the given post for the given language."""
82 levels = [str(post.date.year).zfill(4), str(post.date.month).zfill(2), str(post.date.day).zfill(2)]
83 return ['/'.join(levels[:self.max_levels])]
84
85 def sort_classifications(self, classifications, lang, level=None):
86 """Sort the given list of classification strings."""
87 if level in (0, 1):
88 # Years or months: sort descending
89 classifications.sort()
90 classifications.reverse()
91
92 def get_classification_friendly_name(self, classification, lang, only_last_component=False):
93 """Extract a friendly name from the classification."""
94 classification = self.extract_hierarchy(classification)
95 if len(classification) == 0:
96 return ""
97 elif len(classification) == 1:
98 return classification[0]
99 elif len(classification) == 2:
100 nikola.utils.LocaleBorg().get_month_name(int(classification[1]), lang)
101 else:
102 # Fallback
103 return '/'.join(classification)
104
105 def get_path(self, classification, lang, dest_type='page'):
106 """A path handler for the given classification."""
107 components = [self.site.config['ARCHIVE_PATH']]
108 if classification:
109 components.extend(classification)
110 add_index = 'always'
111 else:
112 components.append(os.path.splitext(self.site.config['ARCHIVE_FILENAME'])[0])
113 add_index = 'never'
114 return [_f for _f in components if _f], add_index
115
116 def extract_hierarchy(self, classification):
117 """Given a classification, return a list of parts in the hierarchy."""
118 return classification.split('/') if classification else []
119
120 def recombine_classification_from_hierarchy(self, hierarchy):
121 """Given a list of parts in the hierarchy, return the classification string."""
122 return '/'.join(hierarchy)
123
124 def provide_context_and_uptodate(self, classification, lang, node=None):
125 """Provide data for the context and the uptodate list for the list of the given classifiation."""
126 hierarchy = self.extract_hierarchy(classification)
127 kw = {
128 "messages": self.site.MESSAGES,
129 }
130 page_kind = "list"
131 if self.show_list_as_index:
132 if not self.show_list_as_subcategories_list or len(hierarchy) == self.max_levels:
133 page_kind = "index"
134 if len(hierarchy) == 0:
135 title = kw["messages"][lang]["Archive"]
136 kw["is_feed_stale"] = False
137 elif len(hierarchy) == 1:
138 title = kw["messages"][lang]["Posts for year %s"] % hierarchy[0]
139 kw["is_feed_stale"] = (datetime.datetime.utcnow().strftime("%Y") != hierarchy[0])
140 elif len(hierarchy) == 2:
141 title = kw["messages"][lang]["Posts for {month} {year}"].format(
142 year=hierarchy[0],
143 month=nikola.utils.LocaleBorg().get_month_name(int(hierarchy[1]), lang))
144 kw["is_feed_stale"] = (datetime.datetime.utcnow().strftime("%Y/%m") != classification)
145 elif len(hierarchy) == 3:
146 title = kw["messages"][lang]["Posts for {month} {day}, {year}"].format(
147 year=hierarchy[0],
148 month=nikola.utils.LocaleBorg().get_month_name(int(hierarchy[1]), lang),
149 day=int(hierarchy[2]))
150 kw["is_feed_stale"] = (datetime.datetime.utcnow().strftime("%Y/%m/%d") != classification)
151 else:
152 raise Exception("Cannot interpret classification {}!".format(repr(classification)))
153 context = {
154 "title": title,
155 "pagekind": [page_kind, "archive_page"],
156 }
157 if page_kind == 'index':
158 context["archive_name"] = classification if classification else None
159 context["is_feed_stale"] = kw["is_feed_stale"]
160 kw.update(context)
161 return context, kw
162
163 def should_generate_classification_page(self, classification, post_list, lang):
164 """Only generates list of posts for classification if this function returns True."""
165 return len(classification.split('/')) < 3 or len(post_list) > 0
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nikola/plugins/task/archive.py b/nikola/plugins/task/archive.py
--- a/nikola/plugins/task/archive.py
+++ b/nikola/plugins/task/archive.py
@@ -97,7 +97,7 @@
elif len(classification) == 1:
return classification[0]
elif len(classification) == 2:
- nikola.utils.LocaleBorg().get_month_name(int(classification[1]), lang)
+ return nikola.utils.LocaleBorg().get_month_name(int(classification[1]), lang)
else:
# Fallback
return '/'.join(classification)
| {"golden_diff": "diff --git a/nikola/plugins/task/archive.py b/nikola/plugins/task/archive.py\n--- a/nikola/plugins/task/archive.py\n+++ b/nikola/plugins/task/archive.py\n@@ -97,7 +97,7 @@\n elif len(classification) == 1:\n return classification[0]\n elif len(classification) == 2:\n- nikola.utils.LocaleBorg().get_month_name(int(classification[1]), lang)\n+ return nikola.utils.LocaleBorg().get_month_name(int(classification[1]), lang)\n else:\n # Fallback\n return '/'.join(classification)\n", "issue": "Archives (CREATE_MONTHLY_ARCHIVE = True) think all months are \"None\"\nhttps://irclogs.getnikola.com/2015/\r\n\r\nTo reproduce locally: get https://github.com/getnikola/irclogs-site create any random files in `posts/*.rst`.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2016 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Classify the posts in archives.\"\"\"\n\nimport os\nimport nikola.utils\nimport datetime\nfrom nikola.plugin_categories import Taxonomy\n\n\nclass Archive(Taxonomy):\n \"\"\"Classify the post archives.\"\"\"\n\n name = \"classify_archive\"\n\n classification_name = \"archive\"\n overview_page_variable_name = \"archive\"\n more_than_one_classifications_per_post = False\n has_hierarchy = True\n include_posts_from_subhierarchies = True\n include_posts_into_hierarchy_root = True\n subcategories_list_template = \"list.tmpl\"\n generate_atom_feeds_for_post_lists = False\n template_for_classification_overview = None\n always_disable_rss = True\n apply_to_posts = True\n apply_to_pages = False\n minimum_post_count_per_classification_in_overview = 1\n omit_empty_classifications = False\n also_create_classifications_from_other_languages = False\n\n def set_site(self, site):\n \"\"\"Set Nikola site.\"\"\"\n # Sanity checks\n if (site.config['CREATE_MONTHLY_ARCHIVE'] and site.config['CREATE_SINGLE_ARCHIVE']) and not site.config['CREATE_FULL_ARCHIVES']:\n raise Exception('Cannot create monthly and single archives at the same time.')\n # Finish setup\n self.show_list_as_subcategories_list = not site.config['CREATE_FULL_ARCHIVES']\n self.show_list_as_index = site.config['ARCHIVES_ARE_INDEXES']\n self.template_for_single_list = \"archiveindex.tmpl\" if site.config['ARCHIVES_ARE_INDEXES'] else \"list_post.tmpl\"\n # Determine maximum hierarchy height\n if site.config['CREATE_DAILY_ARCHIVE'] or site.config['CREATE_FULL_ARCHIVES']:\n self.max_levels = 3\n elif site.config['CREATE_MONTHLY_ARCHIVE']:\n self.max_levels = 2\n elif site.config['CREATE_SINGLE_ARCHIVE']:\n self.max_levels = 0\n else:\n self.max_levels = 1\n return super(Archive, self).set_site(site)\n\n def get_implicit_classifications(self, lang):\n \"\"\"Return a list of classification strings which should always appear in posts_per_classification.\"\"\"\n return ['']\n\n def classify(self, post, lang):\n \"\"\"Classify the given post for the given language.\"\"\"\n levels = [str(post.date.year).zfill(4), str(post.date.month).zfill(2), str(post.date.day).zfill(2)]\n return ['/'.join(levels[:self.max_levels])]\n\n def sort_classifications(self, classifications, lang, level=None):\n \"\"\"Sort the given list of classification strings.\"\"\"\n if level in (0, 1):\n # Years or months: sort descending\n classifications.sort()\n classifications.reverse()\n\n def get_classification_friendly_name(self, classification, lang, only_last_component=False):\n \"\"\"Extract a friendly name from the classification.\"\"\"\n classification = self.extract_hierarchy(classification)\n if len(classification) == 0:\n return \"\"\n elif len(classification) == 1:\n return classification[0]\n elif len(classification) == 2:\n nikola.utils.LocaleBorg().get_month_name(int(classification[1]), lang)\n else:\n # Fallback\n return '/'.join(classification)\n\n def get_path(self, classification, lang, dest_type='page'):\n \"\"\"A path handler for the given classification.\"\"\"\n components = [self.site.config['ARCHIVE_PATH']]\n if classification:\n components.extend(classification)\n add_index = 'always'\n else:\n components.append(os.path.splitext(self.site.config['ARCHIVE_FILENAME'])[0])\n add_index = 'never'\n return [_f for _f in components if _f], add_index\n\n def extract_hierarchy(self, classification):\n \"\"\"Given a classification, return a list of parts in the hierarchy.\"\"\"\n return classification.split('/') if classification else []\n\n def recombine_classification_from_hierarchy(self, hierarchy):\n \"\"\"Given a list of parts in the hierarchy, return the classification string.\"\"\"\n return '/'.join(hierarchy)\n\n def provide_context_and_uptodate(self, classification, lang, node=None):\n \"\"\"Provide data for the context and the uptodate list for the list of the given classifiation.\"\"\"\n hierarchy = self.extract_hierarchy(classification)\n kw = {\n \"messages\": self.site.MESSAGES,\n }\n page_kind = \"list\"\n if self.show_list_as_index:\n if not self.show_list_as_subcategories_list or len(hierarchy) == self.max_levels:\n page_kind = \"index\"\n if len(hierarchy) == 0:\n title = kw[\"messages\"][lang][\"Archive\"]\n kw[\"is_feed_stale\"] = False\n elif len(hierarchy) == 1:\n title = kw[\"messages\"][lang][\"Posts for year %s\"] % hierarchy[0]\n kw[\"is_feed_stale\"] = (datetime.datetime.utcnow().strftime(\"%Y\") != hierarchy[0])\n elif len(hierarchy) == 2:\n title = kw[\"messages\"][lang][\"Posts for {month} {year}\"].format(\n year=hierarchy[0],\n month=nikola.utils.LocaleBorg().get_month_name(int(hierarchy[1]), lang))\n kw[\"is_feed_stale\"] = (datetime.datetime.utcnow().strftime(\"%Y/%m\") != classification)\n elif len(hierarchy) == 3:\n title = kw[\"messages\"][lang][\"Posts for {month} {day}, {year}\"].format(\n year=hierarchy[0],\n month=nikola.utils.LocaleBorg().get_month_name(int(hierarchy[1]), lang),\n day=int(hierarchy[2]))\n kw[\"is_feed_stale\"] = (datetime.datetime.utcnow().strftime(\"%Y/%m/%d\") != classification)\n else:\n raise Exception(\"Cannot interpret classification {}!\".format(repr(classification)))\n context = {\n \"title\": title,\n \"pagekind\": [page_kind, \"archive_page\"],\n }\n if page_kind == 'index':\n context[\"archive_name\"] = classification if classification else None\n context[\"is_feed_stale\"] = kw[\"is_feed_stale\"]\n kw.update(context)\n return context, kw\n\n def should_generate_classification_page(self, classification, post_list, lang):\n \"\"\"Only generates list of posts for classification if this function returns True.\"\"\"\n return len(classification.split('/')) < 3 or len(post_list) > 0\n", "path": "nikola/plugins/task/archive.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2016 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Classify the posts in archives.\"\"\"\n\nimport os\nimport nikola.utils\nimport datetime\nfrom nikola.plugin_categories import Taxonomy\n\n\nclass Archive(Taxonomy):\n \"\"\"Classify the post archives.\"\"\"\n\n name = \"classify_archive\"\n\n classification_name = \"archive\"\n overview_page_variable_name = \"archive\"\n more_than_one_classifications_per_post = False\n has_hierarchy = True\n include_posts_from_subhierarchies = True\n include_posts_into_hierarchy_root = True\n subcategories_list_template = \"list.tmpl\"\n generate_atom_feeds_for_post_lists = False\n template_for_classification_overview = None\n always_disable_rss = True\n apply_to_posts = True\n apply_to_pages = False\n minimum_post_count_per_classification_in_overview = 1\n omit_empty_classifications = False\n also_create_classifications_from_other_languages = False\n\n def set_site(self, site):\n \"\"\"Set Nikola site.\"\"\"\n # Sanity checks\n if (site.config['CREATE_MONTHLY_ARCHIVE'] and site.config['CREATE_SINGLE_ARCHIVE']) and not site.config['CREATE_FULL_ARCHIVES']:\n raise Exception('Cannot create monthly and single archives at the same time.')\n # Finish setup\n self.show_list_as_subcategories_list = not site.config['CREATE_FULL_ARCHIVES']\n self.show_list_as_index = site.config['ARCHIVES_ARE_INDEXES']\n self.template_for_single_list = \"archiveindex.tmpl\" if site.config['ARCHIVES_ARE_INDEXES'] else \"list_post.tmpl\"\n # Determine maximum hierarchy height\n if site.config['CREATE_DAILY_ARCHIVE'] or site.config['CREATE_FULL_ARCHIVES']:\n self.max_levels = 3\n elif site.config['CREATE_MONTHLY_ARCHIVE']:\n self.max_levels = 2\n elif site.config['CREATE_SINGLE_ARCHIVE']:\n self.max_levels = 0\n else:\n self.max_levels = 1\n return super(Archive, self).set_site(site)\n\n def get_implicit_classifications(self, lang):\n \"\"\"Return a list of classification strings which should always appear in posts_per_classification.\"\"\"\n return ['']\n\n def classify(self, post, lang):\n \"\"\"Classify the given post for the given language.\"\"\"\n levels = [str(post.date.year).zfill(4), str(post.date.month).zfill(2), str(post.date.day).zfill(2)]\n return ['/'.join(levels[:self.max_levels])]\n\n def sort_classifications(self, classifications, lang, level=None):\n \"\"\"Sort the given list of classification strings.\"\"\"\n if level in (0, 1):\n # Years or months: sort descending\n classifications.sort()\n classifications.reverse()\n\n def get_classification_friendly_name(self, classification, lang, only_last_component=False):\n \"\"\"Extract a friendly name from the classification.\"\"\"\n classification = self.extract_hierarchy(classification)\n if len(classification) == 0:\n return \"\"\n elif len(classification) == 1:\n return classification[0]\n elif len(classification) == 2:\n return nikola.utils.LocaleBorg().get_month_name(int(classification[1]), lang)\n else:\n # Fallback\n return '/'.join(classification)\n\n def get_path(self, classification, lang, dest_type='page'):\n \"\"\"A path handler for the given classification.\"\"\"\n components = [self.site.config['ARCHIVE_PATH']]\n if classification:\n components.extend(classification)\n add_index = 'always'\n else:\n components.append(os.path.splitext(self.site.config['ARCHIVE_FILENAME'])[0])\n add_index = 'never'\n return [_f for _f in components if _f], add_index\n\n def extract_hierarchy(self, classification):\n \"\"\"Given a classification, return a list of parts in the hierarchy.\"\"\"\n return classification.split('/') if classification else []\n\n def recombine_classification_from_hierarchy(self, hierarchy):\n \"\"\"Given a list of parts in the hierarchy, return the classification string.\"\"\"\n return '/'.join(hierarchy)\n\n def provide_context_and_uptodate(self, classification, lang, node=None):\n \"\"\"Provide data for the context and the uptodate list for the list of the given classifiation.\"\"\"\n hierarchy = self.extract_hierarchy(classification)\n kw = {\n \"messages\": self.site.MESSAGES,\n }\n page_kind = \"list\"\n if self.show_list_as_index:\n if not self.show_list_as_subcategories_list or len(hierarchy) == self.max_levels:\n page_kind = \"index\"\n if len(hierarchy) == 0:\n title = kw[\"messages\"][lang][\"Archive\"]\n kw[\"is_feed_stale\"] = False\n elif len(hierarchy) == 1:\n title = kw[\"messages\"][lang][\"Posts for year %s\"] % hierarchy[0]\n kw[\"is_feed_stale\"] = (datetime.datetime.utcnow().strftime(\"%Y\") != hierarchy[0])\n elif len(hierarchy) == 2:\n title = kw[\"messages\"][lang][\"Posts for {month} {year}\"].format(\n year=hierarchy[0],\n month=nikola.utils.LocaleBorg().get_month_name(int(hierarchy[1]), lang))\n kw[\"is_feed_stale\"] = (datetime.datetime.utcnow().strftime(\"%Y/%m\") != classification)\n elif len(hierarchy) == 3:\n title = kw[\"messages\"][lang][\"Posts for {month} {day}, {year}\"].format(\n year=hierarchy[0],\n month=nikola.utils.LocaleBorg().get_month_name(int(hierarchy[1]), lang),\n day=int(hierarchy[2]))\n kw[\"is_feed_stale\"] = (datetime.datetime.utcnow().strftime(\"%Y/%m/%d\") != classification)\n else:\n raise Exception(\"Cannot interpret classification {}!\".format(repr(classification)))\n context = {\n \"title\": title,\n \"pagekind\": [page_kind, \"archive_page\"],\n }\n if page_kind == 'index':\n context[\"archive_name\"] = classification if classification else None\n context[\"is_feed_stale\"] = kw[\"is_feed_stale\"]\n kw.update(context)\n return context, kw\n\n def should_generate_classification_page(self, classification, post_list, lang):\n \"\"\"Only generates list of posts for classification if this function returns True.\"\"\"\n return len(classification.split('/')) < 3 or len(post_list) > 0\n", "path": "nikola/plugins/task/archive.py"}]} |
gh_patches_debug_1557 | rasdani/github-patches | git_diff | microsoft__DeepSpeed-5134 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] TypeError: Can't instantiate abstract class MPS_Accelerator with abstract methods
**Describe the bug**
TypeError: Can't instantiate abstract class MPS_Accelerator with abstract methods handles_memory_backpressure, resolves_data_dependency, use_host_timers
**To Reproduce**
Steps to reproduce the behavior:
1. git clone https://github.com/OpenBMB/MiniCPM.git
2. follow setup step.
3. run `!bash lora_finetune.sh` via `lora_finetune.ipynb`
**Expected behavior**
runnable
**ds_report output**
Please run `ds_report` to give us details about your setup.
**Screenshots**
```
class CudaEventTimer(object):
File "/Users/katopz/git/MiniCPM/finetune/.venv/lib/python3.11/site-packages/deepspeed/utils/timer.py", line 33, in CudaEventTimer
def __init__(self, start_event: get_accelerator().Event, end_event: get_accelerator().Event):
^^^^^^^^^^^^^^^^^
File "/Users/katopz/git/MiniCPM/finetune/.venv/lib/python3.11/site-packages/deepspeed/accelerator/real_accelerator.py", line 184, in get_accelerator
ds_accelerator = MPS_Accelerator()
^^^^^^^^^^^^^^^^^
TypeError: Can't instantiate abstract class MPS_Accelerator with abstract methods handles_memory_backpressure, resolves_data_dependency, use_host_timers
```
**System info (please complete the following information):**
- OS: macOS 14.2.1 (23C71)
- metal
- Python 3.11.7
**Launcher context**
deepspeed
**Docker context**
no
**Additional context**
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `accelerator/mps_accelerator.py`
Content:
```
1 # Copyright (c) Microsoft Corporation.
2 # SPDX-License-Identifier: Apache-2.0
3
4 # DeepSpeed Team
5
6 import torch
7
8 from .abstract_accelerator import DeepSpeedAccelerator
9
10 # During setup stage torch may not be installed, pass on no torch will
11 # allow op builder related API to be executed.
12 try:
13 import torch.mps
14 except ImportError:
15 pass
16
17
18 class MPS_Accelerator(DeepSpeedAccelerator):
19
20 def __init__(self):
21 self._name = "mps"
22 self._communication_backend_name = None
23
24 def is_synchronized_device(self):
25 return False
26
27 # Device APIs
28 def device_name(self, device_index=None):
29 if device_index is None:
30 return "mps"
31 return "mps:{}".format(device_index)
32
33 def device(self, device_index):
34 return torch.device("mps", index=0)
35
36 def set_device(self, device_index):
37 return
38
39 def current_device(self):
40 return torch.device("mps", index=0)
41
42 def current_device_name(self):
43 return "mps:0"
44
45 def device_count(self):
46 return 1
47
48 def synchronize(self, device_index=None):
49 return torch.mps.synchronize()
50
51 # RNG APIs
52 def random(self):
53 return torch.random
54
55 def set_rng_state(self, new_state, device_index=None):
56 return torch.mps.set_rng_state(new_state)
57
58 def get_rng_state(self, device_index=None):
59 return torch.mps.get_rng_state()
60
61 def manual_seed(self, seed):
62 return torch.mps.manual_seed(seed)
63
64 def manual_seed_all(self, seed):
65 return torch.mps.manual_seed(seed)
66
67 def seed(self):
68 return torch.mps.seed()
69
70 def initial_seed(self, seed):
71 return
72
73 def default_generator(self, device_index):
74 return
75
76 # Streams/Events
77 @property
78 def Stream(self):
79 return None
80
81 def stream(self, stream):
82 return None
83
84 def current_stream(self, device_index=None):
85 return None
86
87 def default_stream(self, device_index=None):
88 return None
89
90 @property
91 def Event(self):
92 return None
93
94 # Memory management
95 def empty_cache(self):
96 return torch.mps.empty_cache()
97
98 def memory_allocated(self, device_index=None):
99 return torch.mps.current_allocated_memory()
100
101 def max_memory_allocated(self, device_index=None):
102 return torch.mps.driver_allocated_memory()
103
104 def set_per_process_memory_fraction(self, fraction):
105 return torch.mps.set_per_process_memory_fraction(fraction)
106
107 def reset_max_memory_allocated(self, device_index=None):
108 return
109
110 def memory_cached(self, device_index=None):
111 return
112
113 def max_memory_cached(self, device_index=None):
114 return
115
116 def reset_max_memory_cached(self, device_index=None):
117 return
118
119 def memory_stats(self, device_index=None):
120 return
121
122 def reset_peak_memory_stats(self, device_index=None):
123 return
124
125 def memory_reserved(self, device_index=None):
126 return
127
128 def max_memory_reserved(self, device_index=None):
129 return
130
131 def total_memory(self, device_index=None):
132 return
133
134 def available_memory(self, device_index=None):
135 return
136
137 # Data types
138 def is_bf16_supported(self):
139 return False
140
141 def is_fp16_supported(self):
142 return False
143
144 def supported_dtypes(self):
145 return [torch.float]
146
147 # Misc
148 def amp(self):
149 return
150
151 def is_available(self):
152 return hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
153
154 def range_push(self, msg):
155 return
156
157 def range_pop(self):
158 return
159
160 def lazy_call(self, callback):
161 return
162
163 def communication_backend_name(self):
164 return self._communication_backend_name
165
166 def is_triton_supported(self):
167 return False
168
169 # Graph operations
170 def create_graph(self):
171 return None
172
173 def capture_to_graph(self, graph, pool=None, stream=None):
174 from deepspeed.runtime.utils import noop_context
175 return noop_context()
176
177 def replay_graph(self, graph):
178 return
179
180 # Tensor operations
181 @property
182 def BFloat16Tensor(self):
183 return
184
185 @property
186 def ByteTensor(self):
187 return
188
189 @property
190 def DoubleTensor(self):
191 return
192
193 @property
194 def FloatTensor(self):
195 return
196
197 @property
198 def HalfTensor(self):
199 return
200
201 @property
202 def IntTensor(self):
203 return
204
205 @property
206 def LongTensor(self):
207 return
208
209 def pin_memory(self, tensor, align_bytes=1):
210 return tensor.pin_memory()
211
212 def is_pinned(self, tensor):
213 return tensor.is_pinned()
214
215 def on_accelerator(self, tensor):
216 device_str = str(tensor.device)
217 if device_str.startswith("mps"):
218 return True
219 else:
220 return False
221
222 def op_builder_dir(self):
223 try:
224 # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
225 # if successful this also means we're doing a local install and not JIT compile path
226 from op_builder import __deepspeed__ # noqa: F401 # type: ignore
227
228 return "op_builder"
229 except ImportError:
230 return "deepspeed.ops.op_builder"
231
232 # create an instance of op builder, specified by class_name
233 def create_op_builder(self, op_name):
234 builder_class = self.get_op_builder(op_name)
235 if builder_class is not None:
236 return builder_class()
237 return None
238
239 # return an op builder class, specified by class_name
240 def get_op_builder(self, class_name):
241 from deepspeed.ops.op_builder.cpu import NotImplementedBuilder
242
243 return NotImplementedBuilder
244
245 def build_extension(self):
246 from torch.utils.cpp_extension import BuildExtension
247
248 return BuildExtension
249
250 def export_envs(self):
251 return []
252
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/accelerator/mps_accelerator.py b/accelerator/mps_accelerator.py
--- a/accelerator/mps_accelerator.py
+++ b/accelerator/mps_accelerator.py
@@ -24,6 +24,15 @@
def is_synchronized_device(self):
return False
+ def use_host_timers(self):
+ return self.is_synchronized_device()
+
+ def resolves_data_dependency(self):
+ return self.is_synchronized_device()
+
+ def handles_memory_backpressure(self):
+ return self.is_synchronized_device()
+
# Device APIs
def device_name(self, device_index=None):
if device_index is None:
| {"golden_diff": "diff --git a/accelerator/mps_accelerator.py b/accelerator/mps_accelerator.py\n--- a/accelerator/mps_accelerator.py\n+++ b/accelerator/mps_accelerator.py\n@@ -24,6 +24,15 @@\n def is_synchronized_device(self):\n return False\n \n+ def use_host_timers(self):\n+ return self.is_synchronized_device()\n+\n+ def resolves_data_dependency(self):\n+ return self.is_synchronized_device()\n+\n+ def handles_memory_backpressure(self):\n+ return self.is_synchronized_device()\n+\n # Device APIs\n def device_name(self, device_index=None):\n if device_index is None:\n", "issue": "[BUG] TypeError: Can't instantiate abstract class MPS_Accelerator with abstract methods\n**Describe the bug**\r\nTypeError: Can't instantiate abstract class MPS_Accelerator with abstract methods handles_memory_backpressure, resolves_data_dependency, use_host_timers\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. git clone https://github.com/OpenBMB/MiniCPM.git\r\n2. follow setup step.\r\n3. run `!bash lora_finetune.sh` via `lora_finetune.ipynb`\r\n\r\n**Expected behavior**\r\nrunnable\r\n\r\n**ds_report output**\r\nPlease run `ds_report` to give us details about your setup.\r\n\r\n**Screenshots**\r\n```\r\n class CudaEventTimer(object):\r\n File \"/Users/katopz/git/MiniCPM/finetune/.venv/lib/python3.11/site-packages/deepspeed/utils/timer.py\", line 33, in CudaEventTimer\r\n def __init__(self, start_event: get_accelerator().Event, end_event: get_accelerator().Event):\r\n ^^^^^^^^^^^^^^^^^\r\n File \"/Users/katopz/git/MiniCPM/finetune/.venv/lib/python3.11/site-packages/deepspeed/accelerator/real_accelerator.py\", line 184, in get_accelerator\r\n ds_accelerator = MPS_Accelerator()\r\n ^^^^^^^^^^^^^^^^^\r\nTypeError: Can't instantiate abstract class MPS_Accelerator with abstract methods handles_memory_backpressure, resolves_data_dependency, use_host_timers\r\n```\r\n\r\n**System info (please complete the following information):**\r\n - OS: macOS 14.2.1 (23C71)\r\n - metal\r\n - Python 3.11.7\r\n\r\n**Launcher context**\r\ndeepspeed\r\n\r\n**Docker context**\r\nno\r\n\r\n**Additional context**\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nimport torch\n\nfrom .abstract_accelerator import DeepSpeedAccelerator\n\n# During setup stage torch may not be installed, pass on no torch will\n# allow op builder related API to be executed.\ntry:\n import torch.mps\nexcept ImportError:\n pass\n\n\nclass MPS_Accelerator(DeepSpeedAccelerator):\n\n def __init__(self):\n self._name = \"mps\"\n self._communication_backend_name = None\n\n def is_synchronized_device(self):\n return False\n\n # Device APIs\n def device_name(self, device_index=None):\n if device_index is None:\n return \"mps\"\n return \"mps:{}\".format(device_index)\n\n def device(self, device_index):\n return torch.device(\"mps\", index=0)\n\n def set_device(self, device_index):\n return\n\n def current_device(self):\n return torch.device(\"mps\", index=0)\n\n def current_device_name(self):\n return \"mps:0\"\n\n def device_count(self):\n return 1\n\n def synchronize(self, device_index=None):\n return torch.mps.synchronize()\n\n # RNG APIs\n def random(self):\n return torch.random\n\n def set_rng_state(self, new_state, device_index=None):\n return torch.mps.set_rng_state(new_state)\n\n def get_rng_state(self, device_index=None):\n return torch.mps.get_rng_state()\n\n def manual_seed(self, seed):\n return torch.mps.manual_seed(seed)\n\n def manual_seed_all(self, seed):\n return torch.mps.manual_seed(seed)\n\n def seed(self):\n return torch.mps.seed()\n\n def initial_seed(self, seed):\n return\n\n def default_generator(self, device_index):\n return\n\n # Streams/Events\n @property\n def Stream(self):\n return None\n\n def stream(self, stream):\n return None\n\n def current_stream(self, device_index=None):\n return None\n\n def default_stream(self, device_index=None):\n return None\n\n @property\n def Event(self):\n return None\n\n # Memory management\n def empty_cache(self):\n return torch.mps.empty_cache()\n\n def memory_allocated(self, device_index=None):\n return torch.mps.current_allocated_memory()\n\n def max_memory_allocated(self, device_index=None):\n return torch.mps.driver_allocated_memory()\n\n def set_per_process_memory_fraction(self, fraction):\n return torch.mps.set_per_process_memory_fraction(fraction)\n\n def reset_max_memory_allocated(self, device_index=None):\n return\n\n def memory_cached(self, device_index=None):\n return\n\n def max_memory_cached(self, device_index=None):\n return\n\n def reset_max_memory_cached(self, device_index=None):\n return\n\n def memory_stats(self, device_index=None):\n return\n\n def reset_peak_memory_stats(self, device_index=None):\n return\n\n def memory_reserved(self, device_index=None):\n return\n\n def max_memory_reserved(self, device_index=None):\n return\n\n def total_memory(self, device_index=None):\n return\n\n def available_memory(self, device_index=None):\n return\n\n # Data types\n def is_bf16_supported(self):\n return False\n\n def is_fp16_supported(self):\n return False\n\n def supported_dtypes(self):\n return [torch.float]\n\n # Misc\n def amp(self):\n return\n\n def is_available(self):\n return hasattr(torch.backends, \"mps\") and torch.backends.mps.is_available()\n\n def range_push(self, msg):\n return\n\n def range_pop(self):\n return\n\n def lazy_call(self, callback):\n return\n\n def communication_backend_name(self):\n return self._communication_backend_name\n\n def is_triton_supported(self):\n return False\n\n # Graph operations\n def create_graph(self):\n return None\n\n def capture_to_graph(self, graph, pool=None, stream=None):\n from deepspeed.runtime.utils import noop_context\n return noop_context()\n\n def replay_graph(self, graph):\n return\n\n # Tensor operations\n @property\n def BFloat16Tensor(self):\n return\n\n @property\n def ByteTensor(self):\n return\n\n @property\n def DoubleTensor(self):\n return\n\n @property\n def FloatTensor(self):\n return\n\n @property\n def HalfTensor(self):\n return\n\n @property\n def IntTensor(self):\n return\n\n @property\n def LongTensor(self):\n return\n\n def pin_memory(self, tensor, align_bytes=1):\n return tensor.pin_memory()\n\n def is_pinned(self, tensor):\n return tensor.is_pinned()\n\n def on_accelerator(self, tensor):\n device_str = str(tensor.device)\n if device_str.startswith(\"mps\"):\n return True\n else:\n return False\n\n def op_builder_dir(self):\n try:\n # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed\n # if successful this also means we're doing a local install and not JIT compile path\n from op_builder import __deepspeed__ # noqa: F401 # type: ignore\n\n return \"op_builder\"\n except ImportError:\n return \"deepspeed.ops.op_builder\"\n\n # create an instance of op builder, specified by class_name\n def create_op_builder(self, op_name):\n builder_class = self.get_op_builder(op_name)\n if builder_class is not None:\n return builder_class()\n return None\n\n # return an op builder class, specified by class_name\n def get_op_builder(self, class_name):\n from deepspeed.ops.op_builder.cpu import NotImplementedBuilder\n\n return NotImplementedBuilder\n\n def build_extension(self):\n from torch.utils.cpp_extension import BuildExtension\n\n return BuildExtension\n\n def export_envs(self):\n return []\n", "path": "accelerator/mps_accelerator.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nimport torch\n\nfrom .abstract_accelerator import DeepSpeedAccelerator\n\n# During setup stage torch may not be installed, pass on no torch will\n# allow op builder related API to be executed.\ntry:\n import torch.mps\nexcept ImportError:\n pass\n\n\nclass MPS_Accelerator(DeepSpeedAccelerator):\n\n def __init__(self):\n self._name = \"mps\"\n self._communication_backend_name = None\n\n def is_synchronized_device(self):\n return False\n\n def use_host_timers(self):\n return self.is_synchronized_device()\n\n def resolves_data_dependency(self):\n return self.is_synchronized_device()\n\n def handles_memory_backpressure(self):\n return self.is_synchronized_device()\n\n # Device APIs\n def device_name(self, device_index=None):\n if device_index is None:\n return \"mps\"\n return \"mps:{}\".format(device_index)\n\n def device(self, device_index):\n return torch.device(\"mps\", index=0)\n\n def set_device(self, device_index):\n return\n\n def current_device(self):\n return torch.device(\"mps\", index=0)\n\n def current_device_name(self):\n return \"mps:0\"\n\n def device_count(self):\n return 1\n\n def synchronize(self, device_index=None):\n return torch.mps.synchronize()\n\n # RNG APIs\n def random(self):\n return torch.random\n\n def set_rng_state(self, new_state, device_index=None):\n return torch.mps.set_rng_state(new_state)\n\n def get_rng_state(self, device_index=None):\n return torch.mps.get_rng_state()\n\n def manual_seed(self, seed):\n return torch.mps.manual_seed(seed)\n\n def manual_seed_all(self, seed):\n return torch.mps.manual_seed(seed)\n\n def seed(self):\n return torch.mps.seed()\n\n def initial_seed(self, seed):\n return\n\n def default_generator(self, device_index):\n return\n\n # Streams/Events\n @property\n def Stream(self):\n return None\n\n def stream(self, stream):\n return None\n\n def current_stream(self, device_index=None):\n return None\n\n def default_stream(self, device_index=None):\n return None\n\n @property\n def Event(self):\n return None\n\n # Memory management\n def empty_cache(self):\n return torch.mps.empty_cache()\n\n def memory_allocated(self, device_index=None):\n return torch.mps.current_allocated_memory()\n\n def max_memory_allocated(self, device_index=None):\n return torch.mps.driver_allocated_memory()\n\n def set_per_process_memory_fraction(self, fraction):\n return torch.mps.set_per_process_memory_fraction(fraction)\n\n def reset_max_memory_allocated(self, device_index=None):\n return\n\n def memory_cached(self, device_index=None):\n return\n\n def max_memory_cached(self, device_index=None):\n return\n\n def reset_max_memory_cached(self, device_index=None):\n return\n\n def memory_stats(self, device_index=None):\n return\n\n def reset_peak_memory_stats(self, device_index=None):\n return\n\n def memory_reserved(self, device_index=None):\n return\n\n def max_memory_reserved(self, device_index=None):\n return\n\n def total_memory(self, device_index=None):\n return\n\n def available_memory(self, device_index=None):\n return\n\n # Data types\n def is_bf16_supported(self):\n return False\n\n def is_fp16_supported(self):\n return False\n\n def supported_dtypes(self):\n return [torch.float]\n\n # Misc\n def amp(self):\n return\n\n def is_available(self):\n return hasattr(torch.backends, \"mps\") and torch.backends.mps.is_available()\n\n def range_push(self, msg):\n return\n\n def range_pop(self):\n return\n\n def lazy_call(self, callback):\n return\n\n def communication_backend_name(self):\n return self._communication_backend_name\n\n def is_triton_supported(self):\n return False\n\n # Graph operations\n def create_graph(self):\n return None\n\n def capture_to_graph(self, graph, pool=None, stream=None):\n from deepspeed.runtime.utils import noop_context\n return noop_context()\n\n def replay_graph(self, graph):\n return\n\n # Tensor operations\n @property\n def BFloat16Tensor(self):\n return\n\n @property\n def ByteTensor(self):\n return\n\n @property\n def DoubleTensor(self):\n return\n\n @property\n def FloatTensor(self):\n return\n\n @property\n def HalfTensor(self):\n return\n\n @property\n def IntTensor(self):\n return\n\n @property\n def LongTensor(self):\n return\n\n def pin_memory(self, tensor, align_bytes=1):\n return tensor.pin_memory()\n\n def is_pinned(self, tensor):\n return tensor.is_pinned()\n\n def on_accelerator(self, tensor):\n device_str = str(tensor.device)\n if device_str.startswith(\"mps\"):\n return True\n else:\n return False\n\n def op_builder_dir(self):\n try:\n # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed\n # if successful this also means we're doing a local install and not JIT compile path\n from op_builder import __deepspeed__ # noqa: F401 # type: ignore\n\n return \"op_builder\"\n except ImportError:\n return \"deepspeed.ops.op_builder\"\n\n # create an instance of op builder, specified by class_name\n def create_op_builder(self, op_name):\n builder_class = self.get_op_builder(op_name)\n if builder_class is not None:\n return builder_class()\n return None\n\n # return an op builder class, specified by class_name\n def get_op_builder(self, class_name):\n from deepspeed.ops.op_builder.cpu import NotImplementedBuilder\n\n return NotImplementedBuilder\n\n def build_extension(self):\n from torch.utils.cpp_extension import BuildExtension\n\n return BuildExtension\n\n def export_envs(self):\n return []\n", "path": "accelerator/mps_accelerator.py"}]} |
gh_patches_debug_1558 | rasdani/github-patches | git_diff | kovidgoyal__kitty-5932 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Permission Error when using SSH kitten on FreeBSD
Good morning. I was hoping you or someone could point me in the right direction, or is this possibly a bug?
When attempting to use the SSH kitten on FreeBSD I am getting this error:
```bash
$ kitty +kitten ssh test-host
[Errno 13] Permission denied: '/kssh-37040-ece2c2d9cdda503806bc6a18097a0bb79508d4e39266667b9aa16a06830dd0af'
Shared connection to [redacted] closed.
Exception ignored in atexit callback: <bound method SharedMemory.unlink of SharedMemory('/kssh-37040-ece2c2d9cdda503806bc6a18097a0bb79508d4e39266667b9aa16a06830dd0af', size=32718)>
Traceback (most recent call last):
File "/usr/home/user/kitty/launcher/../../kitty/shm.py", line 180, in unlink
shm_unlink(self._name)
PermissionError: [Errno 13] Permission denied: '/kssh-37040-ece2c2d9cdda503806bc6a18097a0bb79508d4e39266667b9aa16a06830dd0af'
```
I am not sure as to what resource the kitty process is lacking permission in order to rectify this.
This is on kitty v0.26.5. Happens in every shell.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kittens/ssh/main.py`
Content:
```
1 #!/usr/bin/env python3
2 # License: GPL v3 Copyright: 2018, Kovid Goyal <kovid at kovidgoyal.net>
3
4 import fnmatch
5 import glob
6 import io
7 import json
8 import os
9 import re
10 import secrets
11 import shlex
12 import shutil
13 import stat
14 import subprocess
15 import sys
16 import tarfile
17 import tempfile
18 import termios
19 import time
20 import traceback
21 from base64 import standard_b64decode, standard_b64encode
22 from contextlib import contextmanager, suppress
23 from getpass import getuser
24 from select import select
25 from typing import Any, Callable, Dict, Iterator, List, NoReturn, Optional, Sequence, Set, Tuple, Union, cast
26
27 from kitty.constants import cache_dir, runtime_dir, shell_integration_dir, ssh_control_master_template, str_version, terminfo_dir
28 from kitty.shell_integration import as_str_literal
29 from kitty.shm import SharedMemory
30 from kitty.types import run_once
31 from kitty.utils import SSHConnectionData, expandvars, resolve_abs_or_config_path
32 from kitty.utils import set_echo as turn_off_echo
33
34 from ..tui.operations import RESTORE_PRIVATE_MODE_VALUES, SAVE_PRIVATE_MODE_VALUES, Mode, restore_colors, save_colors, set_mode
35 from ..tui.utils import kitty_opts, running_in_tmux
36 from .config import init_config
37 from .copy import CopyInstruction
38 from .options.types import Options as SSHOptions
39 from .options.utils import DELETE_ENV_VAR
40 from .utils import create_shared_memory, ssh_options
41
42
43 @run_once
44 def ssh_exe() -> str:
45 return shutil.which('ssh') or 'ssh'
46
47
48 def read_data_from_shared_memory(shm_name: str) -> Any:
49 with SharedMemory(shm_name, readonly=True) as shm:
50 shm.unlink()
51 if shm.stats.st_uid != os.geteuid() or shm.stats.st_gid != os.getegid():
52 raise ValueError('Incorrect owner on pwfile')
53 mode = stat.S_IMODE(shm.stats.st_mode)
54 if mode != stat.S_IREAD:
55 raise ValueError('Incorrect permissions on pwfile')
56 return json.loads(shm.read_data_with_size())
57
58
59 # See https://www.gnu.org/software/bash/manual/html_node/Double-Quotes.html
60 quote_pat = re.compile('([\\`"])')
61
62
63 def quote_env_val(x: str, literal_quote: bool = False) -> str:
64 if literal_quote:
65 return as_str_literal(x)
66 x = quote_pat.sub(r'\\\1', x)
67 x = x.replace('$(', r'\$(') # prevent execution with $()
68 return f'"{x}"'
69
70
71 def serialize_env(literal_env: Dict[str, str], env: Dict[str, str], base_env: Dict[str, str], for_python: bool = False) -> bytes:
72 lines = []
73 literal_quote = True
74
75 if for_python:
76 def a(k: str, val: str = '', prefix: str = 'export') -> None:
77 if val:
78 lines.append(f'{prefix} {json.dumps((k, val, literal_quote))}')
79 else:
80 lines.append(f'{prefix} {json.dumps((k,))}')
81 else:
82 def a(k: str, val: str = '', prefix: str = 'export') -> None:
83 if val:
84 lines.append(f'{prefix} {shlex.quote(k)}={quote_env_val(val, literal_quote)}')
85 else:
86 lines.append(f'{prefix} {shlex.quote(k)}')
87
88 for k, v in literal_env.items():
89 a(k, v)
90
91 literal_quote = False
92 for k in sorted(env):
93 v = env[k]
94 if v == DELETE_ENV_VAR:
95 a(k, prefix='unset')
96 elif v == '_kitty_copy_env_var_':
97 q = base_env.get(k)
98 if q is not None:
99 a(k, q)
100 else:
101 a(k, v)
102 return '\n'.join(lines).encode('utf-8')
103
104
105 def make_tarfile(ssh_opts: SSHOptions, base_env: Dict[str, str], compression: str = 'gz', literal_env: Dict[str, str] = {}) -> bytes:
106
107 def normalize_tarinfo(tarinfo: tarfile.TarInfo) -> tarfile.TarInfo:
108 tarinfo.uname = tarinfo.gname = ''
109 tarinfo.uid = tarinfo.gid = 0
110 # some distro's like nix mess with installed file permissions so ensure
111 # files are at least readable and writable by owning user
112 tarinfo.mode |= stat.S_IWUSR | stat.S_IRUSR
113 return tarinfo
114
115 def add_data_as_file(tf: tarfile.TarFile, arcname: str, data: Union[str, bytes]) -> tarfile.TarInfo:
116 ans = tarfile.TarInfo(arcname)
117 ans.mtime = 0
118 ans.type = tarfile.REGTYPE
119 if isinstance(data, str):
120 data = data.encode('utf-8')
121 ans.size = len(data)
122 normalize_tarinfo(ans)
123 tf.addfile(ans, io.BytesIO(data))
124 return ans
125
126 def filter_from_globs(*pats: str) -> Callable[[tarfile.TarInfo], Optional[tarfile.TarInfo]]:
127 def filter(tarinfo: tarfile.TarInfo) -> Optional[tarfile.TarInfo]:
128 for junk_dir in ('.DS_Store', '__pycache__'):
129 for pat in (f'*/{junk_dir}', f'*/{junk_dir}/*'):
130 if fnmatch.fnmatch(tarinfo.name, pat):
131 return None
132 for pat in pats:
133 if fnmatch.fnmatch(tarinfo.name, pat):
134 return None
135 return normalize_tarinfo(tarinfo)
136 return filter
137
138 from kitty.shell_integration import get_effective_ksi_env_var
139 if ssh_opts.shell_integration == 'inherited':
140 ksi = get_effective_ksi_env_var(kitty_opts())
141 else:
142 from kitty.options.types import Options
143 from kitty.options.utils import shell_integration
144 ksi = get_effective_ksi_env_var(Options({'shell_integration': shell_integration(ssh_opts.shell_integration)}))
145
146 env = {
147 'TERM': os.environ.get('TERM') or kitty_opts().term,
148 'COLORTERM': 'truecolor',
149 }
150 env.update(ssh_opts.env)
151 for q in ('KITTY_WINDOW_ID', 'WINDOWID'):
152 val = os.environ.get(q)
153 if val is not None:
154 env[q] = val
155 env['KITTY_SHELL_INTEGRATION'] = ksi or DELETE_ENV_VAR
156 env['KITTY_SSH_KITTEN_DATA_DIR'] = ssh_opts.remote_dir
157 if ssh_opts.login_shell:
158 env['KITTY_LOGIN_SHELL'] = ssh_opts.login_shell
159 if ssh_opts.cwd:
160 env['KITTY_LOGIN_CWD'] = ssh_opts.cwd
161 if ssh_opts.remote_kitty != 'no':
162 env['KITTY_REMOTE'] = ssh_opts.remote_kitty
163 if os.environ.get('KITTY_PUBLIC_KEY'):
164 env.pop('KITTY_PUBLIC_KEY', None)
165 literal_env['KITTY_PUBLIC_KEY'] = os.environ['KITTY_PUBLIC_KEY']
166 env_script = serialize_env(literal_env, env, base_env, for_python=compression != 'gz')
167 buf = io.BytesIO()
168 with tarfile.open(mode=f'w:{compression}', fileobj=buf, encoding='utf-8') as tf:
169 rd = ssh_opts.remote_dir.rstrip('/')
170 for ci in ssh_opts.copy.values():
171 tf.add(ci.local_path, arcname=ci.arcname, filter=filter_from_globs(*ci.exclude_patterns))
172 add_data_as_file(tf, 'data.sh', env_script)
173 if compression == 'gz':
174 tf.add(f'{shell_integration_dir}/ssh/bootstrap-utils.sh', arcname='bootstrap-utils.sh', filter=normalize_tarinfo)
175 if ksi:
176 arcname = 'home/' + rd + '/shell-integration'
177 tf.add(shell_integration_dir, arcname=arcname, filter=filter_from_globs(
178 f'{arcname}/ssh/*', # bootstrap files are sent as command line args
179 f'{arcname}/zsh/kitty.zsh', # present for legacy compat not needed by ssh kitten
180 ))
181 if ssh_opts.remote_kitty != 'no':
182 arcname = 'home/' + rd + '/kitty'
183 add_data_as_file(tf, arcname + '/version', str_version.encode('ascii'))
184 tf.add(shell_integration_dir + '/ssh/kitty', arcname=arcname + '/bin/kitty', filter=normalize_tarinfo)
185 tf.add(shell_integration_dir + '/ssh/kitten', arcname=arcname + '/bin/kitten', filter=normalize_tarinfo)
186 tf.add(f'{terminfo_dir}/kitty.terminfo', arcname='home/.terminfo/kitty.terminfo', filter=normalize_tarinfo)
187 tf.add(glob.glob(f'{terminfo_dir}/*/xterm-kitty')[0], arcname='home/.terminfo/x/xterm-kitty', filter=normalize_tarinfo)
188 return buf.getvalue()
189
190
191 def get_ssh_data(msg: str, request_id: str) -> Iterator[bytes]:
192 yield b'\nKITTY_DATA_START\n' # to discard leading data
193 try:
194 msg = standard_b64decode(msg).decode('utf-8')
195 md = dict(x.split('=', 1) for x in msg.split(':'))
196 pw = md['pw']
197 pwfilename = md['pwfile']
198 rq_id = md['id']
199 except Exception:
200 traceback.print_exc()
201 yield b'invalid ssh data request message\n'
202 else:
203 try:
204 env_data = read_data_from_shared_memory(pwfilename)
205 if pw != env_data['pw']:
206 raise ValueError('Incorrect password')
207 if rq_id != request_id:
208 raise ValueError(f'Incorrect request id: {rq_id!r} expecting the KITTY_PID-KITTY_WINDOW_ID for the current kitty window')
209 except Exception as e:
210 traceback.print_exc()
211 yield f'{e}\n'.encode('utf-8')
212 else:
213 yield b'OK\n'
214 ssh_opts = SSHOptions(env_data['opts'])
215 ssh_opts.copy = {k: CopyInstruction(*v) for k, v in ssh_opts.copy.items()}
216 encoded_data = memoryview(env_data['tarfile'].encode('ascii'))
217 # macOS has a 255 byte limit on its input queue as per man stty.
218 # Not clear if that applies to canonical mode input as well, but
219 # better to be safe.
220 line_sz = 254
221 while encoded_data:
222 yield encoded_data[:line_sz]
223 yield b'\n'
224 encoded_data = encoded_data[line_sz:]
225 yield b'KITTY_DATA_END\n'
226
227
228 def safe_remove(x: str) -> None:
229 with suppress(OSError):
230 os.remove(x)
231
232
233 def prepare_script(ans: str, replacements: Dict[str, str], script_type: str) -> str:
234 for k in ('EXEC_CMD', 'EXPORT_HOME_CMD'):
235 replacements[k] = replacements.get(k, '')
236
237 def sub(m: 're.Match[str]') -> str:
238 return replacements[m.group()]
239
240 return re.sub('|'.join(fr'\b{k}\b' for k in replacements), sub, ans)
241
242
243 def prepare_exec_cmd(remote_args: Sequence[str], is_python: bool) -> str:
244 # ssh simply concatenates multiple commands using a space see
245 # line 1129 of ssh.c and on the remote side sshd.c runs the
246 # concatenated command as shell -c cmd
247 if is_python:
248 return standard_b64encode(' '.join(remote_args).encode('utf-8')).decode('ascii')
249 args = ' '.join(c.replace("'", """'"'"'""") for c in remote_args)
250 return f"""unset KITTY_SHELL_INTEGRATION; exec "$login_shell" -c '{args}'"""
251
252
253 def prepare_export_home_cmd(ssh_opts: SSHOptions, is_python: bool) -> str:
254 home = ssh_opts.env.get('HOME')
255 if home == '_kitty_copy_env_var_':
256 home = os.environ.get('HOME')
257 if home:
258 if is_python:
259 return standard_b64encode(home.encode('utf-8')).decode('ascii')
260 else:
261 return f'export HOME={quote_env_val(home)}; cd "$HOME"'
262 return ''
263
264
265 def bootstrap_script(
266 ssh_opts: SSHOptions, script_type: str = 'sh', remote_args: Sequence[str] = (),
267 test_script: str = '', request_id: Optional[str] = None, cli_hostname: str = '', cli_uname: str = '',
268 request_data: bool = False, echo_on: bool = True, literal_env: Dict[str, str] = {}
269 ) -> Tuple[str, Dict[str, str], str]:
270 if request_id is None:
271 request_id = os.environ['KITTY_PID'] + '-' + os.environ['KITTY_WINDOW_ID']
272 is_python = script_type == 'py'
273 export_home_cmd = prepare_export_home_cmd(ssh_opts, is_python) if 'HOME' in ssh_opts.env else ''
274 exec_cmd = prepare_exec_cmd(remote_args, is_python) if remote_args else ''
275 with open(os.path.join(shell_integration_dir, 'ssh', f'bootstrap.{script_type}')) as f:
276 ans = f.read()
277 pw = secrets.token_hex()
278 tfd = standard_b64encode(make_tarfile(ssh_opts, dict(os.environ), 'gz' if script_type == 'sh' else 'bz2', literal_env=literal_env)).decode('ascii')
279 data = {'pw': pw, 'opts': ssh_opts._asdict(), 'hostname': cli_hostname, 'uname': cli_uname, 'tarfile': tfd}
280 shm_name = create_shared_memory(data, prefix=f'kssh-{os.getpid()}-')
281 sensitive_data = {'REQUEST_ID': request_id, 'DATA_PASSWORD': pw, 'PASSWORD_FILENAME': shm_name}
282 replacements = {
283 'EXPORT_HOME_CMD': export_home_cmd,
284 'EXEC_CMD': exec_cmd, 'TEST_SCRIPT': test_script,
285 'REQUEST_DATA': '1' if request_data else '0', 'ECHO_ON': '1' if echo_on else '0',
286 }
287 sd = replacements.copy()
288 if request_data:
289 sd.update(sensitive_data)
290 replacements.update(sensitive_data)
291 return prepare_script(ans, sd, script_type), replacements, shm_name
292
293
294 def get_ssh_cli() -> Tuple[Set[str], Set[str]]:
295 other_ssh_args: Set[str] = set()
296 boolean_ssh_args: Set[str] = set()
297 for k, v in ssh_options().items():
298 k = f'-{k}'
299 if v:
300 other_ssh_args.add(k)
301 else:
302 boolean_ssh_args.add(k)
303 return boolean_ssh_args, other_ssh_args
304
305
306 def is_extra_arg(arg: str, extra_args: Tuple[str, ...]) -> str:
307 for x in extra_args:
308 if arg == x or arg.startswith(f'{x}='):
309 return x
310 return ''
311
312
313 def get_connection_data(args: List[str], cwd: str = '', extra_args: Tuple[str, ...] = ()) -> Optional[SSHConnectionData]:
314 boolean_ssh_args, other_ssh_args = get_ssh_cli()
315 port: Optional[int] = None
316 expecting_port = expecting_identity = False
317 expecting_option_val = False
318 expecting_hostname = False
319 expecting_extra_val = ''
320 host_name = identity_file = found_ssh = ''
321 found_extra_args: List[Tuple[str, str]] = []
322
323 for i, arg in enumerate(args):
324 if not found_ssh:
325 if os.path.basename(arg).lower() in ('ssh', 'ssh.exe'):
326 found_ssh = arg
327 continue
328 if expecting_hostname:
329 host_name = arg
330 continue
331 if arg.startswith('-') and not expecting_option_val:
332 if arg in boolean_ssh_args:
333 continue
334 if arg == '--':
335 expecting_hostname = True
336 if arg.startswith('-p'):
337 if arg[2:].isdigit():
338 with suppress(Exception):
339 port = int(arg[2:])
340 continue
341 elif arg == '-p':
342 expecting_port = True
343 elif arg.startswith('-i'):
344 if arg == '-i':
345 expecting_identity = True
346 else:
347 identity_file = arg[2:]
348 continue
349 if arg.startswith('--') and extra_args:
350 matching_ex = is_extra_arg(arg, extra_args)
351 if matching_ex:
352 if '=' in arg:
353 exval = arg.partition('=')[-1]
354 found_extra_args.append((matching_ex, exval))
355 continue
356 expecting_extra_val = matching_ex
357
358 expecting_option_val = True
359 continue
360
361 if expecting_option_val:
362 if expecting_port:
363 with suppress(Exception):
364 port = int(arg)
365 expecting_port = False
366 elif expecting_identity:
367 identity_file = arg
368 elif expecting_extra_val:
369 found_extra_args.append((expecting_extra_val, arg))
370 expecting_extra_val = ''
371 expecting_option_val = False
372 continue
373
374 if not host_name:
375 host_name = arg
376 if not host_name:
377 return None
378 if host_name.startswith('ssh://'):
379 from urllib.parse import urlparse
380 purl = urlparse(host_name)
381 if purl.hostname:
382 host_name = purl.hostname
383 if purl.username:
384 host_name = f'{purl.username}@{host_name}'
385 if port is None and purl.port:
386 port = purl.port
387 if identity_file:
388 if not os.path.isabs(identity_file):
389 identity_file = os.path.expanduser(identity_file)
390 if not os.path.isabs(identity_file):
391 identity_file = os.path.normpath(os.path.join(cwd or os.getcwd(), identity_file))
392
393 return SSHConnectionData(found_ssh, host_name, port, identity_file, tuple(found_extra_args))
394
395
396 class InvalidSSHArgs(ValueError):
397
398 def __init__(self, msg: str = ''):
399 super().__init__(msg)
400 self.err_msg = msg
401
402 def system_exit(self) -> None:
403 if self.err_msg:
404 print(self.err_msg, file=sys.stderr)
405 os.execlp(ssh_exe(), 'ssh')
406
407
408 passthrough_args = {f'-{x}' for x in 'NnfGT'}
409
410
411 def parse_ssh_args(args: List[str], extra_args: Tuple[str, ...] = ()) -> Tuple[List[str], List[str], bool, Tuple[str, ...]]:
412 boolean_ssh_args, other_ssh_args = get_ssh_cli()
413 ssh_args = []
414 server_args: List[str] = []
415 expecting_option_val = False
416 passthrough = False
417 stop_option_processing = False
418 found_extra_args: List[str] = []
419 expecting_extra_val = ''
420 for argument in args:
421 if len(server_args) > 1 or stop_option_processing:
422 server_args.append(argument)
423 continue
424 if argument.startswith('-') and not expecting_option_val:
425 if argument == '--':
426 stop_option_processing = True
427 continue
428 if extra_args:
429 matching_ex = is_extra_arg(argument, extra_args)
430 if matching_ex:
431 if '=' in argument:
432 exval = argument.partition('=')[-1]
433 found_extra_args.extend((matching_ex, exval))
434 else:
435 expecting_extra_val = matching_ex
436 expecting_option_val = True
437 continue
438 # could be a multi-character option
439 all_args = argument[1:]
440 for i, arg in enumerate(all_args):
441 arg = f'-{arg}'
442 if arg in passthrough_args:
443 passthrough = True
444 if arg in boolean_ssh_args:
445 ssh_args.append(arg)
446 continue
447 if arg in other_ssh_args:
448 ssh_args.append(arg)
449 rest = all_args[i+1:]
450 if rest:
451 ssh_args.append(rest)
452 else:
453 expecting_option_val = True
454 break
455 raise InvalidSSHArgs(f'unknown option -- {arg[1:]}')
456 continue
457 if expecting_option_val:
458 if expecting_extra_val:
459 found_extra_args.extend((expecting_extra_val, argument))
460 expecting_extra_val = ''
461 else:
462 ssh_args.append(argument)
463 expecting_option_val = False
464 continue
465 server_args.append(argument)
466 if not server_args:
467 raise InvalidSSHArgs()
468 return ssh_args, server_args, passthrough, tuple(found_extra_args)
469
470
471 def wrap_bootstrap_script(sh_script: str, interpreter: str) -> List[str]:
472 # sshd will execute the command we pass it by join all command line
473 # arguments with a space and passing it as a single argument to the users
474 # login shell with -c. If the user has a non POSIX login shell it might
475 # have different escaping semantics and syntax, so the command it should
476 # execute has to be as simple as possible, basically of the form
477 # interpreter -c unwrap_script escaped_bootstrap_script
478 # The unwrap_script is responsible for unescaping the bootstrap script and
479 # executing it.
480 q = os.path.basename(interpreter).lower()
481 is_python = 'python' in q
482 if is_python:
483 es = standard_b64encode(sh_script.encode('utf-8')).decode('ascii')
484 unwrap_script = '''"import base64, sys; eval(compile(base64.standard_b64decode(sys.argv[-1]), 'bootstrap.py', 'exec'))"'''
485 else:
486 # We cant rely on base64 being available on the remote system, so instead
487 # we quote the bootstrap script by replacing ' and \ with \v and \f
488 # also replacing \n and ! with \r and \b for tcsh
489 # finally surrounding with '
490 es = "'" + sh_script.replace("'", '\v').replace('\\', '\f').replace('\n', '\r').replace('!', '\b') + "'"
491 unwrap_script = r"""'eval "$(echo "$0" | tr \\\v\\\f\\\r\\\b \\\047\\\134\\\n\\\041)"' """
492 # exec is supported by all sh like shells, and fish and csh
493 return ['exec', interpreter, '-c', unwrap_script, es]
494
495
496 def get_remote_command(
497 remote_args: List[str], ssh_opts: SSHOptions, cli_hostname: str = '', cli_uname: str = '',
498 echo_on: bool = True, request_data: bool = False, literal_env: Dict[str, str] = {}
499 ) -> Tuple[List[str], Dict[str, str], str]:
500 interpreter = ssh_opts.interpreter
501 q = os.path.basename(interpreter).lower()
502 is_python = 'python' in q
503 sh_script, replacements, shm_name = bootstrap_script(
504 ssh_opts, script_type='py' if is_python else 'sh', remote_args=remote_args, literal_env=literal_env,
505 cli_hostname=cli_hostname, cli_uname=cli_uname, echo_on=echo_on, request_data=request_data)
506 return wrap_bootstrap_script(sh_script, interpreter), replacements, shm_name
507
508
509 def connection_sharing_args(kitty_pid: int) -> List[str]:
510 rd = runtime_dir()
511 # Bloody OpenSSH generates a 40 char hash and in creating the socket
512 # appends a 27 char temp suffix to it. Socket max path length is approx
513 # ~104 chars. macOS has no system runtime dir so we use a cache dir in
514 # /Users/WHY_DOES_ANYONE_USE_MACOS/Library/Caches/APPLE_ARE_IDIOTIC
515 if len(rd) > 35 and os.path.isdir('/tmp'):
516 idiotic_design = f'/tmp/kssh-rdir-{os.getuid()}'
517 try:
518 os.symlink(rd, idiotic_design)
519 except FileExistsError:
520 try:
521 dest = os.readlink(idiotic_design)
522 except OSError as e:
523 raise ValueError(f'The {idiotic_design} symlink could not be created as something with that name exists already') from e
524 else:
525 if dest != rd:
526 with tempfile.TemporaryDirectory(dir='/tmp') as tdir:
527 tlink = os.path.join(tdir, 'sigh')
528 os.symlink(rd, tlink)
529 os.rename(tlink, idiotic_design)
530 rd = idiotic_design
531
532 cp = os.path.join(rd, ssh_control_master_template.format(kitty_pid=kitty_pid, ssh_placeholder='%C'))
533 ans: List[str] = [
534 '-o', 'ControlMaster=auto',
535 '-o', f'ControlPath={cp}',
536 '-o', 'ControlPersist=yes',
537 '-o', 'ServerAliveInterval=60',
538 '-o', 'ServerAliveCountMax=5',
539 '-o', 'TCPKeepAlive=no',
540 ]
541 return ans
542
543
544 @contextmanager
545 def restore_terminal_state() -> Iterator[bool]:
546 with open(os.ctermid()) as f:
547 val = termios.tcgetattr(f.fileno())
548 print(end=SAVE_PRIVATE_MODE_VALUES)
549 print(end=set_mode(Mode.HANDLE_TERMIOS_SIGNALS), flush=True)
550 try:
551 yield bool(val[3] & termios.ECHO)
552 finally:
553 termios.tcsetattr(f.fileno(), termios.TCSAFLUSH, val)
554 print(end=RESTORE_PRIVATE_MODE_VALUES, flush=True)
555
556
557 def dcs_to_kitty(payload: Union[bytes, str], type: str = 'ssh') -> bytes:
558 if isinstance(payload, str):
559 payload = payload.encode('utf-8')
560 payload = standard_b64encode(payload)
561 ans = b'\033P@kitty-' + type.encode('ascii') + b'|' + payload
562 tmux = running_in_tmux()
563 if tmux:
564 cp = subprocess.run([tmux, 'set', '-p', 'allow-passthrough', 'on'])
565 if cp.returncode != 0:
566 raise SystemExit(cp.returncode)
567 ans = b'\033Ptmux;\033' + ans + b'\033\033\\\033\\'
568 else:
569 ans += b'\033\\'
570 return ans
571
572
573 @run_once
574 def ssh_version() -> Tuple[int, int]:
575 o = subprocess.check_output([ssh_exe(), '-V'], stderr=subprocess.STDOUT).decode()
576 m = re.match(r'OpenSSH_(\d+).(\d+)', o)
577 if m is None:
578 raise ValueError(f'Invalid version string for OpenSSH: {o}')
579 return int(m.group(1)), int(m.group(2))
580
581
582 @contextmanager
583 def drain_potential_tty_garbage(p: 'subprocess.Popen[bytes]', data_request: str) -> Iterator[None]:
584 with open(os.open(os.ctermid(), os.O_CLOEXEC | os.O_RDWR | os.O_NOCTTY), 'wb') as tty:
585 if data_request:
586 turn_off_echo(tty.fileno())
587 tty.write(dcs_to_kitty(data_request))
588 tty.flush()
589 try:
590 yield
591 finally:
592 # discard queued input data on tty in case data transmission was
593 # interrupted due to SSH failure, avoids spewing garbage to screen
594 from uuid import uuid4
595 canary = uuid4().hex.encode('ascii')
596 turn_off_echo(tty.fileno())
597 tty.write(dcs_to_kitty(canary + b'\n\r', type='echo'))
598 tty.flush()
599 data = b''
600 give_up_at = time.monotonic() + 2
601 tty_fd = tty.fileno()
602 while time.monotonic() < give_up_at and canary not in data:
603 with suppress(KeyboardInterrupt):
604 rd, wr, err = select([tty_fd], [], [tty_fd], max(0, give_up_at - time.monotonic()))
605 if err or not rd:
606 break
607 q = os.read(tty_fd, io.DEFAULT_BUFFER_SIZE)
608 if not q:
609 break
610 data += q
611
612
613 def change_colors(color_scheme: str) -> bool:
614 if not color_scheme:
615 return False
616 from kittens.themes.collection import NoCacheFound, load_themes, text_as_opts
617 from kittens.themes.main import colors_as_escape_codes
618 if color_scheme.endswith('.conf'):
619 conf_file = resolve_abs_or_config_path(color_scheme)
620 try:
621 with open(conf_file) as f:
622 opts = text_as_opts(f.read())
623 except FileNotFoundError:
624 raise SystemExit(f'Failed to find the color conf file: {expandvars(conf_file)}')
625 else:
626 try:
627 themes = load_themes(-1)
628 except NoCacheFound:
629 themes = load_themes()
630 cs = expandvars(color_scheme)
631 try:
632 theme = themes[cs]
633 except KeyError:
634 raise SystemExit(f'Failed to find the color theme: {cs}')
635 opts = theme.kitty_opts
636 raw = colors_as_escape_codes(opts)
637 print(save_colors(), sep='', end=raw, flush=True)
638 return True
639
640
641 def add_cloned_env(shm_name: str) -> Dict[str, str]:
642 try:
643 return cast(Dict[str, str], read_data_from_shared_memory(shm_name))
644 except FileNotFoundError:
645 pass
646 return {}
647
648
649 def run_ssh(ssh_args: List[str], server_args: List[str], found_extra_args: Tuple[str, ...]) -> NoReturn:
650 cmd = [ssh_exe()] + ssh_args
651 hostname, remote_args = server_args[0], server_args[1:]
652 if not remote_args:
653 cmd.append('-t')
654 insertion_point = len(cmd)
655 cmd.append('--')
656 cmd.append(hostname)
657 uname = getuser()
658 if hostname.startswith('ssh://'):
659 from urllib.parse import urlparse
660 purl = urlparse(hostname)
661 hostname_for_match = purl.hostname or hostname[6:].split('/', 1)[0]
662 uname = purl.username or uname
663 elif '@' in hostname and hostname[0] != '@':
664 uname, hostname_for_match = hostname.split('@', 1)
665 else:
666 hostname_for_match = hostname
667 hostname_for_match = hostname_for_match.split('@', 1)[-1].split(':', 1)[0]
668 overrides: List[str] = []
669 literal_env: Dict[str, str] = {}
670 pat = re.compile(r'^([a-zA-Z0-9_]+)[ \t]*=')
671 for i, a in enumerate(found_extra_args):
672 if i % 2 == 1:
673 aq = pat.sub(r'\1 ', a.lstrip())
674 key = aq.split(maxsplit=1)[0]
675 if key == 'clone_env':
676 literal_env = add_cloned_env(aq.split(maxsplit=1)[1])
677 elif key != 'hostname':
678 overrides.append(aq)
679 if overrides:
680 overrides.insert(0, f'hostname {uname}@{hostname_for_match}')
681 host_opts = init_config(hostname_for_match, uname, overrides)
682 if host_opts.share_connections:
683 cmd[insertion_point:insertion_point] = connection_sharing_args(int(os.environ['KITTY_PID']))
684 use_kitty_askpass = host_opts.askpass == 'native' or (host_opts.askpass == 'unless-set' and 'SSH_ASKPASS' not in os.environ)
685 need_to_request_data = True
686 if use_kitty_askpass:
687 sentinel = os.path.join(cache_dir(), 'openssh-is-new-enough-for-askpass')
688 sentinel_exists = os.path.exists(sentinel)
689 if sentinel_exists or ssh_version() >= (8, 4):
690 if not sentinel_exists:
691 open(sentinel, 'w').close()
692 # SSH_ASKPASS_REQUIRE was introduced in 8.4 release on 2020-09-27
693 need_to_request_data = False
694 os.environ['SSH_ASKPASS_REQUIRE'] = 'force'
695 os.environ['SSH_ASKPASS'] = os.path.join(shell_integration_dir, 'ssh', 'askpass.py')
696 if need_to_request_data and host_opts.share_connections:
697 cp = subprocess.run(cmd[:1] + ['-O', 'check'] + cmd[1:], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
698 if cp.returncode == 0:
699 # we will use the master connection so SSH does not need to use the tty
700 need_to_request_data = False
701 with restore_terminal_state() as echo_on:
702 rcmd, replacements, shm_name = get_remote_command(
703 remote_args, host_opts, hostname_for_match, uname, echo_on, request_data=need_to_request_data, literal_env=literal_env)
704 cmd += rcmd
705 colors_changed = change_colors(host_opts.color_scheme)
706 try:
707 p = subprocess.Popen(cmd)
708 except FileNotFoundError:
709 raise SystemExit('Could not find the ssh executable, is it in your PATH?')
710 else:
711 rq = '' if need_to_request_data else 'id={REQUEST_ID}:pwfile={PASSWORD_FILENAME}:pw={DATA_PASSWORD}'.format(**replacements)
712 with drain_potential_tty_garbage(p, rq):
713 raise SystemExit(p.wait())
714 finally:
715 if colors_changed:
716 print(end=restore_colors(), flush=True)
717
718
719 def main(args: List[str]) -> None:
720 args = args[1:]
721 if args and args[0] == 'use-python':
722 args = args[1:] # backwards compat from when we had a python implementation
723 try:
724 ssh_args, server_args, passthrough, found_extra_args = parse_ssh_args(args, extra_args=('--kitten',))
725 except InvalidSSHArgs as e:
726 e.system_exit()
727 if passthrough:
728 if found_extra_args:
729 raise SystemExit(f'The SSH kitten cannot work with the options: {", ".join(passthrough_args)}')
730 os.execlp(ssh_exe(), 'ssh', *args)
731
732 if not os.environ.get('KITTY_WINDOW_ID') or not os.environ.get('KITTY_PID'):
733 raise SystemExit('The SSH kitten is meant to run inside a kitty window')
734 if not sys.stdin.isatty():
735 raise SystemExit('The SSH kitten is meant for interactive use only, STDIN must be a terminal')
736 try:
737 run_ssh(ssh_args, server_args, found_extra_args)
738 except KeyboardInterrupt:
739 sys.excepthook = lambda *a: None
740 raise
741
742
743 if __name__ == '__main__':
744 main(sys.argv)
745 elif __name__ == '__wrapper_of__':
746 cd = sys.cli_docs # type: ignore
747 cd['wrapper_of'] = 'ssh'
748 elif __name__ == '__conf__':
749 from .options.definition import definition
750 sys.options_definition = definition # type: ignore
751
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kittens/ssh/main.py b/kittens/ssh/main.py
--- a/kittens/ssh/main.py
+++ b/kittens/ssh/main.py
@@ -50,9 +50,6 @@
shm.unlink()
if shm.stats.st_uid != os.geteuid() or shm.stats.st_gid != os.getegid():
raise ValueError('Incorrect owner on pwfile')
- mode = stat.S_IMODE(shm.stats.st_mode)
- if mode != stat.S_IREAD:
- raise ValueError('Incorrect permissions on pwfile')
return json.loads(shm.read_data_with_size())
| {"golden_diff": "diff --git a/kittens/ssh/main.py b/kittens/ssh/main.py\n--- a/kittens/ssh/main.py\n+++ b/kittens/ssh/main.py\n@@ -50,9 +50,6 @@\n shm.unlink()\n if shm.stats.st_uid != os.geteuid() or shm.stats.st_gid != os.getegid():\n raise ValueError('Incorrect owner on pwfile')\n- mode = stat.S_IMODE(shm.stats.st_mode)\n- if mode != stat.S_IREAD:\n- raise ValueError('Incorrect permissions on pwfile')\n return json.loads(shm.read_data_with_size())\n", "issue": "Permission Error when using SSH kitten on FreeBSD\nGood morning. I was hoping you or someone could point me in the right direction, or is this possibly a bug?\r\n\r\nWhen attempting to use the SSH kitten on FreeBSD I am getting this error:\r\n\r\n```bash\r\n$ kitty +kitten ssh test-host\r\n[Errno 13] Permission denied: '/kssh-37040-ece2c2d9cdda503806bc6a18097a0bb79508d4e39266667b9aa16a06830dd0af'\r\nShared connection to [redacted] closed.\r\nException ignored in atexit callback: <bound method SharedMemory.unlink of SharedMemory('/kssh-37040-ece2c2d9cdda503806bc6a18097a0bb79508d4e39266667b9aa16a06830dd0af', size=32718)>\r\nTraceback (most recent call last):\r\n File \"/usr/home/user/kitty/launcher/../../kitty/shm.py\", line 180, in unlink\r\n shm_unlink(self._name)\r\nPermissionError: [Errno 13] Permission denied: '/kssh-37040-ece2c2d9cdda503806bc6a18097a0bb79508d4e39266667b9aa16a06830dd0af'\r\n```\r\n\r\nI am not sure as to what resource the kitty process is lacking permission in order to rectify this.\r\n\r\nThis is on kitty v0.26.5. Happens in every shell.\n", "before_files": [{"content": "#!/usr/bin/env python3\n# License: GPL v3 Copyright: 2018, Kovid Goyal <kovid at kovidgoyal.net>\n\nimport fnmatch\nimport glob\nimport io\nimport json\nimport os\nimport re\nimport secrets\nimport shlex\nimport shutil\nimport stat\nimport subprocess\nimport sys\nimport tarfile\nimport tempfile\nimport termios\nimport time\nimport traceback\nfrom base64 import standard_b64decode, standard_b64encode\nfrom contextlib import contextmanager, suppress\nfrom getpass import getuser\nfrom select import select\nfrom typing import Any, Callable, Dict, Iterator, List, NoReturn, Optional, Sequence, Set, Tuple, Union, cast\n\nfrom kitty.constants import cache_dir, runtime_dir, shell_integration_dir, ssh_control_master_template, str_version, terminfo_dir\nfrom kitty.shell_integration import as_str_literal\nfrom kitty.shm import SharedMemory\nfrom kitty.types import run_once\nfrom kitty.utils import SSHConnectionData, expandvars, resolve_abs_or_config_path\nfrom kitty.utils import set_echo as turn_off_echo\n\nfrom ..tui.operations import RESTORE_PRIVATE_MODE_VALUES, SAVE_PRIVATE_MODE_VALUES, Mode, restore_colors, save_colors, set_mode\nfrom ..tui.utils import kitty_opts, running_in_tmux\nfrom .config import init_config\nfrom .copy import CopyInstruction\nfrom .options.types import Options as SSHOptions\nfrom .options.utils import DELETE_ENV_VAR\nfrom .utils import create_shared_memory, ssh_options\n\n\n@run_once\ndef ssh_exe() -> str:\n return shutil.which('ssh') or 'ssh'\n\n\ndef read_data_from_shared_memory(shm_name: str) -> Any:\n with SharedMemory(shm_name, readonly=True) as shm:\n shm.unlink()\n if shm.stats.st_uid != os.geteuid() or shm.stats.st_gid != os.getegid():\n raise ValueError('Incorrect owner on pwfile')\n mode = stat.S_IMODE(shm.stats.st_mode)\n if mode != stat.S_IREAD:\n raise ValueError('Incorrect permissions on pwfile')\n return json.loads(shm.read_data_with_size())\n\n\n# See https://www.gnu.org/software/bash/manual/html_node/Double-Quotes.html\nquote_pat = re.compile('([\\\\`\"])')\n\n\ndef quote_env_val(x: str, literal_quote: bool = False) -> str:\n if literal_quote:\n return as_str_literal(x)\n x = quote_pat.sub(r'\\\\\\1', x)\n x = x.replace('$(', r'\\$(') # prevent execution with $()\n return f'\"{x}\"'\n\n\ndef serialize_env(literal_env: Dict[str, str], env: Dict[str, str], base_env: Dict[str, str], for_python: bool = False) -> bytes:\n lines = []\n literal_quote = True\n\n if for_python:\n def a(k: str, val: str = '', prefix: str = 'export') -> None:\n if val:\n lines.append(f'{prefix} {json.dumps((k, val, literal_quote))}')\n else:\n lines.append(f'{prefix} {json.dumps((k,))}')\n else:\n def a(k: str, val: str = '', prefix: str = 'export') -> None:\n if val:\n lines.append(f'{prefix} {shlex.quote(k)}={quote_env_val(val, literal_quote)}')\n else:\n lines.append(f'{prefix} {shlex.quote(k)}')\n\n for k, v in literal_env.items():\n a(k, v)\n\n literal_quote = False\n for k in sorted(env):\n v = env[k]\n if v == DELETE_ENV_VAR:\n a(k, prefix='unset')\n elif v == '_kitty_copy_env_var_':\n q = base_env.get(k)\n if q is not None:\n a(k, q)\n else:\n a(k, v)\n return '\\n'.join(lines).encode('utf-8')\n\n\ndef make_tarfile(ssh_opts: SSHOptions, base_env: Dict[str, str], compression: str = 'gz', literal_env: Dict[str, str] = {}) -> bytes:\n\n def normalize_tarinfo(tarinfo: tarfile.TarInfo) -> tarfile.TarInfo:\n tarinfo.uname = tarinfo.gname = ''\n tarinfo.uid = tarinfo.gid = 0\n # some distro's like nix mess with installed file permissions so ensure\n # files are at least readable and writable by owning user\n tarinfo.mode |= stat.S_IWUSR | stat.S_IRUSR\n return tarinfo\n\n def add_data_as_file(tf: tarfile.TarFile, arcname: str, data: Union[str, bytes]) -> tarfile.TarInfo:\n ans = tarfile.TarInfo(arcname)\n ans.mtime = 0\n ans.type = tarfile.REGTYPE\n if isinstance(data, str):\n data = data.encode('utf-8')\n ans.size = len(data)\n normalize_tarinfo(ans)\n tf.addfile(ans, io.BytesIO(data))\n return ans\n\n def filter_from_globs(*pats: str) -> Callable[[tarfile.TarInfo], Optional[tarfile.TarInfo]]:\n def filter(tarinfo: tarfile.TarInfo) -> Optional[tarfile.TarInfo]:\n for junk_dir in ('.DS_Store', '__pycache__'):\n for pat in (f'*/{junk_dir}', f'*/{junk_dir}/*'):\n if fnmatch.fnmatch(tarinfo.name, pat):\n return None\n for pat in pats:\n if fnmatch.fnmatch(tarinfo.name, pat):\n return None\n return normalize_tarinfo(tarinfo)\n return filter\n\n from kitty.shell_integration import get_effective_ksi_env_var\n if ssh_opts.shell_integration == 'inherited':\n ksi = get_effective_ksi_env_var(kitty_opts())\n else:\n from kitty.options.types import Options\n from kitty.options.utils import shell_integration\n ksi = get_effective_ksi_env_var(Options({'shell_integration': shell_integration(ssh_opts.shell_integration)}))\n\n env = {\n 'TERM': os.environ.get('TERM') or kitty_opts().term,\n 'COLORTERM': 'truecolor',\n }\n env.update(ssh_opts.env)\n for q in ('KITTY_WINDOW_ID', 'WINDOWID'):\n val = os.environ.get(q)\n if val is not None:\n env[q] = val\n env['KITTY_SHELL_INTEGRATION'] = ksi or DELETE_ENV_VAR\n env['KITTY_SSH_KITTEN_DATA_DIR'] = ssh_opts.remote_dir\n if ssh_opts.login_shell:\n env['KITTY_LOGIN_SHELL'] = ssh_opts.login_shell\n if ssh_opts.cwd:\n env['KITTY_LOGIN_CWD'] = ssh_opts.cwd\n if ssh_opts.remote_kitty != 'no':\n env['KITTY_REMOTE'] = ssh_opts.remote_kitty\n if os.environ.get('KITTY_PUBLIC_KEY'):\n env.pop('KITTY_PUBLIC_KEY', None)\n literal_env['KITTY_PUBLIC_KEY'] = os.environ['KITTY_PUBLIC_KEY']\n env_script = serialize_env(literal_env, env, base_env, for_python=compression != 'gz')\n buf = io.BytesIO()\n with tarfile.open(mode=f'w:{compression}', fileobj=buf, encoding='utf-8') as tf:\n rd = ssh_opts.remote_dir.rstrip('/')\n for ci in ssh_opts.copy.values():\n tf.add(ci.local_path, arcname=ci.arcname, filter=filter_from_globs(*ci.exclude_patterns))\n add_data_as_file(tf, 'data.sh', env_script)\n if compression == 'gz':\n tf.add(f'{shell_integration_dir}/ssh/bootstrap-utils.sh', arcname='bootstrap-utils.sh', filter=normalize_tarinfo)\n if ksi:\n arcname = 'home/' + rd + '/shell-integration'\n tf.add(shell_integration_dir, arcname=arcname, filter=filter_from_globs(\n f'{arcname}/ssh/*', # bootstrap files are sent as command line args\n f'{arcname}/zsh/kitty.zsh', # present for legacy compat not needed by ssh kitten\n ))\n if ssh_opts.remote_kitty != 'no':\n arcname = 'home/' + rd + '/kitty'\n add_data_as_file(tf, arcname + '/version', str_version.encode('ascii'))\n tf.add(shell_integration_dir + '/ssh/kitty', arcname=arcname + '/bin/kitty', filter=normalize_tarinfo)\n tf.add(shell_integration_dir + '/ssh/kitten', arcname=arcname + '/bin/kitten', filter=normalize_tarinfo)\n tf.add(f'{terminfo_dir}/kitty.terminfo', arcname='home/.terminfo/kitty.terminfo', filter=normalize_tarinfo)\n tf.add(glob.glob(f'{terminfo_dir}/*/xterm-kitty')[0], arcname='home/.terminfo/x/xterm-kitty', filter=normalize_tarinfo)\n return buf.getvalue()\n\n\ndef get_ssh_data(msg: str, request_id: str) -> Iterator[bytes]:\n yield b'\\nKITTY_DATA_START\\n' # to discard leading data\n try:\n msg = standard_b64decode(msg).decode('utf-8')\n md = dict(x.split('=', 1) for x in msg.split(':'))\n pw = md['pw']\n pwfilename = md['pwfile']\n rq_id = md['id']\n except Exception:\n traceback.print_exc()\n yield b'invalid ssh data request message\\n'\n else:\n try:\n env_data = read_data_from_shared_memory(pwfilename)\n if pw != env_data['pw']:\n raise ValueError('Incorrect password')\n if rq_id != request_id:\n raise ValueError(f'Incorrect request id: {rq_id!r} expecting the KITTY_PID-KITTY_WINDOW_ID for the current kitty window')\n except Exception as e:\n traceback.print_exc()\n yield f'{e}\\n'.encode('utf-8')\n else:\n yield b'OK\\n'\n ssh_opts = SSHOptions(env_data['opts'])\n ssh_opts.copy = {k: CopyInstruction(*v) for k, v in ssh_opts.copy.items()}\n encoded_data = memoryview(env_data['tarfile'].encode('ascii'))\n # macOS has a 255 byte limit on its input queue as per man stty.\n # Not clear if that applies to canonical mode input as well, but\n # better to be safe.\n line_sz = 254\n while encoded_data:\n yield encoded_data[:line_sz]\n yield b'\\n'\n encoded_data = encoded_data[line_sz:]\n yield b'KITTY_DATA_END\\n'\n\n\ndef safe_remove(x: str) -> None:\n with suppress(OSError):\n os.remove(x)\n\n\ndef prepare_script(ans: str, replacements: Dict[str, str], script_type: str) -> str:\n for k in ('EXEC_CMD', 'EXPORT_HOME_CMD'):\n replacements[k] = replacements.get(k, '')\n\n def sub(m: 're.Match[str]') -> str:\n return replacements[m.group()]\n\n return re.sub('|'.join(fr'\\b{k}\\b' for k in replacements), sub, ans)\n\n\ndef prepare_exec_cmd(remote_args: Sequence[str], is_python: bool) -> str:\n # ssh simply concatenates multiple commands using a space see\n # line 1129 of ssh.c and on the remote side sshd.c runs the\n # concatenated command as shell -c cmd\n if is_python:\n return standard_b64encode(' '.join(remote_args).encode('utf-8')).decode('ascii')\n args = ' '.join(c.replace(\"'\", \"\"\"'\"'\"'\"\"\") for c in remote_args)\n return f\"\"\"unset KITTY_SHELL_INTEGRATION; exec \"$login_shell\" -c '{args}'\"\"\"\n\n\ndef prepare_export_home_cmd(ssh_opts: SSHOptions, is_python: bool) -> str:\n home = ssh_opts.env.get('HOME')\n if home == '_kitty_copy_env_var_':\n home = os.environ.get('HOME')\n if home:\n if is_python:\n return standard_b64encode(home.encode('utf-8')).decode('ascii')\n else:\n return f'export HOME={quote_env_val(home)}; cd \"$HOME\"'\n return ''\n\n\ndef bootstrap_script(\n ssh_opts: SSHOptions, script_type: str = 'sh', remote_args: Sequence[str] = (),\n test_script: str = '', request_id: Optional[str] = None, cli_hostname: str = '', cli_uname: str = '',\n request_data: bool = False, echo_on: bool = True, literal_env: Dict[str, str] = {}\n) -> Tuple[str, Dict[str, str], str]:\n if request_id is None:\n request_id = os.environ['KITTY_PID'] + '-' + os.environ['KITTY_WINDOW_ID']\n is_python = script_type == 'py'\n export_home_cmd = prepare_export_home_cmd(ssh_opts, is_python) if 'HOME' in ssh_opts.env else ''\n exec_cmd = prepare_exec_cmd(remote_args, is_python) if remote_args else ''\n with open(os.path.join(shell_integration_dir, 'ssh', f'bootstrap.{script_type}')) as f:\n ans = f.read()\n pw = secrets.token_hex()\n tfd = standard_b64encode(make_tarfile(ssh_opts, dict(os.environ), 'gz' if script_type == 'sh' else 'bz2', literal_env=literal_env)).decode('ascii')\n data = {'pw': pw, 'opts': ssh_opts._asdict(), 'hostname': cli_hostname, 'uname': cli_uname, 'tarfile': tfd}\n shm_name = create_shared_memory(data, prefix=f'kssh-{os.getpid()}-')\n sensitive_data = {'REQUEST_ID': request_id, 'DATA_PASSWORD': pw, 'PASSWORD_FILENAME': shm_name}\n replacements = {\n 'EXPORT_HOME_CMD': export_home_cmd,\n 'EXEC_CMD': exec_cmd, 'TEST_SCRIPT': test_script,\n 'REQUEST_DATA': '1' if request_data else '0', 'ECHO_ON': '1' if echo_on else '0',\n }\n sd = replacements.copy()\n if request_data:\n sd.update(sensitive_data)\n replacements.update(sensitive_data)\n return prepare_script(ans, sd, script_type), replacements, shm_name\n\n\ndef get_ssh_cli() -> Tuple[Set[str], Set[str]]:\n other_ssh_args: Set[str] = set()\n boolean_ssh_args: Set[str] = set()\n for k, v in ssh_options().items():\n k = f'-{k}'\n if v:\n other_ssh_args.add(k)\n else:\n boolean_ssh_args.add(k)\n return boolean_ssh_args, other_ssh_args\n\n\ndef is_extra_arg(arg: str, extra_args: Tuple[str, ...]) -> str:\n for x in extra_args:\n if arg == x or arg.startswith(f'{x}='):\n return x\n return ''\n\n\ndef get_connection_data(args: List[str], cwd: str = '', extra_args: Tuple[str, ...] = ()) -> Optional[SSHConnectionData]:\n boolean_ssh_args, other_ssh_args = get_ssh_cli()\n port: Optional[int] = None\n expecting_port = expecting_identity = False\n expecting_option_val = False\n expecting_hostname = False\n expecting_extra_val = ''\n host_name = identity_file = found_ssh = ''\n found_extra_args: List[Tuple[str, str]] = []\n\n for i, arg in enumerate(args):\n if not found_ssh:\n if os.path.basename(arg).lower() in ('ssh', 'ssh.exe'):\n found_ssh = arg\n continue\n if expecting_hostname:\n host_name = arg\n continue\n if arg.startswith('-') and not expecting_option_val:\n if arg in boolean_ssh_args:\n continue\n if arg == '--':\n expecting_hostname = True\n if arg.startswith('-p'):\n if arg[2:].isdigit():\n with suppress(Exception):\n port = int(arg[2:])\n continue\n elif arg == '-p':\n expecting_port = True\n elif arg.startswith('-i'):\n if arg == '-i':\n expecting_identity = True\n else:\n identity_file = arg[2:]\n continue\n if arg.startswith('--') and extra_args:\n matching_ex = is_extra_arg(arg, extra_args)\n if matching_ex:\n if '=' in arg:\n exval = arg.partition('=')[-1]\n found_extra_args.append((matching_ex, exval))\n continue\n expecting_extra_val = matching_ex\n\n expecting_option_val = True\n continue\n\n if expecting_option_val:\n if expecting_port:\n with suppress(Exception):\n port = int(arg)\n expecting_port = False\n elif expecting_identity:\n identity_file = arg\n elif expecting_extra_val:\n found_extra_args.append((expecting_extra_val, arg))\n expecting_extra_val = ''\n expecting_option_val = False\n continue\n\n if not host_name:\n host_name = arg\n if not host_name:\n return None\n if host_name.startswith('ssh://'):\n from urllib.parse import urlparse\n purl = urlparse(host_name)\n if purl.hostname:\n host_name = purl.hostname\n if purl.username:\n host_name = f'{purl.username}@{host_name}'\n if port is None and purl.port:\n port = purl.port\n if identity_file:\n if not os.path.isabs(identity_file):\n identity_file = os.path.expanduser(identity_file)\n if not os.path.isabs(identity_file):\n identity_file = os.path.normpath(os.path.join(cwd or os.getcwd(), identity_file))\n\n return SSHConnectionData(found_ssh, host_name, port, identity_file, tuple(found_extra_args))\n\n\nclass InvalidSSHArgs(ValueError):\n\n def __init__(self, msg: str = ''):\n super().__init__(msg)\n self.err_msg = msg\n\n def system_exit(self) -> None:\n if self.err_msg:\n print(self.err_msg, file=sys.stderr)\n os.execlp(ssh_exe(), 'ssh')\n\n\npassthrough_args = {f'-{x}' for x in 'NnfGT'}\n\n\ndef parse_ssh_args(args: List[str], extra_args: Tuple[str, ...] = ()) -> Tuple[List[str], List[str], bool, Tuple[str, ...]]:\n boolean_ssh_args, other_ssh_args = get_ssh_cli()\n ssh_args = []\n server_args: List[str] = []\n expecting_option_val = False\n passthrough = False\n stop_option_processing = False\n found_extra_args: List[str] = []\n expecting_extra_val = ''\n for argument in args:\n if len(server_args) > 1 or stop_option_processing:\n server_args.append(argument)\n continue\n if argument.startswith('-') and not expecting_option_val:\n if argument == '--':\n stop_option_processing = True\n continue\n if extra_args:\n matching_ex = is_extra_arg(argument, extra_args)\n if matching_ex:\n if '=' in argument:\n exval = argument.partition('=')[-1]\n found_extra_args.extend((matching_ex, exval))\n else:\n expecting_extra_val = matching_ex\n expecting_option_val = True\n continue\n # could be a multi-character option\n all_args = argument[1:]\n for i, arg in enumerate(all_args):\n arg = f'-{arg}'\n if arg in passthrough_args:\n passthrough = True\n if arg in boolean_ssh_args:\n ssh_args.append(arg)\n continue\n if arg in other_ssh_args:\n ssh_args.append(arg)\n rest = all_args[i+1:]\n if rest:\n ssh_args.append(rest)\n else:\n expecting_option_val = True\n break\n raise InvalidSSHArgs(f'unknown option -- {arg[1:]}')\n continue\n if expecting_option_val:\n if expecting_extra_val:\n found_extra_args.extend((expecting_extra_val, argument))\n expecting_extra_val = ''\n else:\n ssh_args.append(argument)\n expecting_option_val = False\n continue\n server_args.append(argument)\n if not server_args:\n raise InvalidSSHArgs()\n return ssh_args, server_args, passthrough, tuple(found_extra_args)\n\n\ndef wrap_bootstrap_script(sh_script: str, interpreter: str) -> List[str]:\n # sshd will execute the command we pass it by join all command line\n # arguments with a space and passing it as a single argument to the users\n # login shell with -c. If the user has a non POSIX login shell it might\n # have different escaping semantics and syntax, so the command it should\n # execute has to be as simple as possible, basically of the form\n # interpreter -c unwrap_script escaped_bootstrap_script\n # The unwrap_script is responsible for unescaping the bootstrap script and\n # executing it.\n q = os.path.basename(interpreter).lower()\n is_python = 'python' in q\n if is_python:\n es = standard_b64encode(sh_script.encode('utf-8')).decode('ascii')\n unwrap_script = '''\"import base64, sys; eval(compile(base64.standard_b64decode(sys.argv[-1]), 'bootstrap.py', 'exec'))\"'''\n else:\n # We cant rely on base64 being available on the remote system, so instead\n # we quote the bootstrap script by replacing ' and \\ with \\v and \\f\n # also replacing \\n and ! with \\r and \\b for tcsh\n # finally surrounding with '\n es = \"'\" + sh_script.replace(\"'\", '\\v').replace('\\\\', '\\f').replace('\\n', '\\r').replace('!', '\\b') + \"'\"\n unwrap_script = r\"\"\"'eval \"$(echo \"$0\" | tr \\\\\\v\\\\\\f\\\\\\r\\\\\\b \\\\\\047\\\\\\134\\\\\\n\\\\\\041)\"' \"\"\"\n # exec is supported by all sh like shells, and fish and csh\n return ['exec', interpreter, '-c', unwrap_script, es]\n\n\ndef get_remote_command(\n remote_args: List[str], ssh_opts: SSHOptions, cli_hostname: str = '', cli_uname: str = '',\n echo_on: bool = True, request_data: bool = False, literal_env: Dict[str, str] = {}\n) -> Tuple[List[str], Dict[str, str], str]:\n interpreter = ssh_opts.interpreter\n q = os.path.basename(interpreter).lower()\n is_python = 'python' in q\n sh_script, replacements, shm_name = bootstrap_script(\n ssh_opts, script_type='py' if is_python else 'sh', remote_args=remote_args, literal_env=literal_env,\n cli_hostname=cli_hostname, cli_uname=cli_uname, echo_on=echo_on, request_data=request_data)\n return wrap_bootstrap_script(sh_script, interpreter), replacements, shm_name\n\n\ndef connection_sharing_args(kitty_pid: int) -> List[str]:\n rd = runtime_dir()\n # Bloody OpenSSH generates a 40 char hash and in creating the socket\n # appends a 27 char temp suffix to it. Socket max path length is approx\n # ~104 chars. macOS has no system runtime dir so we use a cache dir in\n # /Users/WHY_DOES_ANYONE_USE_MACOS/Library/Caches/APPLE_ARE_IDIOTIC\n if len(rd) > 35 and os.path.isdir('/tmp'):\n idiotic_design = f'/tmp/kssh-rdir-{os.getuid()}'\n try:\n os.symlink(rd, idiotic_design)\n except FileExistsError:\n try:\n dest = os.readlink(idiotic_design)\n except OSError as e:\n raise ValueError(f'The {idiotic_design} symlink could not be created as something with that name exists already') from e\n else:\n if dest != rd:\n with tempfile.TemporaryDirectory(dir='/tmp') as tdir:\n tlink = os.path.join(tdir, 'sigh')\n os.symlink(rd, tlink)\n os.rename(tlink, idiotic_design)\n rd = idiotic_design\n\n cp = os.path.join(rd, ssh_control_master_template.format(kitty_pid=kitty_pid, ssh_placeholder='%C'))\n ans: List[str] = [\n '-o', 'ControlMaster=auto',\n '-o', f'ControlPath={cp}',\n '-o', 'ControlPersist=yes',\n '-o', 'ServerAliveInterval=60',\n '-o', 'ServerAliveCountMax=5',\n '-o', 'TCPKeepAlive=no',\n ]\n return ans\n\n\n@contextmanager\ndef restore_terminal_state() -> Iterator[bool]:\n with open(os.ctermid()) as f:\n val = termios.tcgetattr(f.fileno())\n print(end=SAVE_PRIVATE_MODE_VALUES)\n print(end=set_mode(Mode.HANDLE_TERMIOS_SIGNALS), flush=True)\n try:\n yield bool(val[3] & termios.ECHO)\n finally:\n termios.tcsetattr(f.fileno(), termios.TCSAFLUSH, val)\n print(end=RESTORE_PRIVATE_MODE_VALUES, flush=True)\n\n\ndef dcs_to_kitty(payload: Union[bytes, str], type: str = 'ssh') -> bytes:\n if isinstance(payload, str):\n payload = payload.encode('utf-8')\n payload = standard_b64encode(payload)\n ans = b'\\033P@kitty-' + type.encode('ascii') + b'|' + payload\n tmux = running_in_tmux()\n if tmux:\n cp = subprocess.run([tmux, 'set', '-p', 'allow-passthrough', 'on'])\n if cp.returncode != 0:\n raise SystemExit(cp.returncode)\n ans = b'\\033Ptmux;\\033' + ans + b'\\033\\033\\\\\\033\\\\'\n else:\n ans += b'\\033\\\\'\n return ans\n\n\n@run_once\ndef ssh_version() -> Tuple[int, int]:\n o = subprocess.check_output([ssh_exe(), '-V'], stderr=subprocess.STDOUT).decode()\n m = re.match(r'OpenSSH_(\\d+).(\\d+)', o)\n if m is None:\n raise ValueError(f'Invalid version string for OpenSSH: {o}')\n return int(m.group(1)), int(m.group(2))\n\n\n@contextmanager\ndef drain_potential_tty_garbage(p: 'subprocess.Popen[bytes]', data_request: str) -> Iterator[None]:\n with open(os.open(os.ctermid(), os.O_CLOEXEC | os.O_RDWR | os.O_NOCTTY), 'wb') as tty:\n if data_request:\n turn_off_echo(tty.fileno())\n tty.write(dcs_to_kitty(data_request))\n tty.flush()\n try:\n yield\n finally:\n # discard queued input data on tty in case data transmission was\n # interrupted due to SSH failure, avoids spewing garbage to screen\n from uuid import uuid4\n canary = uuid4().hex.encode('ascii')\n turn_off_echo(tty.fileno())\n tty.write(dcs_to_kitty(canary + b'\\n\\r', type='echo'))\n tty.flush()\n data = b''\n give_up_at = time.monotonic() + 2\n tty_fd = tty.fileno()\n while time.monotonic() < give_up_at and canary not in data:\n with suppress(KeyboardInterrupt):\n rd, wr, err = select([tty_fd], [], [tty_fd], max(0, give_up_at - time.monotonic()))\n if err or not rd:\n break\n q = os.read(tty_fd, io.DEFAULT_BUFFER_SIZE)\n if not q:\n break\n data += q\n\n\ndef change_colors(color_scheme: str) -> bool:\n if not color_scheme:\n return False\n from kittens.themes.collection import NoCacheFound, load_themes, text_as_opts\n from kittens.themes.main import colors_as_escape_codes\n if color_scheme.endswith('.conf'):\n conf_file = resolve_abs_or_config_path(color_scheme)\n try:\n with open(conf_file) as f:\n opts = text_as_opts(f.read())\n except FileNotFoundError:\n raise SystemExit(f'Failed to find the color conf file: {expandvars(conf_file)}')\n else:\n try:\n themes = load_themes(-1)\n except NoCacheFound:\n themes = load_themes()\n cs = expandvars(color_scheme)\n try:\n theme = themes[cs]\n except KeyError:\n raise SystemExit(f'Failed to find the color theme: {cs}')\n opts = theme.kitty_opts\n raw = colors_as_escape_codes(opts)\n print(save_colors(), sep='', end=raw, flush=True)\n return True\n\n\ndef add_cloned_env(shm_name: str) -> Dict[str, str]:\n try:\n return cast(Dict[str, str], read_data_from_shared_memory(shm_name))\n except FileNotFoundError:\n pass\n return {}\n\n\ndef run_ssh(ssh_args: List[str], server_args: List[str], found_extra_args: Tuple[str, ...]) -> NoReturn:\n cmd = [ssh_exe()] + ssh_args\n hostname, remote_args = server_args[0], server_args[1:]\n if not remote_args:\n cmd.append('-t')\n insertion_point = len(cmd)\n cmd.append('--')\n cmd.append(hostname)\n uname = getuser()\n if hostname.startswith('ssh://'):\n from urllib.parse import urlparse\n purl = urlparse(hostname)\n hostname_for_match = purl.hostname or hostname[6:].split('/', 1)[0]\n uname = purl.username or uname\n elif '@' in hostname and hostname[0] != '@':\n uname, hostname_for_match = hostname.split('@', 1)\n else:\n hostname_for_match = hostname\n hostname_for_match = hostname_for_match.split('@', 1)[-1].split(':', 1)[0]\n overrides: List[str] = []\n literal_env: Dict[str, str] = {}\n pat = re.compile(r'^([a-zA-Z0-9_]+)[ \\t]*=')\n for i, a in enumerate(found_extra_args):\n if i % 2 == 1:\n aq = pat.sub(r'\\1 ', a.lstrip())\n key = aq.split(maxsplit=1)[0]\n if key == 'clone_env':\n literal_env = add_cloned_env(aq.split(maxsplit=1)[1])\n elif key != 'hostname':\n overrides.append(aq)\n if overrides:\n overrides.insert(0, f'hostname {uname}@{hostname_for_match}')\n host_opts = init_config(hostname_for_match, uname, overrides)\n if host_opts.share_connections:\n cmd[insertion_point:insertion_point] = connection_sharing_args(int(os.environ['KITTY_PID']))\n use_kitty_askpass = host_opts.askpass == 'native' or (host_opts.askpass == 'unless-set' and 'SSH_ASKPASS' not in os.environ)\n need_to_request_data = True\n if use_kitty_askpass:\n sentinel = os.path.join(cache_dir(), 'openssh-is-new-enough-for-askpass')\n sentinel_exists = os.path.exists(sentinel)\n if sentinel_exists or ssh_version() >= (8, 4):\n if not sentinel_exists:\n open(sentinel, 'w').close()\n # SSH_ASKPASS_REQUIRE was introduced in 8.4 release on 2020-09-27\n need_to_request_data = False\n os.environ['SSH_ASKPASS_REQUIRE'] = 'force'\n os.environ['SSH_ASKPASS'] = os.path.join(shell_integration_dir, 'ssh', 'askpass.py')\n if need_to_request_data and host_opts.share_connections:\n cp = subprocess.run(cmd[:1] + ['-O', 'check'] + cmd[1:], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n if cp.returncode == 0:\n # we will use the master connection so SSH does not need to use the tty\n need_to_request_data = False\n with restore_terminal_state() as echo_on:\n rcmd, replacements, shm_name = get_remote_command(\n remote_args, host_opts, hostname_for_match, uname, echo_on, request_data=need_to_request_data, literal_env=literal_env)\n cmd += rcmd\n colors_changed = change_colors(host_opts.color_scheme)\n try:\n p = subprocess.Popen(cmd)\n except FileNotFoundError:\n raise SystemExit('Could not find the ssh executable, is it in your PATH?')\n else:\n rq = '' if need_to_request_data else 'id={REQUEST_ID}:pwfile={PASSWORD_FILENAME}:pw={DATA_PASSWORD}'.format(**replacements)\n with drain_potential_tty_garbage(p, rq):\n raise SystemExit(p.wait())\n finally:\n if colors_changed:\n print(end=restore_colors(), flush=True)\n\n\ndef main(args: List[str]) -> None:\n args = args[1:]\n if args and args[0] == 'use-python':\n args = args[1:] # backwards compat from when we had a python implementation\n try:\n ssh_args, server_args, passthrough, found_extra_args = parse_ssh_args(args, extra_args=('--kitten',))\n except InvalidSSHArgs as e:\n e.system_exit()\n if passthrough:\n if found_extra_args:\n raise SystemExit(f'The SSH kitten cannot work with the options: {\", \".join(passthrough_args)}')\n os.execlp(ssh_exe(), 'ssh', *args)\n\n if not os.environ.get('KITTY_WINDOW_ID') or not os.environ.get('KITTY_PID'):\n raise SystemExit('The SSH kitten is meant to run inside a kitty window')\n if not sys.stdin.isatty():\n raise SystemExit('The SSH kitten is meant for interactive use only, STDIN must be a terminal')\n try:\n run_ssh(ssh_args, server_args, found_extra_args)\n except KeyboardInterrupt:\n sys.excepthook = lambda *a: None\n raise\n\n\nif __name__ == '__main__':\n main(sys.argv)\nelif __name__ == '__wrapper_of__':\n cd = sys.cli_docs # type: ignore\n cd['wrapper_of'] = 'ssh'\nelif __name__ == '__conf__':\n from .options.definition import definition\n sys.options_definition = definition # type: ignore\n", "path": "kittens/ssh/main.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# License: GPL v3 Copyright: 2018, Kovid Goyal <kovid at kovidgoyal.net>\n\nimport fnmatch\nimport glob\nimport io\nimport json\nimport os\nimport re\nimport secrets\nimport shlex\nimport shutil\nimport stat\nimport subprocess\nimport sys\nimport tarfile\nimport tempfile\nimport termios\nimport time\nimport traceback\nfrom base64 import standard_b64decode, standard_b64encode\nfrom contextlib import contextmanager, suppress\nfrom getpass import getuser\nfrom select import select\nfrom typing import Any, Callable, Dict, Iterator, List, NoReturn, Optional, Sequence, Set, Tuple, Union, cast\n\nfrom kitty.constants import cache_dir, runtime_dir, shell_integration_dir, ssh_control_master_template, str_version, terminfo_dir\nfrom kitty.shell_integration import as_str_literal\nfrom kitty.shm import SharedMemory\nfrom kitty.types import run_once\nfrom kitty.utils import SSHConnectionData, expandvars, resolve_abs_or_config_path\nfrom kitty.utils import set_echo as turn_off_echo\n\nfrom ..tui.operations import RESTORE_PRIVATE_MODE_VALUES, SAVE_PRIVATE_MODE_VALUES, Mode, restore_colors, save_colors, set_mode\nfrom ..tui.utils import kitty_opts, running_in_tmux\nfrom .config import init_config\nfrom .copy import CopyInstruction\nfrom .options.types import Options as SSHOptions\nfrom .options.utils import DELETE_ENV_VAR\nfrom .utils import create_shared_memory, ssh_options\n\n\n@run_once\ndef ssh_exe() -> str:\n return shutil.which('ssh') or 'ssh'\n\n\ndef read_data_from_shared_memory(shm_name: str) -> Any:\n with SharedMemory(shm_name, readonly=True) as shm:\n shm.unlink()\n if shm.stats.st_uid != os.geteuid() or shm.stats.st_gid != os.getegid():\n raise ValueError('Incorrect owner on pwfile')\n return json.loads(shm.read_data_with_size())\n\n\n# See https://www.gnu.org/software/bash/manual/html_node/Double-Quotes.html\nquote_pat = re.compile('([\\\\`\"])')\n\n\ndef quote_env_val(x: str, literal_quote: bool = False) -> str:\n if literal_quote:\n return as_str_literal(x)\n x = quote_pat.sub(r'\\\\\\1', x)\n x = x.replace('$(', r'\\$(') # prevent execution with $()\n return f'\"{x}\"'\n\n\ndef serialize_env(literal_env: Dict[str, str], env: Dict[str, str], base_env: Dict[str, str], for_python: bool = False) -> bytes:\n lines = []\n literal_quote = True\n\n if for_python:\n def a(k: str, val: str = '', prefix: str = 'export') -> None:\n if val:\n lines.append(f'{prefix} {json.dumps((k, val, literal_quote))}')\n else:\n lines.append(f'{prefix} {json.dumps((k,))}')\n else:\n def a(k: str, val: str = '', prefix: str = 'export') -> None:\n if val:\n lines.append(f'{prefix} {shlex.quote(k)}={quote_env_val(val, literal_quote)}')\n else:\n lines.append(f'{prefix} {shlex.quote(k)}')\n\n for k, v in literal_env.items():\n a(k, v)\n\n literal_quote = False\n for k in sorted(env):\n v = env[k]\n if v == DELETE_ENV_VAR:\n a(k, prefix='unset')\n elif v == '_kitty_copy_env_var_':\n q = base_env.get(k)\n if q is not None:\n a(k, q)\n else:\n a(k, v)\n return '\\n'.join(lines).encode('utf-8')\n\n\ndef make_tarfile(ssh_opts: SSHOptions, base_env: Dict[str, str], compression: str = 'gz', literal_env: Dict[str, str] = {}) -> bytes:\n\n def normalize_tarinfo(tarinfo: tarfile.TarInfo) -> tarfile.TarInfo:\n tarinfo.uname = tarinfo.gname = ''\n tarinfo.uid = tarinfo.gid = 0\n # some distro's like nix mess with installed file permissions so ensure\n # files are at least readable and writable by owning user\n tarinfo.mode |= stat.S_IWUSR | stat.S_IRUSR\n return tarinfo\n\n def add_data_as_file(tf: tarfile.TarFile, arcname: str, data: Union[str, bytes]) -> tarfile.TarInfo:\n ans = tarfile.TarInfo(arcname)\n ans.mtime = 0\n ans.type = tarfile.REGTYPE\n if isinstance(data, str):\n data = data.encode('utf-8')\n ans.size = len(data)\n normalize_tarinfo(ans)\n tf.addfile(ans, io.BytesIO(data))\n return ans\n\n def filter_from_globs(*pats: str) -> Callable[[tarfile.TarInfo], Optional[tarfile.TarInfo]]:\n def filter(tarinfo: tarfile.TarInfo) -> Optional[tarfile.TarInfo]:\n for junk_dir in ('.DS_Store', '__pycache__'):\n for pat in (f'*/{junk_dir}', f'*/{junk_dir}/*'):\n if fnmatch.fnmatch(tarinfo.name, pat):\n return None\n for pat in pats:\n if fnmatch.fnmatch(tarinfo.name, pat):\n return None\n return normalize_tarinfo(tarinfo)\n return filter\n\n from kitty.shell_integration import get_effective_ksi_env_var\n if ssh_opts.shell_integration == 'inherited':\n ksi = get_effective_ksi_env_var(kitty_opts())\n else:\n from kitty.options.types import Options\n from kitty.options.utils import shell_integration\n ksi = get_effective_ksi_env_var(Options({'shell_integration': shell_integration(ssh_opts.shell_integration)}))\n\n env = {\n 'TERM': os.environ.get('TERM') or kitty_opts().term,\n 'COLORTERM': 'truecolor',\n }\n env.update(ssh_opts.env)\n for q in ('KITTY_WINDOW_ID', 'WINDOWID'):\n val = os.environ.get(q)\n if val is not None:\n env[q] = val\n env['KITTY_SHELL_INTEGRATION'] = ksi or DELETE_ENV_VAR\n env['KITTY_SSH_KITTEN_DATA_DIR'] = ssh_opts.remote_dir\n if ssh_opts.login_shell:\n env['KITTY_LOGIN_SHELL'] = ssh_opts.login_shell\n if ssh_opts.cwd:\n env['KITTY_LOGIN_CWD'] = ssh_opts.cwd\n if ssh_opts.remote_kitty != 'no':\n env['KITTY_REMOTE'] = ssh_opts.remote_kitty\n if os.environ.get('KITTY_PUBLIC_KEY'):\n env.pop('KITTY_PUBLIC_KEY', None)\n literal_env['KITTY_PUBLIC_KEY'] = os.environ['KITTY_PUBLIC_KEY']\n env_script = serialize_env(literal_env, env, base_env, for_python=compression != 'gz')\n buf = io.BytesIO()\n with tarfile.open(mode=f'w:{compression}', fileobj=buf, encoding='utf-8') as tf:\n rd = ssh_opts.remote_dir.rstrip('/')\n for ci in ssh_opts.copy.values():\n tf.add(ci.local_path, arcname=ci.arcname, filter=filter_from_globs(*ci.exclude_patterns))\n add_data_as_file(tf, 'data.sh', env_script)\n if compression == 'gz':\n tf.add(f'{shell_integration_dir}/ssh/bootstrap-utils.sh', arcname='bootstrap-utils.sh', filter=normalize_tarinfo)\n if ksi:\n arcname = 'home/' + rd + '/shell-integration'\n tf.add(shell_integration_dir, arcname=arcname, filter=filter_from_globs(\n f'{arcname}/ssh/*', # bootstrap files are sent as command line args\n f'{arcname}/zsh/kitty.zsh', # present for legacy compat not needed by ssh kitten\n ))\n if ssh_opts.remote_kitty != 'no':\n arcname = 'home/' + rd + '/kitty'\n add_data_as_file(tf, arcname + '/version', str_version.encode('ascii'))\n tf.add(shell_integration_dir + '/ssh/kitty', arcname=arcname + '/bin/kitty', filter=normalize_tarinfo)\n tf.add(shell_integration_dir + '/ssh/kitten', arcname=arcname + '/bin/kitten', filter=normalize_tarinfo)\n tf.add(f'{terminfo_dir}/kitty.terminfo', arcname='home/.terminfo/kitty.terminfo', filter=normalize_tarinfo)\n tf.add(glob.glob(f'{terminfo_dir}/*/xterm-kitty')[0], arcname='home/.terminfo/x/xterm-kitty', filter=normalize_tarinfo)\n return buf.getvalue()\n\n\ndef get_ssh_data(msg: str, request_id: str) -> Iterator[bytes]:\n yield b'\\nKITTY_DATA_START\\n' # to discard leading data\n try:\n msg = standard_b64decode(msg).decode('utf-8')\n md = dict(x.split('=', 1) for x in msg.split(':'))\n pw = md['pw']\n pwfilename = md['pwfile']\n rq_id = md['id']\n except Exception:\n traceback.print_exc()\n yield b'invalid ssh data request message\\n'\n else:\n try:\n env_data = read_data_from_shared_memory(pwfilename)\n if pw != env_data['pw']:\n raise ValueError('Incorrect password')\n if rq_id != request_id:\n raise ValueError(f'Incorrect request id: {rq_id!r} expecting the KITTY_PID-KITTY_WINDOW_ID for the current kitty window')\n except Exception as e:\n traceback.print_exc()\n yield f'{e}\\n'.encode('utf-8')\n else:\n yield b'OK\\n'\n ssh_opts = SSHOptions(env_data['opts'])\n ssh_opts.copy = {k: CopyInstruction(*v) for k, v in ssh_opts.copy.items()}\n encoded_data = memoryview(env_data['tarfile'].encode('ascii'))\n # macOS has a 255 byte limit on its input queue as per man stty.\n # Not clear if that applies to canonical mode input as well, but\n # better to be safe.\n line_sz = 254\n while encoded_data:\n yield encoded_data[:line_sz]\n yield b'\\n'\n encoded_data = encoded_data[line_sz:]\n yield b'KITTY_DATA_END\\n'\n\n\ndef safe_remove(x: str) -> None:\n with suppress(OSError):\n os.remove(x)\n\n\ndef prepare_script(ans: str, replacements: Dict[str, str], script_type: str) -> str:\n for k in ('EXEC_CMD', 'EXPORT_HOME_CMD'):\n replacements[k] = replacements.get(k, '')\n\n def sub(m: 're.Match[str]') -> str:\n return replacements[m.group()]\n\n return re.sub('|'.join(fr'\\b{k}\\b' for k in replacements), sub, ans)\n\n\ndef prepare_exec_cmd(remote_args: Sequence[str], is_python: bool) -> str:\n # ssh simply concatenates multiple commands using a space see\n # line 1129 of ssh.c and on the remote side sshd.c runs the\n # concatenated command as shell -c cmd\n if is_python:\n return standard_b64encode(' '.join(remote_args).encode('utf-8')).decode('ascii')\n args = ' '.join(c.replace(\"'\", \"\"\"'\"'\"'\"\"\") for c in remote_args)\n return f\"\"\"unset KITTY_SHELL_INTEGRATION; exec \"$login_shell\" -c '{args}'\"\"\"\n\n\ndef prepare_export_home_cmd(ssh_opts: SSHOptions, is_python: bool) -> str:\n home = ssh_opts.env.get('HOME')\n if home == '_kitty_copy_env_var_':\n home = os.environ.get('HOME')\n if home:\n if is_python:\n return standard_b64encode(home.encode('utf-8')).decode('ascii')\n else:\n return f'export HOME={quote_env_val(home)}; cd \"$HOME\"'\n return ''\n\n\ndef bootstrap_script(\n ssh_opts: SSHOptions, script_type: str = 'sh', remote_args: Sequence[str] = (),\n test_script: str = '', request_id: Optional[str] = None, cli_hostname: str = '', cli_uname: str = '',\n request_data: bool = False, echo_on: bool = True, literal_env: Dict[str, str] = {}\n) -> Tuple[str, Dict[str, str], str]:\n if request_id is None:\n request_id = os.environ['KITTY_PID'] + '-' + os.environ['KITTY_WINDOW_ID']\n is_python = script_type == 'py'\n export_home_cmd = prepare_export_home_cmd(ssh_opts, is_python) if 'HOME' in ssh_opts.env else ''\n exec_cmd = prepare_exec_cmd(remote_args, is_python) if remote_args else ''\n with open(os.path.join(shell_integration_dir, 'ssh', f'bootstrap.{script_type}')) as f:\n ans = f.read()\n pw = secrets.token_hex()\n tfd = standard_b64encode(make_tarfile(ssh_opts, dict(os.environ), 'gz' if script_type == 'sh' else 'bz2', literal_env=literal_env)).decode('ascii')\n data = {'pw': pw, 'opts': ssh_opts._asdict(), 'hostname': cli_hostname, 'uname': cli_uname, 'tarfile': tfd}\n shm_name = create_shared_memory(data, prefix=f'kssh-{os.getpid()}-')\n sensitive_data = {'REQUEST_ID': request_id, 'DATA_PASSWORD': pw, 'PASSWORD_FILENAME': shm_name}\n replacements = {\n 'EXPORT_HOME_CMD': export_home_cmd,\n 'EXEC_CMD': exec_cmd, 'TEST_SCRIPT': test_script,\n 'REQUEST_DATA': '1' if request_data else '0', 'ECHO_ON': '1' if echo_on else '0',\n }\n sd = replacements.copy()\n if request_data:\n sd.update(sensitive_data)\n replacements.update(sensitive_data)\n return prepare_script(ans, sd, script_type), replacements, shm_name\n\n\ndef get_ssh_cli() -> Tuple[Set[str], Set[str]]:\n other_ssh_args: Set[str] = set()\n boolean_ssh_args: Set[str] = set()\n for k, v in ssh_options().items():\n k = f'-{k}'\n if v:\n other_ssh_args.add(k)\n else:\n boolean_ssh_args.add(k)\n return boolean_ssh_args, other_ssh_args\n\n\ndef is_extra_arg(arg: str, extra_args: Tuple[str, ...]) -> str:\n for x in extra_args:\n if arg == x or arg.startswith(f'{x}='):\n return x\n return ''\n\n\ndef get_connection_data(args: List[str], cwd: str = '', extra_args: Tuple[str, ...] = ()) -> Optional[SSHConnectionData]:\n boolean_ssh_args, other_ssh_args = get_ssh_cli()\n port: Optional[int] = None\n expecting_port = expecting_identity = False\n expecting_option_val = False\n expecting_hostname = False\n expecting_extra_val = ''\n host_name = identity_file = found_ssh = ''\n found_extra_args: List[Tuple[str, str]] = []\n\n for i, arg in enumerate(args):\n if not found_ssh:\n if os.path.basename(arg).lower() in ('ssh', 'ssh.exe'):\n found_ssh = arg\n continue\n if expecting_hostname:\n host_name = arg\n continue\n if arg.startswith('-') and not expecting_option_val:\n if arg in boolean_ssh_args:\n continue\n if arg == '--':\n expecting_hostname = True\n if arg.startswith('-p'):\n if arg[2:].isdigit():\n with suppress(Exception):\n port = int(arg[2:])\n continue\n elif arg == '-p':\n expecting_port = True\n elif arg.startswith('-i'):\n if arg == '-i':\n expecting_identity = True\n else:\n identity_file = arg[2:]\n continue\n if arg.startswith('--') and extra_args:\n matching_ex = is_extra_arg(arg, extra_args)\n if matching_ex:\n if '=' in arg:\n exval = arg.partition('=')[-1]\n found_extra_args.append((matching_ex, exval))\n continue\n expecting_extra_val = matching_ex\n\n expecting_option_val = True\n continue\n\n if expecting_option_val:\n if expecting_port:\n with suppress(Exception):\n port = int(arg)\n expecting_port = False\n elif expecting_identity:\n identity_file = arg\n elif expecting_extra_val:\n found_extra_args.append((expecting_extra_val, arg))\n expecting_extra_val = ''\n expecting_option_val = False\n continue\n\n if not host_name:\n host_name = arg\n if not host_name:\n return None\n if host_name.startswith('ssh://'):\n from urllib.parse import urlparse\n purl = urlparse(host_name)\n if purl.hostname:\n host_name = purl.hostname\n if purl.username:\n host_name = f'{purl.username}@{host_name}'\n if port is None and purl.port:\n port = purl.port\n if identity_file:\n if not os.path.isabs(identity_file):\n identity_file = os.path.expanduser(identity_file)\n if not os.path.isabs(identity_file):\n identity_file = os.path.normpath(os.path.join(cwd or os.getcwd(), identity_file))\n\n return SSHConnectionData(found_ssh, host_name, port, identity_file, tuple(found_extra_args))\n\n\nclass InvalidSSHArgs(ValueError):\n\n def __init__(self, msg: str = ''):\n super().__init__(msg)\n self.err_msg = msg\n\n def system_exit(self) -> None:\n if self.err_msg:\n print(self.err_msg, file=sys.stderr)\n os.execlp(ssh_exe(), 'ssh')\n\n\npassthrough_args = {f'-{x}' for x in 'NnfGT'}\n\n\ndef parse_ssh_args(args: List[str], extra_args: Tuple[str, ...] = ()) -> Tuple[List[str], List[str], bool, Tuple[str, ...]]:\n boolean_ssh_args, other_ssh_args = get_ssh_cli()\n ssh_args = []\n server_args: List[str] = []\n expecting_option_val = False\n passthrough = False\n stop_option_processing = False\n found_extra_args: List[str] = []\n expecting_extra_val = ''\n for argument in args:\n if len(server_args) > 1 or stop_option_processing:\n server_args.append(argument)\n continue\n if argument.startswith('-') and not expecting_option_val:\n if argument == '--':\n stop_option_processing = True\n continue\n if extra_args:\n matching_ex = is_extra_arg(argument, extra_args)\n if matching_ex:\n if '=' in argument:\n exval = argument.partition('=')[-1]\n found_extra_args.extend((matching_ex, exval))\n else:\n expecting_extra_val = matching_ex\n expecting_option_val = True\n continue\n # could be a multi-character option\n all_args = argument[1:]\n for i, arg in enumerate(all_args):\n arg = f'-{arg}'\n if arg in passthrough_args:\n passthrough = True\n if arg in boolean_ssh_args:\n ssh_args.append(arg)\n continue\n if arg in other_ssh_args:\n ssh_args.append(arg)\n rest = all_args[i+1:]\n if rest:\n ssh_args.append(rest)\n else:\n expecting_option_val = True\n break\n raise InvalidSSHArgs(f'unknown option -- {arg[1:]}')\n continue\n if expecting_option_val:\n if expecting_extra_val:\n found_extra_args.extend((expecting_extra_val, argument))\n expecting_extra_val = ''\n else:\n ssh_args.append(argument)\n expecting_option_val = False\n continue\n server_args.append(argument)\n if not server_args:\n raise InvalidSSHArgs()\n return ssh_args, server_args, passthrough, tuple(found_extra_args)\n\n\ndef wrap_bootstrap_script(sh_script: str, interpreter: str) -> List[str]:\n # sshd will execute the command we pass it by join all command line\n # arguments with a space and passing it as a single argument to the users\n # login shell with -c. If the user has a non POSIX login shell it might\n # have different escaping semantics and syntax, so the command it should\n # execute has to be as simple as possible, basically of the form\n # interpreter -c unwrap_script escaped_bootstrap_script\n # The unwrap_script is responsible for unescaping the bootstrap script and\n # executing it.\n q = os.path.basename(interpreter).lower()\n is_python = 'python' in q\n if is_python:\n es = standard_b64encode(sh_script.encode('utf-8')).decode('ascii')\n unwrap_script = '''\"import base64, sys; eval(compile(base64.standard_b64decode(sys.argv[-1]), 'bootstrap.py', 'exec'))\"'''\n else:\n # We cant rely on base64 being available on the remote system, so instead\n # we quote the bootstrap script by replacing ' and \\ with \\v and \\f\n # also replacing \\n and ! with \\r and \\b for tcsh\n # finally surrounding with '\n es = \"'\" + sh_script.replace(\"'\", '\\v').replace('\\\\', '\\f').replace('\\n', '\\r').replace('!', '\\b') + \"'\"\n unwrap_script = r\"\"\"'eval \"$(echo \"$0\" | tr \\\\\\v\\\\\\f\\\\\\r\\\\\\b \\\\\\047\\\\\\134\\\\\\n\\\\\\041)\"' \"\"\"\n # exec is supported by all sh like shells, and fish and csh\n return ['exec', interpreter, '-c', unwrap_script, es]\n\n\ndef get_remote_command(\n remote_args: List[str], ssh_opts: SSHOptions, cli_hostname: str = '', cli_uname: str = '',\n echo_on: bool = True, request_data: bool = False, literal_env: Dict[str, str] = {}\n) -> Tuple[List[str], Dict[str, str], str]:\n interpreter = ssh_opts.interpreter\n q = os.path.basename(interpreter).lower()\n is_python = 'python' in q\n sh_script, replacements, shm_name = bootstrap_script(\n ssh_opts, script_type='py' if is_python else 'sh', remote_args=remote_args, literal_env=literal_env,\n cli_hostname=cli_hostname, cli_uname=cli_uname, echo_on=echo_on, request_data=request_data)\n return wrap_bootstrap_script(sh_script, interpreter), replacements, shm_name\n\n\ndef connection_sharing_args(kitty_pid: int) -> List[str]:\n rd = runtime_dir()\n # Bloody OpenSSH generates a 40 char hash and in creating the socket\n # appends a 27 char temp suffix to it. Socket max path length is approx\n # ~104 chars. macOS has no system runtime dir so we use a cache dir in\n # /Users/WHY_DOES_ANYONE_USE_MACOS/Library/Caches/APPLE_ARE_IDIOTIC\n if len(rd) > 35 and os.path.isdir('/tmp'):\n idiotic_design = f'/tmp/kssh-rdir-{os.getuid()}'\n try:\n os.symlink(rd, idiotic_design)\n except FileExistsError:\n try:\n dest = os.readlink(idiotic_design)\n except OSError as e:\n raise ValueError(f'The {idiotic_design} symlink could not be created as something with that name exists already') from e\n else:\n if dest != rd:\n with tempfile.TemporaryDirectory(dir='/tmp') as tdir:\n tlink = os.path.join(tdir, 'sigh')\n os.symlink(rd, tlink)\n os.rename(tlink, idiotic_design)\n rd = idiotic_design\n\n cp = os.path.join(rd, ssh_control_master_template.format(kitty_pid=kitty_pid, ssh_placeholder='%C'))\n ans: List[str] = [\n '-o', 'ControlMaster=auto',\n '-o', f'ControlPath={cp}',\n '-o', 'ControlPersist=yes',\n '-o', 'ServerAliveInterval=60',\n '-o', 'ServerAliveCountMax=5',\n '-o', 'TCPKeepAlive=no',\n ]\n return ans\n\n\n@contextmanager\ndef restore_terminal_state() -> Iterator[bool]:\n with open(os.ctermid()) as f:\n val = termios.tcgetattr(f.fileno())\n print(end=SAVE_PRIVATE_MODE_VALUES)\n print(end=set_mode(Mode.HANDLE_TERMIOS_SIGNALS), flush=True)\n try:\n yield bool(val[3] & termios.ECHO)\n finally:\n termios.tcsetattr(f.fileno(), termios.TCSAFLUSH, val)\n print(end=RESTORE_PRIVATE_MODE_VALUES, flush=True)\n\n\ndef dcs_to_kitty(payload: Union[bytes, str], type: str = 'ssh') -> bytes:\n if isinstance(payload, str):\n payload = payload.encode('utf-8')\n payload = standard_b64encode(payload)\n ans = b'\\033P@kitty-' + type.encode('ascii') + b'|' + payload\n tmux = running_in_tmux()\n if tmux:\n cp = subprocess.run([tmux, 'set', '-p', 'allow-passthrough', 'on'])\n if cp.returncode != 0:\n raise SystemExit(cp.returncode)\n ans = b'\\033Ptmux;\\033' + ans + b'\\033\\033\\\\\\033\\\\'\n else:\n ans += b'\\033\\\\'\n return ans\n\n\n@run_once\ndef ssh_version() -> Tuple[int, int]:\n o = subprocess.check_output([ssh_exe(), '-V'], stderr=subprocess.STDOUT).decode()\n m = re.match(r'OpenSSH_(\\d+).(\\d+)', o)\n if m is None:\n raise ValueError(f'Invalid version string for OpenSSH: {o}')\n return int(m.group(1)), int(m.group(2))\n\n\n@contextmanager\ndef drain_potential_tty_garbage(p: 'subprocess.Popen[bytes]', data_request: str) -> Iterator[None]:\n with open(os.open(os.ctermid(), os.O_CLOEXEC | os.O_RDWR | os.O_NOCTTY), 'wb') as tty:\n if data_request:\n turn_off_echo(tty.fileno())\n tty.write(dcs_to_kitty(data_request))\n tty.flush()\n try:\n yield\n finally:\n # discard queued input data on tty in case data transmission was\n # interrupted due to SSH failure, avoids spewing garbage to screen\n from uuid import uuid4\n canary = uuid4().hex.encode('ascii')\n turn_off_echo(tty.fileno())\n tty.write(dcs_to_kitty(canary + b'\\n\\r', type='echo'))\n tty.flush()\n data = b''\n give_up_at = time.monotonic() + 2\n tty_fd = tty.fileno()\n while time.monotonic() < give_up_at and canary not in data:\n with suppress(KeyboardInterrupt):\n rd, wr, err = select([tty_fd], [], [tty_fd], max(0, give_up_at - time.monotonic()))\n if err or not rd:\n break\n q = os.read(tty_fd, io.DEFAULT_BUFFER_SIZE)\n if not q:\n break\n data += q\n\n\ndef change_colors(color_scheme: str) -> bool:\n if not color_scheme:\n return False\n from kittens.themes.collection import NoCacheFound, load_themes, text_as_opts\n from kittens.themes.main import colors_as_escape_codes\n if color_scheme.endswith('.conf'):\n conf_file = resolve_abs_or_config_path(color_scheme)\n try:\n with open(conf_file) as f:\n opts = text_as_opts(f.read())\n except FileNotFoundError:\n raise SystemExit(f'Failed to find the color conf file: {expandvars(conf_file)}')\n else:\n try:\n themes = load_themes(-1)\n except NoCacheFound:\n themes = load_themes()\n cs = expandvars(color_scheme)\n try:\n theme = themes[cs]\n except KeyError:\n raise SystemExit(f'Failed to find the color theme: {cs}')\n opts = theme.kitty_opts\n raw = colors_as_escape_codes(opts)\n print(save_colors(), sep='', end=raw, flush=True)\n return True\n\n\ndef add_cloned_env(shm_name: str) -> Dict[str, str]:\n try:\n return cast(Dict[str, str], read_data_from_shared_memory(shm_name))\n except FileNotFoundError:\n pass\n return {}\n\n\ndef run_ssh(ssh_args: List[str], server_args: List[str], found_extra_args: Tuple[str, ...]) -> NoReturn:\n cmd = [ssh_exe()] + ssh_args\n hostname, remote_args = server_args[0], server_args[1:]\n if not remote_args:\n cmd.append('-t')\n insertion_point = len(cmd)\n cmd.append('--')\n cmd.append(hostname)\n uname = getuser()\n if hostname.startswith('ssh://'):\n from urllib.parse import urlparse\n purl = urlparse(hostname)\n hostname_for_match = purl.hostname or hostname[6:].split('/', 1)[0]\n uname = purl.username or uname\n elif '@' in hostname and hostname[0] != '@':\n uname, hostname_for_match = hostname.split('@', 1)\n else:\n hostname_for_match = hostname\n hostname_for_match = hostname_for_match.split('@', 1)[-1].split(':', 1)[0]\n overrides: List[str] = []\n literal_env: Dict[str, str] = {}\n pat = re.compile(r'^([a-zA-Z0-9_]+)[ \\t]*=')\n for i, a in enumerate(found_extra_args):\n if i % 2 == 1:\n aq = pat.sub(r'\\1 ', a.lstrip())\n key = aq.split(maxsplit=1)[0]\n if key == 'clone_env':\n literal_env = add_cloned_env(aq.split(maxsplit=1)[1])\n elif key != 'hostname':\n overrides.append(aq)\n if overrides:\n overrides.insert(0, f'hostname {uname}@{hostname_for_match}')\n host_opts = init_config(hostname_for_match, uname, overrides)\n if host_opts.share_connections:\n cmd[insertion_point:insertion_point] = connection_sharing_args(int(os.environ['KITTY_PID']))\n use_kitty_askpass = host_opts.askpass == 'native' or (host_opts.askpass == 'unless-set' and 'SSH_ASKPASS' not in os.environ)\n need_to_request_data = True\n if use_kitty_askpass:\n sentinel = os.path.join(cache_dir(), 'openssh-is-new-enough-for-askpass')\n sentinel_exists = os.path.exists(sentinel)\n if sentinel_exists or ssh_version() >= (8, 4):\n if not sentinel_exists:\n open(sentinel, 'w').close()\n # SSH_ASKPASS_REQUIRE was introduced in 8.4 release on 2020-09-27\n need_to_request_data = False\n os.environ['SSH_ASKPASS_REQUIRE'] = 'force'\n os.environ['SSH_ASKPASS'] = os.path.join(shell_integration_dir, 'ssh', 'askpass.py')\n if need_to_request_data and host_opts.share_connections:\n cp = subprocess.run(cmd[:1] + ['-O', 'check'] + cmd[1:], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n if cp.returncode == 0:\n # we will use the master connection so SSH does not need to use the tty\n need_to_request_data = False\n with restore_terminal_state() as echo_on:\n rcmd, replacements, shm_name = get_remote_command(\n remote_args, host_opts, hostname_for_match, uname, echo_on, request_data=need_to_request_data, literal_env=literal_env)\n cmd += rcmd\n colors_changed = change_colors(host_opts.color_scheme)\n try:\n p = subprocess.Popen(cmd)\n except FileNotFoundError:\n raise SystemExit('Could not find the ssh executable, is it in your PATH?')\n else:\n rq = '' if need_to_request_data else 'id={REQUEST_ID}:pwfile={PASSWORD_FILENAME}:pw={DATA_PASSWORD}'.format(**replacements)\n with drain_potential_tty_garbage(p, rq):\n raise SystemExit(p.wait())\n finally:\n if colors_changed:\n print(end=restore_colors(), flush=True)\n\n\ndef main(args: List[str]) -> None:\n args = args[1:]\n if args and args[0] == 'use-python':\n args = args[1:] # backwards compat from when we had a python implementation\n try:\n ssh_args, server_args, passthrough, found_extra_args = parse_ssh_args(args, extra_args=('--kitten',))\n except InvalidSSHArgs as e:\n e.system_exit()\n if passthrough:\n if found_extra_args:\n raise SystemExit(f'The SSH kitten cannot work with the options: {\", \".join(passthrough_args)}')\n os.execlp(ssh_exe(), 'ssh', *args)\n\n if not os.environ.get('KITTY_WINDOW_ID') or not os.environ.get('KITTY_PID'):\n raise SystemExit('The SSH kitten is meant to run inside a kitty window')\n if not sys.stdin.isatty():\n raise SystemExit('The SSH kitten is meant for interactive use only, STDIN must be a terminal')\n try:\n run_ssh(ssh_args, server_args, found_extra_args)\n except KeyboardInterrupt:\n sys.excepthook = lambda *a: None\n raise\n\n\nif __name__ == '__main__':\n main(sys.argv)\nelif __name__ == '__wrapper_of__':\n cd = sys.cli_docs # type: ignore\n cd['wrapper_of'] = 'ssh'\nelif __name__ == '__conf__':\n from .options.definition import definition\n sys.options_definition = definition # type: ignore\n", "path": "kittens/ssh/main.py"}]} |
gh_patches_debug_1559 | rasdani/github-patches | git_diff | django-oscar__django-oscar-1766 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
In cases where the stock is lower than the number of allocated products incorrect message appears
To reproduce:
1. Add 2 products to basket
2. On one of them set num of allocated products as 1 and stock as 0.
3. On the basket page try to do an operation on the one that is still in stock. The message that appears is "a maximum of -1 can be bought" instead of the "no stock available" message
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/oscar/apps/partner/availability.py`
Content:
```
1 from django.utils.translation import ugettext_lazy as _
2
3
4 class Base(object):
5 """
6 Base availability policy.
7 """
8
9 #: Availability code. This is used for HTML classes
10 code = ''
11
12 #: A description of the availability of a product. This is shown on the
13 #: product detail page. Eg "In stock", "Out of stock" etc
14 message = ''
15
16 #: When this item should be dispatched
17 dispatch_date = None
18
19 @property
20 def short_message(self):
21 """
22 A shorter version of the availability message, suitable for showing on
23 browsing pages.
24 """
25 return self.message
26
27 @property
28 def is_available_to_buy(self):
29 """
30 Test if this product is available to be bought. This is used for
31 validation when a product is added to a user's basket.
32 """
33 # We test a purchase of a single item
34 return self.is_purchase_permitted(1)[0]
35
36 def is_purchase_permitted(self, quantity):
37 """
38 Test whether a proposed purchase is allowed
39
40 Should return a boolean and a reason
41 """
42 return False, _("unavailable")
43
44
45 # Common availability policies
46
47
48 class Unavailable(Base):
49 """
50 Policy for when a product is unavailable
51 """
52 code = 'unavailable'
53 message = _("Unavailable")
54
55
56 class Available(Base):
57 """
58 For when a product is always available, irrespective of stock level.
59
60 This might be appropriate for digital products where stock doesn't need to
61 be tracked and the product is always available to buy.
62 """
63 code = 'available'
64 message = _("Available")
65
66 def is_purchase_permitted(self, quantity):
67 return True, ""
68
69
70 class StockRequired(Base):
71 """
72 Allow a product to be bought while there is stock. This policy is
73 instantiated with a stock number (``num_available``). It ensures that the
74 product is only available to buy while there is stock available.
75
76 This is suitable for physical products where back orders (eg allowing
77 purchases when there isn't stock available) are not permitted.
78 """
79 CODE_IN_STOCK = 'instock'
80 CODE_OUT_OF_STOCK = 'outofstock'
81
82 def __init__(self, num_available):
83 self.num_available = num_available
84
85 def is_purchase_permitted(self, quantity):
86 if self.num_available == 0:
87 return False, _("no stock available")
88 if quantity > self.num_available:
89 msg = _("a maximum of %(max)d can be bought") % {
90 'max': self.num_available}
91 return False, msg
92 return True, ""
93
94 @property
95 def code(self):
96 if self.num_available > 0:
97 return self.CODE_IN_STOCK
98 return self.CODE_OUT_OF_STOCK
99
100 @property
101 def short_message(self):
102 if self.num_available > 0:
103 return _("In stock")
104 return _("Unavailable")
105
106 @property
107 def message(self):
108 if self.num_available > 0:
109 return _("In stock (%d available)") % self.num_available
110 return _("Unavailable")
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/oscar/apps/partner/availability.py b/src/oscar/apps/partner/availability.py
--- a/src/oscar/apps/partner/availability.py
+++ b/src/oscar/apps/partner/availability.py
@@ -83,7 +83,7 @@
self.num_available = num_available
def is_purchase_permitted(self, quantity):
- if self.num_available == 0:
+ if self.num_available <= 0:
return False, _("no stock available")
if quantity > self.num_available:
msg = _("a maximum of %(max)d can be bought") % {
| {"golden_diff": "diff --git a/src/oscar/apps/partner/availability.py b/src/oscar/apps/partner/availability.py\n--- a/src/oscar/apps/partner/availability.py\n+++ b/src/oscar/apps/partner/availability.py\n@@ -83,7 +83,7 @@\n self.num_available = num_available\n \n def is_purchase_permitted(self, quantity):\n- if self.num_available == 0:\n+ if self.num_available <= 0:\n return False, _(\"no stock available\")\n if quantity > self.num_available:\n msg = _(\"a maximum of %(max)d can be bought\") % {\n", "issue": "In cases where the stock is lower than the number of allocated products incorrect message appears\nTo reproduce:\n1. Add 2 products to basket\n2. On one of them set num of allocated products as 1 and stock as 0.\n3. On the basket page try to do an operation on the one that is still in stock. The message that appears is \"a maximum of -1 can be bought\" instead of the \"no stock available\" message\n\n", "before_files": [{"content": "from django.utils.translation import ugettext_lazy as _\n\n\nclass Base(object):\n \"\"\"\n Base availability policy.\n \"\"\"\n\n #: Availability code. This is used for HTML classes\n code = ''\n\n #: A description of the availability of a product. This is shown on the\n #: product detail page. Eg \"In stock\", \"Out of stock\" etc\n message = ''\n\n #: When this item should be dispatched\n dispatch_date = None\n\n @property\n def short_message(self):\n \"\"\"\n A shorter version of the availability message, suitable for showing on\n browsing pages.\n \"\"\"\n return self.message\n\n @property\n def is_available_to_buy(self):\n \"\"\"\n Test if this product is available to be bought. This is used for\n validation when a product is added to a user's basket.\n \"\"\"\n # We test a purchase of a single item\n return self.is_purchase_permitted(1)[0]\n\n def is_purchase_permitted(self, quantity):\n \"\"\"\n Test whether a proposed purchase is allowed\n\n Should return a boolean and a reason\n \"\"\"\n return False, _(\"unavailable\")\n\n\n# Common availability policies\n\n\nclass Unavailable(Base):\n \"\"\"\n Policy for when a product is unavailable\n \"\"\"\n code = 'unavailable'\n message = _(\"Unavailable\")\n\n\nclass Available(Base):\n \"\"\"\n For when a product is always available, irrespective of stock level.\n\n This might be appropriate for digital products where stock doesn't need to\n be tracked and the product is always available to buy.\n \"\"\"\n code = 'available'\n message = _(\"Available\")\n\n def is_purchase_permitted(self, quantity):\n return True, \"\"\n\n\nclass StockRequired(Base):\n \"\"\"\n Allow a product to be bought while there is stock. This policy is\n instantiated with a stock number (``num_available``). It ensures that the\n product is only available to buy while there is stock available.\n\n This is suitable for physical products where back orders (eg allowing\n purchases when there isn't stock available) are not permitted.\n \"\"\"\n CODE_IN_STOCK = 'instock'\n CODE_OUT_OF_STOCK = 'outofstock'\n\n def __init__(self, num_available):\n self.num_available = num_available\n\n def is_purchase_permitted(self, quantity):\n if self.num_available == 0:\n return False, _(\"no stock available\")\n if quantity > self.num_available:\n msg = _(\"a maximum of %(max)d can be bought\") % {\n 'max': self.num_available}\n return False, msg\n return True, \"\"\n\n @property\n def code(self):\n if self.num_available > 0:\n return self.CODE_IN_STOCK\n return self.CODE_OUT_OF_STOCK\n\n @property\n def short_message(self):\n if self.num_available > 0:\n return _(\"In stock\")\n return _(\"Unavailable\")\n\n @property\n def message(self):\n if self.num_available > 0:\n return _(\"In stock (%d available)\") % self.num_available\n return _(\"Unavailable\")\n", "path": "src/oscar/apps/partner/availability.py"}], "after_files": [{"content": "from django.utils.translation import ugettext_lazy as _\n\n\nclass Base(object):\n \"\"\"\n Base availability policy.\n \"\"\"\n\n #: Availability code. This is used for HTML classes\n code = ''\n\n #: A description of the availability of a product. This is shown on the\n #: product detail page. Eg \"In stock\", \"Out of stock\" etc\n message = ''\n\n #: When this item should be dispatched\n dispatch_date = None\n\n @property\n def short_message(self):\n \"\"\"\n A shorter version of the availability message, suitable for showing on\n browsing pages.\n \"\"\"\n return self.message\n\n @property\n def is_available_to_buy(self):\n \"\"\"\n Test if this product is available to be bought. This is used for\n validation when a product is added to a user's basket.\n \"\"\"\n # We test a purchase of a single item\n return self.is_purchase_permitted(1)[0]\n\n def is_purchase_permitted(self, quantity):\n \"\"\"\n Test whether a proposed purchase is allowed\n\n Should return a boolean and a reason\n \"\"\"\n return False, _(\"unavailable\")\n\n\n# Common availability policies\n\n\nclass Unavailable(Base):\n \"\"\"\n Policy for when a product is unavailable\n \"\"\"\n code = 'unavailable'\n message = _(\"Unavailable\")\n\n\nclass Available(Base):\n \"\"\"\n For when a product is always available, irrespective of stock level.\n\n This might be appropriate for digital products where stock doesn't need to\n be tracked and the product is always available to buy.\n \"\"\"\n code = 'available'\n message = _(\"Available\")\n\n def is_purchase_permitted(self, quantity):\n return True, \"\"\n\n\nclass StockRequired(Base):\n \"\"\"\n Allow a product to be bought while there is stock. This policy is\n instantiated with a stock number (``num_available``). It ensures that the\n product is only available to buy while there is stock available.\n\n This is suitable for physical products where back orders (eg allowing\n purchases when there isn't stock available) are not permitted.\n \"\"\"\n CODE_IN_STOCK = 'instock'\n CODE_OUT_OF_STOCK = 'outofstock'\n\n def __init__(self, num_available):\n self.num_available = num_available\n\n def is_purchase_permitted(self, quantity):\n if self.num_available <= 0:\n return False, _(\"no stock available\")\n if quantity > self.num_available:\n msg = _(\"a maximum of %(max)d can be bought\") % {\n 'max': self.num_available}\n return False, msg\n return True, \"\"\n\n @property\n def code(self):\n if self.num_available > 0:\n return self.CODE_IN_STOCK\n return self.CODE_OUT_OF_STOCK\n\n @property\n def short_message(self):\n if self.num_available > 0:\n return _(\"In stock\")\n return _(\"Unavailable\")\n\n @property\n def message(self):\n if self.num_available > 0:\n return _(\"In stock (%d available)\") % self.num_available\n return _(\"Unavailable\")\n", "path": "src/oscar/apps/partner/availability.py"}]} |
gh_patches_debug_1560 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-3693 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
After build is triggered, state is shown as failed
To reproduce, fire off a new build, make sure to catch the build list page while VCS operations are happening. Build will be in a failure state. This is a regression where we are setting the state of the build to failed without checking that the build has completed. This might be a byproduct of using multiple environments during the build process.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/vcs_support/backends/git.py`
Content:
```
1 """Git-related utilities."""
2
3 from __future__ import absolute_import
4
5 import csv
6 import logging
7 import os
8 import re
9
10 from builtins import str
11 from six import StringIO
12
13 from readthedocs.projects.exceptions import RepositoryError
14 from readthedocs.vcs_support.base import BaseVCS, VCSVersion
15
16
17 log = logging.getLogger(__name__)
18
19
20 class Backend(BaseVCS):
21
22 """Git VCS backend."""
23
24 supports_tags = True
25 supports_branches = True
26 fallback_branch = 'master' # default branch
27
28 def __init__(self, *args, **kwargs):
29 super(Backend, self).__init__(*args, **kwargs)
30 self.token = kwargs.get('token', None)
31 self.repo_url = self._get_clone_url()
32
33 def _get_clone_url(self):
34 if '://' in self.repo_url:
35 hacked_url = self.repo_url.split('://')[1]
36 hacked_url = re.sub('.git$', '', hacked_url)
37 clone_url = 'https://%s' % hacked_url
38 if self.token:
39 clone_url = 'https://%s@%s' % (self.token, hacked_url)
40 return clone_url
41 # Don't edit URL because all hosts aren't the same
42
43 # else:
44 # clone_url = 'git://%s' % (hacked_url)
45 return self.repo_url
46
47 def set_remote_url(self, url):
48 return self.run('git', 'remote', 'set-url', 'origin', url)
49
50 def update(self):
51 # Use checkout() to update repo
52 self.checkout()
53
54 def repo_exists(self):
55 code, _, _ = self.run('git', 'status', record=False)
56 return code == 0
57
58 def submodules_exists(self):
59 code, out, _ = self.run('git', 'submodule', 'status', record=False)
60 return code == 0 and bool(out)
61
62 def fetch(self):
63 code, _, _ = self.run('git', 'fetch', '--tags', '--prune')
64 if code != 0:
65 raise RepositoryError
66
67 def checkout_revision(self, revision=None):
68 if not revision:
69 branch = self.default_branch or self.fallback_branch
70 revision = 'origin/%s' % branch
71
72 code, out, err = self.run(
73 'git', 'checkout', '--force', revision)
74 if code != 0:
75 log.warning("Failed to checkout revision '%s': %s",
76 revision, code)
77 return [code, out, err]
78
79 def clone(self):
80 code, _, _ = self.run(
81 'git', 'clone', '--recursive', self.repo_url, '.')
82 if code != 0:
83 raise RepositoryError
84
85 @property
86 def tags(self):
87 retcode, stdout, _ = self.run('git', 'show-ref', '--tags', record_as_success=True)
88 # error (or no tags found)
89 if retcode != 0:
90 return []
91 return self.parse_tags(stdout)
92
93 def parse_tags(self, data):
94 """
95 Parses output of show-ref --tags, eg:
96
97 3b32886c8d3cb815df3793b3937b2e91d0fb00f1 refs/tags/2.0.0
98 bd533a768ff661991a689d3758fcfe72f455435d refs/tags/2.0.1
99 c0288a17899b2c6818f74e3a90b77e2a1779f96a refs/tags/2.0.2
100 a63a2de628a3ce89034b7d1a5ca5e8159534eef0 refs/tags/2.1.0.beta2
101 c7fc3d16ed9dc0b19f0d27583ca661a64562d21e refs/tags/2.1.0.rc1
102 edc0a2d02a0cc8eae8b67a3a275f65cd126c05b1 refs/tags/2.1.0.rc2
103
104 Into VCSTag objects with the tag name as verbose_name and the commit
105 hash as identifier.
106 """
107 # parse the lines into a list of tuples (commit-hash, tag ref name)
108 # StringIO below is expecting Unicode data, so ensure that it gets it.
109 if not isinstance(data, str):
110 data = str(data)
111 raw_tags = csv.reader(StringIO(data), delimiter=' ')
112 vcs_tags = []
113 for row in raw_tags:
114 row = [f for f in row if f != '']
115 if row == []:
116 continue
117 commit_hash, name = row
118 clean_name = name.replace('refs/tags/', '')
119 vcs_tags.append(VCSVersion(self, commit_hash, clean_name))
120 return vcs_tags
121
122 @property
123 def branches(self):
124 # Only show remote branches
125 retcode, stdout, _ = self.run('git', 'branch', '-r')
126 # error (or no tags found)
127 if retcode != 0:
128 return []
129 return self.parse_branches(stdout)
130
131 def parse_branches(self, data):
132 """
133 Parse output of git branch -r
134
135 e.g.:
136
137 origin/2.0.X
138 origin/HEAD -> origin/master
139 origin/develop
140 origin/master
141 origin/release/2.0.0
142 origin/release/2.1.0
143 """
144 clean_branches = []
145 # StringIO below is expecting Unicode data, so ensure that it gets it.
146 if not isinstance(data, str):
147 data = str(data)
148 raw_branches = csv.reader(StringIO(data), delimiter=' ')
149 for branch in raw_branches:
150 branch = [f for f in branch if f != '' and f != '*']
151 # Handle empty branches
152 if branch:
153 branch = branch[0]
154 if branch.startswith('origin/'):
155 verbose_name = branch.replace('origin/', '')
156 if verbose_name in ['HEAD']:
157 continue
158 clean_branches.append(VCSVersion(self, branch, verbose_name))
159 else:
160 clean_branches.append(VCSVersion(self, branch, branch))
161 return clean_branches
162
163 @property
164 def commit(self):
165 _, stdout, _ = self.run('git', 'rev-parse', 'HEAD')
166 return stdout.strip()
167
168 def checkout(self, identifier=None):
169 self.check_working_dir()
170
171 # Clone or update repository
172 if self.repo_exists():
173 self.set_remote_url(self.repo_url)
174 self.fetch()
175 else:
176 self.make_clean_working_dir()
177 self.clone()
178
179 # Find proper identifier
180 if not identifier:
181 identifier = self.default_branch or self.fallback_branch
182
183 identifier = self.find_ref(identifier)
184
185 # Checkout the correct identifier for this branch.
186 code, out, err = self.checkout_revision(identifier)
187 if code != 0:
188 return code, out, err
189
190 # Clean any remains of previous checkouts
191 self.run('git', 'clean', '-d', '-f', '-f')
192
193 # Update submodules
194 if self.submodules_exists():
195 self.run('git', 'submodule', 'sync')
196 self.run('git', 'submodule', 'update',
197 '--init', '--recursive', '--force')
198
199 return code, out, err
200
201 def find_ref(self, ref):
202 # Check if ref starts with 'origin/'
203 if ref.startswith('origin/'):
204 return ref
205
206 # Check if ref is a branch of the origin remote
207 if self.ref_exists('remotes/origin/' + ref):
208 return 'origin/' + ref
209
210 return ref
211
212 def ref_exists(self, ref):
213 code, _, _ = self.run('git', 'show-ref', ref, record_as_success=True)
214 return code == 0
215
216 @property
217 def env(self):
218 env = super(Backend, self).env
219 env['GIT_DIR'] = os.path.join(self.working_dir, '.git')
220 # Don't prompt for username, this requires Git 2.3+
221 env['GIT_TERMINAL_PROMPT'] = '0'
222 return env
223
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/readthedocs/vcs_support/backends/git.py b/readthedocs/vcs_support/backends/git.py
--- a/readthedocs/vcs_support/backends/git.py
+++ b/readthedocs/vcs_support/backends/git.py
@@ -122,8 +122,8 @@
@property
def branches(self):
# Only show remote branches
- retcode, stdout, _ = self.run('git', 'branch', '-r')
- # error (or no tags found)
+ retcode, stdout, _ = self.run('git', 'branch', '-r', record_as_success=True)
+ # error (or no branches found)
if retcode != 0:
return []
return self.parse_branches(stdout)
| {"golden_diff": "diff --git a/readthedocs/vcs_support/backends/git.py b/readthedocs/vcs_support/backends/git.py\n--- a/readthedocs/vcs_support/backends/git.py\n+++ b/readthedocs/vcs_support/backends/git.py\n@@ -122,8 +122,8 @@\n @property\n def branches(self):\n # Only show remote branches\n- retcode, stdout, _ = self.run('git', 'branch', '-r')\n- # error (or no tags found)\n+ retcode, stdout, _ = self.run('git', 'branch', '-r', record_as_success=True)\n+ # error (or no branches found)\n if retcode != 0:\n return []\n return self.parse_branches(stdout)\n", "issue": "After build is triggered, state is shown as failed\nTo reproduce, fire off a new build, make sure to catch the build list page while VCS operations are happening. Build will be in a failure state. This is a regression where we are setting the state of the build to failed without checking that the build has completed. This might be a byproduct of using multiple environments during the build process.\n", "before_files": [{"content": "\"\"\"Git-related utilities.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport csv\nimport logging\nimport os\nimport re\n\nfrom builtins import str\nfrom six import StringIO\n\nfrom readthedocs.projects.exceptions import RepositoryError\nfrom readthedocs.vcs_support.base import BaseVCS, VCSVersion\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Backend(BaseVCS):\n\n \"\"\"Git VCS backend.\"\"\"\n\n supports_tags = True\n supports_branches = True\n fallback_branch = 'master' # default branch\n\n def __init__(self, *args, **kwargs):\n super(Backend, self).__init__(*args, **kwargs)\n self.token = kwargs.get('token', None)\n self.repo_url = self._get_clone_url()\n\n def _get_clone_url(self):\n if '://' in self.repo_url:\n hacked_url = self.repo_url.split('://')[1]\n hacked_url = re.sub('.git$', '', hacked_url)\n clone_url = 'https://%s' % hacked_url\n if self.token:\n clone_url = 'https://%s@%s' % (self.token, hacked_url)\n return clone_url\n # Don't edit URL because all hosts aren't the same\n\n # else:\n # clone_url = 'git://%s' % (hacked_url)\n return self.repo_url\n\n def set_remote_url(self, url):\n return self.run('git', 'remote', 'set-url', 'origin', url)\n\n def update(self):\n # Use checkout() to update repo\n self.checkout()\n\n def repo_exists(self):\n code, _, _ = self.run('git', 'status', record=False)\n return code == 0\n\n def submodules_exists(self):\n code, out, _ = self.run('git', 'submodule', 'status', record=False)\n return code == 0 and bool(out)\n\n def fetch(self):\n code, _, _ = self.run('git', 'fetch', '--tags', '--prune')\n if code != 0:\n raise RepositoryError\n\n def checkout_revision(self, revision=None):\n if not revision:\n branch = self.default_branch or self.fallback_branch\n revision = 'origin/%s' % branch\n\n code, out, err = self.run(\n 'git', 'checkout', '--force', revision)\n if code != 0:\n log.warning(\"Failed to checkout revision '%s': %s\",\n revision, code)\n return [code, out, err]\n\n def clone(self):\n code, _, _ = self.run(\n 'git', 'clone', '--recursive', self.repo_url, '.')\n if code != 0:\n raise RepositoryError\n\n @property\n def tags(self):\n retcode, stdout, _ = self.run('git', 'show-ref', '--tags', record_as_success=True)\n # error (or no tags found)\n if retcode != 0:\n return []\n return self.parse_tags(stdout)\n\n def parse_tags(self, data):\n \"\"\"\n Parses output of show-ref --tags, eg:\n\n 3b32886c8d3cb815df3793b3937b2e91d0fb00f1 refs/tags/2.0.0\n bd533a768ff661991a689d3758fcfe72f455435d refs/tags/2.0.1\n c0288a17899b2c6818f74e3a90b77e2a1779f96a refs/tags/2.0.2\n a63a2de628a3ce89034b7d1a5ca5e8159534eef0 refs/tags/2.1.0.beta2\n c7fc3d16ed9dc0b19f0d27583ca661a64562d21e refs/tags/2.1.0.rc1\n edc0a2d02a0cc8eae8b67a3a275f65cd126c05b1 refs/tags/2.1.0.rc2\n\n Into VCSTag objects with the tag name as verbose_name and the commit\n hash as identifier.\n \"\"\"\n # parse the lines into a list of tuples (commit-hash, tag ref name)\n # StringIO below is expecting Unicode data, so ensure that it gets it.\n if not isinstance(data, str):\n data = str(data)\n raw_tags = csv.reader(StringIO(data), delimiter=' ')\n vcs_tags = []\n for row in raw_tags:\n row = [f for f in row if f != '']\n if row == []:\n continue\n commit_hash, name = row\n clean_name = name.replace('refs/tags/', '')\n vcs_tags.append(VCSVersion(self, commit_hash, clean_name))\n return vcs_tags\n\n @property\n def branches(self):\n # Only show remote branches\n retcode, stdout, _ = self.run('git', 'branch', '-r')\n # error (or no tags found)\n if retcode != 0:\n return []\n return self.parse_branches(stdout)\n\n def parse_branches(self, data):\n \"\"\"\n Parse output of git branch -r\n\n e.g.:\n\n origin/2.0.X\n origin/HEAD -> origin/master\n origin/develop\n origin/master\n origin/release/2.0.0\n origin/release/2.1.0\n \"\"\"\n clean_branches = []\n # StringIO below is expecting Unicode data, so ensure that it gets it.\n if not isinstance(data, str):\n data = str(data)\n raw_branches = csv.reader(StringIO(data), delimiter=' ')\n for branch in raw_branches:\n branch = [f for f in branch if f != '' and f != '*']\n # Handle empty branches\n if branch:\n branch = branch[0]\n if branch.startswith('origin/'):\n verbose_name = branch.replace('origin/', '')\n if verbose_name in ['HEAD']:\n continue\n clean_branches.append(VCSVersion(self, branch, verbose_name))\n else:\n clean_branches.append(VCSVersion(self, branch, branch))\n return clean_branches\n\n @property\n def commit(self):\n _, stdout, _ = self.run('git', 'rev-parse', 'HEAD')\n return stdout.strip()\n\n def checkout(self, identifier=None):\n self.check_working_dir()\n\n # Clone or update repository\n if self.repo_exists():\n self.set_remote_url(self.repo_url)\n self.fetch()\n else:\n self.make_clean_working_dir()\n self.clone()\n\n # Find proper identifier\n if not identifier:\n identifier = self.default_branch or self.fallback_branch\n\n identifier = self.find_ref(identifier)\n\n # Checkout the correct identifier for this branch.\n code, out, err = self.checkout_revision(identifier)\n if code != 0:\n return code, out, err\n\n # Clean any remains of previous checkouts\n self.run('git', 'clean', '-d', '-f', '-f')\n\n # Update submodules\n if self.submodules_exists():\n self.run('git', 'submodule', 'sync')\n self.run('git', 'submodule', 'update',\n '--init', '--recursive', '--force')\n\n return code, out, err\n\n def find_ref(self, ref):\n # Check if ref starts with 'origin/'\n if ref.startswith('origin/'):\n return ref\n\n # Check if ref is a branch of the origin remote\n if self.ref_exists('remotes/origin/' + ref):\n return 'origin/' + ref\n\n return ref\n\n def ref_exists(self, ref):\n code, _, _ = self.run('git', 'show-ref', ref, record_as_success=True)\n return code == 0\n\n @property\n def env(self):\n env = super(Backend, self).env\n env['GIT_DIR'] = os.path.join(self.working_dir, '.git')\n # Don't prompt for username, this requires Git 2.3+\n env['GIT_TERMINAL_PROMPT'] = '0'\n return env\n", "path": "readthedocs/vcs_support/backends/git.py"}], "after_files": [{"content": "\"\"\"Git-related utilities.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport csv\nimport logging\nimport os\nimport re\n\nfrom builtins import str\nfrom six import StringIO\n\nfrom readthedocs.projects.exceptions import RepositoryError\nfrom readthedocs.vcs_support.base import BaseVCS, VCSVersion\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Backend(BaseVCS):\n\n \"\"\"Git VCS backend.\"\"\"\n\n supports_tags = True\n supports_branches = True\n fallback_branch = 'master' # default branch\n\n def __init__(self, *args, **kwargs):\n super(Backend, self).__init__(*args, **kwargs)\n self.token = kwargs.get('token', None)\n self.repo_url = self._get_clone_url()\n\n def _get_clone_url(self):\n if '://' in self.repo_url:\n hacked_url = self.repo_url.split('://')[1]\n hacked_url = re.sub('.git$', '', hacked_url)\n clone_url = 'https://%s' % hacked_url\n if self.token:\n clone_url = 'https://%s@%s' % (self.token, hacked_url)\n return clone_url\n # Don't edit URL because all hosts aren't the same\n\n # else:\n # clone_url = 'git://%s' % (hacked_url)\n return self.repo_url\n\n def set_remote_url(self, url):\n return self.run('git', 'remote', 'set-url', 'origin', url)\n\n def update(self):\n # Use checkout() to update repo\n self.checkout()\n\n def repo_exists(self):\n code, _, _ = self.run('git', 'status', record=False)\n return code == 0\n\n def submodules_exists(self):\n code, out, _ = self.run('git', 'submodule', 'status', record=False)\n return code == 0 and bool(out)\n\n def fetch(self):\n code, _, _ = self.run('git', 'fetch', '--tags', '--prune')\n if code != 0:\n raise RepositoryError\n\n def checkout_revision(self, revision=None):\n if not revision:\n branch = self.default_branch or self.fallback_branch\n revision = 'origin/%s' % branch\n\n code, out, err = self.run(\n 'git', 'checkout', '--force', revision)\n if code != 0:\n log.warning(\"Failed to checkout revision '%s': %s\",\n revision, code)\n return [code, out, err]\n\n def clone(self):\n code, _, _ = self.run(\n 'git', 'clone', '--recursive', self.repo_url, '.')\n if code != 0:\n raise RepositoryError\n\n @property\n def tags(self):\n retcode, stdout, _ = self.run('git', 'show-ref', '--tags', record_as_success=True)\n # error (or no tags found)\n if retcode != 0:\n return []\n return self.parse_tags(stdout)\n\n def parse_tags(self, data):\n \"\"\"\n Parses output of show-ref --tags, eg:\n\n 3b32886c8d3cb815df3793b3937b2e91d0fb00f1 refs/tags/2.0.0\n bd533a768ff661991a689d3758fcfe72f455435d refs/tags/2.0.1\n c0288a17899b2c6818f74e3a90b77e2a1779f96a refs/tags/2.0.2\n a63a2de628a3ce89034b7d1a5ca5e8159534eef0 refs/tags/2.1.0.beta2\n c7fc3d16ed9dc0b19f0d27583ca661a64562d21e refs/tags/2.1.0.rc1\n edc0a2d02a0cc8eae8b67a3a275f65cd126c05b1 refs/tags/2.1.0.rc2\n\n Into VCSTag objects with the tag name as verbose_name and the commit\n hash as identifier.\n \"\"\"\n # parse the lines into a list of tuples (commit-hash, tag ref name)\n # StringIO below is expecting Unicode data, so ensure that it gets it.\n if not isinstance(data, str):\n data = str(data)\n raw_tags = csv.reader(StringIO(data), delimiter=' ')\n vcs_tags = []\n for row in raw_tags:\n row = [f for f in row if f != '']\n if row == []:\n continue\n commit_hash, name = row\n clean_name = name.replace('refs/tags/', '')\n vcs_tags.append(VCSVersion(self, commit_hash, clean_name))\n return vcs_tags\n\n @property\n def branches(self):\n # Only show remote branches\n retcode, stdout, _ = self.run('git', 'branch', '-r', record_as_success=True)\n # error (or no branches found)\n if retcode != 0:\n return []\n return self.parse_branches(stdout)\n\n def parse_branches(self, data):\n \"\"\"\n Parse output of git branch -r\n\n e.g.:\n\n origin/2.0.X\n origin/HEAD -> origin/master\n origin/develop\n origin/master\n origin/release/2.0.0\n origin/release/2.1.0\n \"\"\"\n clean_branches = []\n # StringIO below is expecting Unicode data, so ensure that it gets it.\n if not isinstance(data, str):\n data = str(data)\n raw_branches = csv.reader(StringIO(data), delimiter=' ')\n for branch in raw_branches:\n branch = [f for f in branch if f != '' and f != '*']\n # Handle empty branches\n if branch:\n branch = branch[0]\n if branch.startswith('origin/'):\n verbose_name = branch.replace('origin/', '')\n if verbose_name in ['HEAD']:\n continue\n clean_branches.append(VCSVersion(self, branch, verbose_name))\n else:\n clean_branches.append(VCSVersion(self, branch, branch))\n return clean_branches\n\n @property\n def commit(self):\n _, stdout, _ = self.run('git', 'rev-parse', 'HEAD')\n return stdout.strip()\n\n def checkout(self, identifier=None):\n self.check_working_dir()\n\n # Clone or update repository\n if self.repo_exists():\n self.set_remote_url(self.repo_url)\n self.fetch()\n else:\n self.make_clean_working_dir()\n self.clone()\n\n # Find proper identifier\n if not identifier:\n identifier = self.default_branch or self.fallback_branch\n\n identifier = self.find_ref(identifier)\n\n # Checkout the correct identifier for this branch.\n code, out, err = self.checkout_revision(identifier)\n if code != 0:\n return code, out, err\n\n # Clean any remains of previous checkouts\n self.run('git', 'clean', '-d', '-f', '-f')\n\n # Update submodules\n if self.submodules_exists():\n self.run('git', 'submodule', 'sync')\n self.run('git', 'submodule', 'update',\n '--init', '--recursive', '--force')\n\n return code, out, err\n\n def find_ref(self, ref):\n # Check if ref starts with 'origin/'\n if ref.startswith('origin/'):\n return ref\n\n # Check if ref is a branch of the origin remote\n if self.ref_exists('remotes/origin/' + ref):\n return 'origin/' + ref\n\n return ref\n\n def ref_exists(self, ref):\n code, _, _ = self.run('git', 'show-ref', ref, record_as_success=True)\n return code == 0\n\n @property\n def env(self):\n env = super(Backend, self).env\n env['GIT_DIR'] = os.path.join(self.working_dir, '.git')\n # Don't prompt for username, this requires Git 2.3+\n env['GIT_TERMINAL_PROMPT'] = '0'\n return env\n", "path": "readthedocs/vcs_support/backends/git.py"}]} |
gh_patches_debug_1561 | rasdani/github-patches | git_diff | canonical__microk8s-3573 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
microk8s suggests command hiding error info
<!--
Thank you for submitting an issue. Please fill in the template below
information about the bug you encountered.
-->
#### Summary
<!-- Please explain the bug in a few short sentences -->
When you try to run microk8s with insufficient permissions, you get an error and suggestion:
```console
$ microk8s status
Insufficient permissions to access MicroK8s.
You can either try again with sudo or add the user runner to the 'snap_microk8s' group:
sudo usermod -a -G snap_microk8s runner
sudo chown -f -R runner ~/.kube
After this, reload the user groups either via a reboot or by running 'newgrp snap_microk8s'.
```
However, if you don't have a `~/.kube` file, the `chown` command will fail silently and suppress the error message. This can cause failures e.g. in GitHub runners / scripts with `set -e`, and the `-f` option will hide the error message. This makes it very hard to debug the failure as there are no log messages.
#### What Should Happen Instead?
<!-- Please explain what the expected behavior is -->
Suggest to the user not to use the `-f` flag in `chown`. There is no reason to suppress the error message.
```console
$ microk8s status
Insufficient permissions to access MicroK8s.
You can either try again with sudo or add the user runner to the 'snap_microk8s' group:
sudo usermod -a -G snap_microk8s runner
sudo chown -R runner ~/.kube
After this, reload the user groups either via a reboot or by running 'newgrp snap_microk8s'.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/wrappers/common/utils.py`
Content:
```
1 import getpass
2 import json
3 import os
4 import platform
5 import subprocess
6 import sys
7 import time
8 from pathlib import Path
9 import logging
10
11 import click
12 import yaml
13
14 LOG = logging.getLogger(__name__)
15
16 KUBECTL = os.path.expandvars("$SNAP/microk8s-kubectl.wrapper")
17
18
19 def get_group():
20 return "snap_microk8s" if is_strict() else "microk8s"
21
22
23 def is_strict():
24 snap_yaml = snap() / "meta/snap.yaml"
25 with open(snap_yaml) as f:
26 snap_meta = yaml.safe_load(f)
27 return snap_meta["confinement"] == "strict"
28
29
30 def get_current_arch():
31 # architecture mapping
32 arch_mapping = {
33 "aarch64": "arm64",
34 "armv7l": "armhf",
35 "x86_64": "amd64",
36 "s390x": "s390x",
37 "ppc64le": "ppc64le",
38 "ppc64el": "ppc64le",
39 }
40
41 return arch_mapping[platform.machine()]
42
43
44 def snap() -> Path:
45 try:
46 return Path(os.environ["SNAP"])
47 except KeyError:
48 return Path("/snap/microk8s/current")
49
50
51 def snap_data() -> Path:
52 try:
53 return Path(os.environ["SNAP_DATA"])
54 except KeyError:
55 return Path("/var/snap/microk8s/current")
56
57
58 def snap_common() -> Path:
59 try:
60 return Path(os.environ["SNAP_COMMON"])
61 except KeyError:
62 return Path("/var/snap/microk8s/common")
63
64
65 def run(*args, die=True):
66 # Add wrappers to $PATH
67 env = os.environ.copy()
68 env["PATH"] += ":%s" % os.environ["SNAP"]
69 result = subprocess.run(
70 args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
71 )
72
73 try:
74 result.check_returncode()
75 except subprocess.CalledProcessError as err:
76 if die:
77 if result.stderr:
78 print(result.stderr.decode("utf-8"))
79 print(err)
80 sys.exit(1)
81 else:
82 raise
83
84 return result.stdout.decode("utf-8")
85
86
87 def is_cluster_ready():
88 try:
89 service_output = kubectl_get("all")
90 node_output = kubectl_get("nodes")
91 # Make sure to compare with the word " Ready " with spaces.
92 if " Ready " in node_output and "service/kubernetes" in service_output:
93 return True
94 else:
95 return False
96 except Exception:
97 return False
98
99
100 def is_ha_enabled():
101 ha_lock = os.path.expandvars("${SNAP_DATA}/var/lock/ha-cluster")
102 return os.path.isfile(ha_lock)
103
104
105 def get_dqlite_info():
106 cluster_dir = os.path.expandvars("${SNAP_DATA}/var/kubernetes/backend")
107 snap_path = os.environ.get("SNAP")
108
109 info = []
110
111 if not is_ha_enabled():
112 return info
113
114 waits = 10
115 while waits > 0:
116 try:
117 with open("{}/info.yaml".format(cluster_dir), mode="r") as f:
118 data = yaml.safe_load(f)
119 out = subprocess.check_output(
120 "{snappath}/bin/dqlite -s file://{dbdir}/cluster.yaml -c {dbdir}/cluster.crt "
121 "-k {dbdir}/cluster.key -f json k8s .cluster".format(
122 snappath=snap_path, dbdir=cluster_dir
123 ).split(),
124 timeout=4,
125 stderr=subprocess.DEVNULL,
126 )
127 if data["Address"] in out.decode():
128 break
129 else:
130 time.sleep(5)
131 waits -= 1
132 except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
133 time.sleep(2)
134 waits -= 1
135
136 if waits == 0:
137 return info
138
139 nodes = json.loads(out.decode())
140 for n in nodes:
141 if n["Role"] == 0:
142 info.append((n["Address"], "voter"))
143 if n["Role"] == 1:
144 info.append((n["Address"], "standby"))
145 if n["Role"] == 2:
146 info.append((n["Address"], "spare"))
147 return info
148
149
150 def is_cluster_locked():
151 if (snap_data() / "var/lock/clustered.lock").exists():
152 click.echo("This MicroK8s deployment is acting as a node in a cluster.")
153 click.echo("Please use the master node.")
154 sys.exit(1)
155
156
157 def wait_for_ready(timeout):
158 start_time = time.time()
159
160 while True:
161 if is_cluster_ready():
162 return True
163 elif timeout and time.time() > start_time + timeout:
164 return False
165 else:
166 time.sleep(2)
167
168
169 def exit_if_no_root():
170 """
171 Exit if the user is not root
172 """
173 if not os.geteuid() == 0:
174 click.echo(
175 "Elevated permissions is needed for this operation. Please run this command with sudo."
176 )
177 exit(50)
178
179
180 def exit_if_stopped():
181 stoppedLockFile = os.path.expandvars("${SNAP_DATA}/var/lock/stopped.lock")
182 if os.path.isfile(stoppedLockFile):
183 print("microk8s is not running, try microk8s start")
184 exit(0)
185
186
187 def exit_if_no_permission():
188 user = getpass.getuser()
189 # test if we can access the default kubeconfig
190 clientConfigFile = os.path.expandvars("${SNAP_DATA}/credentials/client.config")
191 if not os.access(clientConfigFile, os.R_OK):
192 print("Insufficient permissions to access MicroK8s.")
193 print(
194 "You can either try again with sudo or add the user {} to the 'microk8s' group:".format(
195 user
196 )
197 )
198 print("")
199 print(" sudo usermod -a -G microk8s {}".format(user))
200 print(" sudo chown -f -R $USER ~/.kube")
201 print("")
202 print(
203 "After this, reload the user groups either via a reboot or by running 'newgrp microk8s'."
204 )
205 exit(1)
206
207
208 def ensure_started():
209 if (snap_data() / "var/lock/stopped.lock").exists():
210 click.echo("microk8s is not running, try microk8s start", err=True)
211 sys.exit(1)
212
213
214 def kubectl_get(cmd, namespace="--all-namespaces"):
215 if namespace == "--all-namespaces":
216 return run(KUBECTL, "get", cmd, "--all-namespaces", die=False)
217 else:
218 return run(KUBECTL, "get", cmd, "-n", namespace, die=False)
219
220
221 def kubectl_get_clusterroles():
222 return run(
223 KUBECTL,
224 "get",
225 "clusterroles",
226 "--show-kind",
227 "--no-headers",
228 die=False,
229 )
230
231
232 def is_community_addon(arch, addon_name):
233 """
234 Check if an addon is part of the community repo.
235
236 :param arch: architecture of the addon we are looking for
237 :param addon_name: name of the addon we are looking for
238 :return: True if the addon is in the community repo
239 """
240 try:
241 addons_yaml = f"{os.environ['SNAP']}/addons/community/addons.yaml"
242 with open(addons_yaml, "r") as fin:
243 addons = yaml.safe_load(fin)
244
245 for addon in addons["microk8s-addons"]["addons"]:
246 if arch in addon["supported_architectures"]:
247 if addon_name == addon["name"]:
248 return True
249 except Exception:
250 LOG.exception("could not load addons from %s", addons_yaml)
251
252 return False
253
254
255 def get_available_addons(arch):
256 available = []
257 strict = is_strict()
258 for dir in os.listdir(snap_common() / "addons"):
259 try:
260 addons_yaml = snap_common() / "addons" / dir / "addons.yaml"
261 with open(addons_yaml, "r") as fin:
262 addons = yaml.safe_load(fin)
263
264 for addon in addons["microk8s-addons"]["addons"]:
265 if arch not in addon["supported_architectures"]:
266 continue
267
268 if "confinement" in addon:
269 if strict and "strict" not in addon["confinement"]:
270 continue
271 if not strict and "classic" not in addon["confinement"]:
272 continue
273
274 available.append({**addon, "repository": dir})
275
276 except Exception:
277 LOG.exception("could not load addons from %s", addons_yaml)
278
279 available = sorted(available, key=lambda k: (k["repository"], k["name"]))
280 return available
281
282
283 def get_addon_by_name(addons, name):
284 filtered_addon = []
285
286 parts = name.split("/")
287 if len(parts) == 1:
288 repo_name, addon_name = None, parts[0]
289 elif len(parts) == 2:
290 repo_name, addon_name = parts[0], parts[1]
291 else:
292 # just fallback to the addon name
293 repo_name, addon_name = None, name
294
295 for addon in addons:
296 if addon_name == addon["name"] and (repo_name == addon["repository"] or not repo_name):
297 filtered_addon.append(addon)
298
299 return filtered_addon
300
301
302 def is_service_expected_to_start(service):
303 """
304 Check if a service is supposed to start
305 :param service: the service name
306 :return: True if the service is meant to start
307 """
308 lock_path = os.path.expandvars("${SNAP_DATA}/var/lock")
309 lock = "{}/{}".format(lock_path, service)
310 return os.path.exists(lock_path) and not os.path.isfile(lock)
311
312
313 def set_service_expected_to_start(service, start=True):
314 """
315 Check if a service is not expected to start.
316 :param service: the service name
317 :param start: should the service start or not
318 """
319 lock_path = os.path.expandvars("${SNAP_DATA}/var/lock")
320 lock = "{}/{}".format(lock_path, service)
321 if start:
322 os.remove(lock)
323 else:
324 fd = os.open(lock, os.O_CREAT, mode=0o700)
325 os.close(fd)
326
327
328 def check_help_flag(addons: list) -> bool:
329 """Checks to see if a help message needs to be printed for an addon.
330
331 Not all addons check for help flags themselves. Until they do, intercept
332 calls to print help text and print out a generic message to that effect.
333 """
334 addon = addons[0]
335 if any(help_arg in addons for help_arg in ("-h", "--help")):
336 print("Addon %s does not yet have a help message." % addon)
337 print("For more information about it, visit https://microk8s.io/docs/addons")
338 return True
339 return False
340
341
342 def parse_xable_addon_args(addon_args: list, available_addons: list):
343 """
344 Parse the list of addons passed into the microk8s enable or disable commands.
345 Further, it will infer the repository name for addons when possible.
346 If any errors are encountered, we print them to stderr and exit.
347
348 :param addon_args: The parameters passed to the microk8s enable command
349 :param available_addons: List of available addons as (repo_name, addon_name) tuples
350
351 Handles the following cases:
352 - microk8s enable foo bar:--baz # enable many addons, inline arguments
353 - microk8s enable bar --baz # enable one addon, unix style command line arguments
354
355 :return: a list of (repo_name, addon_name, args) tuples
356 """
357
358 # Backwards compatibility with enabling multiple addons at once, e.g.
359 # `microk8s.enable foo bar:"baz"`
360 available_addon_names = [addon_name for (_, addon_name) in available_addons]
361 available_addon_names += [
362 "/".join([repo_name, addon_name]) for (repo_name, addon_name) in available_addons
363 ]
364 addon_names = [arg.split(":")[0] for arg in addon_args]
365 if set(addon_names) < set(available_addon_names):
366 return [parse_xable_single_arg(addon_arg, available_addons) for addon_arg in addon_args]
367
368 # The new way of xabling addons, that allows for unix-style argument passing,
369 # such as `microk8s.enable foo --bar`.
370 repo_name, addon_name, args = parse_xable_single_arg(addon_args[0], available_addons)
371 if args and addon_args[1:]:
372 click.echo(
373 "Can't pass string arguments and flag arguments simultaneously!\n"
374 "Enable or disable addons with only one argument style at a time:\n"
375 "\n"
376 " microk8s enable foo:'bar'\n"
377 "or\n"
378 " microk8s enable foo --bar\n"
379 )
380 sys.exit(1)
381
382 return [(repo_name, addon_name, addon_args[1:])]
383
384
385 def parse_xable_single_arg(addon_arg: str, available_addons: list):
386 """
387 Parse an addon arg of the following form: `(repo_name/)addon_name(:args)`
388 It will automatically infer the repository name if not specified. If multiple repositories
389 are found for the addon, we print an error and exit.
390
391 :param addon_arg: A parameter passed to the microk8s enable command
392 :param available_addons: List of available addons as (repo_name, addon_name) tuples
393
394 :return: a (repo_name, addon_name, args) tuple
395 """
396 addon_name, *args = addon_arg.split(":")
397 parts = addon_name.split("/")
398 if len(parts) == 2:
399 return (parts[0], parts[1], args)
400 elif len(parts) == 1:
401 matching_repos = [repo for (repo, addon) in available_addons if addon == addon_name]
402 if len(matching_repos) == 0:
403 click.echo("Addon {} was not found in any repository".format(addon_name), err=True)
404 if is_community_addon(get_current_arch(), addon_name):
405 click.echo(
406 "To use the community maintained flavor enable the respective repository:"
407 )
408 click.echo("")
409 click.echo(" microk8s enable community")
410 click.echo("")
411
412 sys.exit(1)
413 elif len(matching_repos) == 1:
414 click.echo(
415 "Infer repository {} for addon {}".format(matching_repos[0], addon_name), err=True
416 )
417 return (matching_repos[0], addon_name, args)
418 else:
419 click.echo(
420 "Addon {} exists in more than repository. Please explicitly specify\n"
421 "the repository using any of:\n".format(addon_name),
422 err=True,
423 )
424 for repo in matching_repos:
425 click.echo(" {}/{}".format(repo, addon_name), err=True)
426 click.echo("", err=True)
427 sys.exit(1)
428
429 else:
430 click.echo("Invalid addon name {}".format(addon_name))
431 sys.exit(1)
432
433
434 def xable(action: str, addon_args: list):
435 """Enables or disables the given addons.
436
437 Collated into a single function since the logic is identical other than
438 the script names.
439
440 :param action: "enable" or "disable"
441 :param addons: List of addons to enable. Each addon may be prefixed with `repository/`
442 to specify which addon repository it will be sourced from.
443 """
444 available_addons_info = get_available_addons(get_current_arch())
445 enabled_addons_info, disabled_addons_info = get_status(available_addons_info, True)
446 if action == "enable":
447 xabled_addons_info = enabled_addons_info
448 elif action == "disable":
449 xabled_addons_info = disabled_addons_info
450 else:
451 click.echo("Invalid action {}. Only enable and disable are supported".format(action))
452 sys.exit(1)
453
454 # available_addons is a list of (repo_name, addon_name) tuples for all available addons
455 available_addons = [(addon["repository"], addon["name"]) for addon in available_addons_info]
456 # xabled_addons is a list (repo_name, addon_name) tuples of already xabled addons
457 xabled_addons = [(addon["repository"], addon["name"]) for addon in xabled_addons_info]
458
459 addons = parse_xable_addon_args(addon_args, available_addons)
460
461 for repo_name, addon_name, args in addons:
462 if (repo_name, addon_name) not in available_addons:
463 click.echo("Addon {}/{} not found".format(repo_name, addon_name))
464 continue
465 if (repo_name, addon_name) in xabled_addons:
466 click.echo("Addon {}/{} is already {}d".format(repo_name, addon_name, action))
467 continue
468
469 wait_for_ready(timeout=30)
470 p = subprocess.run(
471 [snap_common() / "addons" / repo_name / "addons" / addon_name / action, *args]
472 )
473 if p.returncode:
474 sys.exit(p.returncode)
475 wait_for_ready(timeout=30)
476
477
478 def is_enabled(addon, item):
479 if addon in item:
480 return True
481 else:
482 filepath = os.path.expandvars(addon)
483 return os.path.isfile(filepath)
484
485
486 def get_status(available_addons, isReady):
487 enabled = []
488 disabled = []
489 if isReady:
490 # 'all' does not include ingress
491 kube_output = kubectl_get("all,ingress")
492 cluster_output = kubectl_get_clusterroles()
493 kube_output = kube_output + cluster_output
494 for addon in available_addons:
495 found = False
496 for row in kube_output.split("\n"):
497 if is_enabled(addon["check_status"], row):
498 enabled.append(addon)
499 found = True
500 break
501 if not found:
502 disabled.append(addon)
503
504 return enabled, disabled
505
506
507 def is_within_directory(directory, target):
508
509 abs_directory = os.path.abspath(directory)
510 abs_target = os.path.abspath(target)
511
512 prefix = os.path.commonprefix([abs_directory, abs_target])
513
514 return prefix == abs_directory
515
516
517 def safe_extract(tar, path=".", members=None, *, numeric_owner=False):
518
519 for member in tar.getmembers():
520 member_path = os.path.join(path, member.name)
521 if not is_within_directory(path, member_path):
522 raise Exception("Attempted Path Traversal in Tar File")
523
524 tar.extractall(path, members, numeric_owner=numeric_owner)
525
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/wrappers/common/utils.py b/scripts/wrappers/common/utils.py
--- a/scripts/wrappers/common/utils.py
+++ b/scripts/wrappers/common/utils.py
@@ -197,7 +197,7 @@
)
print("")
print(" sudo usermod -a -G microk8s {}".format(user))
- print(" sudo chown -f -R $USER ~/.kube")
+ print(" sudo chown -R $USER ~/.kube")
print("")
print(
"After this, reload the user groups either via a reboot or by running 'newgrp microk8s'."
| {"golden_diff": "diff --git a/scripts/wrappers/common/utils.py b/scripts/wrappers/common/utils.py\n--- a/scripts/wrappers/common/utils.py\n+++ b/scripts/wrappers/common/utils.py\n@@ -197,7 +197,7 @@\n )\n print(\"\")\n print(\" sudo usermod -a -G microk8s {}\".format(user))\n- print(\" sudo chown -f -R $USER ~/.kube\")\n+ print(\" sudo chown -R $USER ~/.kube\")\n print(\"\")\n print(\n \"After this, reload the user groups either via a reboot or by running 'newgrp microk8s'.\"\n", "issue": "microk8s suggests command hiding error info\n<!--\r\n Thank you for submitting an issue. Please fill in the template below\r\n information about the bug you encountered.\r\n-->\r\n\r\n#### Summary\r\n<!-- Please explain the bug in a few short sentences -->\r\n\r\nWhen you try to run microk8s with insufficient permissions, you get an error and suggestion:\r\n\r\n```console\r\n$ microk8s status\r\nInsufficient permissions to access MicroK8s.\r\nYou can either try again with sudo or add the user runner to the 'snap_microk8s' group:\r\n\r\n sudo usermod -a -G snap_microk8s runner\r\n sudo chown -f -R runner ~/.kube\r\n\r\nAfter this, reload the user groups either via a reboot or by running 'newgrp snap_microk8s'.\r\n```\r\n\r\nHowever, if you don't have a `~/.kube` file, the `chown` command will fail silently and suppress the error message. This can cause failures e.g. in GitHub runners / scripts with `set -e`, and the `-f` option will hide the error message. This makes it very hard to debug the failure as there are no log messages.\r\n\r\n#### What Should Happen Instead?\r\n<!-- Please explain what the expected behavior is -->\r\n\r\nSuggest to the user not to use the `-f` flag in `chown`. There is no reason to suppress the error message.\r\n```console\r\n$ microk8s status\r\nInsufficient permissions to access MicroK8s.\r\nYou can either try again with sudo or add the user runner to the 'snap_microk8s' group:\r\n\r\n sudo usermod -a -G snap_microk8s runner\r\n sudo chown -R runner ~/.kube\r\n\r\nAfter this, reload the user groups either via a reboot or by running 'newgrp snap_microk8s'.\r\n```\n", "before_files": [{"content": "import getpass\nimport json\nimport os\nimport platform\nimport subprocess\nimport sys\nimport time\nfrom pathlib import Path\nimport logging\n\nimport click\nimport yaml\n\nLOG = logging.getLogger(__name__)\n\nKUBECTL = os.path.expandvars(\"$SNAP/microk8s-kubectl.wrapper\")\n\n\ndef get_group():\n return \"snap_microk8s\" if is_strict() else \"microk8s\"\n\n\ndef is_strict():\n snap_yaml = snap() / \"meta/snap.yaml\"\n with open(snap_yaml) as f:\n snap_meta = yaml.safe_load(f)\n return snap_meta[\"confinement\"] == \"strict\"\n\n\ndef get_current_arch():\n # architecture mapping\n arch_mapping = {\n \"aarch64\": \"arm64\",\n \"armv7l\": \"armhf\",\n \"x86_64\": \"amd64\",\n \"s390x\": \"s390x\",\n \"ppc64le\": \"ppc64le\",\n \"ppc64el\": \"ppc64le\",\n }\n\n return arch_mapping[platform.machine()]\n\n\ndef snap() -> Path:\n try:\n return Path(os.environ[\"SNAP\"])\n except KeyError:\n return Path(\"/snap/microk8s/current\")\n\n\ndef snap_data() -> Path:\n try:\n return Path(os.environ[\"SNAP_DATA\"])\n except KeyError:\n return Path(\"/var/snap/microk8s/current\")\n\n\ndef snap_common() -> Path:\n try:\n return Path(os.environ[\"SNAP_COMMON\"])\n except KeyError:\n return Path(\"/var/snap/microk8s/common\")\n\n\ndef run(*args, die=True):\n # Add wrappers to $PATH\n env = os.environ.copy()\n env[\"PATH\"] += \":%s\" % os.environ[\"SNAP\"]\n result = subprocess.run(\n args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env\n )\n\n try:\n result.check_returncode()\n except subprocess.CalledProcessError as err:\n if die:\n if result.stderr:\n print(result.stderr.decode(\"utf-8\"))\n print(err)\n sys.exit(1)\n else:\n raise\n\n return result.stdout.decode(\"utf-8\")\n\n\ndef is_cluster_ready():\n try:\n service_output = kubectl_get(\"all\")\n node_output = kubectl_get(\"nodes\")\n # Make sure to compare with the word \" Ready \" with spaces.\n if \" Ready \" in node_output and \"service/kubernetes\" in service_output:\n return True\n else:\n return False\n except Exception:\n return False\n\n\ndef is_ha_enabled():\n ha_lock = os.path.expandvars(\"${SNAP_DATA}/var/lock/ha-cluster\")\n return os.path.isfile(ha_lock)\n\n\ndef get_dqlite_info():\n cluster_dir = os.path.expandvars(\"${SNAP_DATA}/var/kubernetes/backend\")\n snap_path = os.environ.get(\"SNAP\")\n\n info = []\n\n if not is_ha_enabled():\n return info\n\n waits = 10\n while waits > 0:\n try:\n with open(\"{}/info.yaml\".format(cluster_dir), mode=\"r\") as f:\n data = yaml.safe_load(f)\n out = subprocess.check_output(\n \"{snappath}/bin/dqlite -s file://{dbdir}/cluster.yaml -c {dbdir}/cluster.crt \"\n \"-k {dbdir}/cluster.key -f json k8s .cluster\".format(\n snappath=snap_path, dbdir=cluster_dir\n ).split(),\n timeout=4,\n stderr=subprocess.DEVNULL,\n )\n if data[\"Address\"] in out.decode():\n break\n else:\n time.sleep(5)\n waits -= 1\n except (subprocess.CalledProcessError, subprocess.TimeoutExpired):\n time.sleep(2)\n waits -= 1\n\n if waits == 0:\n return info\n\n nodes = json.loads(out.decode())\n for n in nodes:\n if n[\"Role\"] == 0:\n info.append((n[\"Address\"], \"voter\"))\n if n[\"Role\"] == 1:\n info.append((n[\"Address\"], \"standby\"))\n if n[\"Role\"] == 2:\n info.append((n[\"Address\"], \"spare\"))\n return info\n\n\ndef is_cluster_locked():\n if (snap_data() / \"var/lock/clustered.lock\").exists():\n click.echo(\"This MicroK8s deployment is acting as a node in a cluster.\")\n click.echo(\"Please use the master node.\")\n sys.exit(1)\n\n\ndef wait_for_ready(timeout):\n start_time = time.time()\n\n while True:\n if is_cluster_ready():\n return True\n elif timeout and time.time() > start_time + timeout:\n return False\n else:\n time.sleep(2)\n\n\ndef exit_if_no_root():\n \"\"\"\n Exit if the user is not root\n \"\"\"\n if not os.geteuid() == 0:\n click.echo(\n \"Elevated permissions is needed for this operation. Please run this command with sudo.\"\n )\n exit(50)\n\n\ndef exit_if_stopped():\n stoppedLockFile = os.path.expandvars(\"${SNAP_DATA}/var/lock/stopped.lock\")\n if os.path.isfile(stoppedLockFile):\n print(\"microk8s is not running, try microk8s start\")\n exit(0)\n\n\ndef exit_if_no_permission():\n user = getpass.getuser()\n # test if we can access the default kubeconfig\n clientConfigFile = os.path.expandvars(\"${SNAP_DATA}/credentials/client.config\")\n if not os.access(clientConfigFile, os.R_OK):\n print(\"Insufficient permissions to access MicroK8s.\")\n print(\n \"You can either try again with sudo or add the user {} to the 'microk8s' group:\".format(\n user\n )\n )\n print(\"\")\n print(\" sudo usermod -a -G microk8s {}\".format(user))\n print(\" sudo chown -f -R $USER ~/.kube\")\n print(\"\")\n print(\n \"After this, reload the user groups either via a reboot or by running 'newgrp microk8s'.\"\n )\n exit(1)\n\n\ndef ensure_started():\n if (snap_data() / \"var/lock/stopped.lock\").exists():\n click.echo(\"microk8s is not running, try microk8s start\", err=True)\n sys.exit(1)\n\n\ndef kubectl_get(cmd, namespace=\"--all-namespaces\"):\n if namespace == \"--all-namespaces\":\n return run(KUBECTL, \"get\", cmd, \"--all-namespaces\", die=False)\n else:\n return run(KUBECTL, \"get\", cmd, \"-n\", namespace, die=False)\n\n\ndef kubectl_get_clusterroles():\n return run(\n KUBECTL,\n \"get\",\n \"clusterroles\",\n \"--show-kind\",\n \"--no-headers\",\n die=False,\n )\n\n\ndef is_community_addon(arch, addon_name):\n \"\"\"\n Check if an addon is part of the community repo.\n\n :param arch: architecture of the addon we are looking for\n :param addon_name: name of the addon we are looking for\n :return: True if the addon is in the community repo\n \"\"\"\n try:\n addons_yaml = f\"{os.environ['SNAP']}/addons/community/addons.yaml\"\n with open(addons_yaml, \"r\") as fin:\n addons = yaml.safe_load(fin)\n\n for addon in addons[\"microk8s-addons\"][\"addons\"]:\n if arch in addon[\"supported_architectures\"]:\n if addon_name == addon[\"name\"]:\n return True\n except Exception:\n LOG.exception(\"could not load addons from %s\", addons_yaml)\n\n return False\n\n\ndef get_available_addons(arch):\n available = []\n strict = is_strict()\n for dir in os.listdir(snap_common() / \"addons\"):\n try:\n addons_yaml = snap_common() / \"addons\" / dir / \"addons.yaml\"\n with open(addons_yaml, \"r\") as fin:\n addons = yaml.safe_load(fin)\n\n for addon in addons[\"microk8s-addons\"][\"addons\"]:\n if arch not in addon[\"supported_architectures\"]:\n continue\n\n if \"confinement\" in addon:\n if strict and \"strict\" not in addon[\"confinement\"]:\n continue\n if not strict and \"classic\" not in addon[\"confinement\"]:\n continue\n\n available.append({**addon, \"repository\": dir})\n\n except Exception:\n LOG.exception(\"could not load addons from %s\", addons_yaml)\n\n available = sorted(available, key=lambda k: (k[\"repository\"], k[\"name\"]))\n return available\n\n\ndef get_addon_by_name(addons, name):\n filtered_addon = []\n\n parts = name.split(\"/\")\n if len(parts) == 1:\n repo_name, addon_name = None, parts[0]\n elif len(parts) == 2:\n repo_name, addon_name = parts[0], parts[1]\n else:\n # just fallback to the addon name\n repo_name, addon_name = None, name\n\n for addon in addons:\n if addon_name == addon[\"name\"] and (repo_name == addon[\"repository\"] or not repo_name):\n filtered_addon.append(addon)\n\n return filtered_addon\n\n\ndef is_service_expected_to_start(service):\n \"\"\"\n Check if a service is supposed to start\n :param service: the service name\n :return: True if the service is meant to start\n \"\"\"\n lock_path = os.path.expandvars(\"${SNAP_DATA}/var/lock\")\n lock = \"{}/{}\".format(lock_path, service)\n return os.path.exists(lock_path) and not os.path.isfile(lock)\n\n\ndef set_service_expected_to_start(service, start=True):\n \"\"\"\n Check if a service is not expected to start.\n :param service: the service name\n :param start: should the service start or not\n \"\"\"\n lock_path = os.path.expandvars(\"${SNAP_DATA}/var/lock\")\n lock = \"{}/{}\".format(lock_path, service)\n if start:\n os.remove(lock)\n else:\n fd = os.open(lock, os.O_CREAT, mode=0o700)\n os.close(fd)\n\n\ndef check_help_flag(addons: list) -> bool:\n \"\"\"Checks to see if a help message needs to be printed for an addon.\n\n Not all addons check for help flags themselves. Until they do, intercept\n calls to print help text and print out a generic message to that effect.\n \"\"\"\n addon = addons[0]\n if any(help_arg in addons for help_arg in (\"-h\", \"--help\")):\n print(\"Addon %s does not yet have a help message.\" % addon)\n print(\"For more information about it, visit https://microk8s.io/docs/addons\")\n return True\n return False\n\n\ndef parse_xable_addon_args(addon_args: list, available_addons: list):\n \"\"\"\n Parse the list of addons passed into the microk8s enable or disable commands.\n Further, it will infer the repository name for addons when possible.\n If any errors are encountered, we print them to stderr and exit.\n\n :param addon_args: The parameters passed to the microk8s enable command\n :param available_addons: List of available addons as (repo_name, addon_name) tuples\n\n Handles the following cases:\n - microk8s enable foo bar:--baz # enable many addons, inline arguments\n - microk8s enable bar --baz # enable one addon, unix style command line arguments\n\n :return: a list of (repo_name, addon_name, args) tuples\n \"\"\"\n\n # Backwards compatibility with enabling multiple addons at once, e.g.\n # `microk8s.enable foo bar:\"baz\"`\n available_addon_names = [addon_name for (_, addon_name) in available_addons]\n available_addon_names += [\n \"/\".join([repo_name, addon_name]) for (repo_name, addon_name) in available_addons\n ]\n addon_names = [arg.split(\":\")[0] for arg in addon_args]\n if set(addon_names) < set(available_addon_names):\n return [parse_xable_single_arg(addon_arg, available_addons) for addon_arg in addon_args]\n\n # The new way of xabling addons, that allows for unix-style argument passing,\n # such as `microk8s.enable foo --bar`.\n repo_name, addon_name, args = parse_xable_single_arg(addon_args[0], available_addons)\n if args and addon_args[1:]:\n click.echo(\n \"Can't pass string arguments and flag arguments simultaneously!\\n\"\n \"Enable or disable addons with only one argument style at a time:\\n\"\n \"\\n\"\n \" microk8s enable foo:'bar'\\n\"\n \"or\\n\"\n \" microk8s enable foo --bar\\n\"\n )\n sys.exit(1)\n\n return [(repo_name, addon_name, addon_args[1:])]\n\n\ndef parse_xable_single_arg(addon_arg: str, available_addons: list):\n \"\"\"\n Parse an addon arg of the following form: `(repo_name/)addon_name(:args)`\n It will automatically infer the repository name if not specified. If multiple repositories\n are found for the addon, we print an error and exit.\n\n :param addon_arg: A parameter passed to the microk8s enable command\n :param available_addons: List of available addons as (repo_name, addon_name) tuples\n\n :return: a (repo_name, addon_name, args) tuple\n \"\"\"\n addon_name, *args = addon_arg.split(\":\")\n parts = addon_name.split(\"/\")\n if len(parts) == 2:\n return (parts[0], parts[1], args)\n elif len(parts) == 1:\n matching_repos = [repo for (repo, addon) in available_addons if addon == addon_name]\n if len(matching_repos) == 0:\n click.echo(\"Addon {} was not found in any repository\".format(addon_name), err=True)\n if is_community_addon(get_current_arch(), addon_name):\n click.echo(\n \"To use the community maintained flavor enable the respective repository:\"\n )\n click.echo(\"\")\n click.echo(\" microk8s enable community\")\n click.echo(\"\")\n\n sys.exit(1)\n elif len(matching_repos) == 1:\n click.echo(\n \"Infer repository {} for addon {}\".format(matching_repos[0], addon_name), err=True\n )\n return (matching_repos[0], addon_name, args)\n else:\n click.echo(\n \"Addon {} exists in more than repository. Please explicitly specify\\n\"\n \"the repository using any of:\\n\".format(addon_name),\n err=True,\n )\n for repo in matching_repos:\n click.echo(\" {}/{}\".format(repo, addon_name), err=True)\n click.echo(\"\", err=True)\n sys.exit(1)\n\n else:\n click.echo(\"Invalid addon name {}\".format(addon_name))\n sys.exit(1)\n\n\ndef xable(action: str, addon_args: list):\n \"\"\"Enables or disables the given addons.\n\n Collated into a single function since the logic is identical other than\n the script names.\n\n :param action: \"enable\" or \"disable\"\n :param addons: List of addons to enable. Each addon may be prefixed with `repository/`\n to specify which addon repository it will be sourced from.\n \"\"\"\n available_addons_info = get_available_addons(get_current_arch())\n enabled_addons_info, disabled_addons_info = get_status(available_addons_info, True)\n if action == \"enable\":\n xabled_addons_info = enabled_addons_info\n elif action == \"disable\":\n xabled_addons_info = disabled_addons_info\n else:\n click.echo(\"Invalid action {}. Only enable and disable are supported\".format(action))\n sys.exit(1)\n\n # available_addons is a list of (repo_name, addon_name) tuples for all available addons\n available_addons = [(addon[\"repository\"], addon[\"name\"]) for addon in available_addons_info]\n # xabled_addons is a list (repo_name, addon_name) tuples of already xabled addons\n xabled_addons = [(addon[\"repository\"], addon[\"name\"]) for addon in xabled_addons_info]\n\n addons = parse_xable_addon_args(addon_args, available_addons)\n\n for repo_name, addon_name, args in addons:\n if (repo_name, addon_name) not in available_addons:\n click.echo(\"Addon {}/{} not found\".format(repo_name, addon_name))\n continue\n if (repo_name, addon_name) in xabled_addons:\n click.echo(\"Addon {}/{} is already {}d\".format(repo_name, addon_name, action))\n continue\n\n wait_for_ready(timeout=30)\n p = subprocess.run(\n [snap_common() / \"addons\" / repo_name / \"addons\" / addon_name / action, *args]\n )\n if p.returncode:\n sys.exit(p.returncode)\n wait_for_ready(timeout=30)\n\n\ndef is_enabled(addon, item):\n if addon in item:\n return True\n else:\n filepath = os.path.expandvars(addon)\n return os.path.isfile(filepath)\n\n\ndef get_status(available_addons, isReady):\n enabled = []\n disabled = []\n if isReady:\n # 'all' does not include ingress\n kube_output = kubectl_get(\"all,ingress\")\n cluster_output = kubectl_get_clusterroles()\n kube_output = kube_output + cluster_output\n for addon in available_addons:\n found = False\n for row in kube_output.split(\"\\n\"):\n if is_enabled(addon[\"check_status\"], row):\n enabled.append(addon)\n found = True\n break\n if not found:\n disabled.append(addon)\n\n return enabled, disabled\n\n\ndef is_within_directory(directory, target):\n\n abs_directory = os.path.abspath(directory)\n abs_target = os.path.abspath(target)\n\n prefix = os.path.commonprefix([abs_directory, abs_target])\n\n return prefix == abs_directory\n\n\ndef safe_extract(tar, path=\".\", members=None, *, numeric_owner=False):\n\n for member in tar.getmembers():\n member_path = os.path.join(path, member.name)\n if not is_within_directory(path, member_path):\n raise Exception(\"Attempted Path Traversal in Tar File\")\n\n tar.extractall(path, members, numeric_owner=numeric_owner)\n", "path": "scripts/wrappers/common/utils.py"}], "after_files": [{"content": "import getpass\nimport json\nimport os\nimport platform\nimport subprocess\nimport sys\nimport time\nfrom pathlib import Path\nimport logging\n\nimport click\nimport yaml\n\nLOG = logging.getLogger(__name__)\n\nKUBECTL = os.path.expandvars(\"$SNAP/microk8s-kubectl.wrapper\")\n\n\ndef get_group():\n return \"snap_microk8s\" if is_strict() else \"microk8s\"\n\n\ndef is_strict():\n snap_yaml = snap() / \"meta/snap.yaml\"\n with open(snap_yaml) as f:\n snap_meta = yaml.safe_load(f)\n return snap_meta[\"confinement\"] == \"strict\"\n\n\ndef get_current_arch():\n # architecture mapping\n arch_mapping = {\n \"aarch64\": \"arm64\",\n \"armv7l\": \"armhf\",\n \"x86_64\": \"amd64\",\n \"s390x\": \"s390x\",\n \"ppc64le\": \"ppc64le\",\n \"ppc64el\": \"ppc64le\",\n }\n\n return arch_mapping[platform.machine()]\n\n\ndef snap() -> Path:\n try:\n return Path(os.environ[\"SNAP\"])\n except KeyError:\n return Path(\"/snap/microk8s/current\")\n\n\ndef snap_data() -> Path:\n try:\n return Path(os.environ[\"SNAP_DATA\"])\n except KeyError:\n return Path(\"/var/snap/microk8s/current\")\n\n\ndef snap_common() -> Path:\n try:\n return Path(os.environ[\"SNAP_COMMON\"])\n except KeyError:\n return Path(\"/var/snap/microk8s/common\")\n\n\ndef run(*args, die=True):\n # Add wrappers to $PATH\n env = os.environ.copy()\n env[\"PATH\"] += \":%s\" % os.environ[\"SNAP\"]\n result = subprocess.run(\n args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env\n )\n\n try:\n result.check_returncode()\n except subprocess.CalledProcessError as err:\n if die:\n if result.stderr:\n print(result.stderr.decode(\"utf-8\"))\n print(err)\n sys.exit(1)\n else:\n raise\n\n return result.stdout.decode(\"utf-8\")\n\n\ndef is_cluster_ready():\n try:\n service_output = kubectl_get(\"all\")\n node_output = kubectl_get(\"nodes\")\n # Make sure to compare with the word \" Ready \" with spaces.\n if \" Ready \" in node_output and \"service/kubernetes\" in service_output:\n return True\n else:\n return False\n except Exception:\n return False\n\n\ndef is_ha_enabled():\n ha_lock = os.path.expandvars(\"${SNAP_DATA}/var/lock/ha-cluster\")\n return os.path.isfile(ha_lock)\n\n\ndef get_dqlite_info():\n cluster_dir = os.path.expandvars(\"${SNAP_DATA}/var/kubernetes/backend\")\n snap_path = os.environ.get(\"SNAP\")\n\n info = []\n\n if not is_ha_enabled():\n return info\n\n waits = 10\n while waits > 0:\n try:\n with open(\"{}/info.yaml\".format(cluster_dir), mode=\"r\") as f:\n data = yaml.safe_load(f)\n out = subprocess.check_output(\n \"{snappath}/bin/dqlite -s file://{dbdir}/cluster.yaml -c {dbdir}/cluster.crt \"\n \"-k {dbdir}/cluster.key -f json k8s .cluster\".format(\n snappath=snap_path, dbdir=cluster_dir\n ).split(),\n timeout=4,\n stderr=subprocess.DEVNULL,\n )\n if data[\"Address\"] in out.decode():\n break\n else:\n time.sleep(5)\n waits -= 1\n except (subprocess.CalledProcessError, subprocess.TimeoutExpired):\n time.sleep(2)\n waits -= 1\n\n if waits == 0:\n return info\n\n nodes = json.loads(out.decode())\n for n in nodes:\n if n[\"Role\"] == 0:\n info.append((n[\"Address\"], \"voter\"))\n if n[\"Role\"] == 1:\n info.append((n[\"Address\"], \"standby\"))\n if n[\"Role\"] == 2:\n info.append((n[\"Address\"], \"spare\"))\n return info\n\n\ndef is_cluster_locked():\n if (snap_data() / \"var/lock/clustered.lock\").exists():\n click.echo(\"This MicroK8s deployment is acting as a node in a cluster.\")\n click.echo(\"Please use the master node.\")\n sys.exit(1)\n\n\ndef wait_for_ready(timeout):\n start_time = time.time()\n\n while True:\n if is_cluster_ready():\n return True\n elif timeout and time.time() > start_time + timeout:\n return False\n else:\n time.sleep(2)\n\n\ndef exit_if_no_root():\n \"\"\"\n Exit if the user is not root\n \"\"\"\n if not os.geteuid() == 0:\n click.echo(\n \"Elevated permissions is needed for this operation. Please run this command with sudo.\"\n )\n exit(50)\n\n\ndef exit_if_stopped():\n stoppedLockFile = os.path.expandvars(\"${SNAP_DATA}/var/lock/stopped.lock\")\n if os.path.isfile(stoppedLockFile):\n print(\"microk8s is not running, try microk8s start\")\n exit(0)\n\n\ndef exit_if_no_permission():\n user = getpass.getuser()\n # test if we can access the default kubeconfig\n clientConfigFile = os.path.expandvars(\"${SNAP_DATA}/credentials/client.config\")\n if not os.access(clientConfigFile, os.R_OK):\n print(\"Insufficient permissions to access MicroK8s.\")\n print(\n \"You can either try again with sudo or add the user {} to the 'microk8s' group:\".format(\n user\n )\n )\n print(\"\")\n print(\" sudo usermod -a -G microk8s {}\".format(user))\n print(\" sudo chown -R $USER ~/.kube\")\n print(\"\")\n print(\n \"After this, reload the user groups either via a reboot or by running 'newgrp microk8s'.\"\n )\n exit(1)\n\n\ndef ensure_started():\n if (snap_data() / \"var/lock/stopped.lock\").exists():\n click.echo(\"microk8s is not running, try microk8s start\", err=True)\n sys.exit(1)\n\n\ndef kubectl_get(cmd, namespace=\"--all-namespaces\"):\n if namespace == \"--all-namespaces\":\n return run(KUBECTL, \"get\", cmd, \"--all-namespaces\", die=False)\n else:\n return run(KUBECTL, \"get\", cmd, \"-n\", namespace, die=False)\n\n\ndef kubectl_get_clusterroles():\n return run(\n KUBECTL,\n \"get\",\n \"clusterroles\",\n \"--show-kind\",\n \"--no-headers\",\n die=False,\n )\n\n\ndef is_community_addon(arch, addon_name):\n \"\"\"\n Check if an addon is part of the community repo.\n\n :param arch: architecture of the addon we are looking for\n :param addon_name: name of the addon we are looking for\n :return: True if the addon is in the community repo\n \"\"\"\n try:\n addons_yaml = f\"{os.environ['SNAP']}/addons/community/addons.yaml\"\n with open(addons_yaml, \"r\") as fin:\n addons = yaml.safe_load(fin)\n\n for addon in addons[\"microk8s-addons\"][\"addons\"]:\n if arch in addon[\"supported_architectures\"]:\n if addon_name == addon[\"name\"]:\n return True\n except Exception:\n LOG.exception(\"could not load addons from %s\", addons_yaml)\n\n return False\n\n\ndef get_available_addons(arch):\n available = []\n strict = is_strict()\n for dir in os.listdir(snap_common() / \"addons\"):\n try:\n addons_yaml = snap_common() / \"addons\" / dir / \"addons.yaml\"\n with open(addons_yaml, \"r\") as fin:\n addons = yaml.safe_load(fin)\n\n for addon in addons[\"microk8s-addons\"][\"addons\"]:\n if arch not in addon[\"supported_architectures\"]:\n continue\n\n if \"confinement\" in addon:\n if strict and \"strict\" not in addon[\"confinement\"]:\n continue\n if not strict and \"classic\" not in addon[\"confinement\"]:\n continue\n\n available.append({**addon, \"repository\": dir})\n\n except Exception:\n LOG.exception(\"could not load addons from %s\", addons_yaml)\n\n available = sorted(available, key=lambda k: (k[\"repository\"], k[\"name\"]))\n return available\n\n\ndef get_addon_by_name(addons, name):\n filtered_addon = []\n\n parts = name.split(\"/\")\n if len(parts) == 1:\n repo_name, addon_name = None, parts[0]\n elif len(parts) == 2:\n repo_name, addon_name = parts[0], parts[1]\n else:\n # just fallback to the addon name\n repo_name, addon_name = None, name\n\n for addon in addons:\n if addon_name == addon[\"name\"] and (repo_name == addon[\"repository\"] or not repo_name):\n filtered_addon.append(addon)\n\n return filtered_addon\n\n\ndef is_service_expected_to_start(service):\n \"\"\"\n Check if a service is supposed to start\n :param service: the service name\n :return: True if the service is meant to start\n \"\"\"\n lock_path = os.path.expandvars(\"${SNAP_DATA}/var/lock\")\n lock = \"{}/{}\".format(lock_path, service)\n return os.path.exists(lock_path) and not os.path.isfile(lock)\n\n\ndef set_service_expected_to_start(service, start=True):\n \"\"\"\n Check if a service is not expected to start.\n :param service: the service name\n :param start: should the service start or not\n \"\"\"\n lock_path = os.path.expandvars(\"${SNAP_DATA}/var/lock\")\n lock = \"{}/{}\".format(lock_path, service)\n if start:\n os.remove(lock)\n else:\n fd = os.open(lock, os.O_CREAT, mode=0o700)\n os.close(fd)\n\n\ndef check_help_flag(addons: list) -> bool:\n \"\"\"Checks to see if a help message needs to be printed for an addon.\n\n Not all addons check for help flags themselves. Until they do, intercept\n calls to print help text and print out a generic message to that effect.\n \"\"\"\n addon = addons[0]\n if any(help_arg in addons for help_arg in (\"-h\", \"--help\")):\n print(\"Addon %s does not yet have a help message.\" % addon)\n print(\"For more information about it, visit https://microk8s.io/docs/addons\")\n return True\n return False\n\n\ndef parse_xable_addon_args(addon_args: list, available_addons: list):\n \"\"\"\n Parse the list of addons passed into the microk8s enable or disable commands.\n Further, it will infer the repository name for addons when possible.\n If any errors are encountered, we print them to stderr and exit.\n\n :param addon_args: The parameters passed to the microk8s enable command\n :param available_addons: List of available addons as (repo_name, addon_name) tuples\n\n Handles the following cases:\n - microk8s enable foo bar:--baz # enable many addons, inline arguments\n - microk8s enable bar --baz # enable one addon, unix style command line arguments\n\n :return: a list of (repo_name, addon_name, args) tuples\n \"\"\"\n\n # Backwards compatibility with enabling multiple addons at once, e.g.\n # `microk8s.enable foo bar:\"baz\"`\n available_addon_names = [addon_name for (_, addon_name) in available_addons]\n available_addon_names += [\n \"/\".join([repo_name, addon_name]) for (repo_name, addon_name) in available_addons\n ]\n addon_names = [arg.split(\":\")[0] for arg in addon_args]\n if set(addon_names) < set(available_addon_names):\n return [parse_xable_single_arg(addon_arg, available_addons) for addon_arg in addon_args]\n\n # The new way of xabling addons, that allows for unix-style argument passing,\n # such as `microk8s.enable foo --bar`.\n repo_name, addon_name, args = parse_xable_single_arg(addon_args[0], available_addons)\n if args and addon_args[1:]:\n click.echo(\n \"Can't pass string arguments and flag arguments simultaneously!\\n\"\n \"Enable or disable addons with only one argument style at a time:\\n\"\n \"\\n\"\n \" microk8s enable foo:'bar'\\n\"\n \"or\\n\"\n \" microk8s enable foo --bar\\n\"\n )\n sys.exit(1)\n\n return [(repo_name, addon_name, addon_args[1:])]\n\n\ndef parse_xable_single_arg(addon_arg: str, available_addons: list):\n \"\"\"\n Parse an addon arg of the following form: `(repo_name/)addon_name(:args)`\n It will automatically infer the repository name if not specified. If multiple repositories\n are found for the addon, we print an error and exit.\n\n :param addon_arg: A parameter passed to the microk8s enable command\n :param available_addons: List of available addons as (repo_name, addon_name) tuples\n\n :return: a (repo_name, addon_name, args) tuple\n \"\"\"\n addon_name, *args = addon_arg.split(\":\")\n parts = addon_name.split(\"/\")\n if len(parts) == 2:\n return (parts[0], parts[1], args)\n elif len(parts) == 1:\n matching_repos = [repo for (repo, addon) in available_addons if addon == addon_name]\n if len(matching_repos) == 0:\n click.echo(\"Addon {} was not found in any repository\".format(addon_name), err=True)\n if is_community_addon(get_current_arch(), addon_name):\n click.echo(\n \"To use the community maintained flavor enable the respective repository:\"\n )\n click.echo(\"\")\n click.echo(\" microk8s enable community\")\n click.echo(\"\")\n\n sys.exit(1)\n elif len(matching_repos) == 1:\n click.echo(\n \"Infer repository {} for addon {}\".format(matching_repos[0], addon_name), err=True\n )\n return (matching_repos[0], addon_name, args)\n else:\n click.echo(\n \"Addon {} exists in more than repository. Please explicitly specify\\n\"\n \"the repository using any of:\\n\".format(addon_name),\n err=True,\n )\n for repo in matching_repos:\n click.echo(\" {}/{}\".format(repo, addon_name), err=True)\n click.echo(\"\", err=True)\n sys.exit(1)\n\n else:\n click.echo(\"Invalid addon name {}\".format(addon_name))\n sys.exit(1)\n\n\ndef xable(action: str, addon_args: list):\n \"\"\"Enables or disables the given addons.\n\n Collated into a single function since the logic is identical other than\n the script names.\n\n :param action: \"enable\" or \"disable\"\n :param addons: List of addons to enable. Each addon may be prefixed with `repository/`\n to specify which addon repository it will be sourced from.\n \"\"\"\n available_addons_info = get_available_addons(get_current_arch())\n enabled_addons_info, disabled_addons_info = get_status(available_addons_info, True)\n if action == \"enable\":\n xabled_addons_info = enabled_addons_info\n elif action == \"disable\":\n xabled_addons_info = disabled_addons_info\n else:\n click.echo(\"Invalid action {}. Only enable and disable are supported\".format(action))\n sys.exit(1)\n\n # available_addons is a list of (repo_name, addon_name) tuples for all available addons\n available_addons = [(addon[\"repository\"], addon[\"name\"]) for addon in available_addons_info]\n # xabled_addons is a list (repo_name, addon_name) tuples of already xabled addons\n xabled_addons = [(addon[\"repository\"], addon[\"name\"]) for addon in xabled_addons_info]\n\n addons = parse_xable_addon_args(addon_args, available_addons)\n\n for repo_name, addon_name, args in addons:\n if (repo_name, addon_name) not in available_addons:\n click.echo(\"Addon {}/{} not found\".format(repo_name, addon_name))\n continue\n if (repo_name, addon_name) in xabled_addons:\n click.echo(\"Addon {}/{} is already {}d\".format(repo_name, addon_name, action))\n continue\n\n wait_for_ready(timeout=30)\n p = subprocess.run(\n [snap_common() / \"addons\" / repo_name / \"addons\" / addon_name / action, *args]\n )\n if p.returncode:\n sys.exit(p.returncode)\n wait_for_ready(timeout=30)\n\n\ndef is_enabled(addon, item):\n if addon in item:\n return True\n else:\n filepath = os.path.expandvars(addon)\n return os.path.isfile(filepath)\n\n\ndef get_status(available_addons, isReady):\n enabled = []\n disabled = []\n if isReady:\n # 'all' does not include ingress\n kube_output = kubectl_get(\"all,ingress\")\n cluster_output = kubectl_get_clusterroles()\n kube_output = kube_output + cluster_output\n for addon in available_addons:\n found = False\n for row in kube_output.split(\"\\n\"):\n if is_enabled(addon[\"check_status\"], row):\n enabled.append(addon)\n found = True\n break\n if not found:\n disabled.append(addon)\n\n return enabled, disabled\n\n\ndef is_within_directory(directory, target):\n\n abs_directory = os.path.abspath(directory)\n abs_target = os.path.abspath(target)\n\n prefix = os.path.commonprefix([abs_directory, abs_target])\n\n return prefix == abs_directory\n\n\ndef safe_extract(tar, path=\".\", members=None, *, numeric_owner=False):\n\n for member in tar.getmembers():\n member_path = os.path.join(path, member.name)\n if not is_within_directory(path, member_path):\n raise Exception(\"Attempted Path Traversal in Tar File\")\n\n tar.extractall(path, members, numeric_owner=numeric_owner)\n", "path": "scripts/wrappers/common/utils.py"}]} |
gh_patches_debug_1562 | rasdani/github-patches | git_diff | Flexget__Flexget-3648 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error on scheduler: Only timezones from the pytz library are supported
### Steps to reproduce:
- Step 1: `flexget -L verbose daemon start`
#### Config:
```yaml
schedules:
- tasks: ['some-task']
interval:
hours: 1
```
#### Backtrace:
```
File "/home/pi/.local/lib/python3.9/site-packages/flexget/__init__.py", line 44, in main
manager.start()
File "/home/pi/.local/lib/python3.9/site-packages/flexget/manager.py", line 383, in start
self.handle_cli()
File "/home/pi/.local/lib/python3.9/site-packages/flexget/manager.py", line 409, in handle_cli
self.daemon_command(command_options)
File "/home/pi/.local/lib/python3.9/site-packages/flexget/manager.py", line 506, in daemon_command
run_daemon()
File "/home/pi/.local/lib/python3.9/site-packages/flexget/manager.py", line 487, in run_daemon
fire_event('manager.daemon.started', self)
File "/home/pi/.local/lib/python3.9/site-packages/flexget/event.py", line 109, in fire_event
result = event(*args, **kwargs)
File "/home/pi/.local/lib/python3.9/site-packages/flexget/event.py", line 20, in __call__
return self.func(*args, **kwargs)
File "/home/pi/.local/lib/python3.9/site-packages/flexget/components/scheduler/scheduler.py", line 126, in setup_scheduler
scheduler = BackgroundScheduler(
File "/home/pi/.local/lib/python3.9/site-packages/apscheduler/schedulers/base.py", line 87, in __init__
self.configure(gconfig, **options)
File "/home/pi/.local/lib/python3.9/site-packages/apscheduler/schedulers/base.py", line 126, in configure
self._configure(config)
File "/home/pi/.local/lib/python3.9/site-packages/apscheduler/schedulers/background.py", line 29, in _configure
super(BackgroundScheduler, self)._configure(config)
File "/home/pi/.local/lib/python3.9/site-packages/apscheduler/schedulers/base.py", line 697, in _configure
self.timezone = astimezone(config.pop('timezone', None)) or get_localzone()
File "/home/pi/.local/lib/python3.9/site-packages/apscheduler/util.py", line 93, in astimezone
raise TypeError('Only timezones from the pytz library are supported')
TypeError: Only timezones from the pytz library are supported
```
### Additional information:
- FlexGet version: 3.5.2
- Python version: 3.9.2
- Installation method:
- Using daemon (yes/no): yes
It seems to have started after https://github.com/Flexget/Flexget/pull/3453 that change the timezone argument to a non-pytz compatible object.
Error on scheduler: Only timezones from the pytz library are supported
### Steps to reproduce:
- Step 1: `flexget -L verbose daemon start`
#### Config:
```yaml
schedules:
- tasks: ['some-task']
interval:
hours: 1
```
#### Backtrace:
```
File "/home/pi/.local/lib/python3.9/site-packages/flexget/__init__.py", line 44, in main
manager.start()
File "/home/pi/.local/lib/python3.9/site-packages/flexget/manager.py", line 383, in start
self.handle_cli()
File "/home/pi/.local/lib/python3.9/site-packages/flexget/manager.py", line 409, in handle_cli
self.daemon_command(command_options)
File "/home/pi/.local/lib/python3.9/site-packages/flexget/manager.py", line 506, in daemon_command
run_daemon()
File "/home/pi/.local/lib/python3.9/site-packages/flexget/manager.py", line 487, in run_daemon
fire_event('manager.daemon.started', self)
File "/home/pi/.local/lib/python3.9/site-packages/flexget/event.py", line 109, in fire_event
result = event(*args, **kwargs)
File "/home/pi/.local/lib/python3.9/site-packages/flexget/event.py", line 20, in __call__
return self.func(*args, **kwargs)
File "/home/pi/.local/lib/python3.9/site-packages/flexget/components/scheduler/scheduler.py", line 126, in setup_scheduler
scheduler = BackgroundScheduler(
File "/home/pi/.local/lib/python3.9/site-packages/apscheduler/schedulers/base.py", line 87, in __init__
self.configure(gconfig, **options)
File "/home/pi/.local/lib/python3.9/site-packages/apscheduler/schedulers/base.py", line 126, in configure
self._configure(config)
File "/home/pi/.local/lib/python3.9/site-packages/apscheduler/schedulers/background.py", line 29, in _configure
super(BackgroundScheduler, self)._configure(config)
File "/home/pi/.local/lib/python3.9/site-packages/apscheduler/schedulers/base.py", line 697, in _configure
self.timezone = astimezone(config.pop('timezone', None)) or get_localzone()
File "/home/pi/.local/lib/python3.9/site-packages/apscheduler/util.py", line 93, in astimezone
raise TypeError('Only timezones from the pytz library are supported')
TypeError: Only timezones from the pytz library are supported
```
### Additional information:
- FlexGet version: 3.5.2
- Python version: 3.9.2
- Installation method:
- Using daemon (yes/no): yes
It seems to have started after https://github.com/Flexget/Flexget/pull/3453 that change the timezone argument to a non-pytz compatible object.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flexget/components/scheduler/scheduler.py`
Content:
```
1 import datetime
2 import hashlib
3 import logging
4 import os
5 import struct
6
7 from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
8 from apscheduler.schedulers.background import BackgroundScheduler
9 from apscheduler.triggers.cron import CronTrigger
10 from loguru import logger
11
12 from flexget.config_schema import format_checker, register_config_key, register_schema
13 from flexget.event import event
14 from flexget.manager import manager
15 from flexget.utils import json
16
17 logger = logger.bind(name='scheduler')
18
19
20 # Add a format checker for more detailed errors on cron type schedules
21 @format_checker.checks('cron_schedule', raises=ValueError)
22 def is_cron_schedule(instance):
23 if not isinstance(instance, dict):
24 return True
25 try:
26 return CronTrigger(**instance)
27 except TypeError:
28 # A more specific error message about which key will also be shown by properties schema keyword
29 raise ValueError('Invalid key for schedule.')
30
31
32 DEFAULT_SCHEDULES = [{'tasks': ['*'], 'interval': {'hours': 1}}]
33
34 UNITS = ['minutes', 'hours', 'days', 'weeks']
35 interval_schema = {
36 'type': 'object',
37 'title': 'Simple Interval',
38 'properties': {
39 'minutes': {'type': 'number'},
40 'hours': {'type': 'number'},
41 'days': {'type': 'number'},
42 'weeks': {'type': 'number'},
43 'jitter': {'type': 'integer'},
44 },
45 'anyOf': [{'required': [unit]} for unit in UNITS],
46 'error_anyOf': 'Interval must be specified as one or more of %s' % ', '.join(UNITS),
47 'additionalProperties': False,
48 }
49
50 cron_schema = {
51 'type': 'object',
52 'title': 'Advanced Cron Interval',
53 'properties': {
54 'year': {'type': ['integer', 'string']},
55 'month': {'type': ['integer', 'string']},
56 'day': {'type': ['integer', 'string']},
57 'week': {'type': ['integer', 'string']},
58 'day_of_week': {'type': ['integer', 'string']},
59 'hour': {'type': ['integer', 'string']},
60 'minute': {'type': ['integer', 'string']},
61 'jitter': {'type': 'integer'},
62 },
63 'additionalProperties': False,
64 }
65
66 schedule_schema = {
67 'type': 'object',
68 'title': 'Schedule',
69 'description': 'A schedule which runs specified tasks periodically.',
70 'properties': {
71 'tasks': {'type': ['array', 'string'], 'items': {'type': 'string'}},
72 'interval': interval_schema,
73 'schedule': cron_schema,
74 },
75 'required': ['tasks'],
76 'minProperties': 2,
77 'maxProperties': 2,
78 'error_minProperties': 'Either `cron` or `interval` must be defined.',
79 'error_maxProperties': 'Either `cron` or `interval` must be defined.',
80 'additionalProperties': False,
81 }
82
83 main_schema = {
84 'oneOf': [
85 {'type': 'array', 'title': 'Enable', 'items': schedule_schema},
86 {'type': 'boolean', 'title': 'Disable', 'description': 'Disable task schedules'},
87 ]
88 }
89
90 scheduler = None
91 scheduler_job_map = {}
92
93
94 def job_id(conf):
95 """Create a unique id for a schedule item in config."""
96 return hashlib.sha1(json.dumps(conf, sort_keys=True).encode('utf-8')).hexdigest()
97
98
99 def run_job(tasks):
100 """Add the execution to the queue and waits until it is finished"""
101 logger.debug('executing tasks: {}', tasks)
102 finished_events = manager.execute(
103 options={'tasks': tasks, 'cron': True, 'allow_manual': False}, priority=5
104 )
105 for _, task_name, event_ in finished_events:
106 logger.debug('task finished executing: {}', task_name)
107 event_.wait()
108 logger.debug('all tasks in schedule finished executing')
109
110
111 @event('manager.daemon.started')
112 def setup_scheduler(manager):
113 """Configure and start apscheduler"""
114 global scheduler
115 if logger.level(manager.options.loglevel).no > logger.level('DEBUG').no:
116 logging.getLogger('apscheduler').setLevel(logging.WARNING)
117 # Since APScheduler runs in a separate thread, slower devices can sometimes get a DB lock, so use a separate db
118 # for the jobs to avoid this
119 db_filename = os.path.join(manager.config_base, 'db-%s-jobs.sqlite' % manager.config_name)
120 # in case running on windows, needs double \\
121 db_filename = db_filename.replace('\\', '\\\\')
122 database_uri = 'sqlite:///%s' % db_filename
123 jobstores = {'default': SQLAlchemyJobStore(url=database_uri)}
124 # If job was meant to run within last day while daemon was shutdown, run it once when continuing
125 job_defaults = {'coalesce': True, 'misfire_grace_time': 60 * 60 * 24}
126 scheduler = BackgroundScheduler(
127 jobstores=jobstores,
128 job_defaults=job_defaults,
129 timezone=datetime.datetime.now().astimezone().tzinfo,
130 )
131 setup_jobs(manager)
132
133
134 @event('manager.config_updated')
135 def setup_jobs(manager):
136 """Set up the jobs for apscheduler to run."""
137 if not manager.is_daemon:
138 return
139
140 global scheduler_job_map
141 scheduler_job_map = {}
142
143 if 'schedules' not in manager.config:
144 logger.info(
145 'No schedules defined in config. Defaulting to run all tasks on a 1 hour interval.'
146 )
147 config = manager.config.get('schedules', True)
148 if config is True:
149 config = DEFAULT_SCHEDULES
150 elif not config: # Schedules are disabled with `schedules: no`
151 if scheduler.running:
152 logger.info('Shutting down scheduler')
153 scheduler.shutdown()
154 return
155 if not scheduler.running:
156 logger.info('Starting scheduler')
157 scheduler.start(paused=True)
158 existing_job_ids = [job.id for job in scheduler.get_jobs()]
159 configured_job_ids = []
160 for job_config in config:
161 jid = job_id(job_config)
162 configured_job_ids.append(jid)
163 scheduler_job_map[id(job_config)] = jid
164 if jid in existing_job_ids:
165 continue
166 if 'interval' in job_config:
167 trigger, trigger_args = 'interval', job_config['interval']
168 else:
169 trigger, trigger_args = 'cron', job_config['schedule']
170 tasks = job_config['tasks']
171 if not isinstance(tasks, list):
172 tasks = [tasks]
173 name = ','.join(tasks)
174 scheduler.add_job(
175 run_job, args=(tasks,), id=jid, name=name, trigger=trigger, **trigger_args
176 )
177 # Remove jobs no longer in config
178 for jid in existing_job_ids:
179 if jid not in configured_job_ids:
180 scheduler.remove_job(jid)
181 scheduler.resume()
182
183
184 @event('manager.shutdown_requested')
185 def shutdown_requested(manager):
186 if scheduler and scheduler.running:
187 scheduler.shutdown(wait=True)
188
189
190 @event('manager.shutdown')
191 def stop_scheduler(manager):
192 if scheduler and scheduler.running:
193 scheduler.shutdown(wait=False)
194
195
196 @event('config.register')
197 def register_config():
198 register_config_key('schedules', main_schema)
199 register_schema('/schema/config/schedule', schedule_schema)
200
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flexget/components/scheduler/scheduler.py b/flexget/components/scheduler/scheduler.py
--- a/flexget/components/scheduler/scheduler.py
+++ b/flexget/components/scheduler/scheduler.py
@@ -1,4 +1,3 @@
-import datetime
import hashlib
import logging
import os
@@ -126,7 +125,6 @@
scheduler = BackgroundScheduler(
jobstores=jobstores,
job_defaults=job_defaults,
- timezone=datetime.datetime.now().astimezone().tzinfo,
)
setup_jobs(manager)
| {"golden_diff": "diff --git a/flexget/components/scheduler/scheduler.py b/flexget/components/scheduler/scheduler.py\n--- a/flexget/components/scheduler/scheduler.py\n+++ b/flexget/components/scheduler/scheduler.py\n@@ -1,4 +1,3 @@\n-import datetime\n import hashlib\n import logging\n import os\n@@ -126,7 +125,6 @@\n scheduler = BackgroundScheduler(\n jobstores=jobstores,\n job_defaults=job_defaults,\n- timezone=datetime.datetime.now().astimezone().tzinfo,\n )\n setup_jobs(manager)\n", "issue": "Error on scheduler: Only timezones from the pytz library are supported\n### Steps to reproduce:\r\n- Step 1: `flexget -L verbose daemon start`\r\n\r\n#### Config:\r\n\r\n```yaml\r\nschedules:\r\n - tasks: ['some-task']\r\n interval:\r\n hours: 1\r\n```\r\n \r\n#### Backtrace:\r\n\r\n```\r\n File \"/home/pi/.local/lib/python3.9/site-packages/flexget/__init__.py\", line 44, in main\r\n manager.start()\r\n File \"/home/pi/.local/lib/python3.9/site-packages/flexget/manager.py\", line 383, in start\r\n self.handle_cli()\r\n File \"/home/pi/.local/lib/python3.9/site-packages/flexget/manager.py\", line 409, in handle_cli\r\n self.daemon_command(command_options)\r\n File \"/home/pi/.local/lib/python3.9/site-packages/flexget/manager.py\", line 506, in daemon_command\r\n run_daemon()\r\n File \"/home/pi/.local/lib/python3.9/site-packages/flexget/manager.py\", line 487, in run_daemon\r\n fire_event('manager.daemon.started', self)\r\n File \"/home/pi/.local/lib/python3.9/site-packages/flexget/event.py\", line 109, in fire_event\r\n result = event(*args, **kwargs)\r\n File \"/home/pi/.local/lib/python3.9/site-packages/flexget/event.py\", line 20, in __call__\r\n return self.func(*args, **kwargs)\r\n File \"/home/pi/.local/lib/python3.9/site-packages/flexget/components/scheduler/scheduler.py\", line 126, in setup_scheduler\r\n scheduler = BackgroundScheduler(\r\n File \"/home/pi/.local/lib/python3.9/site-packages/apscheduler/schedulers/base.py\", line 87, in __init__\r\n self.configure(gconfig, **options)\r\n File \"/home/pi/.local/lib/python3.9/site-packages/apscheduler/schedulers/base.py\", line 126, in configure\r\n self._configure(config)\r\n File \"/home/pi/.local/lib/python3.9/site-packages/apscheduler/schedulers/background.py\", line 29, in _configure\r\n super(BackgroundScheduler, self)._configure(config)\r\n File \"/home/pi/.local/lib/python3.9/site-packages/apscheduler/schedulers/base.py\", line 697, in _configure\r\n self.timezone = astimezone(config.pop('timezone', None)) or get_localzone()\r\n File \"/home/pi/.local/lib/python3.9/site-packages/apscheduler/util.py\", line 93, in astimezone\r\n raise TypeError('Only timezones from the pytz library are supported')\r\nTypeError: Only timezones from the pytz library are supported\r\n```\r\n\r\n### Additional information:\r\n\r\n- FlexGet version: 3.5.2\r\n- Python version: 3.9.2\r\n- Installation method:\r\n- Using daemon (yes/no): yes\r\n\r\nIt seems to have started after https://github.com/Flexget/Flexget/pull/3453 that change the timezone argument to a non-pytz compatible object.\nError on scheduler: Only timezones from the pytz library are supported\n### Steps to reproduce:\r\n- Step 1: `flexget -L verbose daemon start`\r\n\r\n#### Config:\r\n\r\n```yaml\r\nschedules:\r\n - tasks: ['some-task']\r\n interval:\r\n hours: 1\r\n```\r\n \r\n#### Backtrace:\r\n\r\n```\r\n File \"/home/pi/.local/lib/python3.9/site-packages/flexget/__init__.py\", line 44, in main\r\n manager.start()\r\n File \"/home/pi/.local/lib/python3.9/site-packages/flexget/manager.py\", line 383, in start\r\n self.handle_cli()\r\n File \"/home/pi/.local/lib/python3.9/site-packages/flexget/manager.py\", line 409, in handle_cli\r\n self.daemon_command(command_options)\r\n File \"/home/pi/.local/lib/python3.9/site-packages/flexget/manager.py\", line 506, in daemon_command\r\n run_daemon()\r\n File \"/home/pi/.local/lib/python3.9/site-packages/flexget/manager.py\", line 487, in run_daemon\r\n fire_event('manager.daemon.started', self)\r\n File \"/home/pi/.local/lib/python3.9/site-packages/flexget/event.py\", line 109, in fire_event\r\n result = event(*args, **kwargs)\r\n File \"/home/pi/.local/lib/python3.9/site-packages/flexget/event.py\", line 20, in __call__\r\n return self.func(*args, **kwargs)\r\n File \"/home/pi/.local/lib/python3.9/site-packages/flexget/components/scheduler/scheduler.py\", line 126, in setup_scheduler\r\n scheduler = BackgroundScheduler(\r\n File \"/home/pi/.local/lib/python3.9/site-packages/apscheduler/schedulers/base.py\", line 87, in __init__\r\n self.configure(gconfig, **options)\r\n File \"/home/pi/.local/lib/python3.9/site-packages/apscheduler/schedulers/base.py\", line 126, in configure\r\n self._configure(config)\r\n File \"/home/pi/.local/lib/python3.9/site-packages/apscheduler/schedulers/background.py\", line 29, in _configure\r\n super(BackgroundScheduler, self)._configure(config)\r\n File \"/home/pi/.local/lib/python3.9/site-packages/apscheduler/schedulers/base.py\", line 697, in _configure\r\n self.timezone = astimezone(config.pop('timezone', None)) or get_localzone()\r\n File \"/home/pi/.local/lib/python3.9/site-packages/apscheduler/util.py\", line 93, in astimezone\r\n raise TypeError('Only timezones from the pytz library are supported')\r\nTypeError: Only timezones from the pytz library are supported\r\n```\r\n\r\n### Additional information:\r\n\r\n- FlexGet version: 3.5.2\r\n- Python version: 3.9.2\r\n- Installation method:\r\n- Using daemon (yes/no): yes\r\n\r\nIt seems to have started after https://github.com/Flexget/Flexget/pull/3453 that change the timezone argument to a non-pytz compatible object.\n", "before_files": [{"content": "import datetime\nimport hashlib\nimport logging\nimport os\nimport struct\n\nfrom apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom apscheduler.triggers.cron import CronTrigger\nfrom loguru import logger\n\nfrom flexget.config_schema import format_checker, register_config_key, register_schema\nfrom flexget.event import event\nfrom flexget.manager import manager\nfrom flexget.utils import json\n\nlogger = logger.bind(name='scheduler')\n\n\n# Add a format checker for more detailed errors on cron type schedules\n@format_checker.checks('cron_schedule', raises=ValueError)\ndef is_cron_schedule(instance):\n if not isinstance(instance, dict):\n return True\n try:\n return CronTrigger(**instance)\n except TypeError:\n # A more specific error message about which key will also be shown by properties schema keyword\n raise ValueError('Invalid key for schedule.')\n\n\nDEFAULT_SCHEDULES = [{'tasks': ['*'], 'interval': {'hours': 1}}]\n\nUNITS = ['minutes', 'hours', 'days', 'weeks']\ninterval_schema = {\n 'type': 'object',\n 'title': 'Simple Interval',\n 'properties': {\n 'minutes': {'type': 'number'},\n 'hours': {'type': 'number'},\n 'days': {'type': 'number'},\n 'weeks': {'type': 'number'},\n 'jitter': {'type': 'integer'},\n },\n 'anyOf': [{'required': [unit]} for unit in UNITS],\n 'error_anyOf': 'Interval must be specified as one or more of %s' % ', '.join(UNITS),\n 'additionalProperties': False,\n}\n\ncron_schema = {\n 'type': 'object',\n 'title': 'Advanced Cron Interval',\n 'properties': {\n 'year': {'type': ['integer', 'string']},\n 'month': {'type': ['integer', 'string']},\n 'day': {'type': ['integer', 'string']},\n 'week': {'type': ['integer', 'string']},\n 'day_of_week': {'type': ['integer', 'string']},\n 'hour': {'type': ['integer', 'string']},\n 'minute': {'type': ['integer', 'string']},\n 'jitter': {'type': 'integer'},\n },\n 'additionalProperties': False,\n}\n\nschedule_schema = {\n 'type': 'object',\n 'title': 'Schedule',\n 'description': 'A schedule which runs specified tasks periodically.',\n 'properties': {\n 'tasks': {'type': ['array', 'string'], 'items': {'type': 'string'}},\n 'interval': interval_schema,\n 'schedule': cron_schema,\n },\n 'required': ['tasks'],\n 'minProperties': 2,\n 'maxProperties': 2,\n 'error_minProperties': 'Either `cron` or `interval` must be defined.',\n 'error_maxProperties': 'Either `cron` or `interval` must be defined.',\n 'additionalProperties': False,\n}\n\nmain_schema = {\n 'oneOf': [\n {'type': 'array', 'title': 'Enable', 'items': schedule_schema},\n {'type': 'boolean', 'title': 'Disable', 'description': 'Disable task schedules'},\n ]\n}\n\nscheduler = None\nscheduler_job_map = {}\n\n\ndef job_id(conf):\n \"\"\"Create a unique id for a schedule item in config.\"\"\"\n return hashlib.sha1(json.dumps(conf, sort_keys=True).encode('utf-8')).hexdigest()\n\n\ndef run_job(tasks):\n \"\"\"Add the execution to the queue and waits until it is finished\"\"\"\n logger.debug('executing tasks: {}', tasks)\n finished_events = manager.execute(\n options={'tasks': tasks, 'cron': True, 'allow_manual': False}, priority=5\n )\n for _, task_name, event_ in finished_events:\n logger.debug('task finished executing: {}', task_name)\n event_.wait()\n logger.debug('all tasks in schedule finished executing')\n\n\n@event('manager.daemon.started')\ndef setup_scheduler(manager):\n \"\"\"Configure and start apscheduler\"\"\"\n global scheduler\n if logger.level(manager.options.loglevel).no > logger.level('DEBUG').no:\n logging.getLogger('apscheduler').setLevel(logging.WARNING)\n # Since APScheduler runs in a separate thread, slower devices can sometimes get a DB lock, so use a separate db\n # for the jobs to avoid this\n db_filename = os.path.join(manager.config_base, 'db-%s-jobs.sqlite' % manager.config_name)\n # in case running on windows, needs double \\\\\n db_filename = db_filename.replace('\\\\', '\\\\\\\\')\n database_uri = 'sqlite:///%s' % db_filename\n jobstores = {'default': SQLAlchemyJobStore(url=database_uri)}\n # If job was meant to run within last day while daemon was shutdown, run it once when continuing\n job_defaults = {'coalesce': True, 'misfire_grace_time': 60 * 60 * 24}\n scheduler = BackgroundScheduler(\n jobstores=jobstores,\n job_defaults=job_defaults,\n timezone=datetime.datetime.now().astimezone().tzinfo,\n )\n setup_jobs(manager)\n\n\n@event('manager.config_updated')\ndef setup_jobs(manager):\n \"\"\"Set up the jobs for apscheduler to run.\"\"\"\n if not manager.is_daemon:\n return\n\n global scheduler_job_map\n scheduler_job_map = {}\n\n if 'schedules' not in manager.config:\n logger.info(\n 'No schedules defined in config. Defaulting to run all tasks on a 1 hour interval.'\n )\n config = manager.config.get('schedules', True)\n if config is True:\n config = DEFAULT_SCHEDULES\n elif not config: # Schedules are disabled with `schedules: no`\n if scheduler.running:\n logger.info('Shutting down scheduler')\n scheduler.shutdown()\n return\n if not scheduler.running:\n logger.info('Starting scheduler')\n scheduler.start(paused=True)\n existing_job_ids = [job.id for job in scheduler.get_jobs()]\n configured_job_ids = []\n for job_config in config:\n jid = job_id(job_config)\n configured_job_ids.append(jid)\n scheduler_job_map[id(job_config)] = jid\n if jid in existing_job_ids:\n continue\n if 'interval' in job_config:\n trigger, trigger_args = 'interval', job_config['interval']\n else:\n trigger, trigger_args = 'cron', job_config['schedule']\n tasks = job_config['tasks']\n if not isinstance(tasks, list):\n tasks = [tasks]\n name = ','.join(tasks)\n scheduler.add_job(\n run_job, args=(tasks,), id=jid, name=name, trigger=trigger, **trigger_args\n )\n # Remove jobs no longer in config\n for jid in existing_job_ids:\n if jid not in configured_job_ids:\n scheduler.remove_job(jid)\n scheduler.resume()\n\n\n@event('manager.shutdown_requested')\ndef shutdown_requested(manager):\n if scheduler and scheduler.running:\n scheduler.shutdown(wait=True)\n\n\n@event('manager.shutdown')\ndef stop_scheduler(manager):\n if scheduler and scheduler.running:\n scheduler.shutdown(wait=False)\n\n\n@event('config.register')\ndef register_config():\n register_config_key('schedules', main_schema)\n register_schema('/schema/config/schedule', schedule_schema)\n", "path": "flexget/components/scheduler/scheduler.py"}], "after_files": [{"content": "import hashlib\nimport logging\nimport os\nimport struct\n\nfrom apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom apscheduler.triggers.cron import CronTrigger\nfrom loguru import logger\n\nfrom flexget.config_schema import format_checker, register_config_key, register_schema\nfrom flexget.event import event\nfrom flexget.manager import manager\nfrom flexget.utils import json\n\nlogger = logger.bind(name='scheduler')\n\n\n# Add a format checker for more detailed errors on cron type schedules\n@format_checker.checks('cron_schedule', raises=ValueError)\ndef is_cron_schedule(instance):\n if not isinstance(instance, dict):\n return True\n try:\n return CronTrigger(**instance)\n except TypeError:\n # A more specific error message about which key will also be shown by properties schema keyword\n raise ValueError('Invalid key for schedule.')\n\n\nDEFAULT_SCHEDULES = [{'tasks': ['*'], 'interval': {'hours': 1}}]\n\nUNITS = ['minutes', 'hours', 'days', 'weeks']\ninterval_schema = {\n 'type': 'object',\n 'title': 'Simple Interval',\n 'properties': {\n 'minutes': {'type': 'number'},\n 'hours': {'type': 'number'},\n 'days': {'type': 'number'},\n 'weeks': {'type': 'number'},\n 'jitter': {'type': 'integer'},\n },\n 'anyOf': [{'required': [unit]} for unit in UNITS],\n 'error_anyOf': 'Interval must be specified as one or more of %s' % ', '.join(UNITS),\n 'additionalProperties': False,\n}\n\ncron_schema = {\n 'type': 'object',\n 'title': 'Advanced Cron Interval',\n 'properties': {\n 'year': {'type': ['integer', 'string']},\n 'month': {'type': ['integer', 'string']},\n 'day': {'type': ['integer', 'string']},\n 'week': {'type': ['integer', 'string']},\n 'day_of_week': {'type': ['integer', 'string']},\n 'hour': {'type': ['integer', 'string']},\n 'minute': {'type': ['integer', 'string']},\n 'jitter': {'type': 'integer'},\n },\n 'additionalProperties': False,\n}\n\nschedule_schema = {\n 'type': 'object',\n 'title': 'Schedule',\n 'description': 'A schedule which runs specified tasks periodically.',\n 'properties': {\n 'tasks': {'type': ['array', 'string'], 'items': {'type': 'string'}},\n 'interval': interval_schema,\n 'schedule': cron_schema,\n },\n 'required': ['tasks'],\n 'minProperties': 2,\n 'maxProperties': 2,\n 'error_minProperties': 'Either `cron` or `interval` must be defined.',\n 'error_maxProperties': 'Either `cron` or `interval` must be defined.',\n 'additionalProperties': False,\n}\n\nmain_schema = {\n 'oneOf': [\n {'type': 'array', 'title': 'Enable', 'items': schedule_schema},\n {'type': 'boolean', 'title': 'Disable', 'description': 'Disable task schedules'},\n ]\n}\n\nscheduler = None\nscheduler_job_map = {}\n\n\ndef job_id(conf):\n \"\"\"Create a unique id for a schedule item in config.\"\"\"\n return hashlib.sha1(json.dumps(conf, sort_keys=True).encode('utf-8')).hexdigest()\n\n\ndef run_job(tasks):\n \"\"\"Add the execution to the queue and waits until it is finished\"\"\"\n logger.debug('executing tasks: {}', tasks)\n finished_events = manager.execute(\n options={'tasks': tasks, 'cron': True, 'allow_manual': False}, priority=5\n )\n for _, task_name, event_ in finished_events:\n logger.debug('task finished executing: {}', task_name)\n event_.wait()\n logger.debug('all tasks in schedule finished executing')\n\n\n@event('manager.daemon.started')\ndef setup_scheduler(manager):\n \"\"\"Configure and start apscheduler\"\"\"\n global scheduler\n if logger.level(manager.options.loglevel).no > logger.level('DEBUG').no:\n logging.getLogger('apscheduler').setLevel(logging.WARNING)\n # Since APScheduler runs in a separate thread, slower devices can sometimes get a DB lock, so use a separate db\n # for the jobs to avoid this\n db_filename = os.path.join(manager.config_base, 'db-%s-jobs.sqlite' % manager.config_name)\n # in case running on windows, needs double \\\\\n db_filename = db_filename.replace('\\\\', '\\\\\\\\')\n database_uri = 'sqlite:///%s' % db_filename\n jobstores = {'default': SQLAlchemyJobStore(url=database_uri)}\n # If job was meant to run within last day while daemon was shutdown, run it once when continuing\n job_defaults = {'coalesce': True, 'misfire_grace_time': 60 * 60 * 24}\n scheduler = BackgroundScheduler(\n jobstores=jobstores,\n job_defaults=job_defaults,\n )\n setup_jobs(manager)\n\n\n@event('manager.config_updated')\ndef setup_jobs(manager):\n \"\"\"Set up the jobs for apscheduler to run.\"\"\"\n if not manager.is_daemon:\n return\n\n global scheduler_job_map\n scheduler_job_map = {}\n\n if 'schedules' not in manager.config:\n logger.info(\n 'No schedules defined in config. Defaulting to run all tasks on a 1 hour interval.'\n )\n config = manager.config.get('schedules', True)\n if config is True:\n config = DEFAULT_SCHEDULES\n elif not config: # Schedules are disabled with `schedules: no`\n if scheduler.running:\n logger.info('Shutting down scheduler')\n scheduler.shutdown()\n return\n if not scheduler.running:\n logger.info('Starting scheduler')\n scheduler.start(paused=True)\n existing_job_ids = [job.id for job in scheduler.get_jobs()]\n configured_job_ids = []\n for job_config in config:\n jid = job_id(job_config)\n configured_job_ids.append(jid)\n scheduler_job_map[id(job_config)] = jid\n if jid in existing_job_ids:\n continue\n if 'interval' in job_config:\n trigger, trigger_args = 'interval', job_config['interval']\n else:\n trigger, trigger_args = 'cron', job_config['schedule']\n tasks = job_config['tasks']\n if not isinstance(tasks, list):\n tasks = [tasks]\n name = ','.join(tasks)\n scheduler.add_job(\n run_job, args=(tasks,), id=jid, name=name, trigger=trigger, **trigger_args\n )\n # Remove jobs no longer in config\n for jid in existing_job_ids:\n if jid not in configured_job_ids:\n scheduler.remove_job(jid)\n scheduler.resume()\n\n\n@event('manager.shutdown_requested')\ndef shutdown_requested(manager):\n if scheduler and scheduler.running:\n scheduler.shutdown(wait=True)\n\n\n@event('manager.shutdown')\ndef stop_scheduler(manager):\n if scheduler and scheduler.running:\n scheduler.shutdown(wait=False)\n\n\n@event('config.register')\ndef register_config():\n register_config_key('schedules', main_schema)\n register_schema('/schema/config/schedule', schedule_schema)\n", "path": "flexget/components/scheduler/scheduler.py"}]} |
gh_patches_debug_1563 | rasdani/github-patches | git_diff | ray-project__ray-3109 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ship Modin with Ray
### Describe the problem
<!-- Describe the problem clearly here. -->
I think it makes sense to ship Modin with Ray. I suggest doing this similar to how pyarrow is shipped with Ray.
We don't need to rely on the dependencies of Modin, but some of the Modin source will have to be updated to make sure that the pandas version is correct.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/ray/__init__.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4
5 import os
6 import sys
7
8 if "pyarrow" in sys.modules:
9 raise ImportError("Ray must be imported before pyarrow because Ray "
10 "requires a specific version of pyarrow (which is "
11 "packaged along with Ray).")
12
13 # Add the directory containing pyarrow to the Python path so that we find the
14 # pyarrow version packaged with ray and not a pre-existing pyarrow.
15 pyarrow_path = os.path.join(
16 os.path.abspath(os.path.dirname(__file__)), "pyarrow_files")
17 sys.path.insert(0, pyarrow_path)
18
19 # See https://github.com/ray-project/ray/issues/131.
20 helpful_message = """
21
22 If you are using Anaconda, try fixing this problem by running:
23
24 conda install libgcc
25 """
26
27 try:
28 import pyarrow # noqa: F401
29 except ImportError as e:
30 if ((hasattr(e, "msg") and isinstance(e.msg, str)
31 and ("libstdc++" in e.msg or "CXX" in e.msg))):
32 # This code path should be taken with Python 3.
33 e.msg += helpful_message
34 elif (hasattr(e, "message") and isinstance(e.message, str)
35 and ("libstdc++" in e.message or "CXX" in e.message)):
36 # This code path should be taken with Python 2.
37 condition = (hasattr(e, "args") and isinstance(e.args, tuple)
38 and len(e.args) == 1 and isinstance(e.args[0], str))
39 if condition:
40 e.args = (e.args[0] + helpful_message, )
41 else:
42 if not hasattr(e, "args"):
43 e.args = ()
44 elif not isinstance(e.args, tuple):
45 e.args = (e.args, )
46 e.args += (helpful_message, )
47 raise
48
49 from ray.raylet import ObjectID, _config # noqa: E402
50 from ray.profiling import profile # noqa: E402
51 from ray.worker import (error_info, init, connect, disconnect, get, put, wait,
52 remote, get_gpu_ids, get_resource_ids, get_webui_url,
53 register_custom_serializer, shutdown,
54 is_initialized) # noqa: E402
55 from ray.worker import (SCRIPT_MODE, WORKER_MODE, LOCAL_MODE,
56 PYTHON_MODE) # noqa: E402
57 from ray.worker import global_state # noqa: E402
58 import ray.internal # noqa: E402
59 # We import ray.actor because some code is run in actor.py which initializes
60 # some functions in the worker.
61 import ray.actor # noqa: F401
62 from ray.actor import method # noqa: E402
63
64 # Ray version string.
65 __version__ = "0.5.3"
66
67 __all__ = [
68 "error_info", "init", "connect", "disconnect", "get", "put", "wait",
69 "remote", "profile", "actor", "method", "get_gpu_ids", "get_resource_ids",
70 "get_webui_url", "register_custom_serializer", "shutdown",
71 "is_initialized", "SCRIPT_MODE", "WORKER_MODE", "LOCAL_MODE",
72 "PYTHON_MODE", "global_state", "ObjectID", "_config", "__version__",
73 "internal"
74 ]
75
76 import ctypes # noqa: E402
77 # Windows only
78 if hasattr(ctypes, "windll"):
79 # Makes sure that all child processes die when we die. Also makes sure that
80 # fatal crashes result in process termination rather than an error dialog
81 # (the latter is annoying since we have a lot of processes). This is done
82 # by associating all child processes with a "job" object that imposes this
83 # behavior.
84 (lambda kernel32: (lambda job: (lambda n: kernel32.SetInformationJobObject(job, 9, "\0" * 17 + chr(0x8 | 0x4 | 0x20) + "\0" * (n - 18), n))(0x90 if ctypes.sizeof(ctypes.c_void_p) > ctypes.sizeof(ctypes.c_int) else 0x70) and kernel32.AssignProcessToJobObject(job, ctypes.c_void_p(kernel32.GetCurrentProcess())))(ctypes.c_void_p(kernel32.CreateJobObjectW(None, None))) if kernel32 is not None else None)(ctypes.windll.kernel32) # noqa: E501
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/ray/__init__.py b/python/ray/__init__.py
--- a/python/ray/__init__.py
+++ b/python/ray/__init__.py
@@ -46,6 +46,9 @@
e.args += (helpful_message, )
raise
+modin_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "modin")
+sys.path.insert(0, modin_path)
+
from ray.raylet import ObjectID, _config # noqa: E402
from ray.profiling import profile # noqa: E402
from ray.worker import (error_info, init, connect, disconnect, get, put, wait,
| {"golden_diff": "diff --git a/python/ray/__init__.py b/python/ray/__init__.py\n--- a/python/ray/__init__.py\n+++ b/python/ray/__init__.py\n@@ -46,6 +46,9 @@\n e.args += (helpful_message, )\n raise\n \n+modin_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"modin\")\n+sys.path.insert(0, modin_path)\n+\n from ray.raylet import ObjectID, _config # noqa: E402\n from ray.profiling import profile # noqa: E402\n from ray.worker import (error_info, init, connect, disconnect, get, put, wait,\n", "issue": "Ship Modin with Ray\n### Describe the problem\r\n<!-- Describe the problem clearly here. -->\r\nI think it makes sense to ship Modin with Ray. I suggest doing this similar to how pyarrow is shipped with Ray.\r\n\r\nWe don't need to rely on the dependencies of Modin, but some of the Modin source will have to be updated to make sure that the pandas version is correct.\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nif \"pyarrow\" in sys.modules:\n raise ImportError(\"Ray must be imported before pyarrow because Ray \"\n \"requires a specific version of pyarrow (which is \"\n \"packaged along with Ray).\")\n\n# Add the directory containing pyarrow to the Python path so that we find the\n# pyarrow version packaged with ray and not a pre-existing pyarrow.\npyarrow_path = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"pyarrow_files\")\nsys.path.insert(0, pyarrow_path)\n\n# See https://github.com/ray-project/ray/issues/131.\nhelpful_message = \"\"\"\n\nIf you are using Anaconda, try fixing this problem by running:\n\n conda install libgcc\n\"\"\"\n\ntry:\n import pyarrow # noqa: F401\nexcept ImportError as e:\n if ((hasattr(e, \"msg\") and isinstance(e.msg, str)\n and (\"libstdc++\" in e.msg or \"CXX\" in e.msg))):\n # This code path should be taken with Python 3.\n e.msg += helpful_message\n elif (hasattr(e, \"message\") and isinstance(e.message, str)\n and (\"libstdc++\" in e.message or \"CXX\" in e.message)):\n # This code path should be taken with Python 2.\n condition = (hasattr(e, \"args\") and isinstance(e.args, tuple)\n and len(e.args) == 1 and isinstance(e.args[0], str))\n if condition:\n e.args = (e.args[0] + helpful_message, )\n else:\n if not hasattr(e, \"args\"):\n e.args = ()\n elif not isinstance(e.args, tuple):\n e.args = (e.args, )\n e.args += (helpful_message, )\n raise\n\nfrom ray.raylet import ObjectID, _config # noqa: E402\nfrom ray.profiling import profile # noqa: E402\nfrom ray.worker import (error_info, init, connect, disconnect, get, put, wait,\n remote, get_gpu_ids, get_resource_ids, get_webui_url,\n register_custom_serializer, shutdown,\n is_initialized) # noqa: E402\nfrom ray.worker import (SCRIPT_MODE, WORKER_MODE, LOCAL_MODE,\n PYTHON_MODE) # noqa: E402\nfrom ray.worker import global_state # noqa: E402\nimport ray.internal # noqa: E402\n# We import ray.actor because some code is run in actor.py which initializes\n# some functions in the worker.\nimport ray.actor # noqa: F401\nfrom ray.actor import method # noqa: E402\n\n# Ray version string.\n__version__ = \"0.5.3\"\n\n__all__ = [\n \"error_info\", \"init\", \"connect\", \"disconnect\", \"get\", \"put\", \"wait\",\n \"remote\", \"profile\", \"actor\", \"method\", \"get_gpu_ids\", \"get_resource_ids\",\n \"get_webui_url\", \"register_custom_serializer\", \"shutdown\",\n \"is_initialized\", \"SCRIPT_MODE\", \"WORKER_MODE\", \"LOCAL_MODE\",\n \"PYTHON_MODE\", \"global_state\", \"ObjectID\", \"_config\", \"__version__\",\n \"internal\"\n]\n\nimport ctypes # noqa: E402\n# Windows only\nif hasattr(ctypes, \"windll\"):\n # Makes sure that all child processes die when we die. Also makes sure that\n # fatal crashes result in process termination rather than an error dialog\n # (the latter is annoying since we have a lot of processes). This is done\n # by associating all child processes with a \"job\" object that imposes this\n # behavior.\n (lambda kernel32: (lambda job: (lambda n: kernel32.SetInformationJobObject(job, 9, \"\\0\" * 17 + chr(0x8 | 0x4 | 0x20) + \"\\0\" * (n - 18), n))(0x90 if ctypes.sizeof(ctypes.c_void_p) > ctypes.sizeof(ctypes.c_int) else 0x70) and kernel32.AssignProcessToJobObject(job, ctypes.c_void_p(kernel32.GetCurrentProcess())))(ctypes.c_void_p(kernel32.CreateJobObjectW(None, None))) if kernel32 is not None else None)(ctypes.windll.kernel32) # noqa: E501\n", "path": "python/ray/__init__.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nif \"pyarrow\" in sys.modules:\n raise ImportError(\"Ray must be imported before pyarrow because Ray \"\n \"requires a specific version of pyarrow (which is \"\n \"packaged along with Ray).\")\n\n# Add the directory containing pyarrow to the Python path so that we find the\n# pyarrow version packaged with ray and not a pre-existing pyarrow.\npyarrow_path = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"pyarrow_files\")\nsys.path.insert(0, pyarrow_path)\n\n# See https://github.com/ray-project/ray/issues/131.\nhelpful_message = \"\"\"\n\nIf you are using Anaconda, try fixing this problem by running:\n\n conda install libgcc\n\"\"\"\n\ntry:\n import pyarrow # noqa: F401\nexcept ImportError as e:\n if ((hasattr(e, \"msg\") and isinstance(e.msg, str)\n and (\"libstdc++\" in e.msg or \"CXX\" in e.msg))):\n # This code path should be taken with Python 3.\n e.msg += helpful_message\n elif (hasattr(e, \"message\") and isinstance(e.message, str)\n and (\"libstdc++\" in e.message or \"CXX\" in e.message)):\n # This code path should be taken with Python 2.\n condition = (hasattr(e, \"args\") and isinstance(e.args, tuple)\n and len(e.args) == 1 and isinstance(e.args[0], str))\n if condition:\n e.args = (e.args[0] + helpful_message, )\n else:\n if not hasattr(e, \"args\"):\n e.args = ()\n elif not isinstance(e.args, tuple):\n e.args = (e.args, )\n e.args += (helpful_message, )\n raise\n\nmodin_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"modin\")\nsys.path.insert(0, modin_path)\n\nfrom ray.raylet import ObjectID, _config # noqa: E402\nfrom ray.profiling import profile # noqa: E402\nfrom ray.worker import (error_info, init, connect, disconnect, get, put, wait,\n remote, get_gpu_ids, get_resource_ids, get_webui_url,\n register_custom_serializer, shutdown,\n is_initialized) # noqa: E402\nfrom ray.worker import (SCRIPT_MODE, WORKER_MODE, LOCAL_MODE,\n PYTHON_MODE) # noqa: E402\nfrom ray.worker import global_state # noqa: E402\nimport ray.internal # noqa: E402\n# We import ray.actor because some code is run in actor.py which initializes\n# some functions in the worker.\nimport ray.actor # noqa: F401\nfrom ray.actor import method # noqa: E402\n\n# Ray version string.\n__version__ = \"0.5.3\"\n\n__all__ = [\n \"error_info\", \"init\", \"connect\", \"disconnect\", \"get\", \"put\", \"wait\",\n \"remote\", \"profile\", \"actor\", \"method\", \"get_gpu_ids\", \"get_resource_ids\",\n \"get_webui_url\", \"register_custom_serializer\", \"shutdown\",\n \"is_initialized\", \"SCRIPT_MODE\", \"WORKER_MODE\", \"LOCAL_MODE\",\n \"PYTHON_MODE\", \"global_state\", \"ObjectID\", \"_config\", \"__version__\",\n \"internal\"\n]\n\nimport ctypes # noqa: E402\n# Windows only\nif hasattr(ctypes, \"windll\"):\n # Makes sure that all child processes die when we die. Also makes sure that\n # fatal crashes result in process termination rather than an error dialog\n # (the latter is annoying since we have a lot of processes). This is done\n # by associating all child processes with a \"job\" object that imposes this\n # behavior.\n (lambda kernel32: (lambda job: (lambda n: kernel32.SetInformationJobObject(job, 9, \"\\0\" * 17 + chr(0x8 | 0x4 | 0x20) + \"\\0\" * (n - 18), n))(0x90 if ctypes.sizeof(ctypes.c_void_p) > ctypes.sizeof(ctypes.c_int) else 0x70) and kernel32.AssignProcessToJobObject(job, ctypes.c_void_p(kernel32.GetCurrentProcess())))(ctypes.c_void_p(kernel32.CreateJobObjectW(None, None))) if kernel32 is not None else None)(ctypes.windll.kernel32) # noqa: E501\n", "path": "python/ray/__init__.py"}]} |
gh_patches_debug_1564 | rasdani/github-patches | git_diff | web2py__web2py-1871 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
contrib/pg8000 is old and causes weird postgres errors
Please update the contrib/pg8000 driver to the current version.
Otherwise errors like Broken Pipe, Operationalerror,.. occur,
- at least for postgres 9.6,
- especially for long running task (ie. scheduler, where they are not properly handled (scheduler workers will restart and earlier run rmains as RUNNING).
related links:
https://github.com/mfenniak/pg8000/issues/73
https://groups.google.com/forum/#!topic/web2py/HAeJevtGtdU
..in copy into issues: web2py/web2py, web2py/pydal
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 from setuptools import setup
4 from gluon.fileutils import tar, untar, read_file, write_file
5 import tarfile
6 import sys
7
8
9 def tar(file, filelist, expression='^.+$'):
10 """
11 tars dir/files into file, only tars file that match expression
12 """
13
14 tar = tarfile.TarFile(file, 'w')
15 try:
16 for element in filelist:
17 try:
18 for file in listdir(element, expression, add_dirs=True):
19 tar.add(os.path.join(element, file), file, False)
20 except:
21 tar.add(element)
22 finally:
23 tar.close()
24
25
26 def start():
27 if 'sdist' in sys.argv:
28 tar('gluon/env.tar', ['applications', 'VERSION',
29 'extras/icons/splashlogo.gif'])
30
31 setup(name='web2py',
32 version=read_file("VERSION").split()[1],
33 description="""full-stack framework for rapid development and prototyping
34 of secure database-driven web-based applications, written and
35 programmable in Python.""",
36 long_description="""
37 Everything in one package with no dependencies. Development, deployment,
38 debugging, testing, database administration and maintenance of applications can
39 be done via the provided web interface. web2py has no configuration files,
40 requires no installation, can run off a USB drive. web2py uses Python for the
41 Model, the Views and the Controllers, has a built-in ticketing system to manage
42 errors, an internationalization engine, works with SQLite, PostgreSQL, MySQL,
43 MSSQL, FireBird, Oracle, IBM DB2, Informix, Ingres, sybase and Google App Engine via a
44 Database Abstraction Layer. web2py includes libraries to handle
45 HTML/XML, RSS, ATOM, CSV, RTF, JSON, AJAX, XMLRPC, WIKI markup. Production
46 ready, capable of upload/download streaming of very large files, and always
47 backward compatible.
48 """,
49 author='Massimo Di Pierro',
50 author_email='[email protected]',
51 license='http://web2py.com/examples/default/license',
52 classifiers=["Development Status :: 5 - Production/Stable"],
53 url='http://web2py.com',
54 platforms='Windows, Linux, Mac, Unix,Windows Mobile',
55 packages=['gluon',
56 'gluon/contrib',
57 'gluon/contrib/gateways',
58 'gluon/contrib/login_methods',
59 'gluon/contrib/markdown',
60 'gluon/contrib/markmin',
61 'gluon/contrib/memcache',
62 'gluon/contrib/fpdf',
63 'gluon/contrib/pymysql',
64 'gluon/contrib/pyrtf',
65 'gluon/contrib/pysimplesoap',
66 'gluon/contrib/pg8000',
67 'gluon/contrib/plural_rules',
68 'gluon/contrib/minify',
69 'gluon/contrib/pyaes',
70 'gluon/contrib/pyuca',
71 'gluon/tests',
72 ],
73 package_data={'gluon': ['env.tar']},
74 # scripts=['w2p_apps', 'w2p_run', 'w2p_clone'],
75 )
76
77 if __name__ == '__main__':
78 #print "web2py does not require installation and"
79 #print "you should just start it with:"
80 #print
81 #print "$ python web2py.py"
82 #print
83 #print "are you sure you want to install it anyway (y/n)?"
84 #s = raw_input('>')
85 #if s.lower()[:1]=='y':
86 start()
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -63,7 +63,6 @@
'gluon/contrib/pymysql',
'gluon/contrib/pyrtf',
'gluon/contrib/pysimplesoap',
- 'gluon/contrib/pg8000',
'gluon/contrib/plural_rules',
'gluon/contrib/minify',
'gluon/contrib/pyaes',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -63,7 +63,6 @@\n 'gluon/contrib/pymysql',\n 'gluon/contrib/pyrtf',\n 'gluon/contrib/pysimplesoap',\n- 'gluon/contrib/pg8000',\n 'gluon/contrib/plural_rules',\n 'gluon/contrib/minify',\n 'gluon/contrib/pyaes',\n", "issue": "contrib/pg8000 is old and causes weird postgres errors\nPlease update the contrib/pg8000 driver to the current version.\r\nOtherwise errors like Broken Pipe, Operationalerror,.. occur,\r\n- at least for postgres 9.6,\r\n- especially for long running task (ie. scheduler, where they are not properly handled (scheduler workers will restart and earlier run rmains as RUNNING).\r\n\r\nrelated links:\r\nhttps://github.com/mfenniak/pg8000/issues/73\r\nhttps://groups.google.com/forum/#!topic/web2py/HAeJevtGtdU\r\n\r\n..in copy into issues: web2py/web2py, web2py/pydal\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\nfrom gluon.fileutils import tar, untar, read_file, write_file\nimport tarfile\nimport sys\n\n\ndef tar(file, filelist, expression='^.+$'):\n \"\"\"\n tars dir/files into file, only tars file that match expression\n \"\"\"\n\n tar = tarfile.TarFile(file, 'w')\n try:\n for element in filelist:\n try:\n for file in listdir(element, expression, add_dirs=True):\n tar.add(os.path.join(element, file), file, False)\n except:\n tar.add(element)\n finally:\n tar.close()\n\n\ndef start():\n if 'sdist' in sys.argv:\n tar('gluon/env.tar', ['applications', 'VERSION',\n 'extras/icons/splashlogo.gif'])\n\n setup(name='web2py',\n version=read_file(\"VERSION\").split()[1],\n description=\"\"\"full-stack framework for rapid development and prototyping\n of secure database-driven web-based applications, written and\n programmable in Python.\"\"\",\n long_description=\"\"\"\n Everything in one package with no dependencies. Development, deployment,\n debugging, testing, database administration and maintenance of applications can\n be done via the provided web interface. web2py has no configuration files,\n requires no installation, can run off a USB drive. web2py uses Python for the\n Model, the Views and the Controllers, has a built-in ticketing system to manage\n errors, an internationalization engine, works with SQLite, PostgreSQL, MySQL,\n MSSQL, FireBird, Oracle, IBM DB2, Informix, Ingres, sybase and Google App Engine via a\n Database Abstraction Layer. web2py includes libraries to handle\n HTML/XML, RSS, ATOM, CSV, RTF, JSON, AJAX, XMLRPC, WIKI markup. Production\n ready, capable of upload/download streaming of very large files, and always\n backward compatible.\n \"\"\",\n author='Massimo Di Pierro',\n author_email='[email protected]',\n license='http://web2py.com/examples/default/license',\n classifiers=[\"Development Status :: 5 - Production/Stable\"],\n url='http://web2py.com',\n platforms='Windows, Linux, Mac, Unix,Windows Mobile',\n packages=['gluon',\n 'gluon/contrib',\n 'gluon/contrib/gateways',\n 'gluon/contrib/login_methods',\n 'gluon/contrib/markdown',\n 'gluon/contrib/markmin',\n 'gluon/contrib/memcache',\n 'gluon/contrib/fpdf',\n 'gluon/contrib/pymysql',\n 'gluon/contrib/pyrtf',\n 'gluon/contrib/pysimplesoap',\n 'gluon/contrib/pg8000',\n 'gluon/contrib/plural_rules',\n 'gluon/contrib/minify',\n 'gluon/contrib/pyaes',\n 'gluon/contrib/pyuca',\n 'gluon/tests',\n ],\n package_data={'gluon': ['env.tar']},\n# scripts=['w2p_apps', 'w2p_run', 'w2p_clone'],\n )\n\nif __name__ == '__main__':\n #print \"web2py does not require installation and\"\n #print \"you should just start it with:\"\n #print\n #print \"$ python web2py.py\"\n #print\n #print \"are you sure you want to install it anyway (y/n)?\"\n #s = raw_input('>')\n #if s.lower()[:1]=='y':\n start()\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\nfrom gluon.fileutils import tar, untar, read_file, write_file\nimport tarfile\nimport sys\n\n\ndef tar(file, filelist, expression='^.+$'):\n \"\"\"\n tars dir/files into file, only tars file that match expression\n \"\"\"\n\n tar = tarfile.TarFile(file, 'w')\n try:\n for element in filelist:\n try:\n for file in listdir(element, expression, add_dirs=True):\n tar.add(os.path.join(element, file), file, False)\n except:\n tar.add(element)\n finally:\n tar.close()\n\n\ndef start():\n if 'sdist' in sys.argv:\n tar('gluon/env.tar', ['applications', 'VERSION',\n 'extras/icons/splashlogo.gif'])\n\n setup(name='web2py',\n version=read_file(\"VERSION\").split()[1],\n description=\"\"\"full-stack framework for rapid development and prototyping\n of secure database-driven web-based applications, written and\n programmable in Python.\"\"\",\n long_description=\"\"\"\n Everything in one package with no dependencies. Development, deployment,\n debugging, testing, database administration and maintenance of applications can\n be done via the provided web interface. web2py has no configuration files,\n requires no installation, can run off a USB drive. web2py uses Python for the\n Model, the Views and the Controllers, has a built-in ticketing system to manage\n errors, an internationalization engine, works with SQLite, PostgreSQL, MySQL,\n MSSQL, FireBird, Oracle, IBM DB2, Informix, Ingres, sybase and Google App Engine via a\n Database Abstraction Layer. web2py includes libraries to handle\n HTML/XML, RSS, ATOM, CSV, RTF, JSON, AJAX, XMLRPC, WIKI markup. Production\n ready, capable of upload/download streaming of very large files, and always\n backward compatible.\n \"\"\",\n author='Massimo Di Pierro',\n author_email='[email protected]',\n license='http://web2py.com/examples/default/license',\n classifiers=[\"Development Status :: 5 - Production/Stable\"],\n url='http://web2py.com',\n platforms='Windows, Linux, Mac, Unix,Windows Mobile',\n packages=['gluon',\n 'gluon/contrib',\n 'gluon/contrib/gateways',\n 'gluon/contrib/login_methods',\n 'gluon/contrib/markdown',\n 'gluon/contrib/markmin',\n 'gluon/contrib/memcache',\n 'gluon/contrib/fpdf',\n 'gluon/contrib/pymysql',\n 'gluon/contrib/pyrtf',\n 'gluon/contrib/pysimplesoap',\n 'gluon/contrib/plural_rules',\n 'gluon/contrib/minify',\n 'gluon/contrib/pyaes',\n 'gluon/contrib/pyuca',\n 'gluon/tests',\n ],\n package_data={'gluon': ['env.tar']},\n# scripts=['w2p_apps', 'w2p_run', 'w2p_clone'],\n )\n\nif __name__ == '__main__':\n #print \"web2py does not require installation and\"\n #print \"you should just start it with:\"\n #print\n #print \"$ python web2py.py\"\n #print\n #print \"are you sure you want to install it anyway (y/n)?\"\n #s = raw_input('>')\n #if s.lower()[:1]=='y':\n start()\n", "path": "setup.py"}]} |
gh_patches_debug_1565 | rasdani/github-patches | git_diff | holoviz__panel-1044 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support src urls and alt texts on Images
#### My Pain
I would like to use images in Panel via `pn.pane.Markdown` and/ or `pn.pane.PNG`.
Currently the Bokeh layout engine does not layout markdown with images well. See https://github.com/holoviz/panel/issues/835.
So I need to stick to `pn.pane.PNG` for images.
But the `ImageBase` class does not support parameters like the `src` url and `alt` text from the the HTML `img` tag. So I cannot provide image links or alt texts.
#### Solution
Add `src` and `alt` parameters to the `BaseImage` class.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `panel/pane/image.py`
Content:
```
1 """
2 Contains Image panes including renderers for PNG, SVG, GIF and JPG
3 file types.
4 """
5 from __future__ import absolute_import, division, unicode_literals
6
7 import base64
8
9 from io import BytesIO
10 from six import string_types
11
12 import param
13
14 from .markup import escape, DivPaneBase
15 from ..util import isfile, isurl
16
17
18 class ImageBase(DivPaneBase):
19 """
20 Encodes an image as base64 and wraps it in a Bokeh Div model.
21 This is an abstract base class that needs the image type
22 to be specified and specific code for determining the image shape.
23
24 The imgtype determines the filetype, extension, and MIME type for
25 this image. Each image type (png,jpg,gif) has a base class that
26 supports anything with a `_repr_X_` method (where X is `png`,
27 `gif`, etc.), a local file with the given file extension, or a
28 HTTP(S) url with the given extension. Subclasses of each type can
29 provide their own way of obtaining or generating a PNG.
30 """
31
32 alt_text = param.String(default=None, doc="""
33 alt text to add to the image tag. The alt text is shown when a
34 user cannot load or display the image.""")
35
36 link_url = param.String(default=None, doc="""
37 A link URL to make the image clickable and link to some other
38 website.""")
39
40 embed = param.Boolean(default=True, doc="""
41 Whether to embed the image as base64.""")
42
43 imgtype = 'None'
44
45 _rerender_params = ['alt_text', 'link_url', 'embed', 'object', 'style']
46
47 _target_transforms = {'object': """'<img src="' + value + '"></img>'"""}
48
49 __abstract = True
50
51 @classmethod
52 def applies(cls, obj):
53 imgtype = cls.imgtype
54 if hasattr(obj, '_repr_{}_'.format(imgtype)):
55 return True
56 if isinstance(obj, string_types):
57 if isfile(obj) and obj.endswith('.'+imgtype):
58 return True
59 if isurl(obj, [cls.imgtype]):
60 return True
61 if hasattr(obj, 'read'): # Check for file like object
62 return True
63 return False
64
65 def _type_error(self, object):
66 if isinstance(object, string_types):
67 raise ValueError("%s pane cannot parse string that is not a filename "
68 "or URL." % type(self).__name__)
69 super(ImageBase, self)._type_error(object)
70
71 def _img(self):
72 if hasattr(self.object, '_repr_{}_'.format(self.imgtype)):
73 return getattr(self.object, '_repr_' + self.imgtype + '_')()
74 if isinstance(self.object, string_types):
75 if isfile(self.object):
76 with open(self.object, 'rb') as f:
77 return f.read()
78 if hasattr(self.object, 'read'):
79 return self.object.read()
80 if isurl(self.object, [self.imgtype]):
81 import requests
82 r = requests.request(url=self.object, method='GET')
83 return r.content
84
85 def _imgshape(self, data):
86 """Calculate and return image width,height"""
87 raise NotImplementedError
88
89 def _get_properties(self):
90 p = super(ImageBase, self)._get_properties()
91 if self.object is None:
92 return dict(p, text='<img></img>')
93 data = self._img()
94 if not isinstance(data, bytes):
95 data = base64.b64decode(data)
96 width, height = self._imgshape(data)
97 if self.width is not None:
98 if self.height is None:
99 height = int((self.width/width)*height)
100 else:
101 height = self.height
102 width = self.width
103 elif self.height is not None:
104 width = int((self.height/height)*width)
105 height = self.height
106 if not self.embed:
107 src = self.object
108 else:
109 b64 = base64.b64encode(data).decode("utf-8")
110 src = "data:image/"+self.imgtype+";base64,{b64}".format(b64=b64)
111
112 smode = self.sizing_mode
113 if smode in ['fixed', None]:
114 w, h = '%spx' % width, '%spx' % height
115 elif smode == 'stretch_both':
116 w, h = '100%', '100%'
117 elif smode == 'stretch_height':
118 w, h = '%spx' % width, '100%'
119 elif smode == 'stretch_height':
120 w, h = '100%', '%spx' % height
121 elif smode == 'scale_height':
122 w, h = 'auto', '100%'
123 else:
124 w, h = '100%', 'auto'
125
126 html = '<img src="{src}" width="{width}" height="{height}" alt="{alt}"></img>'.format(
127 src=src, width=w, height=h, alt=self.alt_text or '')
128
129 if self.link_url:
130 html = '<a href="{url}" target="_blank">{html}</a>'.format(
131 url=self.link_url, html=html)
132
133 return dict(p, width=width, height=height, text=escape(html))
134
135
136 class PNG(ImageBase):
137
138 imgtype = 'png'
139
140 @classmethod
141 def _imgshape(cls, data):
142 import struct
143 w, h = struct.unpack('>LL', data[16:24])
144 return int(w), int(h)
145
146
147 class GIF(ImageBase):
148
149 imgtype = 'gif'
150
151 @classmethod
152 def _imgshape(cls, data):
153 import struct
154 w, h = struct.unpack("<HH", data[6:10])
155 return int(w), int(h)
156
157
158 class JPG(ImageBase):
159
160 imgtype = 'jpg'
161
162 @classmethod
163 def _imgshape(cls, data):
164 import struct
165 b = BytesIO(data)
166 b.read(2)
167 c = b.read(1)
168 while (c and ord(c) != 0xDA):
169 while (ord(c) != 0xFF): c = b.read(1)
170 while (ord(c) == 0xFF): c = b.read(1)
171 if (ord(c) >= 0xC0 and ord(c) <= 0xC3):
172 b.read(3)
173 h, w = struct.unpack(">HH", b.read(4))
174 break
175 else:
176 b.read(int(struct.unpack(">H", b.read(2))[0])-2)
177 c = b.read(1)
178 return int(w), int(h)
179
180
181 class SVG(ImageBase):
182
183 encode = param.Boolean(default=False, doc="""
184 Whether to enable base64 encoding of the SVG, base64 encoded
185 SVGs do not support links.""")
186
187 imgtype = 'svg'
188
189 _rerender_params = ImageBase._rerender_params + ['encode']
190
191 @classmethod
192 def applies(cls, obj):
193 return (super(SVG, cls).applies(obj) or
194 (isinstance(obj, string_types) and obj.lstrip().startswith('<svg')))
195
196 def _type_error(self, object):
197 if isinstance(object, string_types):
198 raise ValueError("%s pane cannot parse string that is not a filename, "
199 "URL or a SVG XML contents." % type(self).__name__)
200 super(SVG, self)._type_error(object)
201
202 def _img(self):
203 if (isinstance(self.object, string_types) and
204 self.object.lstrip().startswith('<svg')):
205 return self.object
206 return super(SVG, self)._img()
207
208 def _imgshape(self, data):
209 return (self.width, self.height)
210
211 def _get_properties(self):
212 p = super(ImageBase, self)._get_properties()
213 if self.object is None:
214 return dict(p, text='<img></img>')
215 data = self._img()
216 width, height = self._imgshape(data)
217 if not isinstance(data, bytes):
218 data = data.encode('utf-8')
219
220 if self.encode:
221 b64 = base64.b64encode(data).decode("utf-8")
222 src = "data:image/svg+xml;base64,{b64}".format(b64=b64)
223 html = "<img src='{src}' width={width} height={height}></img>".format(
224 src=src, width=width, height=height
225 )
226 else:
227 html = data.decode("utf-8")
228 return dict(p, width=width, height=height, text=escape(html))
229
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/panel/pane/image.py b/panel/pane/image.py
--- a/panel/pane/image.py
+++ b/panel/pane/image.py
@@ -114,7 +114,7 @@
w, h = '%spx' % width, '%spx' % height
elif smode == 'stretch_both':
w, h = '100%', '100%'
- elif smode == 'stretch_height':
+ elif smode == 'stretch_width':
w, h = '%spx' % width, '100%'
elif smode == 'stretch_height':
w, h = '100%', '%spx' % height
| {"golden_diff": "diff --git a/panel/pane/image.py b/panel/pane/image.py\n--- a/panel/pane/image.py\n+++ b/panel/pane/image.py\n@@ -114,7 +114,7 @@\n w, h = '%spx' % width, '%spx' % height\n elif smode == 'stretch_both':\n w, h = '100%', '100%'\n- elif smode == 'stretch_height':\n+ elif smode == 'stretch_width':\n w, h = '%spx' % width, '100%'\n elif smode == 'stretch_height':\n w, h = '100%', '%spx' % height\n", "issue": "Support src urls and alt texts on Images\n#### My Pain\r\n\r\nI would like to use images in Panel via `pn.pane.Markdown` and/ or `pn.pane.PNG`.\r\n\r\nCurrently the Bokeh layout engine does not layout markdown with images well. See https://github.com/holoviz/panel/issues/835.\r\n\r\nSo I need to stick to `pn.pane.PNG` for images.\r\n\r\nBut the `ImageBase` class does not support parameters like the `src` url and `alt` text from the the HTML `img` tag. So I cannot provide image links or alt texts.\r\n\r\n#### Solution\r\n\r\nAdd `src` and `alt` parameters to the `BaseImage` class.\n", "before_files": [{"content": "\"\"\"\nContains Image panes including renderers for PNG, SVG, GIF and JPG\nfile types.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport base64\n\nfrom io import BytesIO\nfrom six import string_types\n\nimport param\n\nfrom .markup import escape, DivPaneBase\nfrom ..util import isfile, isurl\n\n\nclass ImageBase(DivPaneBase):\n \"\"\"\n Encodes an image as base64 and wraps it in a Bokeh Div model.\n This is an abstract base class that needs the image type\n to be specified and specific code for determining the image shape.\n\n The imgtype determines the filetype, extension, and MIME type for\n this image. Each image type (png,jpg,gif) has a base class that\n supports anything with a `_repr_X_` method (where X is `png`,\n `gif`, etc.), a local file with the given file extension, or a\n HTTP(S) url with the given extension. Subclasses of each type can\n provide their own way of obtaining or generating a PNG.\n \"\"\"\n\n alt_text = param.String(default=None, doc=\"\"\"\n alt text to add to the image tag. The alt text is shown when a\n user cannot load or display the image.\"\"\")\n\n link_url = param.String(default=None, doc=\"\"\"\n A link URL to make the image clickable and link to some other\n website.\"\"\")\n\n embed = param.Boolean(default=True, doc=\"\"\"\n Whether to embed the image as base64.\"\"\")\n\n imgtype = 'None'\n\n _rerender_params = ['alt_text', 'link_url', 'embed', 'object', 'style']\n\n _target_transforms = {'object': \"\"\"'<img src=\"' + value + '\"></img>'\"\"\"}\n\n __abstract = True\n\n @classmethod\n def applies(cls, obj):\n imgtype = cls.imgtype\n if hasattr(obj, '_repr_{}_'.format(imgtype)):\n return True\n if isinstance(obj, string_types):\n if isfile(obj) and obj.endswith('.'+imgtype):\n return True\n if isurl(obj, [cls.imgtype]):\n return True\n if hasattr(obj, 'read'): # Check for file like object\n return True\n return False\n\n def _type_error(self, object):\n if isinstance(object, string_types):\n raise ValueError(\"%s pane cannot parse string that is not a filename \"\n \"or URL.\" % type(self).__name__)\n super(ImageBase, self)._type_error(object)\n\n def _img(self):\n if hasattr(self.object, '_repr_{}_'.format(self.imgtype)):\n return getattr(self.object, '_repr_' + self.imgtype + '_')()\n if isinstance(self.object, string_types):\n if isfile(self.object):\n with open(self.object, 'rb') as f:\n return f.read()\n if hasattr(self.object, 'read'):\n return self.object.read()\n if isurl(self.object, [self.imgtype]):\n import requests\n r = requests.request(url=self.object, method='GET')\n return r.content\n\n def _imgshape(self, data):\n \"\"\"Calculate and return image width,height\"\"\"\n raise NotImplementedError\n\n def _get_properties(self):\n p = super(ImageBase, self)._get_properties()\n if self.object is None:\n return dict(p, text='<img></img>')\n data = self._img()\n if not isinstance(data, bytes):\n data = base64.b64decode(data)\n width, height = self._imgshape(data)\n if self.width is not None:\n if self.height is None:\n height = int((self.width/width)*height)\n else:\n height = self.height\n width = self.width\n elif self.height is not None:\n width = int((self.height/height)*width)\n height = self.height\n if not self.embed:\n src = self.object\n else:\n b64 = base64.b64encode(data).decode(\"utf-8\")\n src = \"data:image/\"+self.imgtype+\";base64,{b64}\".format(b64=b64)\n\n smode = self.sizing_mode\n if smode in ['fixed', None]:\n w, h = '%spx' % width, '%spx' % height\n elif smode == 'stretch_both':\n w, h = '100%', '100%'\n elif smode == 'stretch_height':\n w, h = '%spx' % width, '100%'\n elif smode == 'stretch_height':\n w, h = '100%', '%spx' % height\n elif smode == 'scale_height':\n w, h = 'auto', '100%'\n else:\n w, h = '100%', 'auto'\n\n html = '<img src=\"{src}\" width=\"{width}\" height=\"{height}\" alt=\"{alt}\"></img>'.format(\n src=src, width=w, height=h, alt=self.alt_text or '')\n\n if self.link_url:\n html = '<a href=\"{url}\" target=\"_blank\">{html}</a>'.format(\n url=self.link_url, html=html)\n\n return dict(p, width=width, height=height, text=escape(html))\n\n\nclass PNG(ImageBase):\n\n imgtype = 'png'\n\n @classmethod\n def _imgshape(cls, data):\n import struct\n w, h = struct.unpack('>LL', data[16:24])\n return int(w), int(h)\n\n\nclass GIF(ImageBase):\n\n imgtype = 'gif'\n\n @classmethod\n def _imgshape(cls, data):\n import struct\n w, h = struct.unpack(\"<HH\", data[6:10])\n return int(w), int(h)\n\n\nclass JPG(ImageBase):\n\n imgtype = 'jpg'\n\n @classmethod\n def _imgshape(cls, data):\n import struct\n b = BytesIO(data)\n b.read(2)\n c = b.read(1)\n while (c and ord(c) != 0xDA):\n while (ord(c) != 0xFF): c = b.read(1)\n while (ord(c) == 0xFF): c = b.read(1)\n if (ord(c) >= 0xC0 and ord(c) <= 0xC3):\n b.read(3)\n h, w = struct.unpack(\">HH\", b.read(4))\n break\n else:\n b.read(int(struct.unpack(\">H\", b.read(2))[0])-2)\n c = b.read(1)\n return int(w), int(h)\n\n\nclass SVG(ImageBase):\n\n encode = param.Boolean(default=False, doc=\"\"\"\n Whether to enable base64 encoding of the SVG, base64 encoded\n SVGs do not support links.\"\"\")\n\n imgtype = 'svg'\n\n _rerender_params = ImageBase._rerender_params + ['encode']\n\n @classmethod\n def applies(cls, obj):\n return (super(SVG, cls).applies(obj) or\n (isinstance(obj, string_types) and obj.lstrip().startswith('<svg')))\n\n def _type_error(self, object):\n if isinstance(object, string_types):\n raise ValueError(\"%s pane cannot parse string that is not a filename, \"\n \"URL or a SVG XML contents.\" % type(self).__name__)\n super(SVG, self)._type_error(object)\n\n def _img(self):\n if (isinstance(self.object, string_types) and\n self.object.lstrip().startswith('<svg')):\n return self.object\n return super(SVG, self)._img()\n\n def _imgshape(self, data):\n return (self.width, self.height)\n\n def _get_properties(self):\n p = super(ImageBase, self)._get_properties()\n if self.object is None:\n return dict(p, text='<img></img>')\n data = self._img()\n width, height = self._imgshape(data)\n if not isinstance(data, bytes):\n data = data.encode('utf-8')\n\n if self.encode:\n b64 = base64.b64encode(data).decode(\"utf-8\")\n src = \"data:image/svg+xml;base64,{b64}\".format(b64=b64)\n html = \"<img src='{src}' width={width} height={height}></img>\".format(\n src=src, width=width, height=height\n )\n else:\n html = data.decode(\"utf-8\")\n return dict(p, width=width, height=height, text=escape(html))\n", "path": "panel/pane/image.py"}], "after_files": [{"content": "\"\"\"\nContains Image panes including renderers for PNG, SVG, GIF and JPG\nfile types.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport base64\n\nfrom io import BytesIO\nfrom six import string_types\n\nimport param\n\nfrom .markup import escape, DivPaneBase\nfrom ..util import isfile, isurl\n\n\nclass ImageBase(DivPaneBase):\n \"\"\"\n Encodes an image as base64 and wraps it in a Bokeh Div model.\n This is an abstract base class that needs the image type\n to be specified and specific code for determining the image shape.\n\n The imgtype determines the filetype, extension, and MIME type for\n this image. Each image type (png,jpg,gif) has a base class that\n supports anything with a `_repr_X_` method (where X is `png`,\n `gif`, etc.), a local file with the given file extension, or a\n HTTP(S) url with the given extension. Subclasses of each type can\n provide their own way of obtaining or generating a PNG.\n \"\"\"\n\n alt_text = param.String(default=None, doc=\"\"\"\n alt text to add to the image tag. The alt text is shown when a\n user cannot load or display the image.\"\"\")\n\n link_url = param.String(default=None, doc=\"\"\"\n A link URL to make the image clickable and link to some other\n website.\"\"\")\n\n embed = param.Boolean(default=True, doc=\"\"\"\n Whether to embed the image as base64.\"\"\")\n\n imgtype = 'None'\n\n _rerender_params = ['alt_text', 'link_url', 'embed', 'object', 'style']\n\n _target_transforms = {'object': \"\"\"'<img src=\"' + value + '\"></img>'\"\"\"}\n\n __abstract = True\n\n @classmethod\n def applies(cls, obj):\n imgtype = cls.imgtype\n if hasattr(obj, '_repr_{}_'.format(imgtype)):\n return True\n if isinstance(obj, string_types):\n if isfile(obj) and obj.endswith('.'+imgtype):\n return True\n if isurl(obj, [cls.imgtype]):\n return True\n if hasattr(obj, 'read'): # Check for file like object\n return True\n return False\n\n def _type_error(self, object):\n if isinstance(object, string_types):\n raise ValueError(\"%s pane cannot parse string that is not a filename \"\n \"or URL.\" % type(self).__name__)\n super(ImageBase, self)._type_error(object)\n\n def _img(self):\n if hasattr(self.object, '_repr_{}_'.format(self.imgtype)):\n return getattr(self.object, '_repr_' + self.imgtype + '_')()\n if isinstance(self.object, string_types):\n if isfile(self.object):\n with open(self.object, 'rb') as f:\n return f.read()\n if hasattr(self.object, 'read'):\n return self.object.read()\n if isurl(self.object, [self.imgtype]):\n import requests\n r = requests.request(url=self.object, method='GET')\n return r.content\n\n def _imgshape(self, data):\n \"\"\"Calculate and return image width,height\"\"\"\n raise NotImplementedError\n\n def _get_properties(self):\n p = super(ImageBase, self)._get_properties()\n if self.object is None:\n return dict(p, text='<img></img>')\n data = self._img()\n if not isinstance(data, bytes):\n data = base64.b64decode(data)\n width, height = self._imgshape(data)\n if self.width is not None:\n if self.height is None:\n height = int((self.width/width)*height)\n else:\n height = self.height\n width = self.width\n elif self.height is not None:\n width = int((self.height/height)*width)\n height = self.height\n if not self.embed:\n src = self.object\n else:\n b64 = base64.b64encode(data).decode(\"utf-8\")\n src = \"data:image/\"+self.imgtype+\";base64,{b64}\".format(b64=b64)\n\n smode = self.sizing_mode\n if smode in ['fixed', None]:\n w, h = '%spx' % width, '%spx' % height\n elif smode == 'stretch_both':\n w, h = '100%', '100%'\n elif smode == 'stretch_width':\n w, h = '%spx' % width, '100%'\n elif smode == 'stretch_height':\n w, h = '100%', '%spx' % height\n elif smode == 'scale_height':\n w, h = 'auto', '100%'\n else:\n w, h = '100%', 'auto'\n\n html = '<img src=\"{src}\" width=\"{width}\" height=\"{height}\" alt=\"{alt}\"></img>'.format(\n src=src, width=w, height=h, alt=self.alt_text or '')\n\n if self.link_url:\n html = '<a href=\"{url}\" target=\"_blank\">{html}</a>'.format(\n url=self.link_url, html=html)\n\n return dict(p, width=width, height=height, text=escape(html))\n\n\nclass PNG(ImageBase):\n\n imgtype = 'png'\n\n @classmethod\n def _imgshape(cls, data):\n import struct\n w, h = struct.unpack('>LL', data[16:24])\n return int(w), int(h)\n\n\nclass GIF(ImageBase):\n\n imgtype = 'gif'\n\n @classmethod\n def _imgshape(cls, data):\n import struct\n w, h = struct.unpack(\"<HH\", data[6:10])\n return int(w), int(h)\n\n\nclass JPG(ImageBase):\n\n imgtype = 'jpg'\n\n @classmethod\n def _imgshape(cls, data):\n import struct\n b = BytesIO(data)\n b.read(2)\n c = b.read(1)\n while (c and ord(c) != 0xDA):\n while (ord(c) != 0xFF): c = b.read(1)\n while (ord(c) == 0xFF): c = b.read(1)\n if (ord(c) >= 0xC0 and ord(c) <= 0xC3):\n b.read(3)\n h, w = struct.unpack(\">HH\", b.read(4))\n break\n else:\n b.read(int(struct.unpack(\">H\", b.read(2))[0])-2)\n c = b.read(1)\n return int(w), int(h)\n\n\nclass SVG(ImageBase):\n\n encode = param.Boolean(default=False, doc=\"\"\"\n Whether to enable base64 encoding of the SVG, base64 encoded\n SVGs do not support links.\"\"\")\n\n imgtype = 'svg'\n\n _rerender_params = ImageBase._rerender_params + ['encode']\n\n @classmethod\n def applies(cls, obj):\n return (super(SVG, cls).applies(obj) or\n (isinstance(obj, string_types) and obj.lstrip().startswith('<svg')))\n\n def _type_error(self, object):\n if isinstance(object, string_types):\n raise ValueError(\"%s pane cannot parse string that is not a filename, \"\n \"URL or a SVG XML contents.\" % type(self).__name__)\n super(SVG, self)._type_error(object)\n\n def _img(self):\n if (isinstance(self.object, string_types) and\n self.object.lstrip().startswith('<svg')):\n return self.object\n return super(SVG, self)._img()\n\n def _imgshape(self, data):\n return (self.width, self.height)\n\n def _get_properties(self):\n p = super(ImageBase, self)._get_properties()\n if self.object is None:\n return dict(p, text='<img></img>')\n data = self._img()\n width, height = self._imgshape(data)\n if not isinstance(data, bytes):\n data = data.encode('utf-8')\n\n if self.encode:\n b64 = base64.b64encode(data).decode(\"utf-8\")\n src = \"data:image/svg+xml;base64,{b64}\".format(b64=b64)\n html = \"<img src='{src}' width={width} height={height}></img>\".format(\n src=src, width=width, height=height\n )\n else:\n html = data.decode(\"utf-8\")\n return dict(p, width=width, height=height, text=escape(html))\n", "path": "panel/pane/image.py"}]} |
gh_patches_debug_1566 | rasdani/github-patches | git_diff | quantumlib__Cirq-423 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MergeInteractions optimization is creating bad circuits
Not sure how this made it past the tests yet.
```python
def main():
circuit = make_inefficient_circuit()
print("BEFORE:")
print(circuit.to_unitary_matrix().round(3))
print()
cirq.google.MergeInteractions().optimize_circuit(circuit)
print("AFTER:")
print(circuit.to_unitary_matrix().round(3))
def make_inefficient_circuit(t=0.1, v=0.11):
from cirq import H, CNOT, Z
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
return cirq.Circuit.from_ops(
H(b),
CNOT(a, b),
H(b),
CNOT(a, b),
CNOT(b, a),
H(a),
CNOT(a, b),
Z(a)**t, Z(b)**-t,
CNOT(a, b),
H(a), Z(b)**v,
CNOT(a, b),
Z(a)**-v, Z(b)**-v)
```
```
BEFORE:
[[ 1. -0.j 0. -0.j 0. +0.j 0. -0.j ]
[-0. -0.j -0. -0.309j 0.951-0.j -0. +0.j ]
[ 0. -0.j 0.951-0.j -0. -0.309j 0. +0.j ]
[-0. +0.j 0. +0.j 0. -0.j -0.771+0.637j]]
AFTER:
[[-0.354-0.354j -0.354-0.354j -0.227-0.446j 0.227+0.446j]
[-0.452-0.213j -0.452-0.213j 0.364+0.342j -0.364-0.342j]
[-0.354-0.354j 0.354+0.354j -0.446-0.227j -0.446-0.227j]
[-0.452-0.213j 0.452+0.213j 0.496+0.063j 0.496+0.063j]]
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq/google/merge_interactions.py`
Content:
```
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """An optimization pass that combines adjacent single-qubit rotations."""
16
17 from typing import List, Tuple, Optional, cast
18
19 import numpy as np
20
21 from cirq import ops
22 from cirq.circuits import (
23 Circuit,
24 PointOptimizer,
25 PointOptimizationSummary,
26 )
27 from cirq.extension import Extensions
28 from cirq.google.decompositions import two_qubit_matrix_to_native_gates
29
30
31 class MergeInteractions(PointOptimizer):
32 """Combines adjacent constant single-qubit rotations."""
33
34 def __init__(self,
35 tolerance: float = 1e-8,
36 allow_partial_czs: bool = True,
37 extensions: Extensions = None) -> None:
38 self.tolerance = tolerance
39 self.allow_partial_czs = allow_partial_czs
40 self.extensions = extensions or Extensions()
41
42 def optimization_at(self, circuit, index, op):
43 if len(op.qubits) != 2:
44 return None
45
46 interaction_count, indices, matrix = (
47 self._scan_two_qubit_ops_into_matrix(circuit, index, op.qubits))
48 if interaction_count <= 1:
49 return None
50
51 # Find a max-3-cz construction.
52 operations = two_qubit_matrix_to_native_gates(
53 op.qubits[0],
54 op.qubits[1],
55 matrix,
56 self.allow_partial_czs,
57 self.tolerance)
58
59 # TODO: don't replace if there's no benefit in CZ depth.
60
61 return PointOptimizationSummary(
62 clear_span=max(indices) + 1 - index,
63 clear_qubits=op.qubits,
64 new_operations=operations)
65
66 def _op_to_matrix(self,
67 op: ops.Operation,
68 qubits: Tuple[ops.QubitId, ...]
69 ) -> Optional[Tuple[np.ndarray, bool]]:
70 """Determines the effect of an operation on the given qubits.
71
72 The operation must be a 1-qubit operation on one of the given qubits,
73 or a 2-qubit operation on both of the given qubits. Also, the operation
74 must have a known matrix. Otherwise None is returned.
75
76 Args:
77 op: The operation to understand.
78 qubits: The qubits we care about. Order determines matrix tensor
79 order.
80
81 Returns:
82 None, or else a tuple containing a matrix equivalent to the effect
83 of the operation and a boolean indicating if the operation is a
84 2-qubit interaction.
85 """
86 q1, q2 = qubits
87
88 known = self.extensions.try_cast(op.gate, ops.KnownMatrixGate)
89 if known is None:
90 return None
91 m = known.matrix()
92
93 if op.qubits == qubits:
94 return m, True
95 if op.qubits == (q2, q1):
96 return MergeInteractions._flip_kron_order(m), True
97 if op.qubits == (q1,):
98 return np.kron(np.eye(2), m), False
99 if op.qubits == (q2,):
100 return np.kron(m, np.eye(2)), False
101
102 return None
103
104 def _scan_two_qubit_ops_into_matrix(
105 self,
106 circuit: Circuit,
107 index: Optional[int],
108 qubits: Tuple[ops.QubitId, ...]
109 ) -> Tuple[int, List[int], np.ndarray]:
110 """Accumulates operations affecting the given pair of qubits.
111
112 The scan terminates when it hits the end of the circuit, finds an
113 operation without a known matrix, or finds an operation that interacts
114 the given qubits with other qubits.
115
116 Args:
117 circuit: The circuit to scan for operations.
118 index: The index to start scanning forward from.
119 qubits: The pair of qubits we care about.
120
121 Returns:
122 A tuple containing:
123 0. The number of 2-qubit operations that were scanned.
124 1. The moment indices those operations were on.
125 2. A matrix equivalent to the effect of the scanned operations.
126 """
127
128 product = np.eye(4, dtype=np.complex128)
129 interaction_count = 0
130 touched_indices = []
131
132 while index is not None:
133 operations = {circuit.operation_at(q, index) for q in qubits}
134 op_data = [
135 self._op_to_matrix(op, qubits)
136 for op in operations
137 if op
138 ]
139
140 # Stop at any non-constant or non-local interaction.
141 if any(e is None for e in op_data):
142 break
143 present_op_data = cast(List[Tuple[np.ndarray, bool]], op_data)
144
145 for op_mat, interacts in present_op_data:
146 product = np.dot(op_mat, product)
147 if interacts:
148 interaction_count += 1
149
150 touched_indices.append(index)
151 index = circuit.next_moment_operating_on(qubits, index + 1)
152
153 return interaction_count, touched_indices, product
154
155 @staticmethod
156 def _flip_kron_order(mat4x4: np.ndarray) -> np.ndarray:
157 """Given M = sum(kron(a_i, b_i)), returns M' = sum(kron(b_i, a_i))."""
158 result = np.array([[0] * 4] * 4, dtype=np.complex128)
159 order = [0, 2, 1, 3]
160 for i in range(4):
161 for j in range(4):
162 result[order[i], order[j]] = mat4x4[i, j]
163 return result
164
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cirq/google/merge_interactions.py b/cirq/google/merge_interactions.py
--- a/cirq/google/merge_interactions.py
+++ b/cirq/google/merge_interactions.py
@@ -95,9 +95,9 @@
if op.qubits == (q2, q1):
return MergeInteractions._flip_kron_order(m), True
if op.qubits == (q1,):
- return np.kron(np.eye(2), m), False
- if op.qubits == (q2,):
return np.kron(m, np.eye(2)), False
+ if op.qubits == (q2,):
+ return np.kron(np.eye(2), m), False
return None
| {"golden_diff": "diff --git a/cirq/google/merge_interactions.py b/cirq/google/merge_interactions.py\n--- a/cirq/google/merge_interactions.py\n+++ b/cirq/google/merge_interactions.py\n@@ -95,9 +95,9 @@\n if op.qubits == (q2, q1):\n return MergeInteractions._flip_kron_order(m), True\n if op.qubits == (q1,):\n- return np.kron(np.eye(2), m), False\n- if op.qubits == (q2,):\n return np.kron(m, np.eye(2)), False\n+ if op.qubits == (q2,):\n+ return np.kron(np.eye(2), m), False\n \n return None\n", "issue": "MergeInteractions optimization is creating bad circuits\nNot sure how this made it past the tests yet.\r\n\r\n```python\r\ndef main():\r\n circuit = make_inefficient_circuit()\r\n print(\"BEFORE:\")\r\n print(circuit.to_unitary_matrix().round(3))\r\n print()\r\n cirq.google.MergeInteractions().optimize_circuit(circuit)\r\n print(\"AFTER:\")\r\n print(circuit.to_unitary_matrix().round(3))\r\n\r\ndef make_inefficient_circuit(t=0.1, v=0.11):\r\n from cirq import H, CNOT, Z\r\n a = cirq.NamedQubit('a')\r\n b = cirq.NamedQubit('b')\r\n\r\n return cirq.Circuit.from_ops(\r\n H(b),\r\n CNOT(a, b),\r\n H(b),\r\n CNOT(a, b),\r\n CNOT(b, a),\r\n H(a),\r\n CNOT(a, b),\r\n Z(a)**t, Z(b)**-t,\r\n CNOT(a, b),\r\n H(a), Z(b)**v,\r\n CNOT(a, b),\r\n Z(a)**-v, Z(b)**-v)\r\n```\r\n\r\n```\r\nBEFORE:\r\n[[ 1. -0.j 0. -0.j 0. +0.j 0. -0.j ]\r\n [-0. -0.j -0. -0.309j 0.951-0.j -0. +0.j ]\r\n [ 0. -0.j 0.951-0.j -0. -0.309j 0. +0.j ]\r\n [-0. +0.j 0. +0.j 0. -0.j -0.771+0.637j]]\r\n\r\nAFTER:\r\n[[-0.354-0.354j -0.354-0.354j -0.227-0.446j 0.227+0.446j]\r\n [-0.452-0.213j -0.452-0.213j 0.364+0.342j -0.364-0.342j]\r\n [-0.354-0.354j 0.354+0.354j -0.446-0.227j -0.446-0.227j]\r\n [-0.452-0.213j 0.452+0.213j 0.496+0.063j 0.496+0.063j]]\r\n```\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"An optimization pass that combines adjacent single-qubit rotations.\"\"\"\n\nfrom typing import List, Tuple, Optional, cast\n\nimport numpy as np\n\nfrom cirq import ops\nfrom cirq.circuits import (\n Circuit,\n PointOptimizer,\n PointOptimizationSummary,\n)\nfrom cirq.extension import Extensions\nfrom cirq.google.decompositions import two_qubit_matrix_to_native_gates\n\n\nclass MergeInteractions(PointOptimizer):\n \"\"\"Combines adjacent constant single-qubit rotations.\"\"\"\n\n def __init__(self,\n tolerance: float = 1e-8,\n allow_partial_czs: bool = True,\n extensions: Extensions = None) -> None:\n self.tolerance = tolerance\n self.allow_partial_czs = allow_partial_czs\n self.extensions = extensions or Extensions()\n\n def optimization_at(self, circuit, index, op):\n if len(op.qubits) != 2:\n return None\n\n interaction_count, indices, matrix = (\n self._scan_two_qubit_ops_into_matrix(circuit, index, op.qubits))\n if interaction_count <= 1:\n return None\n\n # Find a max-3-cz construction.\n operations = two_qubit_matrix_to_native_gates(\n op.qubits[0],\n op.qubits[1],\n matrix,\n self.allow_partial_czs,\n self.tolerance)\n\n # TODO: don't replace if there's no benefit in CZ depth.\n\n return PointOptimizationSummary(\n clear_span=max(indices) + 1 - index,\n clear_qubits=op.qubits,\n new_operations=operations)\n\n def _op_to_matrix(self,\n op: ops.Operation,\n qubits: Tuple[ops.QubitId, ...]\n ) -> Optional[Tuple[np.ndarray, bool]]:\n \"\"\"Determines the effect of an operation on the given qubits.\n\n The operation must be a 1-qubit operation on one of the given qubits,\n or a 2-qubit operation on both of the given qubits. Also, the operation\n must have a known matrix. Otherwise None is returned.\n\n Args:\n op: The operation to understand.\n qubits: The qubits we care about. Order determines matrix tensor\n order.\n\n Returns:\n None, or else a tuple containing a matrix equivalent to the effect\n of the operation and a boolean indicating if the operation is a\n 2-qubit interaction.\n \"\"\"\n q1, q2 = qubits\n\n known = self.extensions.try_cast(op.gate, ops.KnownMatrixGate)\n if known is None:\n return None\n m = known.matrix()\n\n if op.qubits == qubits:\n return m, True\n if op.qubits == (q2, q1):\n return MergeInteractions._flip_kron_order(m), True\n if op.qubits == (q1,):\n return np.kron(np.eye(2), m), False\n if op.qubits == (q2,):\n return np.kron(m, np.eye(2)), False\n\n return None\n\n def _scan_two_qubit_ops_into_matrix(\n self,\n circuit: Circuit,\n index: Optional[int],\n qubits: Tuple[ops.QubitId, ...]\n ) -> Tuple[int, List[int], np.ndarray]:\n \"\"\"Accumulates operations affecting the given pair of qubits.\n\n The scan terminates when it hits the end of the circuit, finds an\n operation without a known matrix, or finds an operation that interacts\n the given qubits with other qubits.\n\n Args:\n circuit: The circuit to scan for operations.\n index: The index to start scanning forward from.\n qubits: The pair of qubits we care about.\n\n Returns:\n A tuple containing:\n 0. The number of 2-qubit operations that were scanned.\n 1. The moment indices those operations were on.\n 2. A matrix equivalent to the effect of the scanned operations.\n \"\"\"\n\n product = np.eye(4, dtype=np.complex128)\n interaction_count = 0\n touched_indices = []\n\n while index is not None:\n operations = {circuit.operation_at(q, index) for q in qubits}\n op_data = [\n self._op_to_matrix(op, qubits)\n for op in operations\n if op\n ]\n\n # Stop at any non-constant or non-local interaction.\n if any(e is None for e in op_data):\n break\n present_op_data = cast(List[Tuple[np.ndarray, bool]], op_data)\n\n for op_mat, interacts in present_op_data:\n product = np.dot(op_mat, product)\n if interacts:\n interaction_count += 1\n\n touched_indices.append(index)\n index = circuit.next_moment_operating_on(qubits, index + 1)\n\n return interaction_count, touched_indices, product\n\n @staticmethod\n def _flip_kron_order(mat4x4: np.ndarray) -> np.ndarray:\n \"\"\"Given M = sum(kron(a_i, b_i)), returns M' = sum(kron(b_i, a_i)).\"\"\"\n result = np.array([[0] * 4] * 4, dtype=np.complex128)\n order = [0, 2, 1, 3]\n for i in range(4):\n for j in range(4):\n result[order[i], order[j]] = mat4x4[i, j]\n return result\n", "path": "cirq/google/merge_interactions.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"An optimization pass that combines adjacent single-qubit rotations.\"\"\"\n\nfrom typing import List, Tuple, Optional, cast\n\nimport numpy as np\n\nfrom cirq import ops\nfrom cirq.circuits import (\n Circuit,\n PointOptimizer,\n PointOptimizationSummary,\n)\nfrom cirq.extension import Extensions\nfrom cirq.google.decompositions import two_qubit_matrix_to_native_gates\n\n\nclass MergeInteractions(PointOptimizer):\n \"\"\"Combines adjacent constant single-qubit rotations.\"\"\"\n\n def __init__(self,\n tolerance: float = 1e-8,\n allow_partial_czs: bool = True,\n extensions: Extensions = None) -> None:\n self.tolerance = tolerance\n self.allow_partial_czs = allow_partial_czs\n self.extensions = extensions or Extensions()\n\n def optimization_at(self, circuit, index, op):\n if len(op.qubits) != 2:\n return None\n\n interaction_count, indices, matrix = (\n self._scan_two_qubit_ops_into_matrix(circuit, index, op.qubits))\n if interaction_count <= 1:\n return None\n\n # Find a max-3-cz construction.\n operations = two_qubit_matrix_to_native_gates(\n op.qubits[0],\n op.qubits[1],\n matrix,\n self.allow_partial_czs,\n self.tolerance)\n\n # TODO: don't replace if there's no benefit in CZ depth.\n\n return PointOptimizationSummary(\n clear_span=max(indices) + 1 - index,\n clear_qubits=op.qubits,\n new_operations=operations)\n\n def _op_to_matrix(self,\n op: ops.Operation,\n qubits: Tuple[ops.QubitId, ...]\n ) -> Optional[Tuple[np.ndarray, bool]]:\n \"\"\"Determines the effect of an operation on the given qubits.\n\n The operation must be a 1-qubit operation on one of the given qubits,\n or a 2-qubit operation on both of the given qubits. Also, the operation\n must have a known matrix. Otherwise None is returned.\n\n Args:\n op: The operation to understand.\n qubits: The qubits we care about. Order determines matrix tensor\n order.\n\n Returns:\n None, or else a tuple containing a matrix equivalent to the effect\n of the operation and a boolean indicating if the operation is a\n 2-qubit interaction.\n \"\"\"\n q1, q2 = qubits\n\n known = self.extensions.try_cast(op.gate, ops.KnownMatrixGate)\n if known is None:\n return None\n m = known.matrix()\n\n if op.qubits == qubits:\n return m, True\n if op.qubits == (q2, q1):\n return MergeInteractions._flip_kron_order(m), True\n if op.qubits == (q1,):\n return np.kron(m, np.eye(2)), False\n if op.qubits == (q2,):\n return np.kron(np.eye(2), m), False\n\n return None\n\n def _scan_two_qubit_ops_into_matrix(\n self,\n circuit: Circuit,\n index: Optional[int],\n qubits: Tuple[ops.QubitId, ...]\n ) -> Tuple[int, List[int], np.ndarray]:\n \"\"\"Accumulates operations affecting the given pair of qubits.\n\n The scan terminates when it hits the end of the circuit, finds an\n operation without a known matrix, or finds an operation that interacts\n the given qubits with other qubits.\n\n Args:\n circuit: The circuit to scan for operations.\n index: The index to start scanning forward from.\n qubits: The pair of qubits we care about.\n\n Returns:\n A tuple containing:\n 0. The number of 2-qubit operations that were scanned.\n 1. The moment indices those operations were on.\n 2. A matrix equivalent to the effect of the scanned operations.\n \"\"\"\n\n product = np.eye(4, dtype=np.complex128)\n interaction_count = 0\n touched_indices = []\n\n while index is not None:\n operations = {circuit.operation_at(q, index) for q in qubits}\n op_data = [\n self._op_to_matrix(op, qubits)\n for op in operations\n if op\n ]\n\n # Stop at any non-constant or non-local interaction.\n if any(e is None for e in op_data):\n break\n present_op_data = cast(List[Tuple[np.ndarray, bool]], op_data)\n\n for op_mat, interacts in present_op_data:\n product = np.dot(op_mat, product)\n if interacts:\n interaction_count += 1\n\n touched_indices.append(index)\n index = circuit.next_moment_operating_on(qubits, index + 1)\n\n return interaction_count, touched_indices, product\n\n @staticmethod\n def _flip_kron_order(mat4x4: np.ndarray) -> np.ndarray:\n \"\"\"Given M = sum(kron(a_i, b_i)), returns M' = sum(kron(b_i, a_i)).\"\"\"\n result = np.array([[0] * 4] * 4, dtype=np.complex128)\n order = [0, 2, 1, 3]\n for i in range(4):\n for j in range(4):\n result[order[i], order[j]] = mat4x4[i, j]\n return result\n", "path": "cirq/google/merge_interactions.py"}]} |
gh_patches_debug_1567 | rasdani/github-patches | git_diff | kivy__kivy-4584 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove and Re-add of Highest Widget in a Layout leaves the Widget Invisible
I have noticed that adding widgets to a Layout Manager does not seem to work correctly. I have tested with BoxLayout, GridLayout, and FloatLayout. I have tested with 3 widgets, and with 4 widgets. I am using Kivy 1.9.0 with Python 2.7 on Fedora 22.
I start by adding widgets to a Layout- it looks fine. Then I remove a widget- the "first" one, which is list index 3 in the list of children in my FloatLayout example (index 2 with the Grid example). See the attachments.
[app2-float.txt](https://github.com/kivy/kivy/files/388380/app2-float.txt)
[app2-grid.txt](https://github.com/kivy/kivy/files/388383/app2-grid.txt)
When I re-add the widget in a BoxLayout or GridLayout, space is used in but the widget is not displayed. Testing in my "real" app (code not given here) shows that the widget is indeed in place (I can perform actions on it) but it's just not displayed. This problem only seems to happen with the leftmost position.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kivy/uix/widget.py`
Content:
```
1 '''
2 Widget class
3 ============
4
5 The :class:`Widget` class is the base class required for creating Widgets.
6 This widget class was designed with a couple of principles in mind:
7
8 * *Event Driven*
9
10 Widget interaction is built on top of events that occur. If a property
11 changes, the widget can respond to the change in the 'on_<propname>'
12 callback. If nothing changes, nothing will be done. That's the main
13 goal of the :class:`~kivy.properties.Property` class.
14
15 * *Separation Of Concerns (the widget and its graphical representation)*
16
17 Widgets don't have a `draw()` method. This is done on purpose: The idea
18 is to allow you to create your own graphical representation outside the
19 widget class.
20 Obviously you can still use all the available properties to do that, so
21 that your representation properly reflects the widget's current state.
22 Every widget has its own :class:`~kivy.graphics.Canvas` that you
23 can use to draw. This separation allows Kivy to run your
24 application in a very efficient manner.
25
26 * *Bounding Box / Collision*
27
28 Often you want to know if a certain point is within the bounds of your
29 widget. An example would be a button widget where you only want to
30 trigger an action when the button itself is actually touched.
31 For this, you can use the :meth:`~Widget.collide_point` method, which
32 will return True if the point you pass to it is inside the axis-aligned
33 bounding box defined by the widget's position and size.
34 If a simple AABB is not sufficient, you can override the method to
35 perform the collision checks with more complex shapes, e.g. a polygon.
36 You can also check if a widget collides with another widget with
37 :meth:`~Widget.collide_widget`.
38
39
40 We also have some default values and behaviors that you should be aware of:
41
42 * A :class:`Widget` is not a :class:`~kivy.uix.layout.Layout`: it will not
43 change the position or the size of its children. If you want control over
44 positioning or sizing, use a :class:`~kivy.uix.layout.Layout`.
45
46 * The default size of a widget is (100, 100). This is only changed if the
47 parent is a :class:`~kivy.uix.layout.Layout`.
48 For example, if you add a :class:`Label` inside a
49 :class:`Button`, the label will not inherit the button's size or position
50 because the button is not a *Layout*: it's just another *Widget*.
51
52 * The default size_hint is (1, 1). If the parent is a :class:`Layout`, then the
53 widget size will be the parent layout's size.
54
55 * :meth:`~Widget.on_touch_down`, :meth:`~Widget.on_touch_move`,
56 :meth:`~Widget.on_touch_up` don't do any sort of collisions. If you want to
57 know if the touch is inside your widget, use :meth:`~Widget.collide_point`.
58
59 Using Properties
60 ----------------
61
62 When you read the documentation, all properties are described in the format::
63
64 <name> is a <property class> and defaults to <default value>.
65
66 e.g.
67
68 :attr:`~kivy.uix.label.Label.text` is a
69 :class:`~kivy.properties.StringProperty` and defaults to ''.
70
71 If you want to be notified when the pos attribute changes, i.e. when the
72 widget moves, you can bind your own callback function like this::
73
74 def callback_pos(instance, value):
75 print('The widget', instance, 'moved to', value)
76
77 wid = Widget()
78 wid.bind(pos=callback_pos)
79
80 Read more about :doc:`/api-kivy.properties`.
81
82 Basic drawing
83 -------------
84
85 Widgets support a range of drawing instructions that you can use to customize
86 the look of your widgets and layouts. For example, to draw a background image
87 for your widget, you can do the following:
88
89 .. code-block:: python
90
91 def redraw(self, args):
92 self.bg_rect.size = self.size
93 self.bg_rect.pos = self.pos
94
95 widget = Widget()
96 with widget.canvas:
97 widget.bg_rect = Rectangle(source="cover.jpg", pos=self.pos, \
98 size=self.size)
99 widget.bind(pos=redraw, size=redraw)
100
101 To draw a background in kv:
102
103 .. code-block:: kv
104
105 Widget:
106 canvas:
107 Rectangle:
108 source: "cover.jpg"
109 size: self.size
110 pos: self.pos
111
112 These examples only scratch the surface. Please see the :mod:`kivy.graphics`
113 documentation for more information.
114
115 .. _widget-event-bubbling:
116
117 Widget touch event bubbling
118 ---------------------------
119
120 When you catch touch events between multiple widgets, you often
121 need to be aware of the order in which these events are propagated. In Kivy,
122 events bubble up from the first child upwards through the other children.
123 If a widget has children, the event is passed through its children before
124 being passed on to the widget after it.
125
126 As the :meth:`~kivy.uix.widget.Widget.on_touch_up` method inserts widgets at
127 index 0 by default, this means the event goes from the most recently added
128 widget back to the first one added. Consider the following:
129
130 .. code-block:: python
131
132 box = BoxLayout()
133 box.add_widget(Label(text="a"))
134 box.add_widget(Label(text="b"))
135 box.add_widget(Label(text="c"))
136
137 The label with text "c" gets the event first, "b" second and "a" last. You can
138 reverse this order by manually specifying the index:
139
140 .. code-block:: python
141
142 box = BoxLayout()
143 box.add_widget(Label(text="a"), index=0)
144 box.add_widget(Label(text="b"), index=1)
145 box.add_widget(Label(text="c"), index=2)
146
147 Now the order would be "a", "b" then "c". One thing to keep in mind when using
148 kv is that declaring a widget uses the
149 :meth:`~kivy.uix.widget.Widget.add_widget` method for insertion. Hence, using
150
151 .. code-block:: kv
152
153 BoxLayout:
154 MyLabel:
155 text: "a"
156 MyLabel:
157 text: "b"
158 MyLabel:
159 text: "c"
160
161 would result in the event order "c", "b" then "a" as "c" was actually the last
162 added widget. It thus has index 0, "b" index 1 and "a" index 2. Effectively,
163 the child order is the reverse of its listed order.
164
165 This ordering is the same for the :meth:`~kivy.uix.widget.Widget.on_touch_move`
166 and :meth:`~kivy.uix.widget.Widget.on_touch_up` events.
167
168 In order to stop this event bubbling, a method can return `True`. This tells
169 Kivy the event has been handled and the event propagation stops. For example:
170
171 .. code-block:: python
172
173 class MyWidget(Widget):
174 def on_touch_down(self, touch):
175 If <some_condition>:
176 # Do stuff here and kill the event
177 return True
178 else:
179 return super(MyWidget, self).on_touch_down(touch)
180
181 This approach gives you good control over exactly how events are dispatched
182 and managed. Sometimes, however, you may wish to let the event be completely
183 propagated before taking action. You can use the
184 :class:`~kivy.clock.Clock` to help you here:
185
186 .. code-block:: python
187
188 class MyWidget(Label):
189 def on_touch_down(self, touch, after=False):
190 if after:
191 print "Fired after the event has been dispatched!"
192 else:
193 Clock.schedule_once(lambda dt: self.on_touch_down(touch, True))
194 return super(MyWidget, self).on_touch_down(touch)
195
196 Usage of :attr:`Widget.center`, :attr:`Widget.right`, and :attr:`Widget.top`
197 ----------------------------------------------------------------------------
198
199 A common mistake when using one of the computed properties such as
200 :attr:`Widget.right` is to use it to make a widget follow its parent with a
201 KV rule such as `right: self.parent.right`. Consider, for example:
202
203 .. code-block:: kv
204
205 FloatLayout:
206 id: layout
207 width: 100
208 Widget:
209 id: wid
210 right: layout.right
211
212 The (mistaken) expectation is that this rule ensures that wid's right will
213 always be whatever layout's right is - that is wid.right and layout.right will
214 always be identical. In actual fact, this rule only says that "whenever
215 layout's `right` changes, wid's right will be set to that value". The
216 difference being that as long as `layout.right` doesn't change, `wid.right`
217 could be anything, even a value that will make them different.
218
219 Specifically, for the KV code above, consider the following example::
220
221 >>> print(layout.right, wid.right)
222 (100, 100)
223 >>> wid.x = 200
224 >>> print(layout.right, wid.right)
225 (100, 300)
226
227 As can be seen, initially they are in sync, however, when we change `wid.x`
228 they go out of sync because `layout.right` is not changed and the rule is not
229 triggered.
230
231 The proper way to make the widget follow its parent's right is to use
232 :attr:`Widget.pos_hint`. If instead of `right: layout.right` we did
233 `pos_hint: {'right': 1}`, then the widgets right will always be set to be
234 at the parent's right at each layout update.
235 '''
236
237 __all__ = ('Widget', 'WidgetException')
238
239 from kivy.event import EventDispatcher
240 from kivy.factory import Factory
241 from kivy.properties import (
242 NumericProperty, StringProperty, AliasProperty, ReferenceListProperty,
243 ObjectProperty, ListProperty, DictProperty, BooleanProperty)
244 from kivy.graphics import (
245 Canvas, Translate, Fbo, ClearColor, ClearBuffers, Scale)
246 from kivy.graphics.transformation import Matrix
247 from kivy.base import EventLoop
248 from kivy.lang import Builder
249 from kivy.context import get_current_context
250 from kivy.weakproxy import WeakProxy
251 from functools import partial
252 from itertools import islice
253
254
255 # References to all the widget destructors (partial method with widget uid as
256 # key).
257 _widget_destructors = {}
258
259
260 def _widget_destructor(uid, r):
261 # Internal method called when a widget is deleted from memory. the only
262 # thing we remember about it is its uid. Clear all the associated callbacks
263 # created in kv language.
264 del _widget_destructors[uid]
265 Builder.unbind_widget(uid)
266
267
268 class WidgetException(Exception):
269 '''Fired when the widget gets an exception.
270 '''
271 pass
272
273
274 class WidgetMetaclass(type):
275 '''Metaclass to automatically register new widgets for the
276 :class:`~kivy.factory.Factory`.
277
278 .. warning::
279 This metaclass is used by the Widget. Do not use it directly!
280 '''
281 def __init__(mcs, name, bases, attrs):
282 super(WidgetMetaclass, mcs).__init__(name, bases, attrs)
283 Factory.register(name, cls=mcs)
284
285
286 #: Base class used for Widget, that inherits from :class:`EventDispatcher`
287 WidgetBase = WidgetMetaclass('WidgetBase', (EventDispatcher, ), {})
288
289
290 class Widget(WidgetBase):
291 '''Widget class. See module documentation for more information.
292
293 :Events:
294 `on_touch_down`:
295 Fired when a new touch event occurs
296 `on_touch_move`:
297 Fired when an existing touch moves
298 `on_touch_up`:
299 Fired when an existing touch disappears
300
301 .. warning::
302 Adding a `__del__` method to a class derived from Widget with Python
303 prior to 3.4 will disable automatic garbage collection for instances
304 of that class. This is because the Widget class creates reference
305 cycles, thereby `preventing garbage collection
306 <https://docs.python.org/2/library/gc.html#gc.garbage>`_.
307
308 .. versionchanged:: 1.0.9
309 Everything related to event properties has been moved to the
310 :class:`~kivy.event.EventDispatcher`. Event properties can now be used
311 when contructing a simple class without subclassing :class:`Widget`.
312
313 .. versionchanged:: 1.5.0
314 The constructor now accepts on_* arguments to automatically bind
315 callbacks to properties or events, as in the Kv language.
316 '''
317
318 __metaclass__ = WidgetMetaclass
319 __events__ = ('on_touch_down', 'on_touch_move', 'on_touch_up')
320 _proxy_ref = None
321
322 def __init__(self, **kwargs):
323 # Before doing anything, ensure the windows exist.
324 EventLoop.ensure_window()
325
326 # Assign the default context of the widget creation.
327 if not hasattr(self, '_context'):
328 self._context = get_current_context()
329
330 no_builder = '__no_builder' in kwargs
331 if no_builder:
332 del kwargs['__no_builder']
333 on_args = {k: v for k, v in kwargs.items() if k[:3] == 'on_'}
334 for key in on_args:
335 del kwargs[key]
336
337 super(Widget, self).__init__(**kwargs)
338
339 # Create the default canvas if it does not exist.
340 if self.canvas is None:
341 self.canvas = Canvas(opacity=self.opacity)
342
343 # Apply all the styles.
344 if not no_builder:
345 Builder.apply(self, ignored_consts=self._kwargs_applied_init)
346
347 # Bind all the events.
348 if on_args:
349 self.bind(**on_args)
350
351 @property
352 def proxy_ref(self):
353 '''Return a proxy reference to the widget, i.e. without creating a
354 reference to the widget. See `weakref.proxy
355 <http://docs.python.org/2/library/weakref.html?highlight\
356 =proxy#weakref.proxy>`_ for more information.
357
358 .. versionadded:: 1.7.2
359 '''
360 _proxy_ref = self._proxy_ref
361 if _proxy_ref is not None:
362 return _proxy_ref
363
364 f = partial(_widget_destructor, self.uid)
365 self._proxy_ref = _proxy_ref = WeakProxy(self, f)
366 # Only f should be enough here, but it appears that is a very
367 # specific case, the proxy destructor is not called if both f and
368 # _proxy_ref are not together in a tuple.
369 _widget_destructors[self.uid] = (f, _proxy_ref)
370 return _proxy_ref
371
372 def __hash__(self):
373 return id(self)
374
375 @property
376 def __self__(self):
377 return self
378
379 #
380 # Collision
381 #
382 def collide_point(self, x, y):
383 '''
384 Check if a point (x, y) is inside the widget's axis aligned bounding
385 box.
386
387 :Parameters:
388 `x`: numeric
389 x position of the point (in window coordinates)
390 `y`: numeric
391 y position of the point (in window coordinates)
392
393 :Returns:
394 A bool. True if the point is inside the bounding box, False
395 otherwise.
396
397 .. code-block:: python
398
399 >>> Widget(pos=(10, 10), size=(50, 50)).collide_point(40, 40)
400 True
401 '''
402 return self.x <= x <= self.right and self.y <= y <= self.top
403
404 def collide_widget(self, wid):
405 '''
406 Check if another widget collides with this widget. This function
407 performs an axis-aligned bounding box intersection test by default.
408
409 :Parameters:
410 `wid`: :class:`Widget` class
411 Widget to collide with.
412
413 :Returns:
414 bool. True if the other widget collides with this widget, False
415 otherwise.
416
417 .. code-block:: python
418
419 >>> wid = Widget(size=(50, 50))
420 >>> wid2 = Widget(size=(50, 50), pos=(25, 25))
421 >>> wid.collide_widget(wid2)
422 True
423 >>> wid2.pos = (55, 55)
424 >>> wid.collide_widget(wid2)
425 False
426 '''
427 if self.right < wid.x:
428 return False
429 if self.x > wid.right:
430 return False
431 if self.top < wid.y:
432 return False
433 if self.y > wid.top:
434 return False
435 return True
436
437 #
438 # Default event handlers
439 #
440 def on_touch_down(self, touch):
441 '''Receive a touch down event.
442
443 :Parameters:
444 `touch`: :class:`~kivy.input.motionevent.MotionEvent` class
445 Touch received. The touch is in parent coordinates. See
446 :mod:`~kivy.uix.relativelayout` for a discussion on
447 coordinate systems.
448
449 :Returns:
450 bool. If True, the dispatching of the touch event will stop.
451 If False, the event will continue to be dispatched to the rest
452 of the widget tree.
453 '''
454 if self.disabled and self.collide_point(*touch.pos):
455 return True
456 for child in self.children[:]:
457 if child.dispatch('on_touch_down', touch):
458 return True
459
460 def on_touch_move(self, touch):
461 '''Receive a touch move event. The touch is in parent coordinates.
462
463 See :meth:`on_touch_down` for more information.
464 '''
465 if self.disabled:
466 return
467 for child in self.children[:]:
468 if child.dispatch('on_touch_move', touch):
469 return True
470
471 def on_touch_up(self, touch):
472 '''Receive a touch up event. The touch is in parent coordinates.
473
474 See :meth:`on_touch_down` for more information.
475 '''
476 if self.disabled:
477 return
478 for child in self.children[:]:
479 if child.dispatch('on_touch_up', touch):
480 return True
481
482 def on_disabled(self, instance, value):
483 for child in self.children:
484 child.disabled = value
485
486 #
487 # Tree management
488 #
489 def add_widget(self, widget, index=0, canvas=None):
490 '''Add a new widget as a child of this widget.
491
492 :Parameters:
493 `widget`: :class:`Widget`
494 Widget to add to our list of children.
495 `index`: int, defaults to 0
496 Index to insert the widget in the list. Notice that the default
497 of 0 means the widget is inserted at the beginning of the list
498 and will thus be drawn on top of other sibling widgets. For a
499 full discussion of the index and widget hierarchy, please see
500 the :doc:`Widgets Programming Guide <guide/widgets>`.
501
502 .. versionadded:: 1.0.5
503 `canvas`: str, defaults to None
504 Canvas to add widget's canvas to. Can be 'before', 'after' or
505 None for the default canvas.
506
507 .. versionadded:: 1.9.0
508
509 .. code-block:: python
510
511 >>> from kivy.uix.button import Button
512 >>> from kivy.uix.slider import Slider
513 >>> root = Widget()
514 >>> root.add_widget(Button())
515 >>> slider = Slider()
516 >>> root.add_widget(slider)
517
518 '''
519 if not isinstance(widget, Widget):
520 raise WidgetException(
521 'add_widget() can be used only with instances'
522 ' of the Widget class.')
523
524 widget = widget.__self__
525 if widget is self:
526 raise WidgetException(
527 'Widget instances cannot be added to themselves.')
528 parent = widget.parent
529 # Check if the widget is already a child of another widget.
530 if parent:
531 raise WidgetException('Cannot add %r, it already has a parent %r'
532 % (widget, parent))
533 widget.parent = parent = self
534 # Child will be disabled if added to a disabled parent.
535 if parent.disabled:
536 widget.disabled = True
537
538 canvas = self.canvas.before if canvas == 'before' else \
539 self.canvas.after if canvas == 'after' else self.canvas
540
541 if index == 0 or len(self.children) == 0:
542 self.children.insert(0, widget)
543 canvas.add(widget.canvas)
544 else:
545 canvas = self.canvas
546 children = self.children
547 if index >= len(children):
548 index = len(children)
549 next_index = 0
550 else:
551 next_child = children[index]
552 next_index = canvas.indexof(next_child.canvas)
553 if next_index == -1:
554 next_index = canvas.length()
555 else:
556 next_index += 1
557
558 children.insert(index, widget)
559 # We never want to insert widget _before_ canvas.before.
560 if next_index == 0 and canvas.has_before:
561 next_index = 1
562 canvas.insert(next_index, widget.canvas)
563
564 def remove_widget(self, widget):
565 '''Remove a widget from the children of this widget.
566
567 :Parameters:
568 `widget`: :class:`Widget`
569 Widget to remove from our children list.
570
571 .. code-block:: python
572
573 >>> from kivy.uix.button import Button
574 >>> root = Widget()
575 >>> button = Button()
576 >>> root.add_widget(button)
577 >>> root.remove_widget(button)
578 '''
579 if widget not in self.children:
580 return
581 self.children.remove(widget)
582 if widget.canvas in self.canvas.children:
583 self.canvas.remove(widget.canvas)
584 elif widget.canvas in self.canvas.after.children:
585 self.canvas.after.remove(widget.canvas)
586 elif widget.canvas in self.canvas.before.children:
587 self.canvas.before.remove(widget.canvas)
588 widget.parent = None
589
590 def clear_widgets(self, children=None):
591 '''
592 Remove all (or the specified) :attr:`~Widget.children` of this widget.
593 If the 'children' argument is specified, it should be a list (or
594 filtered list) of children of the current widget.
595
596 .. versionchanged:: 1.8.0
597 The `children` argument can be used to specify the children you
598 want to remove.
599 '''
600
601 if not children:
602 children = self.children
603 remove_widget = self.remove_widget
604 for child in children[:]:
605 remove_widget(child)
606
607 def export_to_png(self, filename, *args):
608 '''Saves an image of the widget and its children in png format at the
609 specified filename. Works by removing the widget canvas from its
610 parent, rendering to an :class:`~kivy.graphics.fbo.Fbo`, and calling
611 :meth:`~kivy.graphics.texture.Texture.save`.
612
613 .. note::
614
615 The image includes only this widget and its children. If you want
616 to include widgets elsewhere in the tree, you must call
617 :meth:`~Widget.export_to_png` from their common parent, or use
618 :meth:`~kivy.core.window.WindowBase.screenshot` to capture the whole
619 window.
620
621 .. note::
622
623 The image will be saved in png format, you should include the
624 extension in your filename.
625
626 .. versionadded:: 1.9.0
627 '''
628
629 if self.parent is not None:
630 canvas_parent_index = self.parent.canvas.indexof(self.canvas)
631 if canvas_parent_index > -1:
632 self.parent.canvas.remove(self.canvas)
633
634 fbo = Fbo(size=self.size, with_stencilbuffer=True)
635
636 with fbo:
637 ClearColor(0, 0, 0, 1)
638 ClearBuffers()
639 Scale(1, -1, 1)
640 Translate(-self.x, -self.y - self.height, 0)
641
642 fbo.add(self.canvas)
643 fbo.draw()
644 fbo.texture.save(filename, flipped=False)
645 fbo.remove(self.canvas)
646
647 if self.parent is not None and canvas_parent_index > -1:
648 self.parent.canvas.insert(canvas_parent_index, self.canvas)
649
650 return True
651
652 def get_root_window(self):
653 '''Return the root window.
654
655 :Returns:
656 Instance of the root window. Can be a
657 :class:`~kivy.core.window.WindowBase` or
658 :class:`Widget`.
659 '''
660 if self.parent:
661 return self.parent.get_root_window()
662
663 def get_parent_window(self):
664 '''Return the parent window.
665
666 :Returns:
667 Instance of the parent window. Can be a
668 :class:`~kivy.core.window.WindowBase` or
669 :class:`Widget`.
670 '''
671 if self.parent:
672 return self.parent.get_parent_window()
673
674 def _walk(self, restrict=False, loopback=False, index=None):
675 # We pass index only when we are going on the parent
676 # so don't yield the parent as well.
677 if index is None:
678 index = len(self.children)
679 yield self
680
681 for child in reversed(self.children[:index]):
682 for walk_child in child._walk(restrict=True):
683 yield walk_child
684
685 # If we want to continue with our parent, just do it.
686 if not restrict:
687 parent = self.parent
688 try:
689 if parent is None or not isinstance(parent, Widget):
690 raise ValueError
691 index = parent.children.index(self)
692 except ValueError:
693 # Self is root, if we want to loopback from the first element:
694 if not loopback:
695 return
696 # If we started with root (i.e. index==None), then we have to
697 # start from root again, so we return self again. Otherwise, we
698 # never returned it, so return it now starting with it.
699 parent = self
700 index = None
701 for walk_child in parent._walk(loopback=loopback, index=index):
702 yield walk_child
703
704 def walk(self, restrict=False, loopback=False):
705 ''' Iterator that walks the widget tree starting with this widget and
706 goes forward returning widgets in the order in which layouts display
707 them.
708
709 :Parameters:
710 `restrict`: bool, defaults to False
711 If True, it will only iterate through the widget and its
712 children (or children of its children etc.). Defaults to False.
713 `loopback`: bool, defaults to False
714 If True, when the last widget in the tree is reached,
715 it'll loop back to the uppermost root and start walking until
716 we hit this widget again. Naturally, it can only loop back when
717 `restrict` is False. Defaults to False.
718
719 :return:
720 A generator that walks the tree, returning widgets in the
721 forward layout order.
722
723 For example, given a tree with the following structure:
724
725 .. code-block:: kv
726
727 GridLayout:
728 Button
729 BoxLayout:
730 id: box
731 Widget
732 Button
733 Widget
734
735 walking this tree:
736
737 .. code-block:: python
738
739 >>> # Call walk on box with loopback True, and restrict False
740 >>> [type(widget) for widget in box.walk(loopback=True)]
741 [<class 'BoxLayout'>, <class 'Widget'>, <class 'Button'>,
742 <class 'Widget'>, <class 'GridLayout'>, <class 'Button'>]
743 >>> # Now with loopback False, and restrict False
744 >>> [type(widget) for widget in box.walk()]
745 [<class 'BoxLayout'>, <class 'Widget'>, <class 'Button'>,
746 <class 'Widget'>]
747 >>> # Now with restrict True
748 >>> [type(widget) for widget in box.walk(restrict=True)]
749 [<class 'BoxLayout'>, <class 'Widget'>, <class 'Button'>]
750
751 .. versionadded:: 1.9.0
752 '''
753 gen = self._walk(restrict, loopback)
754 yield next(gen)
755 for node in gen:
756 if node is self:
757 return
758 yield node
759
760 def _walk_reverse(self, loopback=False, go_up=False):
761 # process is walk up level, walk down its children tree, then walk up
762 # next level etc.
763 # default just walk down the children tree
764 root = self
765 index = 0
766 # we need to go up a level before walking tree
767 if go_up:
768 root = self.parent
769 try:
770 if root is None or not isinstance(root, Widget):
771 raise ValueError
772 index = root.children.index(self) + 1
773 except ValueError:
774 if not loopback:
775 return
776 index = 0
777 go_up = False
778 root = self
779
780 # now walk children tree starting with last-most child
781 for child in islice(root.children, index, None):
782 for walk_child in child._walk_reverse(loopback=loopback):
783 yield walk_child
784 # we need to return ourself last, in all cases
785 yield root
786
787 # if going up, continue walking up the parent tree
788 if go_up:
789 for walk_child in root._walk_reverse(loopback=loopback,
790 go_up=go_up):
791 yield walk_child
792
793 def walk_reverse(self, loopback=False):
794 ''' Iterator that walks the widget tree backwards starting with the
795 widget before this, and going backwards returning widgets in the
796 reverse order in which layouts display them.
797
798 This walks in the opposite direction of :meth:`walk`, so a list of the
799 tree generated with :meth:`walk` will be in reverse order compared
800 to the list generated with this, provided `loopback` is True.
801
802 :Parameters:
803 `loopback`: bool, defaults to False
804 If True, when the uppermost root in the tree is
805 reached, it'll loop back to the last widget and start walking
806 back until after we hit widget again. Defaults to False.
807
808 :return:
809 A generator that walks the tree, returning widgets in the
810 reverse layout order.
811
812 For example, given a tree with the following structure:
813
814 .. code-block:: kv
815
816 GridLayout:
817 Button
818 BoxLayout:
819 id: box
820 Widget
821 Button
822 Widget
823
824 walking this tree:
825
826 .. code-block:: python
827
828 >>> # Call walk on box with loopback True
829 >>> [type(widget) for widget in box.walk_reverse(loopback=True)]
830 [<class 'Button'>, <class 'GridLayout'>, <class 'Widget'>,
831 <class 'Button'>, <class 'Widget'>, <class 'BoxLayout'>]
832 >>> # Now with loopback False
833 >>> [type(widget) for widget in box.walk_reverse()]
834 [<class 'Button'>, <class 'GridLayout'>]
835 >>> forward = [w for w in box.walk(loopback=True)]
836 >>> backward = [w for w in box.walk_reverse(loopback=True)]
837 >>> forward == backward[::-1]
838 True
839
840 .. versionadded:: 1.9.0
841
842 '''
843 for node in self._walk_reverse(loopback=loopback, go_up=True):
844 yield node
845 if node is self:
846 return
847
848 def to_widget(self, x, y, relative=False):
849 '''Convert the given coordinate from window to local widget
850 coordinates. See :mod:`~kivy.uix.relativelayout` for details on the
851 coordinate systems.
852 '''
853 if self.parent:
854 x, y = self.parent.to_widget(x, y)
855 return self.to_local(x, y, relative=relative)
856
857 def to_window(self, x, y, initial=True, relative=False):
858 '''Transform local coordinates to window coordinates. See
859 :mod:`~kivy.uix.relativelayout` for details on the coordinate systems.
860 '''
861 if not initial:
862 x, y = self.to_parent(x, y, relative=relative)
863 if self.parent:
864 return self.parent.to_window(x, y, initial=False,
865 relative=relative)
866 return (x, y)
867
868 def to_parent(self, x, y, relative=False):
869 '''Transform local coordinates to parent coordinates. See
870 :mod:`~kivy.uix.relativelayout` for details on the coordinate systems.
871
872 :Parameters:
873 `relative`: bool, defaults to False
874 Change to True if you want to translate relative positions from
875 a widget to its parent coordinates.
876 '''
877 if relative:
878 return (x + self.x, y + self.y)
879 return (x, y)
880
881 def to_local(self, x, y, relative=False):
882 '''Transform parent coordinates to local coordinates. See
883 :mod:`~kivy.uix.relativelayout` for details on the coordinate systems.
884
885 :Parameters:
886 `relative`: bool, defaults to False
887 Change to True if you want to translate coordinates to
888 relative widget coordinates.
889 '''
890 if relative:
891 return (x - self.x, y - self.y)
892 return (x, y)
893
894 def _apply_transform(self, m, pos=None):
895 if self.parent:
896 x, y = self.parent.to_widget(relative=True,
897 *self.to_window(*(pos or self.pos)))
898 m.translate(x, y, 0)
899 m = self.parent._apply_transform(m) if self.parent else m
900 return m
901
902 def get_window_matrix(self, x=0, y=0):
903 '''Calculate the transformation matrix to convert between window and
904 widget coordinates.
905
906 :Parameters:
907 `x`: float, defaults to 0
908 Translates the matrix on the x axis.
909 `y`: float, defaults to 0
910 Translates the matrix on the y axis.
911 '''
912 m = Matrix()
913 m.translate(x, y, 0)
914 m = self._apply_transform(m)
915 return m
916
917 x = NumericProperty(0)
918 '''X position of the widget.
919
920 :attr:`x` is a :class:`~kivy.properties.NumericProperty` and defaults to 0.
921 '''
922
923 y = NumericProperty(0)
924 '''Y position of the widget.
925
926 :attr:`y` is a :class:`~kivy.properties.NumericProperty` and defaults to 0.
927 '''
928
929 width = NumericProperty(100)
930 '''Width of the widget.
931
932 :attr:`width` is a :class:`~kivy.properties.NumericProperty` and defaults
933 to 100.
934
935 .. warning::
936 Keep in mind that the `width` property is subject to layout logic and
937 that this has not yet happened at the time of the widget's `__init__`
938 method.
939 '''
940
941 height = NumericProperty(100)
942 '''Height of the widget.
943
944 :attr:`height` is a :class:`~kivy.properties.NumericProperty` and defaults
945 to 100.
946
947 .. warning::
948 Keep in mind that the `height` property is subject to layout logic and
949 that this has not yet happened at the time of the widget's `__init__`
950 method.
951 '''
952
953 pos = ReferenceListProperty(x, y)
954 '''Position of the widget.
955
956 :attr:`pos` is a :class:`~kivy.properties.ReferenceListProperty` of
957 (:attr:`x`, :attr:`y`) properties.
958 '''
959
960 size = ReferenceListProperty(width, height)
961 '''Size of the widget.
962
963 :attr:`size` is a :class:`~kivy.properties.ReferenceListProperty` of
964 (:attr:`width`, :attr:`height`) properties.
965 '''
966
967 def get_right(self):
968 return self.x + self.width
969
970 def set_right(self, value):
971 self.x = value - self.width
972
973 right = AliasProperty(get_right, set_right, bind=('x', 'width'))
974 '''Right position of the widget.
975
976 :attr:`right` is an :class:`~kivy.properties.AliasProperty` of
977 (:attr:`x` + :attr:`width`).
978 '''
979
980 def get_top(self):
981 return self.y + self.height
982
983 def set_top(self, value):
984 self.y = value - self.height
985
986 top = AliasProperty(get_top, set_top, bind=('y', 'height'))
987 '''Top position of the widget.
988
989 :attr:`top` is an :class:`~kivy.properties.AliasProperty` of
990 (:attr:`y` + :attr:`height`).
991 '''
992
993 def get_center_x(self):
994 return self.x + self.width / 2.
995
996 def set_center_x(self, value):
997 self.x = value - self.width / 2.
998
999 center_x = AliasProperty(get_center_x, set_center_x, bind=('x', 'width'))
1000 '''X center position of the widget.
1001
1002 :attr:`center_x` is an :class:`~kivy.properties.AliasProperty` of
1003 (:attr:`x` + :attr:`width` / 2.).
1004 '''
1005
1006 def get_center_y(self):
1007 return self.y + self.height / 2.
1008
1009 def set_center_y(self, value):
1010 self.y = value - self.height / 2.
1011
1012 center_y = AliasProperty(get_center_y, set_center_y, bind=('y', 'height'))
1013 '''Y center position of the widget.
1014
1015 :attr:`center_y` is an :class:`~kivy.properties.AliasProperty` of
1016 (:attr:`y` + :attr:`height` / 2.).
1017 '''
1018
1019 center = ReferenceListProperty(center_x, center_y)
1020 '''Center position of the widget.
1021
1022 :attr:`center` is a :class:`~kivy.properties.ReferenceListProperty` of
1023 (:attr:`center_x`, :attr:`center_y`) properties.
1024 '''
1025
1026 cls = ListProperty([])
1027 '''Class of the widget, used for styling.
1028 '''
1029
1030 id = StringProperty(None, allownone=True)
1031 '''Unique identifier of the widget in the tree.
1032
1033 :attr:`id` is a :class:`~kivy.properties.StringProperty` and defaults to
1034 None.
1035
1036 .. warning::
1037
1038 If the :attr:`id` is already used in the tree, an exception will
1039 be raised.
1040 '''
1041
1042 children = ListProperty([])
1043 '''List of children of this widget.
1044
1045 :attr:`children` is a :class:`~kivy.properties.ListProperty` and
1046 defaults to an empty list.
1047
1048 Use :meth:`add_widget` and :meth:`remove_widget` for manipulating the
1049 children list. Don't manipulate the children list directly unless you know
1050 what you are doing.
1051 '''
1052
1053 parent = ObjectProperty(None, allownone=True, rebind=True)
1054 '''Parent of this widget. The parent of a widget is set when the widget
1055 is added to another widget and unset when the widget is removed from its
1056 parent.
1057
1058 :attr:`parent` is an :class:`~kivy.properties.ObjectProperty` and
1059 defaults to None.
1060 '''
1061
1062 size_hint_x = NumericProperty(1, allownone=True)
1063 '''X size hint. Represents how much space the widget should use in the
1064 direction of the X axis relative to its parent's width.
1065 Only the :class:`~kivy.uix.layout.Layout` and
1066 :class:`~kivy.core.window.Window` classes make use of the hint.
1067
1068 The size_hint is used by layouts for two purposes:
1069
1070 - When the layout considers widgets on their own rather than in
1071 relation to its other children, the size_hint_x is a direct proportion
1072 of the parent width, normally between 0.0 and 1.0. For instance, a
1073 widget with ``size_hint_x=0.5`` in
1074 a vertical BoxLayout will take up half the BoxLayout's width, or
1075 a widget in a FloatLayout with ``size_hint_x=0.2`` will take up 20%
1076 of the FloatLayout width. If the size_hint is greater than 1, the
1077 widget will be wider than the parent.
1078 - When multiple widgets can share a row of a layout, such as in a
1079 horizontal BoxLayout, their widths will be their size_hint_x as a
1080 fraction of the sum of widget size_hints. For instance, if the
1081 size_hint_xs are (0.5, 1.0, 0.5), the first widget will have a
1082 width of 25% of the parent width.
1083
1084 :attr:`size_hint_x` is a :class:`~kivy.properties.NumericProperty` and
1085 defaults to 1.
1086 '''
1087
1088 size_hint_y = NumericProperty(1, allownone=True)
1089 '''Y size hint.
1090
1091 :attr:`size_hint_y` is a :class:`~kivy.properties.NumericProperty` and
1092 defaults to 1.
1093
1094 See :attr:`size_hint_x` for more information, but with widths and heights
1095 swapped.
1096 '''
1097
1098 size_hint = ReferenceListProperty(size_hint_x, size_hint_y)
1099 '''Size hint.
1100
1101 :attr:`size_hint` is a :class:`~kivy.properties.ReferenceListProperty` of
1102 (:attr:`size_hint_x`, :attr:`size_hint_y`) properties.
1103
1104 See :attr:`size_hint_x` for more information.
1105 '''
1106
1107 pos_hint = ObjectProperty({})
1108 '''Position hint. This property allows you to set the position of
1109 the widget inside its parent layout, in percent (similar to
1110 size_hint).
1111
1112 For example, if you want to set the top of the widget to be at 90%
1113 height of its parent layout, you can write::
1114
1115 widget = Widget(pos_hint={'top': 0.9})
1116
1117 The keys 'x', 'right' and 'center_x' will use the parent width.
1118 The keys 'y', 'top' and 'center_y' will use the parent height.
1119
1120 See :doc:`api-kivy.uix.floatlayout` for further reference.
1121
1122 .. note::
1123 :attr:`pos_hint` is not used by all layouts. Check the documentation
1124 of the layout in question to see if it supports pos_hint.
1125
1126 :attr:`pos_hint` is an :class:`~kivy.properties.ObjectProperty`
1127 containing a dict.
1128 '''
1129
1130 ids = DictProperty({})
1131 '''This is a dictionary of ids defined in your kv language. This will only
1132 be populated if you use ids in your kv language code.
1133
1134 .. versionadded:: 1.7.0
1135
1136 :attr:`ids` is a :class:`~kivy.properties.DictProperty` and defaults to an
1137 empty dict {}.
1138
1139 The :attr:`ids` are populated for each root level widget definition. For
1140 example:
1141
1142 .. code-block:: kv
1143
1144 # in kv
1145 <MyWidget@Widget>:
1146 id: my_widget
1147 Label:
1148 id: label_widget
1149 Widget:
1150 id: inner_widget
1151 Label:
1152 id: inner_label
1153 TextInput:
1154 id: text_input
1155 OtherWidget:
1156 id: other_widget
1157
1158
1159 <OtherWidget@Widget>
1160 id: other_widget
1161 Label:
1162 id: other_label
1163 TextInput:
1164 id: other_textinput
1165
1166 Then, in python:
1167
1168 .. code-block:: python
1169
1170 >>> widget = MyWidget()
1171 >>> print(widget.ids)
1172 {'other_widget': <weakproxy at 041CFED0 to OtherWidget at 041BEC38>,
1173 'inner_widget': <weakproxy at 04137EA0 to Widget at 04138228>,
1174 'inner_label': <weakproxy at 04143540 to Label at 04138260>,
1175 'label_widget': <weakproxy at 04137B70 to Label at 040F97A0>,
1176 'text_input': <weakproxy at 041BB5D0 to TextInput at 041BEC00>}
1177 >>> print(widget.ids['other_widget'].ids)
1178 {'other_textinput': <weakproxy at 041DBB40 to TextInput at 041BEF48>,
1179 'other_label': <weakproxy at 041DB570 to Label at 041BEEA0>}
1180 >>> print(widget.ids['label_widget'].ids)
1181 {}
1182 '''
1183
1184 opacity = NumericProperty(1.0)
1185 '''Opacity of the widget and all its children.
1186
1187 .. versionadded:: 1.4.1
1188
1189 The opacity attribute controls the opacity of the widget and its children.
1190 Be careful, it's a cumulative attribute: the value is multiplied by the
1191 current global opacity and the result is applied to the current context
1192 color.
1193
1194 For example, if the parent has an opacity of 0.5 and a child has an
1195 opacity of 0.2, the real opacity of the child will be 0.5 * 0.2 = 0.1.
1196
1197 Then, the opacity is applied by the shader as:
1198
1199 .. code-block:: python
1200
1201 frag_color = color * vec4(1.0, 1.0, 1.0, opacity);
1202
1203 :attr:`opacity` is a :class:`~kivy.properties.NumericProperty` and defaults
1204 to 1.0.
1205 '''
1206
1207 def on_opacity(self, instance, value):
1208 canvas = self.canvas
1209 if canvas is not None:
1210 canvas.opacity = value
1211
1212 canvas = None
1213 '''Canvas of the widget.
1214
1215 The canvas is a graphics object that contains all the drawing instructions
1216 for the graphical representation of the widget.
1217
1218 There are no general properties for the Widget class, such as background
1219 color, to keep the design simple and lean. Some derived classes, such as
1220 Button, do add such convenience properties but generally the developer is
1221 responsible for implementing the graphics representation for a custom
1222 widget from the ground up. See the derived widget classes for patterns to
1223 follow and extend.
1224
1225 See :class:`~kivy.graphics.Canvas` for more information about the usage.
1226 '''
1227
1228 disabled = BooleanProperty(False)
1229 '''Indicates whether this widget can interact with input or not.
1230
1231 .. note::
1232
1233 1. Child Widgets, when added to a disabled widget, will be disabled
1234 automatically.
1235 2. Disabling/enabling a parent disables/enables all
1236 of its children.
1237
1238 .. versionadded:: 1.8.0
1239
1240 :attr:`disabled` is a :class:`~kivy.properties.BooleanProperty` and
1241 defaults to False.
1242 '''
1243
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kivy/uix/widget.py b/kivy/uix/widget.py
--- a/kivy/uix/widget.py
+++ b/kivy/uix/widget.py
@@ -546,7 +546,7 @@
children = self.children
if index >= len(children):
index = len(children)
- next_index = 0
+ next_index = canvas.indexof(children[-1].canvas)
else:
next_child = children[index]
next_index = canvas.indexof(next_child.canvas)
| {"golden_diff": "diff --git a/kivy/uix/widget.py b/kivy/uix/widget.py\n--- a/kivy/uix/widget.py\n+++ b/kivy/uix/widget.py\n@@ -546,7 +546,7 @@\n children = self.children\n if index >= len(children):\n index = len(children)\n- next_index = 0\n+ next_index = canvas.indexof(children[-1].canvas)\n else:\n next_child = children[index]\n next_index = canvas.indexof(next_child.canvas)\n", "issue": "Remove and Re-add of Highest Widget in a Layout leaves the Widget Invisible\nI have noticed that adding widgets to a Layout Manager does not seem to work correctly. I have tested with BoxLayout, GridLayout, and FloatLayout. I have tested with 3 widgets, and with 4 widgets. I am using Kivy 1.9.0 with Python 2.7 on Fedora 22.\n\nI start by adding widgets to a Layout- it looks fine. Then I remove a widget- the \"first\" one, which is list index 3 in the list of children in my FloatLayout example (index 2 with the Grid example). See the attachments.\n[app2-float.txt](https://github.com/kivy/kivy/files/388380/app2-float.txt)\n[app2-grid.txt](https://github.com/kivy/kivy/files/388383/app2-grid.txt)\n\nWhen I re-add the widget in a BoxLayout or GridLayout, space is used in but the widget is not displayed. Testing in my \"real\" app (code not given here) shows that the widget is indeed in place (I can perform actions on it) but it's just not displayed. This problem only seems to happen with the leftmost position.\n\n", "before_files": [{"content": "'''\nWidget class\n============\n\nThe :class:`Widget` class is the base class required for creating Widgets.\nThis widget class was designed with a couple of principles in mind:\n\n* *Event Driven*\n\n Widget interaction is built on top of events that occur. If a property\n changes, the widget can respond to the change in the 'on_<propname>'\n callback. If nothing changes, nothing will be done. That's the main\n goal of the :class:`~kivy.properties.Property` class.\n\n* *Separation Of Concerns (the widget and its graphical representation)*\n\n Widgets don't have a `draw()` method. This is done on purpose: The idea\n is to allow you to create your own graphical representation outside the\n widget class.\n Obviously you can still use all the available properties to do that, so\n that your representation properly reflects the widget's current state.\n Every widget has its own :class:`~kivy.graphics.Canvas` that you\n can use to draw. This separation allows Kivy to run your\n application in a very efficient manner.\n\n* *Bounding Box / Collision*\n\n Often you want to know if a certain point is within the bounds of your\n widget. An example would be a button widget where you only want to\n trigger an action when the button itself is actually touched.\n For this, you can use the :meth:`~Widget.collide_point` method, which\n will return True if the point you pass to it is inside the axis-aligned\n bounding box defined by the widget's position and size.\n If a simple AABB is not sufficient, you can override the method to\n perform the collision checks with more complex shapes, e.g. a polygon.\n You can also check if a widget collides with another widget with\n :meth:`~Widget.collide_widget`.\n\n\nWe also have some default values and behaviors that you should be aware of:\n\n* A :class:`Widget` is not a :class:`~kivy.uix.layout.Layout`: it will not\n change the position or the size of its children. If you want control over\n positioning or sizing, use a :class:`~kivy.uix.layout.Layout`.\n\n* The default size of a widget is (100, 100). This is only changed if the\n parent is a :class:`~kivy.uix.layout.Layout`.\n For example, if you add a :class:`Label` inside a\n :class:`Button`, the label will not inherit the button's size or position\n because the button is not a *Layout*: it's just another *Widget*.\n\n* The default size_hint is (1, 1). If the parent is a :class:`Layout`, then the\n widget size will be the parent layout's size.\n\n* :meth:`~Widget.on_touch_down`, :meth:`~Widget.on_touch_move`,\n :meth:`~Widget.on_touch_up` don't do any sort of collisions. If you want to\n know if the touch is inside your widget, use :meth:`~Widget.collide_point`.\n\nUsing Properties\n----------------\n\nWhen you read the documentation, all properties are described in the format::\n\n <name> is a <property class> and defaults to <default value>.\n\ne.g.\n\n :attr:`~kivy.uix.label.Label.text` is a\n :class:`~kivy.properties.StringProperty` and defaults to ''.\n\nIf you want to be notified when the pos attribute changes, i.e. when the\nwidget moves, you can bind your own callback function like this::\n\n def callback_pos(instance, value):\n print('The widget', instance, 'moved to', value)\n\n wid = Widget()\n wid.bind(pos=callback_pos)\n\nRead more about :doc:`/api-kivy.properties`.\n\nBasic drawing\n-------------\n\nWidgets support a range of drawing instructions that you can use to customize\nthe look of your widgets and layouts. For example, to draw a background image\nfor your widget, you can do the following:\n\n.. code-block:: python\n\n def redraw(self, args):\n self.bg_rect.size = self.size\n self.bg_rect.pos = self.pos\n\n widget = Widget()\n with widget.canvas:\n widget.bg_rect = Rectangle(source=\"cover.jpg\", pos=self.pos, \\\nsize=self.size)\n widget.bind(pos=redraw, size=redraw)\n\nTo draw a background in kv:\n\n.. code-block:: kv\n\n Widget:\n canvas:\n Rectangle:\n source: \"cover.jpg\"\n size: self.size\n pos: self.pos\n\nThese examples only scratch the surface. Please see the :mod:`kivy.graphics`\ndocumentation for more information.\n\n.. _widget-event-bubbling:\n\nWidget touch event bubbling\n---------------------------\n\nWhen you catch touch events between multiple widgets, you often\nneed to be aware of the order in which these events are propagated. In Kivy,\nevents bubble up from the first child upwards through the other children.\nIf a widget has children, the event is passed through its children before\nbeing passed on to the widget after it.\n\nAs the :meth:`~kivy.uix.widget.Widget.on_touch_up` method inserts widgets at\nindex 0 by default, this means the event goes from the most recently added\nwidget back to the first one added. Consider the following:\n\n.. code-block:: python\n\n box = BoxLayout()\n box.add_widget(Label(text=\"a\"))\n box.add_widget(Label(text=\"b\"))\n box.add_widget(Label(text=\"c\"))\n\nThe label with text \"c\" gets the event first, \"b\" second and \"a\" last. You can\nreverse this order by manually specifying the index:\n\n.. code-block:: python\n\n box = BoxLayout()\n box.add_widget(Label(text=\"a\"), index=0)\n box.add_widget(Label(text=\"b\"), index=1)\n box.add_widget(Label(text=\"c\"), index=2)\n\nNow the order would be \"a\", \"b\" then \"c\". One thing to keep in mind when using\nkv is that declaring a widget uses the\n:meth:`~kivy.uix.widget.Widget.add_widget` method for insertion. Hence, using\n\n.. code-block:: kv\n\n BoxLayout:\n MyLabel:\n text: \"a\"\n MyLabel:\n text: \"b\"\n MyLabel:\n text: \"c\"\n\nwould result in the event order \"c\", \"b\" then \"a\" as \"c\" was actually the last\nadded widget. It thus has index 0, \"b\" index 1 and \"a\" index 2. Effectively,\nthe child order is the reverse of its listed order.\n\nThis ordering is the same for the :meth:`~kivy.uix.widget.Widget.on_touch_move`\nand :meth:`~kivy.uix.widget.Widget.on_touch_up` events.\n\nIn order to stop this event bubbling, a method can return `True`. This tells\nKivy the event has been handled and the event propagation stops. For example:\n\n.. code-block:: python\n\n class MyWidget(Widget):\n def on_touch_down(self, touch):\n If <some_condition>:\n # Do stuff here and kill the event\n return True\n else:\n return super(MyWidget, self).on_touch_down(touch)\n\nThis approach gives you good control over exactly how events are dispatched\nand managed. Sometimes, however, you may wish to let the event be completely\npropagated before taking action. You can use the\n:class:`~kivy.clock.Clock` to help you here:\n\n.. code-block:: python\n\n class MyWidget(Label):\n def on_touch_down(self, touch, after=False):\n if after:\n print \"Fired after the event has been dispatched!\"\n else:\n Clock.schedule_once(lambda dt: self.on_touch_down(touch, True))\n return super(MyWidget, self).on_touch_down(touch)\n\nUsage of :attr:`Widget.center`, :attr:`Widget.right`, and :attr:`Widget.top`\n----------------------------------------------------------------------------\n\nA common mistake when using one of the computed properties such as\n:attr:`Widget.right` is to use it to make a widget follow its parent with a\nKV rule such as `right: self.parent.right`. Consider, for example:\n\n.. code-block:: kv\n\n FloatLayout:\n id: layout\n width: 100\n Widget:\n id: wid\n right: layout.right\n\nThe (mistaken) expectation is that this rule ensures that wid's right will\nalways be whatever layout's right is - that is wid.right and layout.right will\nalways be identical. In actual fact, this rule only says that \"whenever\nlayout's `right` changes, wid's right will be set to that value\". The\ndifference being that as long as `layout.right` doesn't change, `wid.right`\ncould be anything, even a value that will make them different.\n\nSpecifically, for the KV code above, consider the following example::\n\n >>> print(layout.right, wid.right)\n (100, 100)\n >>> wid.x = 200\n >>> print(layout.right, wid.right)\n (100, 300)\n\nAs can be seen, initially they are in sync, however, when we change `wid.x`\nthey go out of sync because `layout.right` is not changed and the rule is not\ntriggered.\n\nThe proper way to make the widget follow its parent's right is to use\n:attr:`Widget.pos_hint`. If instead of `right: layout.right` we did\n`pos_hint: {'right': 1}`, then the widgets right will always be set to be\nat the parent's right at each layout update.\n'''\n\n__all__ = ('Widget', 'WidgetException')\n\nfrom kivy.event import EventDispatcher\nfrom kivy.factory import Factory\nfrom kivy.properties import (\n NumericProperty, StringProperty, AliasProperty, ReferenceListProperty,\n ObjectProperty, ListProperty, DictProperty, BooleanProperty)\nfrom kivy.graphics import (\n Canvas, Translate, Fbo, ClearColor, ClearBuffers, Scale)\nfrom kivy.graphics.transformation import Matrix\nfrom kivy.base import EventLoop\nfrom kivy.lang import Builder\nfrom kivy.context import get_current_context\nfrom kivy.weakproxy import WeakProxy\nfrom functools import partial\nfrom itertools import islice\n\n\n# References to all the widget destructors (partial method with widget uid as\n# key).\n_widget_destructors = {}\n\n\ndef _widget_destructor(uid, r):\n # Internal method called when a widget is deleted from memory. the only\n # thing we remember about it is its uid. Clear all the associated callbacks\n # created in kv language.\n del _widget_destructors[uid]\n Builder.unbind_widget(uid)\n\n\nclass WidgetException(Exception):\n '''Fired when the widget gets an exception.\n '''\n pass\n\n\nclass WidgetMetaclass(type):\n '''Metaclass to automatically register new widgets for the\n :class:`~kivy.factory.Factory`.\n\n .. warning::\n This metaclass is used by the Widget. Do not use it directly!\n '''\n def __init__(mcs, name, bases, attrs):\n super(WidgetMetaclass, mcs).__init__(name, bases, attrs)\n Factory.register(name, cls=mcs)\n\n\n#: Base class used for Widget, that inherits from :class:`EventDispatcher`\nWidgetBase = WidgetMetaclass('WidgetBase', (EventDispatcher, ), {})\n\n\nclass Widget(WidgetBase):\n '''Widget class. See module documentation for more information.\n\n :Events:\n `on_touch_down`:\n Fired when a new touch event occurs\n `on_touch_move`:\n Fired when an existing touch moves\n `on_touch_up`:\n Fired when an existing touch disappears\n\n .. warning::\n Adding a `__del__` method to a class derived from Widget with Python\n prior to 3.4 will disable automatic garbage collection for instances\n of that class. This is because the Widget class creates reference\n cycles, thereby `preventing garbage collection\n <https://docs.python.org/2/library/gc.html#gc.garbage>`_.\n\n .. versionchanged:: 1.0.9\n Everything related to event properties has been moved to the\n :class:`~kivy.event.EventDispatcher`. Event properties can now be used\n when contructing a simple class without subclassing :class:`Widget`.\n\n .. versionchanged:: 1.5.0\n The constructor now accepts on_* arguments to automatically bind\n callbacks to properties or events, as in the Kv language.\n '''\n\n __metaclass__ = WidgetMetaclass\n __events__ = ('on_touch_down', 'on_touch_move', 'on_touch_up')\n _proxy_ref = None\n\n def __init__(self, **kwargs):\n # Before doing anything, ensure the windows exist.\n EventLoop.ensure_window()\n\n # Assign the default context of the widget creation.\n if not hasattr(self, '_context'):\n self._context = get_current_context()\n\n no_builder = '__no_builder' in kwargs\n if no_builder:\n del kwargs['__no_builder']\n on_args = {k: v for k, v in kwargs.items() if k[:3] == 'on_'}\n for key in on_args:\n del kwargs[key]\n\n super(Widget, self).__init__(**kwargs)\n\n # Create the default canvas if it does not exist.\n if self.canvas is None:\n self.canvas = Canvas(opacity=self.opacity)\n\n # Apply all the styles.\n if not no_builder:\n Builder.apply(self, ignored_consts=self._kwargs_applied_init)\n\n # Bind all the events.\n if on_args:\n self.bind(**on_args)\n\n @property\n def proxy_ref(self):\n '''Return a proxy reference to the widget, i.e. without creating a\n reference to the widget. See `weakref.proxy\n <http://docs.python.org/2/library/weakref.html?highlight\\\n =proxy#weakref.proxy>`_ for more information.\n\n .. versionadded:: 1.7.2\n '''\n _proxy_ref = self._proxy_ref\n if _proxy_ref is not None:\n return _proxy_ref\n\n f = partial(_widget_destructor, self.uid)\n self._proxy_ref = _proxy_ref = WeakProxy(self, f)\n # Only f should be enough here, but it appears that is a very\n # specific case, the proxy destructor is not called if both f and\n # _proxy_ref are not together in a tuple.\n _widget_destructors[self.uid] = (f, _proxy_ref)\n return _proxy_ref\n\n def __hash__(self):\n return id(self)\n\n @property\n def __self__(self):\n return self\n\n #\n # Collision\n #\n def collide_point(self, x, y):\n '''\n Check if a point (x, y) is inside the widget's axis aligned bounding\n box.\n\n :Parameters:\n `x`: numeric\n x position of the point (in window coordinates)\n `y`: numeric\n y position of the point (in window coordinates)\n\n :Returns:\n A bool. True if the point is inside the bounding box, False\n otherwise.\n\n .. code-block:: python\n\n >>> Widget(pos=(10, 10), size=(50, 50)).collide_point(40, 40)\n True\n '''\n return self.x <= x <= self.right and self.y <= y <= self.top\n\n def collide_widget(self, wid):\n '''\n Check if another widget collides with this widget. This function\n performs an axis-aligned bounding box intersection test by default.\n\n :Parameters:\n `wid`: :class:`Widget` class\n Widget to collide with.\n\n :Returns:\n bool. True if the other widget collides with this widget, False\n otherwise.\n\n .. code-block:: python\n\n >>> wid = Widget(size=(50, 50))\n >>> wid2 = Widget(size=(50, 50), pos=(25, 25))\n >>> wid.collide_widget(wid2)\n True\n >>> wid2.pos = (55, 55)\n >>> wid.collide_widget(wid2)\n False\n '''\n if self.right < wid.x:\n return False\n if self.x > wid.right:\n return False\n if self.top < wid.y:\n return False\n if self.y > wid.top:\n return False\n return True\n\n #\n # Default event handlers\n #\n def on_touch_down(self, touch):\n '''Receive a touch down event.\n\n :Parameters:\n `touch`: :class:`~kivy.input.motionevent.MotionEvent` class\n Touch received. The touch is in parent coordinates. See\n :mod:`~kivy.uix.relativelayout` for a discussion on\n coordinate systems.\n\n :Returns:\n bool. If True, the dispatching of the touch event will stop.\n If False, the event will continue to be dispatched to the rest\n of the widget tree.\n '''\n if self.disabled and self.collide_point(*touch.pos):\n return True\n for child in self.children[:]:\n if child.dispatch('on_touch_down', touch):\n return True\n\n def on_touch_move(self, touch):\n '''Receive a touch move event. The touch is in parent coordinates.\n\n See :meth:`on_touch_down` for more information.\n '''\n if self.disabled:\n return\n for child in self.children[:]:\n if child.dispatch('on_touch_move', touch):\n return True\n\n def on_touch_up(self, touch):\n '''Receive a touch up event. The touch is in parent coordinates.\n\n See :meth:`on_touch_down` for more information.\n '''\n if self.disabled:\n return\n for child in self.children[:]:\n if child.dispatch('on_touch_up', touch):\n return True\n\n def on_disabled(self, instance, value):\n for child in self.children:\n child.disabled = value\n\n #\n # Tree management\n #\n def add_widget(self, widget, index=0, canvas=None):\n '''Add a new widget as a child of this widget.\n\n :Parameters:\n `widget`: :class:`Widget`\n Widget to add to our list of children.\n `index`: int, defaults to 0\n Index to insert the widget in the list. Notice that the default\n of 0 means the widget is inserted at the beginning of the list\n and will thus be drawn on top of other sibling widgets. For a\n full discussion of the index and widget hierarchy, please see\n the :doc:`Widgets Programming Guide <guide/widgets>`.\n\n .. versionadded:: 1.0.5\n `canvas`: str, defaults to None\n Canvas to add widget's canvas to. Can be 'before', 'after' or\n None for the default canvas.\n\n .. versionadded:: 1.9.0\n\n .. code-block:: python\n\n >>> from kivy.uix.button import Button\n >>> from kivy.uix.slider import Slider\n >>> root = Widget()\n >>> root.add_widget(Button())\n >>> slider = Slider()\n >>> root.add_widget(slider)\n\n '''\n if not isinstance(widget, Widget):\n raise WidgetException(\n 'add_widget() can be used only with instances'\n ' of the Widget class.')\n\n widget = widget.__self__\n if widget is self:\n raise WidgetException(\n 'Widget instances cannot be added to themselves.')\n parent = widget.parent\n # Check if the widget is already a child of another widget.\n if parent:\n raise WidgetException('Cannot add %r, it already has a parent %r'\n % (widget, parent))\n widget.parent = parent = self\n # Child will be disabled if added to a disabled parent.\n if parent.disabled:\n widget.disabled = True\n\n canvas = self.canvas.before if canvas == 'before' else \\\n self.canvas.after if canvas == 'after' else self.canvas\n\n if index == 0 or len(self.children) == 0:\n self.children.insert(0, widget)\n canvas.add(widget.canvas)\n else:\n canvas = self.canvas\n children = self.children\n if index >= len(children):\n index = len(children)\n next_index = 0\n else:\n next_child = children[index]\n next_index = canvas.indexof(next_child.canvas)\n if next_index == -1:\n next_index = canvas.length()\n else:\n next_index += 1\n\n children.insert(index, widget)\n # We never want to insert widget _before_ canvas.before.\n if next_index == 0 and canvas.has_before:\n next_index = 1\n canvas.insert(next_index, widget.canvas)\n\n def remove_widget(self, widget):\n '''Remove a widget from the children of this widget.\n\n :Parameters:\n `widget`: :class:`Widget`\n Widget to remove from our children list.\n\n .. code-block:: python\n\n >>> from kivy.uix.button import Button\n >>> root = Widget()\n >>> button = Button()\n >>> root.add_widget(button)\n >>> root.remove_widget(button)\n '''\n if widget not in self.children:\n return\n self.children.remove(widget)\n if widget.canvas in self.canvas.children:\n self.canvas.remove(widget.canvas)\n elif widget.canvas in self.canvas.after.children:\n self.canvas.after.remove(widget.canvas)\n elif widget.canvas in self.canvas.before.children:\n self.canvas.before.remove(widget.canvas)\n widget.parent = None\n\n def clear_widgets(self, children=None):\n '''\n Remove all (or the specified) :attr:`~Widget.children` of this widget.\n If the 'children' argument is specified, it should be a list (or\n filtered list) of children of the current widget.\n\n .. versionchanged:: 1.8.0\n The `children` argument can be used to specify the children you\n want to remove.\n '''\n\n if not children:\n children = self.children\n remove_widget = self.remove_widget\n for child in children[:]:\n remove_widget(child)\n\n def export_to_png(self, filename, *args):\n '''Saves an image of the widget and its children in png format at the\n specified filename. Works by removing the widget canvas from its\n parent, rendering to an :class:`~kivy.graphics.fbo.Fbo`, and calling\n :meth:`~kivy.graphics.texture.Texture.save`.\n\n .. note::\n\n The image includes only this widget and its children. If you want\n to include widgets elsewhere in the tree, you must call\n :meth:`~Widget.export_to_png` from their common parent, or use\n :meth:`~kivy.core.window.WindowBase.screenshot` to capture the whole\n window.\n\n .. note::\n\n The image will be saved in png format, you should include the\n extension in your filename.\n\n .. versionadded:: 1.9.0\n '''\n\n if self.parent is not None:\n canvas_parent_index = self.parent.canvas.indexof(self.canvas)\n if canvas_parent_index > -1:\n self.parent.canvas.remove(self.canvas)\n\n fbo = Fbo(size=self.size, with_stencilbuffer=True)\n\n with fbo:\n ClearColor(0, 0, 0, 1)\n ClearBuffers()\n Scale(1, -1, 1)\n Translate(-self.x, -self.y - self.height, 0)\n\n fbo.add(self.canvas)\n fbo.draw()\n fbo.texture.save(filename, flipped=False)\n fbo.remove(self.canvas)\n\n if self.parent is not None and canvas_parent_index > -1:\n self.parent.canvas.insert(canvas_parent_index, self.canvas)\n\n return True\n\n def get_root_window(self):\n '''Return the root window.\n\n :Returns:\n Instance of the root window. Can be a\n :class:`~kivy.core.window.WindowBase` or\n :class:`Widget`.\n '''\n if self.parent:\n return self.parent.get_root_window()\n\n def get_parent_window(self):\n '''Return the parent window.\n\n :Returns:\n Instance of the parent window. Can be a\n :class:`~kivy.core.window.WindowBase` or\n :class:`Widget`.\n '''\n if self.parent:\n return self.parent.get_parent_window()\n\n def _walk(self, restrict=False, loopback=False, index=None):\n # We pass index only when we are going on the parent\n # so don't yield the parent as well.\n if index is None:\n index = len(self.children)\n yield self\n\n for child in reversed(self.children[:index]):\n for walk_child in child._walk(restrict=True):\n yield walk_child\n\n # If we want to continue with our parent, just do it.\n if not restrict:\n parent = self.parent\n try:\n if parent is None or not isinstance(parent, Widget):\n raise ValueError\n index = parent.children.index(self)\n except ValueError:\n # Self is root, if we want to loopback from the first element:\n if not loopback:\n return\n # If we started with root (i.e. index==None), then we have to\n # start from root again, so we return self again. Otherwise, we\n # never returned it, so return it now starting with it.\n parent = self\n index = None\n for walk_child in parent._walk(loopback=loopback, index=index):\n yield walk_child\n\n def walk(self, restrict=False, loopback=False):\n ''' Iterator that walks the widget tree starting with this widget and\n goes forward returning widgets in the order in which layouts display\n them.\n\n :Parameters:\n `restrict`: bool, defaults to False\n If True, it will only iterate through the widget and its\n children (or children of its children etc.). Defaults to False.\n `loopback`: bool, defaults to False\n If True, when the last widget in the tree is reached,\n it'll loop back to the uppermost root and start walking until\n we hit this widget again. Naturally, it can only loop back when\n `restrict` is False. Defaults to False.\n\n :return:\n A generator that walks the tree, returning widgets in the\n forward layout order.\n\n For example, given a tree with the following structure:\n\n .. code-block:: kv\n\n GridLayout:\n Button\n BoxLayout:\n id: box\n Widget\n Button\n Widget\n\n walking this tree:\n\n .. code-block:: python\n\n >>> # Call walk on box with loopback True, and restrict False\n >>> [type(widget) for widget in box.walk(loopback=True)]\n [<class 'BoxLayout'>, <class 'Widget'>, <class 'Button'>,\n <class 'Widget'>, <class 'GridLayout'>, <class 'Button'>]\n >>> # Now with loopback False, and restrict False\n >>> [type(widget) for widget in box.walk()]\n [<class 'BoxLayout'>, <class 'Widget'>, <class 'Button'>,\n <class 'Widget'>]\n >>> # Now with restrict True\n >>> [type(widget) for widget in box.walk(restrict=True)]\n [<class 'BoxLayout'>, <class 'Widget'>, <class 'Button'>]\n\n .. versionadded:: 1.9.0\n '''\n gen = self._walk(restrict, loopback)\n yield next(gen)\n for node in gen:\n if node is self:\n return\n yield node\n\n def _walk_reverse(self, loopback=False, go_up=False):\n # process is walk up level, walk down its children tree, then walk up\n # next level etc.\n # default just walk down the children tree\n root = self\n index = 0\n # we need to go up a level before walking tree\n if go_up:\n root = self.parent\n try:\n if root is None or not isinstance(root, Widget):\n raise ValueError\n index = root.children.index(self) + 1\n except ValueError:\n if not loopback:\n return\n index = 0\n go_up = False\n root = self\n\n # now walk children tree starting with last-most child\n for child in islice(root.children, index, None):\n for walk_child in child._walk_reverse(loopback=loopback):\n yield walk_child\n # we need to return ourself last, in all cases\n yield root\n\n # if going up, continue walking up the parent tree\n if go_up:\n for walk_child in root._walk_reverse(loopback=loopback,\n go_up=go_up):\n yield walk_child\n\n def walk_reverse(self, loopback=False):\n ''' Iterator that walks the widget tree backwards starting with the\n widget before this, and going backwards returning widgets in the\n reverse order in which layouts display them.\n\n This walks in the opposite direction of :meth:`walk`, so a list of the\n tree generated with :meth:`walk` will be in reverse order compared\n to the list generated with this, provided `loopback` is True.\n\n :Parameters:\n `loopback`: bool, defaults to False\n If True, when the uppermost root in the tree is\n reached, it'll loop back to the last widget and start walking\n back until after we hit widget again. Defaults to False.\n\n :return:\n A generator that walks the tree, returning widgets in the\n reverse layout order.\n\n For example, given a tree with the following structure:\n\n .. code-block:: kv\n\n GridLayout:\n Button\n BoxLayout:\n id: box\n Widget\n Button\n Widget\n\n walking this tree:\n\n .. code-block:: python\n\n >>> # Call walk on box with loopback True\n >>> [type(widget) for widget in box.walk_reverse(loopback=True)]\n [<class 'Button'>, <class 'GridLayout'>, <class 'Widget'>,\n <class 'Button'>, <class 'Widget'>, <class 'BoxLayout'>]\n >>> # Now with loopback False\n >>> [type(widget) for widget in box.walk_reverse()]\n [<class 'Button'>, <class 'GridLayout'>]\n >>> forward = [w for w in box.walk(loopback=True)]\n >>> backward = [w for w in box.walk_reverse(loopback=True)]\n >>> forward == backward[::-1]\n True\n\n .. versionadded:: 1.9.0\n\n '''\n for node in self._walk_reverse(loopback=loopback, go_up=True):\n yield node\n if node is self:\n return\n\n def to_widget(self, x, y, relative=False):\n '''Convert the given coordinate from window to local widget\n coordinates. See :mod:`~kivy.uix.relativelayout` for details on the\n coordinate systems.\n '''\n if self.parent:\n x, y = self.parent.to_widget(x, y)\n return self.to_local(x, y, relative=relative)\n\n def to_window(self, x, y, initial=True, relative=False):\n '''Transform local coordinates to window coordinates. See\n :mod:`~kivy.uix.relativelayout` for details on the coordinate systems.\n '''\n if not initial:\n x, y = self.to_parent(x, y, relative=relative)\n if self.parent:\n return self.parent.to_window(x, y, initial=False,\n relative=relative)\n return (x, y)\n\n def to_parent(self, x, y, relative=False):\n '''Transform local coordinates to parent coordinates. See\n :mod:`~kivy.uix.relativelayout` for details on the coordinate systems.\n\n :Parameters:\n `relative`: bool, defaults to False\n Change to True if you want to translate relative positions from\n a widget to its parent coordinates.\n '''\n if relative:\n return (x + self.x, y + self.y)\n return (x, y)\n\n def to_local(self, x, y, relative=False):\n '''Transform parent coordinates to local coordinates. See\n :mod:`~kivy.uix.relativelayout` for details on the coordinate systems.\n\n :Parameters:\n `relative`: bool, defaults to False\n Change to True if you want to translate coordinates to\n relative widget coordinates.\n '''\n if relative:\n return (x - self.x, y - self.y)\n return (x, y)\n\n def _apply_transform(self, m, pos=None):\n if self.parent:\n x, y = self.parent.to_widget(relative=True,\n *self.to_window(*(pos or self.pos)))\n m.translate(x, y, 0)\n m = self.parent._apply_transform(m) if self.parent else m\n return m\n\n def get_window_matrix(self, x=0, y=0):\n '''Calculate the transformation matrix to convert between window and\n widget coordinates.\n\n :Parameters:\n `x`: float, defaults to 0\n Translates the matrix on the x axis.\n `y`: float, defaults to 0\n Translates the matrix on the y axis.\n '''\n m = Matrix()\n m.translate(x, y, 0)\n m = self._apply_transform(m)\n return m\n\n x = NumericProperty(0)\n '''X position of the widget.\n\n :attr:`x` is a :class:`~kivy.properties.NumericProperty` and defaults to 0.\n '''\n\n y = NumericProperty(0)\n '''Y position of the widget.\n\n :attr:`y` is a :class:`~kivy.properties.NumericProperty` and defaults to 0.\n '''\n\n width = NumericProperty(100)\n '''Width of the widget.\n\n :attr:`width` is a :class:`~kivy.properties.NumericProperty` and defaults\n to 100.\n\n .. warning::\n Keep in mind that the `width` property is subject to layout logic and\n that this has not yet happened at the time of the widget's `__init__`\n method.\n '''\n\n height = NumericProperty(100)\n '''Height of the widget.\n\n :attr:`height` is a :class:`~kivy.properties.NumericProperty` and defaults\n to 100.\n\n .. warning::\n Keep in mind that the `height` property is subject to layout logic and\n that this has not yet happened at the time of the widget's `__init__`\n method.\n '''\n\n pos = ReferenceListProperty(x, y)\n '''Position of the widget.\n\n :attr:`pos` is a :class:`~kivy.properties.ReferenceListProperty` of\n (:attr:`x`, :attr:`y`) properties.\n '''\n\n size = ReferenceListProperty(width, height)\n '''Size of the widget.\n\n :attr:`size` is a :class:`~kivy.properties.ReferenceListProperty` of\n (:attr:`width`, :attr:`height`) properties.\n '''\n\n def get_right(self):\n return self.x + self.width\n\n def set_right(self, value):\n self.x = value - self.width\n\n right = AliasProperty(get_right, set_right, bind=('x', 'width'))\n '''Right position of the widget.\n\n :attr:`right` is an :class:`~kivy.properties.AliasProperty` of\n (:attr:`x` + :attr:`width`).\n '''\n\n def get_top(self):\n return self.y + self.height\n\n def set_top(self, value):\n self.y = value - self.height\n\n top = AliasProperty(get_top, set_top, bind=('y', 'height'))\n '''Top position of the widget.\n\n :attr:`top` is an :class:`~kivy.properties.AliasProperty` of\n (:attr:`y` + :attr:`height`).\n '''\n\n def get_center_x(self):\n return self.x + self.width / 2.\n\n def set_center_x(self, value):\n self.x = value - self.width / 2.\n\n center_x = AliasProperty(get_center_x, set_center_x, bind=('x', 'width'))\n '''X center position of the widget.\n\n :attr:`center_x` is an :class:`~kivy.properties.AliasProperty` of\n (:attr:`x` + :attr:`width` / 2.).\n '''\n\n def get_center_y(self):\n return self.y + self.height / 2.\n\n def set_center_y(self, value):\n self.y = value - self.height / 2.\n\n center_y = AliasProperty(get_center_y, set_center_y, bind=('y', 'height'))\n '''Y center position of the widget.\n\n :attr:`center_y` is an :class:`~kivy.properties.AliasProperty` of\n (:attr:`y` + :attr:`height` / 2.).\n '''\n\n center = ReferenceListProperty(center_x, center_y)\n '''Center position of the widget.\n\n :attr:`center` is a :class:`~kivy.properties.ReferenceListProperty` of\n (:attr:`center_x`, :attr:`center_y`) properties.\n '''\n\n cls = ListProperty([])\n '''Class of the widget, used for styling.\n '''\n\n id = StringProperty(None, allownone=True)\n '''Unique identifier of the widget in the tree.\n\n :attr:`id` is a :class:`~kivy.properties.StringProperty` and defaults to\n None.\n\n .. warning::\n\n If the :attr:`id` is already used in the tree, an exception will\n be raised.\n '''\n\n children = ListProperty([])\n '''List of children of this widget.\n\n :attr:`children` is a :class:`~kivy.properties.ListProperty` and\n defaults to an empty list.\n\n Use :meth:`add_widget` and :meth:`remove_widget` for manipulating the\n children list. Don't manipulate the children list directly unless you know\n what you are doing.\n '''\n\n parent = ObjectProperty(None, allownone=True, rebind=True)\n '''Parent of this widget. The parent of a widget is set when the widget\n is added to another widget and unset when the widget is removed from its\n parent.\n\n :attr:`parent` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to None.\n '''\n\n size_hint_x = NumericProperty(1, allownone=True)\n '''X size hint. Represents how much space the widget should use in the\n direction of the X axis relative to its parent's width.\n Only the :class:`~kivy.uix.layout.Layout` and\n :class:`~kivy.core.window.Window` classes make use of the hint.\n\n The size_hint is used by layouts for two purposes:\n\n - When the layout considers widgets on their own rather than in\n relation to its other children, the size_hint_x is a direct proportion\n of the parent width, normally between 0.0 and 1.0. For instance, a\n widget with ``size_hint_x=0.5`` in\n a vertical BoxLayout will take up half the BoxLayout's width, or\n a widget in a FloatLayout with ``size_hint_x=0.2`` will take up 20%\n of the FloatLayout width. If the size_hint is greater than 1, the\n widget will be wider than the parent.\n - When multiple widgets can share a row of a layout, such as in a\n horizontal BoxLayout, their widths will be their size_hint_x as a\n fraction of the sum of widget size_hints. For instance, if the\n size_hint_xs are (0.5, 1.0, 0.5), the first widget will have a\n width of 25% of the parent width.\n\n :attr:`size_hint_x` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 1.\n '''\n\n size_hint_y = NumericProperty(1, allownone=True)\n '''Y size hint.\n\n :attr:`size_hint_y` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 1.\n\n See :attr:`size_hint_x` for more information, but with widths and heights\n swapped.\n '''\n\n size_hint = ReferenceListProperty(size_hint_x, size_hint_y)\n '''Size hint.\n\n :attr:`size_hint` is a :class:`~kivy.properties.ReferenceListProperty` of\n (:attr:`size_hint_x`, :attr:`size_hint_y`) properties.\n\n See :attr:`size_hint_x` for more information.\n '''\n\n pos_hint = ObjectProperty({})\n '''Position hint. This property allows you to set the position of\n the widget inside its parent layout, in percent (similar to\n size_hint).\n\n For example, if you want to set the top of the widget to be at 90%\n height of its parent layout, you can write::\n\n widget = Widget(pos_hint={'top': 0.9})\n\n The keys 'x', 'right' and 'center_x' will use the parent width.\n The keys 'y', 'top' and 'center_y' will use the parent height.\n\n See :doc:`api-kivy.uix.floatlayout` for further reference.\n\n .. note::\n :attr:`pos_hint` is not used by all layouts. Check the documentation\n of the layout in question to see if it supports pos_hint.\n\n :attr:`pos_hint` is an :class:`~kivy.properties.ObjectProperty`\n containing a dict.\n '''\n\n ids = DictProperty({})\n '''This is a dictionary of ids defined in your kv language. This will only\n be populated if you use ids in your kv language code.\n\n .. versionadded:: 1.7.0\n\n :attr:`ids` is a :class:`~kivy.properties.DictProperty` and defaults to an\n empty dict {}.\n\n The :attr:`ids` are populated for each root level widget definition. For\n example:\n\n .. code-block:: kv\n\n # in kv\n <MyWidget@Widget>:\n id: my_widget\n Label:\n id: label_widget\n Widget:\n id: inner_widget\n Label:\n id: inner_label\n TextInput:\n id: text_input\n OtherWidget:\n id: other_widget\n\n\n <OtherWidget@Widget>\n id: other_widget\n Label:\n id: other_label\n TextInput:\n id: other_textinput\n\n Then, in python:\n\n .. code-block:: python\n\n >>> widget = MyWidget()\n >>> print(widget.ids)\n {'other_widget': <weakproxy at 041CFED0 to OtherWidget at 041BEC38>,\n 'inner_widget': <weakproxy at 04137EA0 to Widget at 04138228>,\n 'inner_label': <weakproxy at 04143540 to Label at 04138260>,\n 'label_widget': <weakproxy at 04137B70 to Label at 040F97A0>,\n 'text_input': <weakproxy at 041BB5D0 to TextInput at 041BEC00>}\n >>> print(widget.ids['other_widget'].ids)\n {'other_textinput': <weakproxy at 041DBB40 to TextInput at 041BEF48>,\n 'other_label': <weakproxy at 041DB570 to Label at 041BEEA0>}\n >>> print(widget.ids['label_widget'].ids)\n {}\n '''\n\n opacity = NumericProperty(1.0)\n '''Opacity of the widget and all its children.\n\n .. versionadded:: 1.4.1\n\n The opacity attribute controls the opacity of the widget and its children.\n Be careful, it's a cumulative attribute: the value is multiplied by the\n current global opacity and the result is applied to the current context\n color.\n\n For example, if the parent has an opacity of 0.5 and a child has an\n opacity of 0.2, the real opacity of the child will be 0.5 * 0.2 = 0.1.\n\n Then, the opacity is applied by the shader as:\n\n .. code-block:: python\n\n frag_color = color * vec4(1.0, 1.0, 1.0, opacity);\n\n :attr:`opacity` is a :class:`~kivy.properties.NumericProperty` and defaults\n to 1.0.\n '''\n\n def on_opacity(self, instance, value):\n canvas = self.canvas\n if canvas is not None:\n canvas.opacity = value\n\n canvas = None\n '''Canvas of the widget.\n\n The canvas is a graphics object that contains all the drawing instructions\n for the graphical representation of the widget.\n\n There are no general properties for the Widget class, such as background\n color, to keep the design simple and lean. Some derived classes, such as\n Button, do add such convenience properties but generally the developer is\n responsible for implementing the graphics representation for a custom\n widget from the ground up. See the derived widget classes for patterns to\n follow and extend.\n\n See :class:`~kivy.graphics.Canvas` for more information about the usage.\n '''\n\n disabled = BooleanProperty(False)\n '''Indicates whether this widget can interact with input or not.\n\n .. note::\n\n 1. Child Widgets, when added to a disabled widget, will be disabled\n automatically.\n 2. Disabling/enabling a parent disables/enables all\n of its children.\n\n .. versionadded:: 1.8.0\n\n :attr:`disabled` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to False.\n '''\n", "path": "kivy/uix/widget.py"}], "after_files": [{"content": "'''\nWidget class\n============\n\nThe :class:`Widget` class is the base class required for creating Widgets.\nThis widget class was designed with a couple of principles in mind:\n\n* *Event Driven*\n\n Widget interaction is built on top of events that occur. If a property\n changes, the widget can respond to the change in the 'on_<propname>'\n callback. If nothing changes, nothing will be done. That's the main\n goal of the :class:`~kivy.properties.Property` class.\n\n* *Separation Of Concerns (the widget and its graphical representation)*\n\n Widgets don't have a `draw()` method. This is done on purpose: The idea\n is to allow you to create your own graphical representation outside the\n widget class.\n Obviously you can still use all the available properties to do that, so\n that your representation properly reflects the widget's current state.\n Every widget has its own :class:`~kivy.graphics.Canvas` that you\n can use to draw. This separation allows Kivy to run your\n application in a very efficient manner.\n\n* *Bounding Box / Collision*\n\n Often you want to know if a certain point is within the bounds of your\n widget. An example would be a button widget where you only want to\n trigger an action when the button itself is actually touched.\n For this, you can use the :meth:`~Widget.collide_point` method, which\n will return True if the point you pass to it is inside the axis-aligned\n bounding box defined by the widget's position and size.\n If a simple AABB is not sufficient, you can override the method to\n perform the collision checks with more complex shapes, e.g. a polygon.\n You can also check if a widget collides with another widget with\n :meth:`~Widget.collide_widget`.\n\n\nWe also have some default values and behaviors that you should be aware of:\n\n* A :class:`Widget` is not a :class:`~kivy.uix.layout.Layout`: it will not\n change the position or the size of its children. If you want control over\n positioning or sizing, use a :class:`~kivy.uix.layout.Layout`.\n\n* The default size of a widget is (100, 100). This is only changed if the\n parent is a :class:`~kivy.uix.layout.Layout`.\n For example, if you add a :class:`Label` inside a\n :class:`Button`, the label will not inherit the button's size or position\n because the button is not a *Layout*: it's just another *Widget*.\n\n* The default size_hint is (1, 1). If the parent is a :class:`Layout`, then the\n widget size will be the parent layout's size.\n\n* :meth:`~Widget.on_touch_down`, :meth:`~Widget.on_touch_move`,\n :meth:`~Widget.on_touch_up` don't do any sort of collisions. If you want to\n know if the touch is inside your widget, use :meth:`~Widget.collide_point`.\n\nUsing Properties\n----------------\n\nWhen you read the documentation, all properties are described in the format::\n\n <name> is a <property class> and defaults to <default value>.\n\ne.g.\n\n :attr:`~kivy.uix.label.Label.text` is a\n :class:`~kivy.properties.StringProperty` and defaults to ''.\n\nIf you want to be notified when the pos attribute changes, i.e. when the\nwidget moves, you can bind your own callback function like this::\n\n def callback_pos(instance, value):\n print('The widget', instance, 'moved to', value)\n\n wid = Widget()\n wid.bind(pos=callback_pos)\n\nRead more about :doc:`/api-kivy.properties`.\n\nBasic drawing\n-------------\n\nWidgets support a range of drawing instructions that you can use to customize\nthe look of your widgets and layouts. For example, to draw a background image\nfor your widget, you can do the following:\n\n.. code-block:: python\n\n def redraw(self, args):\n self.bg_rect.size = self.size\n self.bg_rect.pos = self.pos\n\n widget = Widget()\n with widget.canvas:\n widget.bg_rect = Rectangle(source=\"cover.jpg\", pos=self.pos, \\\nsize=self.size)\n widget.bind(pos=redraw, size=redraw)\n\nTo draw a background in kv:\n\n.. code-block:: kv\n\n Widget:\n canvas:\n Rectangle:\n source: \"cover.jpg\"\n size: self.size\n pos: self.pos\n\nThese examples only scratch the surface. Please see the :mod:`kivy.graphics`\ndocumentation for more information.\n\n.. _widget-event-bubbling:\n\nWidget touch event bubbling\n---------------------------\n\nWhen you catch touch events between multiple widgets, you often\nneed to be aware of the order in which these events are propagated. In Kivy,\nevents bubble up from the first child upwards through the other children.\nIf a widget has children, the event is passed through its children before\nbeing passed on to the widget after it.\n\nAs the :meth:`~kivy.uix.widget.Widget.on_touch_up` method inserts widgets at\nindex 0 by default, this means the event goes from the most recently added\nwidget back to the first one added. Consider the following:\n\n.. code-block:: python\n\n box = BoxLayout()\n box.add_widget(Label(text=\"a\"))\n box.add_widget(Label(text=\"b\"))\n box.add_widget(Label(text=\"c\"))\n\nThe label with text \"c\" gets the event first, \"b\" second and \"a\" last. You can\nreverse this order by manually specifying the index:\n\n.. code-block:: python\n\n box = BoxLayout()\n box.add_widget(Label(text=\"a\"), index=0)\n box.add_widget(Label(text=\"b\"), index=1)\n box.add_widget(Label(text=\"c\"), index=2)\n\nNow the order would be \"a\", \"b\" then \"c\". One thing to keep in mind when using\nkv is that declaring a widget uses the\n:meth:`~kivy.uix.widget.Widget.add_widget` method for insertion. Hence, using\n\n.. code-block:: kv\n\n BoxLayout:\n MyLabel:\n text: \"a\"\n MyLabel:\n text: \"b\"\n MyLabel:\n text: \"c\"\n\nwould result in the event order \"c\", \"b\" then \"a\" as \"c\" was actually the last\nadded widget. It thus has index 0, \"b\" index 1 and \"a\" index 2. Effectively,\nthe child order is the reverse of its listed order.\n\nThis ordering is the same for the :meth:`~kivy.uix.widget.Widget.on_touch_move`\nand :meth:`~kivy.uix.widget.Widget.on_touch_up` events.\n\nIn order to stop this event bubbling, a method can return `True`. This tells\nKivy the event has been handled and the event propagation stops. For example:\n\n.. code-block:: python\n\n class MyWidget(Widget):\n def on_touch_down(self, touch):\n If <some_condition>:\n # Do stuff here and kill the event\n return True\n else:\n return super(MyWidget, self).on_touch_down(touch)\n\nThis approach gives you good control over exactly how events are dispatched\nand managed. Sometimes, however, you may wish to let the event be completely\npropagated before taking action. You can use the\n:class:`~kivy.clock.Clock` to help you here:\n\n.. code-block:: python\n\n class MyWidget(Label):\n def on_touch_down(self, touch, after=False):\n if after:\n print \"Fired after the event has been dispatched!\"\n else:\n Clock.schedule_once(lambda dt: self.on_touch_down(touch, True))\n return super(MyWidget, self).on_touch_down(touch)\n\nUsage of :attr:`Widget.center`, :attr:`Widget.right`, and :attr:`Widget.top`\n----------------------------------------------------------------------------\n\nA common mistake when using one of the computed properties such as\n:attr:`Widget.right` is to use it to make a widget follow its parent with a\nKV rule such as `right: self.parent.right`. Consider, for example:\n\n.. code-block:: kv\n\n FloatLayout:\n id: layout\n width: 100\n Widget:\n id: wid\n right: layout.right\n\nThe (mistaken) expectation is that this rule ensures that wid's right will\nalways be whatever layout's right is - that is wid.right and layout.right will\nalways be identical. In actual fact, this rule only says that \"whenever\nlayout's `right` changes, wid's right will be set to that value\". The\ndifference being that as long as `layout.right` doesn't change, `wid.right`\ncould be anything, even a value that will make them different.\n\nSpecifically, for the KV code above, consider the following example::\n\n >>> print(layout.right, wid.right)\n (100, 100)\n >>> wid.x = 200\n >>> print(layout.right, wid.right)\n (100, 300)\n\nAs can be seen, initially they are in sync, however, when we change `wid.x`\nthey go out of sync because `layout.right` is not changed and the rule is not\ntriggered.\n\nThe proper way to make the widget follow its parent's right is to use\n:attr:`Widget.pos_hint`. If instead of `right: layout.right` we did\n`pos_hint: {'right': 1}`, then the widgets right will always be set to be\nat the parent's right at each layout update.\n'''\n\n__all__ = ('Widget', 'WidgetException')\n\nfrom kivy.event import EventDispatcher\nfrom kivy.factory import Factory\nfrom kivy.properties import (\n NumericProperty, StringProperty, AliasProperty, ReferenceListProperty,\n ObjectProperty, ListProperty, DictProperty, BooleanProperty)\nfrom kivy.graphics import (\n Canvas, Translate, Fbo, ClearColor, ClearBuffers, Scale)\nfrom kivy.graphics.transformation import Matrix\nfrom kivy.base import EventLoop\nfrom kivy.lang import Builder\nfrom kivy.context import get_current_context\nfrom kivy.weakproxy import WeakProxy\nfrom functools import partial\nfrom itertools import islice\n\n\n# References to all the widget destructors (partial method with widget uid as\n# key).\n_widget_destructors = {}\n\n\ndef _widget_destructor(uid, r):\n # Internal method called when a widget is deleted from memory. the only\n # thing we remember about it is its uid. Clear all the associated callbacks\n # created in kv language.\n del _widget_destructors[uid]\n Builder.unbind_widget(uid)\n\n\nclass WidgetException(Exception):\n '''Fired when the widget gets an exception.\n '''\n pass\n\n\nclass WidgetMetaclass(type):\n '''Metaclass to automatically register new widgets for the\n :class:`~kivy.factory.Factory`.\n\n .. warning::\n This metaclass is used by the Widget. Do not use it directly!\n '''\n def __init__(mcs, name, bases, attrs):\n super(WidgetMetaclass, mcs).__init__(name, bases, attrs)\n Factory.register(name, cls=mcs)\n\n\n#: Base class used for Widget, that inherits from :class:`EventDispatcher`\nWidgetBase = WidgetMetaclass('WidgetBase', (EventDispatcher, ), {})\n\n\nclass Widget(WidgetBase):\n '''Widget class. See module documentation for more information.\n\n :Events:\n `on_touch_down`:\n Fired when a new touch event occurs\n `on_touch_move`:\n Fired when an existing touch moves\n `on_touch_up`:\n Fired when an existing touch disappears\n\n .. warning::\n Adding a `__del__` method to a class derived from Widget with Python\n prior to 3.4 will disable automatic garbage collection for instances\n of that class. This is because the Widget class creates reference\n cycles, thereby `preventing garbage collection\n <https://docs.python.org/2/library/gc.html#gc.garbage>`_.\n\n .. versionchanged:: 1.0.9\n Everything related to event properties has been moved to the\n :class:`~kivy.event.EventDispatcher`. Event properties can now be used\n when contructing a simple class without subclassing :class:`Widget`.\n\n .. versionchanged:: 1.5.0\n The constructor now accepts on_* arguments to automatically bind\n callbacks to properties or events, as in the Kv language.\n '''\n\n __metaclass__ = WidgetMetaclass\n __events__ = ('on_touch_down', 'on_touch_move', 'on_touch_up')\n _proxy_ref = None\n\n def __init__(self, **kwargs):\n # Before doing anything, ensure the windows exist.\n EventLoop.ensure_window()\n\n # Assign the default context of the widget creation.\n if not hasattr(self, '_context'):\n self._context = get_current_context()\n\n no_builder = '__no_builder' in kwargs\n if no_builder:\n del kwargs['__no_builder']\n on_args = {k: v for k, v in kwargs.items() if k[:3] == 'on_'}\n for key in on_args:\n del kwargs[key]\n\n super(Widget, self).__init__(**kwargs)\n\n # Create the default canvas if it does not exist.\n if self.canvas is None:\n self.canvas = Canvas(opacity=self.opacity)\n\n # Apply all the styles.\n if not no_builder:\n Builder.apply(self, ignored_consts=self._kwargs_applied_init)\n\n # Bind all the events.\n if on_args:\n self.bind(**on_args)\n\n @property\n def proxy_ref(self):\n '''Return a proxy reference to the widget, i.e. without creating a\n reference to the widget. See `weakref.proxy\n <http://docs.python.org/2/library/weakref.html?highlight\\\n =proxy#weakref.proxy>`_ for more information.\n\n .. versionadded:: 1.7.2\n '''\n _proxy_ref = self._proxy_ref\n if _proxy_ref is not None:\n return _proxy_ref\n\n f = partial(_widget_destructor, self.uid)\n self._proxy_ref = _proxy_ref = WeakProxy(self, f)\n # Only f should be enough here, but it appears that is a very\n # specific case, the proxy destructor is not called if both f and\n # _proxy_ref are not together in a tuple.\n _widget_destructors[self.uid] = (f, _proxy_ref)\n return _proxy_ref\n\n def __hash__(self):\n return id(self)\n\n @property\n def __self__(self):\n return self\n\n #\n # Collision\n #\n def collide_point(self, x, y):\n '''\n Check if a point (x, y) is inside the widget's axis aligned bounding\n box.\n\n :Parameters:\n `x`: numeric\n x position of the point (in window coordinates)\n `y`: numeric\n y position of the point (in window coordinates)\n\n :Returns:\n A bool. True if the point is inside the bounding box, False\n otherwise.\n\n .. code-block:: python\n\n >>> Widget(pos=(10, 10), size=(50, 50)).collide_point(40, 40)\n True\n '''\n return self.x <= x <= self.right and self.y <= y <= self.top\n\n def collide_widget(self, wid):\n '''\n Check if another widget collides with this widget. This function\n performs an axis-aligned bounding box intersection test by default.\n\n :Parameters:\n `wid`: :class:`Widget` class\n Widget to collide with.\n\n :Returns:\n bool. True if the other widget collides with this widget, False\n otherwise.\n\n .. code-block:: python\n\n >>> wid = Widget(size=(50, 50))\n >>> wid2 = Widget(size=(50, 50), pos=(25, 25))\n >>> wid.collide_widget(wid2)\n True\n >>> wid2.pos = (55, 55)\n >>> wid.collide_widget(wid2)\n False\n '''\n if self.right < wid.x:\n return False\n if self.x > wid.right:\n return False\n if self.top < wid.y:\n return False\n if self.y > wid.top:\n return False\n return True\n\n #\n # Default event handlers\n #\n def on_touch_down(self, touch):\n '''Receive a touch down event.\n\n :Parameters:\n `touch`: :class:`~kivy.input.motionevent.MotionEvent` class\n Touch received. The touch is in parent coordinates. See\n :mod:`~kivy.uix.relativelayout` for a discussion on\n coordinate systems.\n\n :Returns:\n bool. If True, the dispatching of the touch event will stop.\n If False, the event will continue to be dispatched to the rest\n of the widget tree.\n '''\n if self.disabled and self.collide_point(*touch.pos):\n return True\n for child in self.children[:]:\n if child.dispatch('on_touch_down', touch):\n return True\n\n def on_touch_move(self, touch):\n '''Receive a touch move event. The touch is in parent coordinates.\n\n See :meth:`on_touch_down` for more information.\n '''\n if self.disabled:\n return\n for child in self.children[:]:\n if child.dispatch('on_touch_move', touch):\n return True\n\n def on_touch_up(self, touch):\n '''Receive a touch up event. The touch is in parent coordinates.\n\n See :meth:`on_touch_down` for more information.\n '''\n if self.disabled:\n return\n for child in self.children[:]:\n if child.dispatch('on_touch_up', touch):\n return True\n\n def on_disabled(self, instance, value):\n for child in self.children:\n child.disabled = value\n\n #\n # Tree management\n #\n def add_widget(self, widget, index=0, canvas=None):\n '''Add a new widget as a child of this widget.\n\n :Parameters:\n `widget`: :class:`Widget`\n Widget to add to our list of children.\n `index`: int, defaults to 0\n Index to insert the widget in the list. Notice that the default\n of 0 means the widget is inserted at the beginning of the list\n and will thus be drawn on top of other sibling widgets. For a\n full discussion of the index and widget hierarchy, please see\n the :doc:`Widgets Programming Guide <guide/widgets>`.\n\n .. versionadded:: 1.0.5\n `canvas`: str, defaults to None\n Canvas to add widget's canvas to. Can be 'before', 'after' or\n None for the default canvas.\n\n .. versionadded:: 1.9.0\n\n .. code-block:: python\n\n >>> from kivy.uix.button import Button\n >>> from kivy.uix.slider import Slider\n >>> root = Widget()\n >>> root.add_widget(Button())\n >>> slider = Slider()\n >>> root.add_widget(slider)\n\n '''\n if not isinstance(widget, Widget):\n raise WidgetException(\n 'add_widget() can be used only with instances'\n ' of the Widget class.')\n\n widget = widget.__self__\n if widget is self:\n raise WidgetException(\n 'Widget instances cannot be added to themselves.')\n parent = widget.parent\n # Check if the widget is already a child of another widget.\n if parent:\n raise WidgetException('Cannot add %r, it already has a parent %r'\n % (widget, parent))\n widget.parent = parent = self\n # Child will be disabled if added to a disabled parent.\n if parent.disabled:\n widget.disabled = True\n\n canvas = self.canvas.before if canvas == 'before' else \\\n self.canvas.after if canvas == 'after' else self.canvas\n\n if index == 0 or len(self.children) == 0:\n self.children.insert(0, widget)\n canvas.add(widget.canvas)\n else:\n canvas = self.canvas\n children = self.children\n if index >= len(children):\n index = len(children)\n next_index = canvas.indexof(children[-1].canvas)\n else:\n next_child = children[index]\n next_index = canvas.indexof(next_child.canvas)\n if next_index == -1:\n next_index = canvas.length()\n else:\n next_index += 1\n\n children.insert(index, widget)\n # We never want to insert widget _before_ canvas.before.\n if next_index == 0 and canvas.has_before:\n next_index = 1\n canvas.insert(next_index, widget.canvas)\n\n def remove_widget(self, widget):\n '''Remove a widget from the children of this widget.\n\n :Parameters:\n `widget`: :class:`Widget`\n Widget to remove from our children list.\n\n .. code-block:: python\n\n >>> from kivy.uix.button import Button\n >>> root = Widget()\n >>> button = Button()\n >>> root.add_widget(button)\n >>> root.remove_widget(button)\n '''\n if widget not in self.children:\n return\n self.children.remove(widget)\n if widget.canvas in self.canvas.children:\n self.canvas.remove(widget.canvas)\n elif widget.canvas in self.canvas.after.children:\n self.canvas.after.remove(widget.canvas)\n elif widget.canvas in self.canvas.before.children:\n self.canvas.before.remove(widget.canvas)\n widget.parent = None\n\n def clear_widgets(self, children=None):\n '''\n Remove all (or the specified) :attr:`~Widget.children` of this widget.\n If the 'children' argument is specified, it should be a list (or\n filtered list) of children of the current widget.\n\n .. versionchanged:: 1.8.0\n The `children` argument can be used to specify the children you\n want to remove.\n '''\n\n if not children:\n children = self.children\n remove_widget = self.remove_widget\n for child in children[:]:\n remove_widget(child)\n\n def export_to_png(self, filename, *args):\n '''Saves an image of the widget and its children in png format at the\n specified filename. Works by removing the widget canvas from its\n parent, rendering to an :class:`~kivy.graphics.fbo.Fbo`, and calling\n :meth:`~kivy.graphics.texture.Texture.save`.\n\n .. note::\n\n The image includes only this widget and its children. If you want\n to include widgets elsewhere in the tree, you must call\n :meth:`~Widget.export_to_png` from their common parent, or use\n :meth:`~kivy.core.window.WindowBase.screenshot` to capture the whole\n window.\n\n .. note::\n\n The image will be saved in png format, you should include the\n extension in your filename.\n\n .. versionadded:: 1.9.0\n '''\n\n if self.parent is not None:\n canvas_parent_index = self.parent.canvas.indexof(self.canvas)\n if canvas_parent_index > -1:\n self.parent.canvas.remove(self.canvas)\n\n fbo = Fbo(size=self.size, with_stencilbuffer=True)\n\n with fbo:\n ClearColor(0, 0, 0, 1)\n ClearBuffers()\n Scale(1, -1, 1)\n Translate(-self.x, -self.y - self.height, 0)\n\n fbo.add(self.canvas)\n fbo.draw()\n fbo.texture.save(filename, flipped=False)\n fbo.remove(self.canvas)\n\n if self.parent is not None and canvas_parent_index > -1:\n self.parent.canvas.insert(canvas_parent_index, self.canvas)\n\n return True\n\n def get_root_window(self):\n '''Return the root window.\n\n :Returns:\n Instance of the root window. Can be a\n :class:`~kivy.core.window.WindowBase` or\n :class:`Widget`.\n '''\n if self.parent:\n return self.parent.get_root_window()\n\n def get_parent_window(self):\n '''Return the parent window.\n\n :Returns:\n Instance of the parent window. Can be a\n :class:`~kivy.core.window.WindowBase` or\n :class:`Widget`.\n '''\n if self.parent:\n return self.parent.get_parent_window()\n\n def _walk(self, restrict=False, loopback=False, index=None):\n # We pass index only when we are going on the parent\n # so don't yield the parent as well.\n if index is None:\n index = len(self.children)\n yield self\n\n for child in reversed(self.children[:index]):\n for walk_child in child._walk(restrict=True):\n yield walk_child\n\n # If we want to continue with our parent, just do it.\n if not restrict:\n parent = self.parent\n try:\n if parent is None or not isinstance(parent, Widget):\n raise ValueError\n index = parent.children.index(self)\n except ValueError:\n # Self is root, if we want to loopback from the first element:\n if not loopback:\n return\n # If we started with root (i.e. index==None), then we have to\n # start from root again, so we return self again. Otherwise, we\n # never returned it, so return it now starting with it.\n parent = self\n index = None\n for walk_child in parent._walk(loopback=loopback, index=index):\n yield walk_child\n\n def walk(self, restrict=False, loopback=False):\n ''' Iterator that walks the widget tree starting with this widget and\n goes forward returning widgets in the order in which layouts display\n them.\n\n :Parameters:\n `restrict`: bool, defaults to False\n If True, it will only iterate through the widget and its\n children (or children of its children etc.). Defaults to False.\n `loopback`: bool, defaults to False\n If True, when the last widget in the tree is reached,\n it'll loop back to the uppermost root and start walking until\n we hit this widget again. Naturally, it can only loop back when\n `restrict` is False. Defaults to False.\n\n :return:\n A generator that walks the tree, returning widgets in the\n forward layout order.\n\n For example, given a tree with the following structure:\n\n .. code-block:: kv\n\n GridLayout:\n Button\n BoxLayout:\n id: box\n Widget\n Button\n Widget\n\n walking this tree:\n\n .. code-block:: python\n\n >>> # Call walk on box with loopback True, and restrict False\n >>> [type(widget) for widget in box.walk(loopback=True)]\n [<class 'BoxLayout'>, <class 'Widget'>, <class 'Button'>,\n <class 'Widget'>, <class 'GridLayout'>, <class 'Button'>]\n >>> # Now with loopback False, and restrict False\n >>> [type(widget) for widget in box.walk()]\n [<class 'BoxLayout'>, <class 'Widget'>, <class 'Button'>,\n <class 'Widget'>]\n >>> # Now with restrict True\n >>> [type(widget) for widget in box.walk(restrict=True)]\n [<class 'BoxLayout'>, <class 'Widget'>, <class 'Button'>]\n\n .. versionadded:: 1.9.0\n '''\n gen = self._walk(restrict, loopback)\n yield next(gen)\n for node in gen:\n if node is self:\n return\n yield node\n\n def _walk_reverse(self, loopback=False, go_up=False):\n # process is walk up level, walk down its children tree, then walk up\n # next level etc.\n # default just walk down the children tree\n root = self\n index = 0\n # we need to go up a level before walking tree\n if go_up:\n root = self.parent\n try:\n if root is None or not isinstance(root, Widget):\n raise ValueError\n index = root.children.index(self) + 1\n except ValueError:\n if not loopback:\n return\n index = 0\n go_up = False\n root = self\n\n # now walk children tree starting with last-most child\n for child in islice(root.children, index, None):\n for walk_child in child._walk_reverse(loopback=loopback):\n yield walk_child\n # we need to return ourself last, in all cases\n yield root\n\n # if going up, continue walking up the parent tree\n if go_up:\n for walk_child in root._walk_reverse(loopback=loopback,\n go_up=go_up):\n yield walk_child\n\n def walk_reverse(self, loopback=False):\n ''' Iterator that walks the widget tree backwards starting with the\n widget before this, and going backwards returning widgets in the\n reverse order in which layouts display them.\n\n This walks in the opposite direction of :meth:`walk`, so a list of the\n tree generated with :meth:`walk` will be in reverse order compared\n to the list generated with this, provided `loopback` is True.\n\n :Parameters:\n `loopback`: bool, defaults to False\n If True, when the uppermost root in the tree is\n reached, it'll loop back to the last widget and start walking\n back until after we hit widget again. Defaults to False.\n\n :return:\n A generator that walks the tree, returning widgets in the\n reverse layout order.\n\n For example, given a tree with the following structure:\n\n .. code-block:: kv\n\n GridLayout:\n Button\n BoxLayout:\n id: box\n Widget\n Button\n Widget\n\n walking this tree:\n\n .. code-block:: python\n\n >>> # Call walk on box with loopback True\n >>> [type(widget) for widget in box.walk_reverse(loopback=True)]\n [<class 'Button'>, <class 'GridLayout'>, <class 'Widget'>,\n <class 'Button'>, <class 'Widget'>, <class 'BoxLayout'>]\n >>> # Now with loopback False\n >>> [type(widget) for widget in box.walk_reverse()]\n [<class 'Button'>, <class 'GridLayout'>]\n >>> forward = [w for w in box.walk(loopback=True)]\n >>> backward = [w for w in box.walk_reverse(loopback=True)]\n >>> forward == backward[::-1]\n True\n\n .. versionadded:: 1.9.0\n\n '''\n for node in self._walk_reverse(loopback=loopback, go_up=True):\n yield node\n if node is self:\n return\n\n def to_widget(self, x, y, relative=False):\n '''Convert the given coordinate from window to local widget\n coordinates. See :mod:`~kivy.uix.relativelayout` for details on the\n coordinate systems.\n '''\n if self.parent:\n x, y = self.parent.to_widget(x, y)\n return self.to_local(x, y, relative=relative)\n\n def to_window(self, x, y, initial=True, relative=False):\n '''Transform local coordinates to window coordinates. See\n :mod:`~kivy.uix.relativelayout` for details on the coordinate systems.\n '''\n if not initial:\n x, y = self.to_parent(x, y, relative=relative)\n if self.parent:\n return self.parent.to_window(x, y, initial=False,\n relative=relative)\n return (x, y)\n\n def to_parent(self, x, y, relative=False):\n '''Transform local coordinates to parent coordinates. See\n :mod:`~kivy.uix.relativelayout` for details on the coordinate systems.\n\n :Parameters:\n `relative`: bool, defaults to False\n Change to True if you want to translate relative positions from\n a widget to its parent coordinates.\n '''\n if relative:\n return (x + self.x, y + self.y)\n return (x, y)\n\n def to_local(self, x, y, relative=False):\n '''Transform parent coordinates to local coordinates. See\n :mod:`~kivy.uix.relativelayout` for details on the coordinate systems.\n\n :Parameters:\n `relative`: bool, defaults to False\n Change to True if you want to translate coordinates to\n relative widget coordinates.\n '''\n if relative:\n return (x - self.x, y - self.y)\n return (x, y)\n\n def _apply_transform(self, m, pos=None):\n if self.parent:\n x, y = self.parent.to_widget(relative=True,\n *self.to_window(*(pos or self.pos)))\n m.translate(x, y, 0)\n m = self.parent._apply_transform(m) if self.parent else m\n return m\n\n def get_window_matrix(self, x=0, y=0):\n '''Calculate the transformation matrix to convert between window and\n widget coordinates.\n\n :Parameters:\n `x`: float, defaults to 0\n Translates the matrix on the x axis.\n `y`: float, defaults to 0\n Translates the matrix on the y axis.\n '''\n m = Matrix()\n m.translate(x, y, 0)\n m = self._apply_transform(m)\n return m\n\n x = NumericProperty(0)\n '''X position of the widget.\n\n :attr:`x` is a :class:`~kivy.properties.NumericProperty` and defaults to 0.\n '''\n\n y = NumericProperty(0)\n '''Y position of the widget.\n\n :attr:`y` is a :class:`~kivy.properties.NumericProperty` and defaults to 0.\n '''\n\n width = NumericProperty(100)\n '''Width of the widget.\n\n :attr:`width` is a :class:`~kivy.properties.NumericProperty` and defaults\n to 100.\n\n .. warning::\n Keep in mind that the `width` property is subject to layout logic and\n that this has not yet happened at the time of the widget's `__init__`\n method.\n '''\n\n height = NumericProperty(100)\n '''Height of the widget.\n\n :attr:`height` is a :class:`~kivy.properties.NumericProperty` and defaults\n to 100.\n\n .. warning::\n Keep in mind that the `height` property is subject to layout logic and\n that this has not yet happened at the time of the widget's `__init__`\n method.\n '''\n\n pos = ReferenceListProperty(x, y)\n '''Position of the widget.\n\n :attr:`pos` is a :class:`~kivy.properties.ReferenceListProperty` of\n (:attr:`x`, :attr:`y`) properties.\n '''\n\n size = ReferenceListProperty(width, height)\n '''Size of the widget.\n\n :attr:`size` is a :class:`~kivy.properties.ReferenceListProperty` of\n (:attr:`width`, :attr:`height`) properties.\n '''\n\n def get_right(self):\n return self.x + self.width\n\n def set_right(self, value):\n self.x = value - self.width\n\n right = AliasProperty(get_right, set_right, bind=('x', 'width'))\n '''Right position of the widget.\n\n :attr:`right` is an :class:`~kivy.properties.AliasProperty` of\n (:attr:`x` + :attr:`width`).\n '''\n\n def get_top(self):\n return self.y + self.height\n\n def set_top(self, value):\n self.y = value - self.height\n\n top = AliasProperty(get_top, set_top, bind=('y', 'height'))\n '''Top position of the widget.\n\n :attr:`top` is an :class:`~kivy.properties.AliasProperty` of\n (:attr:`y` + :attr:`height`).\n '''\n\n def get_center_x(self):\n return self.x + self.width / 2.\n\n def set_center_x(self, value):\n self.x = value - self.width / 2.\n\n center_x = AliasProperty(get_center_x, set_center_x, bind=('x', 'width'))\n '''X center position of the widget.\n\n :attr:`center_x` is an :class:`~kivy.properties.AliasProperty` of\n (:attr:`x` + :attr:`width` / 2.).\n '''\n\n def get_center_y(self):\n return self.y + self.height / 2.\n\n def set_center_y(self, value):\n self.y = value - self.height / 2.\n\n center_y = AliasProperty(get_center_y, set_center_y, bind=('y', 'height'))\n '''Y center position of the widget.\n\n :attr:`center_y` is an :class:`~kivy.properties.AliasProperty` of\n (:attr:`y` + :attr:`height` / 2.).\n '''\n\n center = ReferenceListProperty(center_x, center_y)\n '''Center position of the widget.\n\n :attr:`center` is a :class:`~kivy.properties.ReferenceListProperty` of\n (:attr:`center_x`, :attr:`center_y`) properties.\n '''\n\n cls = ListProperty([])\n '''Class of the widget, used for styling.\n '''\n\n id = StringProperty(None, allownone=True)\n '''Unique identifier of the widget in the tree.\n\n :attr:`id` is a :class:`~kivy.properties.StringProperty` and defaults to\n None.\n\n .. warning::\n\n If the :attr:`id` is already used in the tree, an exception will\n be raised.\n '''\n\n children = ListProperty([])\n '''List of children of this widget.\n\n :attr:`children` is a :class:`~kivy.properties.ListProperty` and\n defaults to an empty list.\n\n Use :meth:`add_widget` and :meth:`remove_widget` for manipulating the\n children list. Don't manipulate the children list directly unless you know\n what you are doing.\n '''\n\n parent = ObjectProperty(None, allownone=True, rebind=True)\n '''Parent of this widget. The parent of a widget is set when the widget\n is added to another widget and unset when the widget is removed from its\n parent.\n\n :attr:`parent` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to None.\n '''\n\n size_hint_x = NumericProperty(1, allownone=True)\n '''X size hint. Represents how much space the widget should use in the\n direction of the X axis relative to its parent's width.\n Only the :class:`~kivy.uix.layout.Layout` and\n :class:`~kivy.core.window.Window` classes make use of the hint.\n\n The size_hint is used by layouts for two purposes:\n\n - When the layout considers widgets on their own rather than in\n relation to its other children, the size_hint_x is a direct proportion\n of the parent width, normally between 0.0 and 1.0. For instance, a\n widget with ``size_hint_x=0.5`` in\n a vertical BoxLayout will take up half the BoxLayout's width, or\n a widget in a FloatLayout with ``size_hint_x=0.2`` will take up 20%\n of the FloatLayout width. If the size_hint is greater than 1, the\n widget will be wider than the parent.\n - When multiple widgets can share a row of a layout, such as in a\n horizontal BoxLayout, their widths will be their size_hint_x as a\n fraction of the sum of widget size_hints. For instance, if the\n size_hint_xs are (0.5, 1.0, 0.5), the first widget will have a\n width of 25% of the parent width.\n\n :attr:`size_hint_x` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 1.\n '''\n\n size_hint_y = NumericProperty(1, allownone=True)\n '''Y size hint.\n\n :attr:`size_hint_y` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 1.\n\n See :attr:`size_hint_x` for more information, but with widths and heights\n swapped.\n '''\n\n size_hint = ReferenceListProperty(size_hint_x, size_hint_y)\n '''Size hint.\n\n :attr:`size_hint` is a :class:`~kivy.properties.ReferenceListProperty` of\n (:attr:`size_hint_x`, :attr:`size_hint_y`) properties.\n\n See :attr:`size_hint_x` for more information.\n '''\n\n pos_hint = ObjectProperty({})\n '''Position hint. This property allows you to set the position of\n the widget inside its parent layout, in percent (similar to\n size_hint).\n\n For example, if you want to set the top of the widget to be at 90%\n height of its parent layout, you can write::\n\n widget = Widget(pos_hint={'top': 0.9})\n\n The keys 'x', 'right' and 'center_x' will use the parent width.\n The keys 'y', 'top' and 'center_y' will use the parent height.\n\n See :doc:`api-kivy.uix.floatlayout` for further reference.\n\n .. note::\n :attr:`pos_hint` is not used by all layouts. Check the documentation\n of the layout in question to see if it supports pos_hint.\n\n :attr:`pos_hint` is an :class:`~kivy.properties.ObjectProperty`\n containing a dict.\n '''\n\n ids = DictProperty({})\n '''This is a dictionary of ids defined in your kv language. This will only\n be populated if you use ids in your kv language code.\n\n .. versionadded:: 1.7.0\n\n :attr:`ids` is a :class:`~kivy.properties.DictProperty` and defaults to an\n empty dict {}.\n\n The :attr:`ids` are populated for each root level widget definition. For\n example:\n\n .. code-block:: kv\n\n # in kv\n <MyWidget@Widget>:\n id: my_widget\n Label:\n id: label_widget\n Widget:\n id: inner_widget\n Label:\n id: inner_label\n TextInput:\n id: text_input\n OtherWidget:\n id: other_widget\n\n\n <OtherWidget@Widget>\n id: other_widget\n Label:\n id: other_label\n TextInput:\n id: other_textinput\n\n Then, in python:\n\n .. code-block:: python\n\n >>> widget = MyWidget()\n >>> print(widget.ids)\n {'other_widget': <weakproxy at 041CFED0 to OtherWidget at 041BEC38>,\n 'inner_widget': <weakproxy at 04137EA0 to Widget at 04138228>,\n 'inner_label': <weakproxy at 04143540 to Label at 04138260>,\n 'label_widget': <weakproxy at 04137B70 to Label at 040F97A0>,\n 'text_input': <weakproxy at 041BB5D0 to TextInput at 041BEC00>}\n >>> print(widget.ids['other_widget'].ids)\n {'other_textinput': <weakproxy at 041DBB40 to TextInput at 041BEF48>,\n 'other_label': <weakproxy at 041DB570 to Label at 041BEEA0>}\n >>> print(widget.ids['label_widget'].ids)\n {}\n '''\n\n opacity = NumericProperty(1.0)\n '''Opacity of the widget and all its children.\n\n .. versionadded:: 1.4.1\n\n The opacity attribute controls the opacity of the widget and its children.\n Be careful, it's a cumulative attribute: the value is multiplied by the\n current global opacity and the result is applied to the current context\n color.\n\n For example, if the parent has an opacity of 0.5 and a child has an\n opacity of 0.2, the real opacity of the child will be 0.5 * 0.2 = 0.1.\n\n Then, the opacity is applied by the shader as:\n\n .. code-block:: python\n\n frag_color = color * vec4(1.0, 1.0, 1.0, opacity);\n\n :attr:`opacity` is a :class:`~kivy.properties.NumericProperty` and defaults\n to 1.0.\n '''\n\n def on_opacity(self, instance, value):\n canvas = self.canvas\n if canvas is not None:\n canvas.opacity = value\n\n canvas = None\n '''Canvas of the widget.\n\n The canvas is a graphics object that contains all the drawing instructions\n for the graphical representation of the widget.\n\n There are no general properties for the Widget class, such as background\n color, to keep the design simple and lean. Some derived classes, such as\n Button, do add such convenience properties but generally the developer is\n responsible for implementing the graphics representation for a custom\n widget from the ground up. See the derived widget classes for patterns to\n follow and extend.\n\n See :class:`~kivy.graphics.Canvas` for more information about the usage.\n '''\n\n disabled = BooleanProperty(False)\n '''Indicates whether this widget can interact with input or not.\n\n .. note::\n\n 1. Child Widgets, when added to a disabled widget, will be disabled\n automatically.\n 2. Disabling/enabling a parent disables/enables all\n of its children.\n\n .. versionadded:: 1.8.0\n\n :attr:`disabled` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to False.\n '''\n", "path": "kivy/uix/widget.py"}]} |
gh_patches_debug_1568 | rasdani/github-patches | git_diff | oppia__oppia-14800 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Schema validation error on uploading an exploration YAML
<!--
- Thanks for taking the time to report a bug in the Oppia project.
- Before filing a new issue, please do a quick search to check that it hasn't
- already been filed on the [issue tracker](https://github.com/oppia/oppia/issues)._
-->
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Set `ALLOW_YAML_FILE_UPLOAD` to true in `constants.ts`
2. Go to the creator dashboard.
3. Upload a test exploration YAML file from [here](https://github.com/oppia/oppia/tree/develop/data/explorations).
4. See error.
**Observed behavior**
Valid exploration YAML does not get uploaded.
**Expected behavior**
Valid exploration YAML should be uploaded correctly.
**Screenshots**
https://user-images.githubusercontent.com/11008603/150233493-27963be2-18ac-4c2d-a184-c39cd3268f09.mp4
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/controllers/creator_dashboard.py`
Content:
```
1 # Copyright 2014 The Oppia Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS-IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Controllers for the creator dashboard, notifications, and creating new
16 activities.
17 """
18
19 from __future__ import annotations
20
21 import logging
22
23 from core import feconf
24 from core import utils
25 from core.constants import constants
26 from core.controllers import acl_decorators
27 from core.controllers import base
28 from core.domain import collection_domain
29 from core.domain import collection_services
30 from core.domain import exp_domain
31 from core.domain import exp_fetchers
32 from core.domain import exp_services
33 from core.domain import feedback_services
34 from core.domain import role_services
35 from core.domain import subscription_services
36 from core.domain import suggestion_services
37 from core.domain import summary_services
38 from core.domain import topic_fetchers
39 from core.domain import user_services
40
41 EXPLORATION_ID_KEY = 'exploration_id'
42 COLLECTION_ID_KEY = 'collection_id'
43
44
45 class OldContributorDashboardRedirectPage(base.BaseHandler):
46 """Redirects the old contributor dashboard URL to the new one."""
47
48 URL_PATH_ARGS_SCHEMAS = {}
49 HANDLER_ARGS_SCHEMAS = {'GET': {}}
50
51 @acl_decorators.open_access
52 def get(self):
53 """Handles GET requests."""
54 self.redirect('/contributor-dashboard', permanent=True)
55
56
57 class OldCreatorDashboardRedirectPage(base.BaseHandler):
58 """Redirects the old creator dashboard URL to the new one."""
59
60 URL_PATH_ARGS_SCHEMAS = {}
61 HANDLER_ARGS_SCHEMAS = {'GET': {}}
62
63 @acl_decorators.open_access
64 def get(self):
65 """Handles GET requests."""
66 self.redirect(feconf.CREATOR_DASHBOARD_URL, permanent=True)
67
68
69 class CreatorDashboardPage(base.BaseHandler):
70 """Page showing the user's creator dashboard."""
71
72 ADDITIONAL_DEPENDENCY_IDS = ['codemirror']
73 URL_PATH_ARGS_SCHEMAS = {}
74 HANDLER_ARGS_SCHEMAS = {'GET': {}}
75
76 @acl_decorators.can_access_creator_dashboard
77 def get(self):
78
79 self.render_template('creator-dashboard-page.mainpage.html')
80
81
82 class CreatorDashboardHandler(base.BaseHandler):
83 """Provides data for the user's creator dashboard page."""
84
85 GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
86 URL_PATH_ARGS_SCHEMAS = {}
87 HANDLER_ARGS_SCHEMAS = {
88 'GET': {},
89 'POST': {
90 'display_preference': {
91 'schema': {
92 'type': 'basestring',
93 'choices': (
94 constants.ALLOWED_CREATOR_DASHBOARD_DISPLAY_PREFS
95 .values()
96 )
97 }
98 }
99 }
100 }
101
102 @acl_decorators.can_access_creator_dashboard
103 def get(self):
104 """Handles GET requests."""
105
106 def _round_average_ratings(rating):
107 """Returns the rounded average rating to display on the creator
108 dashboard.
109
110 Args:
111 rating: float. The rating of the lesson.
112
113 Returns:
114 float. The rounded average value of rating.
115 """
116 return round(
117 rating, feconf.AVERAGE_RATINGS_DASHBOARD_PRECISION)
118
119 subscribed_exploration_summaries = (
120 exp_fetchers.get_exploration_summaries_subscribed_to(
121 self.user_id))
122 subscribed_collection_summaries = (
123 collection_services.get_collection_summaries_subscribed_to(
124 self.user_id))
125
126 exploration_ids_subscribed_to = [
127 summary.id for summary in subscribed_exploration_summaries]
128
129 exp_summary_dicts = summary_services.get_displayable_exp_summary_dicts(
130 subscribed_exploration_summaries)
131 collection_summary_dicts = []
132
133 feedback_thread_analytics = (
134 feedback_services.get_thread_analytics_multi(
135 exploration_ids_subscribed_to))
136
137 # TODO(bhenning): Update this to use unresolved answers from
138 # stats_services once the training interface is enabled and it's cheaper
139 # to retrieve top answers from stats_services.
140 for ind, exploration in enumerate(exp_summary_dicts):
141 exploration.update(feedback_thread_analytics[ind].to_dict())
142
143 exp_summary_dicts = sorted(
144 exp_summary_dicts,
145 key=lambda x: (x['num_open_threads'], x['last_updated_msec']),
146 reverse=True)
147
148 topic_summaries = topic_fetchers.get_all_topic_summaries()
149 topic_summary_dicts = [
150 summary.to_dict() for summary in topic_summaries]
151
152 if role_services.ACTION_CREATE_COLLECTION in self.user.actions:
153 for collection_summary in subscribed_collection_summaries:
154 # TODO(sll): Reuse _get_displayable_collection_summary_dicts()
155 # in summary_services, instead of replicating it like this.
156 collection_summary_dicts.append({
157 'id': collection_summary.id,
158 'title': collection_summary.title,
159 'category': collection_summary.category,
160 'objective': collection_summary.objective,
161 'language_code': collection_summary.language_code,
162 'last_updated_msec': utils.get_time_in_millisecs(
163 collection_summary.collection_model_last_updated),
164 'created_on': utils.get_time_in_millisecs(
165 collection_summary.collection_model_created_on),
166 'status': collection_summary.status,
167 'node_count': collection_summary.node_count,
168 'community_owned': collection_summary.community_owned,
169 'thumbnail_icon_url': (
170 utils.get_thumbnail_icon_url_for_category(
171 collection_summary.category)),
172 'thumbnail_bg_color': utils.get_hex_color_for_category(
173 collection_summary.category),
174 })
175
176 dashboard_stats = user_services.get_dashboard_stats(self.user_id)
177 dashboard_stats.update({
178 'total_open_feedback': feedback_services.get_total_open_threads(
179 feedback_thread_analytics)
180 })
181 if dashboard_stats and dashboard_stats.get('average_ratings'):
182 dashboard_stats['average_ratings'] = (
183 _round_average_ratings(dashboard_stats['average_ratings']))
184
185 last_week_stats = (
186 user_services.get_last_week_dashboard_stats(self.user_id))
187
188 if last_week_stats and len(list(last_week_stats.keys())) != 1:
189 logging.exception(
190 '\'last_week_stats\' should contain only one key-value pair'
191 ' denoting last week dashboard stats of the user keyed by a'
192 ' datetime string.')
193 last_week_stats = None
194
195 if last_week_stats:
196 # 'last_week_stats' is a dict with only one key-value pair denoting
197 # last week dashboard stats of the user keyed by a datetime string.
198 datetime_of_stats = list(last_week_stats.keys())[0]
199 last_week_stats_average_ratings = (
200 list(last_week_stats.values())[0].get('average_ratings'))
201 if last_week_stats_average_ratings:
202 last_week_stats[datetime_of_stats]['average_ratings'] = (
203 _round_average_ratings(last_week_stats_average_ratings))
204
205 subscriber_ids = subscription_services.get_all_subscribers_of_creator(
206 self.user_id)
207 subscribers_settings = user_services.get_users_settings(subscriber_ids)
208 subscribers_list = []
209 for index, subscriber_settings in enumerate(subscribers_settings):
210 subscriber_summary = {
211 'subscriber_picture_data_url': (
212 subscriber_settings.profile_picture_data_url),
213 'subscriber_username': subscriber_settings.username,
214 'subscriber_impact': (
215 user_services.get_user_impact_score(subscriber_ids[index]))
216 }
217
218 subscribers_list.append(subscriber_summary)
219
220 user_settings = user_services.get_user_settings(
221 self.user_id, strict=False)
222 creator_dashboard_display_pref = (
223 user_settings.creator_dashboard_display_pref)
224
225 suggestions_created_by_user = suggestion_services.query_suggestions(
226 [('author_id', self.user_id),
227 (
228 'suggestion_type',
229 feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT)])
230 suggestions_which_can_be_reviewed = (
231 suggestion_services
232 .get_all_suggestions_that_can_be_reviewed_by_user(self.user_id))
233
234 for s in suggestions_created_by_user:
235 s.populate_old_value_of_change()
236
237 for s in suggestions_which_can_be_reviewed:
238 s.populate_old_value_of_change()
239
240 suggestion_dicts_created_by_user = (
241 [s.to_dict() for s in suggestions_created_by_user])
242 suggestion_dicts_which_can_be_reviewed = (
243 [s.to_dict() for s in suggestions_which_can_be_reviewed])
244
245 ids_of_suggestions_created_by_user = (
246 [s['suggestion_id'] for s in suggestion_dicts_created_by_user])
247 ids_of_suggestions_which_can_be_reviewed = (
248 [s['suggestion_id']
249 for s in suggestion_dicts_which_can_be_reviewed])
250
251 threads_linked_to_suggestions_by_user = (
252 [t.to_dict() for t in feedback_services.get_multiple_threads(
253 ids_of_suggestions_created_by_user)])
254 threads_linked_to_suggestions_which_can_be_reviewed = (
255 [t.to_dict() for t in feedback_services.get_multiple_threads(
256 ids_of_suggestions_which_can_be_reviewed)])
257
258 self.values.update({
259 'explorations_list': exp_summary_dicts,
260 'collections_list': collection_summary_dicts,
261 'dashboard_stats': dashboard_stats,
262 'last_week_stats': last_week_stats,
263 'subscribers_list': subscribers_list,
264 'display_preference': creator_dashboard_display_pref,
265 'threads_for_created_suggestions_list': (
266 threads_linked_to_suggestions_by_user),
267 'threads_for_suggestions_to_review_list': (
268 threads_linked_to_suggestions_which_can_be_reviewed),
269 'created_suggestions_list': suggestion_dicts_created_by_user,
270 'suggestions_to_review_list': (
271 suggestion_dicts_which_can_be_reviewed),
272 'topic_summary_dicts': topic_summary_dicts
273 })
274
275 self.render_json(self.values)
276
277 @acl_decorators.can_access_creator_dashboard
278 def post(self):
279 creator_dashboard_display_pref = (
280 self.normalized_payload.get('display_preference'))
281 user_services.update_user_creator_dashboard_display(
282 self.user_id, creator_dashboard_display_pref)
283 self.render_json({})
284
285
286 class NewExplorationHandler(base.BaseHandler):
287 """Creates a new exploration."""
288
289 URL_PATH_ARGS_SCHEMAS = {}
290 HANDLER_ARGS_SCHEMAS = {
291 'POST': {
292 'title': {
293 'schema': {
294 'type': 'basestring'
295 },
296 'default_value': feconf.DEFAULT_EXPLORATION_TITLE
297 }
298 }
299 }
300
301 @acl_decorators.can_create_exploration
302 def post(self):
303 """Handles POST requests."""
304 title = self.normalized_payload.get('title')
305
306 new_exploration_id = exp_fetchers.get_new_exploration_id()
307 exploration = exp_domain.Exploration.create_default_exploration(
308 new_exploration_id, title=title)
309 exp_services.save_new_exploration(self.user_id, exploration)
310
311 self.render_json({
312 EXPLORATION_ID_KEY: new_exploration_id
313 })
314
315
316 class NewCollectionHandler(base.BaseHandler):
317 """Creates a new collection."""
318
319 URL_PATH_ARGS_SCHEMAS = {}
320 HANDLER_ARGS_SCHEMAS = {
321 'POST': {}
322 }
323
324 @acl_decorators.can_create_collection
325 def post(self):
326 """Handles POST requests."""
327 new_collection_id = collection_services.get_new_collection_id()
328 collection = collection_domain.Collection.create_default_collection(
329 new_collection_id)
330 collection_services.save_new_collection(self.user_id, collection)
331
332 self.render_json({
333 COLLECTION_ID_KEY: new_collection_id
334 })
335
336
337 class UploadExplorationHandler(base.BaseHandler):
338 """Uploads a new exploration."""
339
340 URL_PATH_ARGS_SCHEMAS = {}
341 HANDLER_ARGS_SCHEMAS = {
342 'POST': {
343 'yaml_file': {
344 'schema': {
345 'type': 'basestring'
346 },
347 'default_value': None
348 }
349 }
350 }
351
352 @acl_decorators.can_upload_exploration
353 def post(self):
354 """Handles POST requests."""
355 yaml_content = self.normalized_payload.get('yaml_file')
356
357 new_exploration_id = exp_fetchers.get_new_exploration_id()
358 if constants.ALLOW_YAML_FILE_UPLOAD:
359 exp_services.save_new_exploration_from_yaml_and_assets(
360 self.user_id, yaml_content, new_exploration_id, [],
361 strip_voiceovers=True)
362 self.render_json({
363 EXPLORATION_ID_KEY: new_exploration_id
364 })
365 else:
366 raise self.InvalidInputException(
367 'This server does not allow file uploads.')
368
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/controllers/creator_dashboard.py b/core/controllers/creator_dashboard.py
--- a/core/controllers/creator_dashboard.py
+++ b/core/controllers/creator_dashboard.py
@@ -352,7 +352,7 @@
@acl_decorators.can_upload_exploration
def post(self):
"""Handles POST requests."""
- yaml_content = self.normalized_payload.get('yaml_file')
+ yaml_content = self.normalized_request.get('yaml_file')
new_exploration_id = exp_fetchers.get_new_exploration_id()
if constants.ALLOW_YAML_FILE_UPLOAD:
| {"golden_diff": "diff --git a/core/controllers/creator_dashboard.py b/core/controllers/creator_dashboard.py\n--- a/core/controllers/creator_dashboard.py\n+++ b/core/controllers/creator_dashboard.py\n@@ -352,7 +352,7 @@\n @acl_decorators.can_upload_exploration\n def post(self):\n \"\"\"Handles POST requests.\"\"\"\n- yaml_content = self.normalized_payload.get('yaml_file')\n+ yaml_content = self.normalized_request.get('yaml_file')\n \n new_exploration_id = exp_fetchers.get_new_exploration_id()\n if constants.ALLOW_YAML_FILE_UPLOAD:\n", "issue": "Schema validation error on uploading an exploration YAML\n<!--\r\n - Thanks for taking the time to report a bug in the Oppia project.\r\n - Before filing a new issue, please do a quick search to check that it hasn't\r\n - already been filed on the [issue tracker](https://github.com/oppia/oppia/issues)._\r\n -->\r\n\r\n**Describe the bug**\r\nA clear and concise description of what the bug is.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n 1. Set `ALLOW_YAML_FILE_UPLOAD` to true in `constants.ts`\r\n 2. Go to the creator dashboard.\r\n 3. Upload a test exploration YAML file from [here](https://github.com/oppia/oppia/tree/develop/data/explorations).\r\n 4. See error.\r\n\r\n**Observed behavior**\r\nValid exploration YAML does not get uploaded.\r\n\r\n**Expected behavior**\r\nValid exploration YAML should be uploaded correctly.\r\n\r\n**Screenshots**\r\n\r\nhttps://user-images.githubusercontent.com/11008603/150233493-27963be2-18ac-4c2d-a184-c39cd3268f09.mp4\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Controllers for the creator dashboard, notifications, and creating new\nactivities.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\n\nfrom core import feconf\nfrom core import utils\nfrom core.constants import constants\nfrom core.controllers import acl_decorators\nfrom core.controllers import base\nfrom core.domain import collection_domain\nfrom core.domain import collection_services\nfrom core.domain import exp_domain\nfrom core.domain import exp_fetchers\nfrom core.domain import exp_services\nfrom core.domain import feedback_services\nfrom core.domain import role_services\nfrom core.domain import subscription_services\nfrom core.domain import suggestion_services\nfrom core.domain import summary_services\nfrom core.domain import topic_fetchers\nfrom core.domain import user_services\n\nEXPLORATION_ID_KEY = 'exploration_id'\nCOLLECTION_ID_KEY = 'collection_id'\n\n\nclass OldContributorDashboardRedirectPage(base.BaseHandler):\n \"\"\"Redirects the old contributor dashboard URL to the new one.\"\"\"\n\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {'GET': {}}\n\n @acl_decorators.open_access\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n self.redirect('/contributor-dashboard', permanent=True)\n\n\nclass OldCreatorDashboardRedirectPage(base.BaseHandler):\n \"\"\"Redirects the old creator dashboard URL to the new one.\"\"\"\n\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {'GET': {}}\n\n @acl_decorators.open_access\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n self.redirect(feconf.CREATOR_DASHBOARD_URL, permanent=True)\n\n\nclass CreatorDashboardPage(base.BaseHandler):\n \"\"\"Page showing the user's creator dashboard.\"\"\"\n\n ADDITIONAL_DEPENDENCY_IDS = ['codemirror']\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {'GET': {}}\n\n @acl_decorators.can_access_creator_dashboard\n def get(self):\n\n self.render_template('creator-dashboard-page.mainpage.html')\n\n\nclass CreatorDashboardHandler(base.BaseHandler):\n \"\"\"Provides data for the user's creator dashboard page.\"\"\"\n\n GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {\n 'GET': {},\n 'POST': {\n 'display_preference': {\n 'schema': {\n 'type': 'basestring',\n 'choices': (\n constants.ALLOWED_CREATOR_DASHBOARD_DISPLAY_PREFS\n .values()\n )\n }\n }\n }\n }\n\n @acl_decorators.can_access_creator_dashboard\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n\n def _round_average_ratings(rating):\n \"\"\"Returns the rounded average rating to display on the creator\n dashboard.\n\n Args:\n rating: float. The rating of the lesson.\n\n Returns:\n float. The rounded average value of rating.\n \"\"\"\n return round(\n rating, feconf.AVERAGE_RATINGS_DASHBOARD_PRECISION)\n\n subscribed_exploration_summaries = (\n exp_fetchers.get_exploration_summaries_subscribed_to(\n self.user_id))\n subscribed_collection_summaries = (\n collection_services.get_collection_summaries_subscribed_to(\n self.user_id))\n\n exploration_ids_subscribed_to = [\n summary.id for summary in subscribed_exploration_summaries]\n\n exp_summary_dicts = summary_services.get_displayable_exp_summary_dicts(\n subscribed_exploration_summaries)\n collection_summary_dicts = []\n\n feedback_thread_analytics = (\n feedback_services.get_thread_analytics_multi(\n exploration_ids_subscribed_to))\n\n # TODO(bhenning): Update this to use unresolved answers from\n # stats_services once the training interface is enabled and it's cheaper\n # to retrieve top answers from stats_services.\n for ind, exploration in enumerate(exp_summary_dicts):\n exploration.update(feedback_thread_analytics[ind].to_dict())\n\n exp_summary_dicts = sorted(\n exp_summary_dicts,\n key=lambda x: (x['num_open_threads'], x['last_updated_msec']),\n reverse=True)\n\n topic_summaries = topic_fetchers.get_all_topic_summaries()\n topic_summary_dicts = [\n summary.to_dict() for summary in topic_summaries]\n\n if role_services.ACTION_CREATE_COLLECTION in self.user.actions:\n for collection_summary in subscribed_collection_summaries:\n # TODO(sll): Reuse _get_displayable_collection_summary_dicts()\n # in summary_services, instead of replicating it like this.\n collection_summary_dicts.append({\n 'id': collection_summary.id,\n 'title': collection_summary.title,\n 'category': collection_summary.category,\n 'objective': collection_summary.objective,\n 'language_code': collection_summary.language_code,\n 'last_updated_msec': utils.get_time_in_millisecs(\n collection_summary.collection_model_last_updated),\n 'created_on': utils.get_time_in_millisecs(\n collection_summary.collection_model_created_on),\n 'status': collection_summary.status,\n 'node_count': collection_summary.node_count,\n 'community_owned': collection_summary.community_owned,\n 'thumbnail_icon_url': (\n utils.get_thumbnail_icon_url_for_category(\n collection_summary.category)),\n 'thumbnail_bg_color': utils.get_hex_color_for_category(\n collection_summary.category),\n })\n\n dashboard_stats = user_services.get_dashboard_stats(self.user_id)\n dashboard_stats.update({\n 'total_open_feedback': feedback_services.get_total_open_threads(\n feedback_thread_analytics)\n })\n if dashboard_stats and dashboard_stats.get('average_ratings'):\n dashboard_stats['average_ratings'] = (\n _round_average_ratings(dashboard_stats['average_ratings']))\n\n last_week_stats = (\n user_services.get_last_week_dashboard_stats(self.user_id))\n\n if last_week_stats and len(list(last_week_stats.keys())) != 1:\n logging.exception(\n '\\'last_week_stats\\' should contain only one key-value pair'\n ' denoting last week dashboard stats of the user keyed by a'\n ' datetime string.')\n last_week_stats = None\n\n if last_week_stats:\n # 'last_week_stats' is a dict with only one key-value pair denoting\n # last week dashboard stats of the user keyed by a datetime string.\n datetime_of_stats = list(last_week_stats.keys())[0]\n last_week_stats_average_ratings = (\n list(last_week_stats.values())[0].get('average_ratings'))\n if last_week_stats_average_ratings:\n last_week_stats[datetime_of_stats]['average_ratings'] = (\n _round_average_ratings(last_week_stats_average_ratings))\n\n subscriber_ids = subscription_services.get_all_subscribers_of_creator(\n self.user_id)\n subscribers_settings = user_services.get_users_settings(subscriber_ids)\n subscribers_list = []\n for index, subscriber_settings in enumerate(subscribers_settings):\n subscriber_summary = {\n 'subscriber_picture_data_url': (\n subscriber_settings.profile_picture_data_url),\n 'subscriber_username': subscriber_settings.username,\n 'subscriber_impact': (\n user_services.get_user_impact_score(subscriber_ids[index]))\n }\n\n subscribers_list.append(subscriber_summary)\n\n user_settings = user_services.get_user_settings(\n self.user_id, strict=False)\n creator_dashboard_display_pref = (\n user_settings.creator_dashboard_display_pref)\n\n suggestions_created_by_user = suggestion_services.query_suggestions(\n [('author_id', self.user_id),\n (\n 'suggestion_type',\n feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT)])\n suggestions_which_can_be_reviewed = (\n suggestion_services\n .get_all_suggestions_that_can_be_reviewed_by_user(self.user_id))\n\n for s in suggestions_created_by_user:\n s.populate_old_value_of_change()\n\n for s in suggestions_which_can_be_reviewed:\n s.populate_old_value_of_change()\n\n suggestion_dicts_created_by_user = (\n [s.to_dict() for s in suggestions_created_by_user])\n suggestion_dicts_which_can_be_reviewed = (\n [s.to_dict() for s in suggestions_which_can_be_reviewed])\n\n ids_of_suggestions_created_by_user = (\n [s['suggestion_id'] for s in suggestion_dicts_created_by_user])\n ids_of_suggestions_which_can_be_reviewed = (\n [s['suggestion_id']\n for s in suggestion_dicts_which_can_be_reviewed])\n\n threads_linked_to_suggestions_by_user = (\n [t.to_dict() for t in feedback_services.get_multiple_threads(\n ids_of_suggestions_created_by_user)])\n threads_linked_to_suggestions_which_can_be_reviewed = (\n [t.to_dict() for t in feedback_services.get_multiple_threads(\n ids_of_suggestions_which_can_be_reviewed)])\n\n self.values.update({\n 'explorations_list': exp_summary_dicts,\n 'collections_list': collection_summary_dicts,\n 'dashboard_stats': dashboard_stats,\n 'last_week_stats': last_week_stats,\n 'subscribers_list': subscribers_list,\n 'display_preference': creator_dashboard_display_pref,\n 'threads_for_created_suggestions_list': (\n threads_linked_to_suggestions_by_user),\n 'threads_for_suggestions_to_review_list': (\n threads_linked_to_suggestions_which_can_be_reviewed),\n 'created_suggestions_list': suggestion_dicts_created_by_user,\n 'suggestions_to_review_list': (\n suggestion_dicts_which_can_be_reviewed),\n 'topic_summary_dicts': topic_summary_dicts\n })\n\n self.render_json(self.values)\n\n @acl_decorators.can_access_creator_dashboard\n def post(self):\n creator_dashboard_display_pref = (\n self.normalized_payload.get('display_preference'))\n user_services.update_user_creator_dashboard_display(\n self.user_id, creator_dashboard_display_pref)\n self.render_json({})\n\n\nclass NewExplorationHandler(base.BaseHandler):\n \"\"\"Creates a new exploration.\"\"\"\n\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {\n 'POST': {\n 'title': {\n 'schema': {\n 'type': 'basestring'\n },\n 'default_value': feconf.DEFAULT_EXPLORATION_TITLE\n }\n }\n }\n\n @acl_decorators.can_create_exploration\n def post(self):\n \"\"\"Handles POST requests.\"\"\"\n title = self.normalized_payload.get('title')\n\n new_exploration_id = exp_fetchers.get_new_exploration_id()\n exploration = exp_domain.Exploration.create_default_exploration(\n new_exploration_id, title=title)\n exp_services.save_new_exploration(self.user_id, exploration)\n\n self.render_json({\n EXPLORATION_ID_KEY: new_exploration_id\n })\n\n\nclass NewCollectionHandler(base.BaseHandler):\n \"\"\"Creates a new collection.\"\"\"\n\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {\n 'POST': {}\n }\n\n @acl_decorators.can_create_collection\n def post(self):\n \"\"\"Handles POST requests.\"\"\"\n new_collection_id = collection_services.get_new_collection_id()\n collection = collection_domain.Collection.create_default_collection(\n new_collection_id)\n collection_services.save_new_collection(self.user_id, collection)\n\n self.render_json({\n COLLECTION_ID_KEY: new_collection_id\n })\n\n\nclass UploadExplorationHandler(base.BaseHandler):\n \"\"\"Uploads a new exploration.\"\"\"\n\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {\n 'POST': {\n 'yaml_file': {\n 'schema': {\n 'type': 'basestring'\n },\n 'default_value': None\n }\n }\n }\n\n @acl_decorators.can_upload_exploration\n def post(self):\n \"\"\"Handles POST requests.\"\"\"\n yaml_content = self.normalized_payload.get('yaml_file')\n\n new_exploration_id = exp_fetchers.get_new_exploration_id()\n if constants.ALLOW_YAML_FILE_UPLOAD:\n exp_services.save_new_exploration_from_yaml_and_assets(\n self.user_id, yaml_content, new_exploration_id, [],\n strip_voiceovers=True)\n self.render_json({\n EXPLORATION_ID_KEY: new_exploration_id\n })\n else:\n raise self.InvalidInputException(\n 'This server does not allow file uploads.')\n", "path": "core/controllers/creator_dashboard.py"}], "after_files": [{"content": "# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Controllers for the creator dashboard, notifications, and creating new\nactivities.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\n\nfrom core import feconf\nfrom core import utils\nfrom core.constants import constants\nfrom core.controllers import acl_decorators\nfrom core.controllers import base\nfrom core.domain import collection_domain\nfrom core.domain import collection_services\nfrom core.domain import exp_domain\nfrom core.domain import exp_fetchers\nfrom core.domain import exp_services\nfrom core.domain import feedback_services\nfrom core.domain import role_services\nfrom core.domain import subscription_services\nfrom core.domain import suggestion_services\nfrom core.domain import summary_services\nfrom core.domain import topic_fetchers\nfrom core.domain import user_services\n\nEXPLORATION_ID_KEY = 'exploration_id'\nCOLLECTION_ID_KEY = 'collection_id'\n\n\nclass OldContributorDashboardRedirectPage(base.BaseHandler):\n \"\"\"Redirects the old contributor dashboard URL to the new one.\"\"\"\n\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {'GET': {}}\n\n @acl_decorators.open_access\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n self.redirect('/contributor-dashboard', permanent=True)\n\n\nclass OldCreatorDashboardRedirectPage(base.BaseHandler):\n \"\"\"Redirects the old creator dashboard URL to the new one.\"\"\"\n\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {'GET': {}}\n\n @acl_decorators.open_access\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n self.redirect(feconf.CREATOR_DASHBOARD_URL, permanent=True)\n\n\nclass CreatorDashboardPage(base.BaseHandler):\n \"\"\"Page showing the user's creator dashboard.\"\"\"\n\n ADDITIONAL_DEPENDENCY_IDS = ['codemirror']\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {'GET': {}}\n\n @acl_decorators.can_access_creator_dashboard\n def get(self):\n\n self.render_template('creator-dashboard-page.mainpage.html')\n\n\nclass CreatorDashboardHandler(base.BaseHandler):\n \"\"\"Provides data for the user's creator dashboard page.\"\"\"\n\n GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {\n 'GET': {},\n 'POST': {\n 'display_preference': {\n 'schema': {\n 'type': 'basestring',\n 'choices': (\n constants.ALLOWED_CREATOR_DASHBOARD_DISPLAY_PREFS\n .values()\n )\n }\n }\n }\n }\n\n @acl_decorators.can_access_creator_dashboard\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n\n def _round_average_ratings(rating):\n \"\"\"Returns the rounded average rating to display on the creator\n dashboard.\n\n Args:\n rating: float. The rating of the lesson.\n\n Returns:\n float. The rounded average value of rating.\n \"\"\"\n return round(\n rating, feconf.AVERAGE_RATINGS_DASHBOARD_PRECISION)\n\n subscribed_exploration_summaries = (\n exp_fetchers.get_exploration_summaries_subscribed_to(\n self.user_id))\n subscribed_collection_summaries = (\n collection_services.get_collection_summaries_subscribed_to(\n self.user_id))\n\n exploration_ids_subscribed_to = [\n summary.id for summary in subscribed_exploration_summaries]\n\n exp_summary_dicts = summary_services.get_displayable_exp_summary_dicts(\n subscribed_exploration_summaries)\n collection_summary_dicts = []\n\n feedback_thread_analytics = (\n feedback_services.get_thread_analytics_multi(\n exploration_ids_subscribed_to))\n\n # TODO(bhenning): Update this to use unresolved answers from\n # stats_services once the training interface is enabled and it's cheaper\n # to retrieve top answers from stats_services.\n for ind, exploration in enumerate(exp_summary_dicts):\n exploration.update(feedback_thread_analytics[ind].to_dict())\n\n exp_summary_dicts = sorted(\n exp_summary_dicts,\n key=lambda x: (x['num_open_threads'], x['last_updated_msec']),\n reverse=True)\n\n topic_summaries = topic_fetchers.get_all_topic_summaries()\n topic_summary_dicts = [\n summary.to_dict() for summary in topic_summaries]\n\n if role_services.ACTION_CREATE_COLLECTION in self.user.actions:\n for collection_summary in subscribed_collection_summaries:\n # TODO(sll): Reuse _get_displayable_collection_summary_dicts()\n # in summary_services, instead of replicating it like this.\n collection_summary_dicts.append({\n 'id': collection_summary.id,\n 'title': collection_summary.title,\n 'category': collection_summary.category,\n 'objective': collection_summary.objective,\n 'language_code': collection_summary.language_code,\n 'last_updated_msec': utils.get_time_in_millisecs(\n collection_summary.collection_model_last_updated),\n 'created_on': utils.get_time_in_millisecs(\n collection_summary.collection_model_created_on),\n 'status': collection_summary.status,\n 'node_count': collection_summary.node_count,\n 'community_owned': collection_summary.community_owned,\n 'thumbnail_icon_url': (\n utils.get_thumbnail_icon_url_for_category(\n collection_summary.category)),\n 'thumbnail_bg_color': utils.get_hex_color_for_category(\n collection_summary.category),\n })\n\n dashboard_stats = user_services.get_dashboard_stats(self.user_id)\n dashboard_stats.update({\n 'total_open_feedback': feedback_services.get_total_open_threads(\n feedback_thread_analytics)\n })\n if dashboard_stats and dashboard_stats.get('average_ratings'):\n dashboard_stats['average_ratings'] = (\n _round_average_ratings(dashboard_stats['average_ratings']))\n\n last_week_stats = (\n user_services.get_last_week_dashboard_stats(self.user_id))\n\n if last_week_stats and len(list(last_week_stats.keys())) != 1:\n logging.exception(\n '\\'last_week_stats\\' should contain only one key-value pair'\n ' denoting last week dashboard stats of the user keyed by a'\n ' datetime string.')\n last_week_stats = None\n\n if last_week_stats:\n # 'last_week_stats' is a dict with only one key-value pair denoting\n # last week dashboard stats of the user keyed by a datetime string.\n datetime_of_stats = list(last_week_stats.keys())[0]\n last_week_stats_average_ratings = (\n list(last_week_stats.values())[0].get('average_ratings'))\n if last_week_stats_average_ratings:\n last_week_stats[datetime_of_stats]['average_ratings'] = (\n _round_average_ratings(last_week_stats_average_ratings))\n\n subscriber_ids = subscription_services.get_all_subscribers_of_creator(\n self.user_id)\n subscribers_settings = user_services.get_users_settings(subscriber_ids)\n subscribers_list = []\n for index, subscriber_settings in enumerate(subscribers_settings):\n subscriber_summary = {\n 'subscriber_picture_data_url': (\n subscriber_settings.profile_picture_data_url),\n 'subscriber_username': subscriber_settings.username,\n 'subscriber_impact': (\n user_services.get_user_impact_score(subscriber_ids[index]))\n }\n\n subscribers_list.append(subscriber_summary)\n\n user_settings = user_services.get_user_settings(\n self.user_id, strict=False)\n creator_dashboard_display_pref = (\n user_settings.creator_dashboard_display_pref)\n\n suggestions_created_by_user = suggestion_services.query_suggestions(\n [('author_id', self.user_id),\n (\n 'suggestion_type',\n feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT)])\n suggestions_which_can_be_reviewed = (\n suggestion_services\n .get_all_suggestions_that_can_be_reviewed_by_user(self.user_id))\n\n for s in suggestions_created_by_user:\n s.populate_old_value_of_change()\n\n for s in suggestions_which_can_be_reviewed:\n s.populate_old_value_of_change()\n\n suggestion_dicts_created_by_user = (\n [s.to_dict() for s in suggestions_created_by_user])\n suggestion_dicts_which_can_be_reviewed = (\n [s.to_dict() for s in suggestions_which_can_be_reviewed])\n\n ids_of_suggestions_created_by_user = (\n [s['suggestion_id'] for s in suggestion_dicts_created_by_user])\n ids_of_suggestions_which_can_be_reviewed = (\n [s['suggestion_id']\n for s in suggestion_dicts_which_can_be_reviewed])\n\n threads_linked_to_suggestions_by_user = (\n [t.to_dict() for t in feedback_services.get_multiple_threads(\n ids_of_suggestions_created_by_user)])\n threads_linked_to_suggestions_which_can_be_reviewed = (\n [t.to_dict() for t in feedback_services.get_multiple_threads(\n ids_of_suggestions_which_can_be_reviewed)])\n\n self.values.update({\n 'explorations_list': exp_summary_dicts,\n 'collections_list': collection_summary_dicts,\n 'dashboard_stats': dashboard_stats,\n 'last_week_stats': last_week_stats,\n 'subscribers_list': subscribers_list,\n 'display_preference': creator_dashboard_display_pref,\n 'threads_for_created_suggestions_list': (\n threads_linked_to_suggestions_by_user),\n 'threads_for_suggestions_to_review_list': (\n threads_linked_to_suggestions_which_can_be_reviewed),\n 'created_suggestions_list': suggestion_dicts_created_by_user,\n 'suggestions_to_review_list': (\n suggestion_dicts_which_can_be_reviewed),\n 'topic_summary_dicts': topic_summary_dicts\n })\n\n self.render_json(self.values)\n\n @acl_decorators.can_access_creator_dashboard\n def post(self):\n creator_dashboard_display_pref = (\n self.normalized_payload.get('display_preference'))\n user_services.update_user_creator_dashboard_display(\n self.user_id, creator_dashboard_display_pref)\n self.render_json({})\n\n\nclass NewExplorationHandler(base.BaseHandler):\n \"\"\"Creates a new exploration.\"\"\"\n\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {\n 'POST': {\n 'title': {\n 'schema': {\n 'type': 'basestring'\n },\n 'default_value': feconf.DEFAULT_EXPLORATION_TITLE\n }\n }\n }\n\n @acl_decorators.can_create_exploration\n def post(self):\n \"\"\"Handles POST requests.\"\"\"\n title = self.normalized_payload.get('title')\n\n new_exploration_id = exp_fetchers.get_new_exploration_id()\n exploration = exp_domain.Exploration.create_default_exploration(\n new_exploration_id, title=title)\n exp_services.save_new_exploration(self.user_id, exploration)\n\n self.render_json({\n EXPLORATION_ID_KEY: new_exploration_id\n })\n\n\nclass NewCollectionHandler(base.BaseHandler):\n \"\"\"Creates a new collection.\"\"\"\n\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {\n 'POST': {}\n }\n\n @acl_decorators.can_create_collection\n def post(self):\n \"\"\"Handles POST requests.\"\"\"\n new_collection_id = collection_services.get_new_collection_id()\n collection = collection_domain.Collection.create_default_collection(\n new_collection_id)\n collection_services.save_new_collection(self.user_id, collection)\n\n self.render_json({\n COLLECTION_ID_KEY: new_collection_id\n })\n\n\nclass UploadExplorationHandler(base.BaseHandler):\n \"\"\"Uploads a new exploration.\"\"\"\n\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {\n 'POST': {\n 'yaml_file': {\n 'schema': {\n 'type': 'basestring'\n },\n 'default_value': None\n }\n }\n }\n\n @acl_decorators.can_upload_exploration\n def post(self):\n \"\"\"Handles POST requests.\"\"\"\n yaml_content = self.normalized_request.get('yaml_file')\n\n new_exploration_id = exp_fetchers.get_new_exploration_id()\n if constants.ALLOW_YAML_FILE_UPLOAD:\n exp_services.save_new_exploration_from_yaml_and_assets(\n self.user_id, yaml_content, new_exploration_id, [],\n strip_voiceovers=True)\n self.render_json({\n EXPLORATION_ID_KEY: new_exploration_id\n })\n else:\n raise self.InvalidInputException(\n 'This server does not allow file uploads.')\n", "path": "core/controllers/creator_dashboard.py"}]} |
gh_patches_debug_1569 | rasdani/github-patches | git_diff | google__pytype-807 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AssertionError: Don't know how to match <class 'pytype.pytd.pytd.Name…
…dType'> against <class 'pytype.pytd.pytd.Literal'>
I opened an issue for this error. If this a possible fix for this issue, feel free to merge the fix.
Issue = https://github.com/google/pytype/issues/802
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytype/pytd/type_match.py`
Content:
```
1 """Match pytd types against each other.
2
3 "Matching" x against y means roughly: If we have a function f(param: y) and
4 a type x, would we be able to pass (an instance of) x to f. (I.e.,
5 "execute f(x)"). So for example, str would "match" against basestring, and
6 list[int] would match against list[Number].
7
8 This is used for converting structural types to nominal types during type
9 inference, but could also be used when merging pytd files, to match existing
10 signatures against new inference results.
11 """
12
13 import logging
14
15 from pytype import utils
16 from pytype.pytd import booleq
17 from pytype.pytd import escape
18 from pytype.pytd import pytd
19 from pytype.pytd import pytd_utils
20 from pytype.pytd import visitors
21 from pytype.pytd.parse import node
22
23 log = logging.getLogger(__name__)
24
25
26 is_complete = escape.is_complete
27
28
29 # Might not be needed anymore once pytd has builtin support for ~unknown.
30 def is_unknown(t):
31 """Return True if this is an ~unknown."""
32 if isinstance(t, (pytd.ClassType, pytd.NamedType, pytd.Class, StrictType)):
33 return escape.is_unknown(t.name)
34 elif isinstance(t, str):
35 return escape.is_unknown(t)
36 else:
37 return False
38
39
40 def get_all_subclasses(asts):
41 """Compute a class->subclasses mapping.
42
43 Args:
44 asts: A list of ASTs.
45
46 Returns:
47 A dictionary, mapping instances of pytd.Type (types) to lists of
48 pytd.Class (the derived classes).
49 """
50 hierarchy = {}
51 for ast in asts:
52 hierarchy.update(ast.Visit(visitors.ExtractSuperClasses()))
53 def filter_superclasses(superclasses):
54 return [superclass for superclass in superclasses
55 if is_complete(superclass)]
56 hierarchy = {cls: filter_superclasses(superclasses)
57 for cls, superclasses in hierarchy.items() if is_complete(cls)}
58 # typically this is a fairly short list, e.g.:
59 # [ClassType(basestring), ClassType(int), ClassType(object)]
60 return utils.invert_dict(hierarchy)
61
62
63 class StrictType(node.Node("name")):
64 """A type that doesn't allow sub- or superclasses to match.
65
66 For example, "int" is considered a valid argument for a function that accepts
67 "object", but StrictType("int") is not.
68 """
69
70 def __str__(self):
71 return self.name
72
73
74 class TypeMatch(pytd_utils.TypeMatcher):
75 """Class for matching types against other types."""
76
77 def __init__(self, direct_subclasses=None, any_also_is_bottom=True):
78 """Construct.
79
80 Args:
81 direct_subclasses: A dictionary, mapping pytd.Type to lists of pytd.Type.
82 any_also_is_bottom: Whether we should, (if True) consider
83 pytd.AnythingType() to also be at the bottom of the type hierarchy,
84 thus making it a subclass of everything, or (if False) to be only
85 at the top.
86 """
87 self.direct_subclasses = direct_subclasses or {}
88 self.any_also_is_bottom = any_also_is_bottom
89 self.solver = booleq.Solver()
90 self._implications = {}
91
92 def default_match(self, t1, t2, *unused_args, **unused_kwargs):
93 # Don't allow pytd_utils.TypeMatcher to do default matching.
94 raise AssertionError(
95 "Can't compare %s and %s" % (type(t1).__name__, type(t2).__name__))
96
97 def get_superclasses(self, t):
98 """Get all base classes of this type.
99
100 Args:
101 t: A pytd.Type
102 Returns:
103 A list of pytd.Type.
104 """
105 if isinstance(t, pytd.ClassType):
106 return sum((self.get_superclasses(c) for c in t.cls.parents), [t])
107 elif isinstance(t, pytd.AnythingType):
108 # All types, even "?", inherit from object.
109 return [pytd.NamedType("builtins.object")]
110 elif isinstance(t, pytd.GenericType):
111 return self.get_superclasses(t.base_type)
112 else:
113 log.warning("Can't extract superclasses from %s", type(t))
114 return [pytd.NamedType("builtins.object")]
115
116 def get_subclasses(self, t):
117 """Get all classes derived from this type.
118
119 Args:
120 t: A pytd.Type
121 Returns:
122 A list of pytd.Type.
123 """
124 if isinstance(t, pytd.ClassType):
125 subclasses = self.direct_subclasses.get(t, [])
126 return sum((self.get_subclasses(pytd.ClassType(c.name, c))
127 for c in subclasses), [t])
128 else:
129 raise NotImplementedError("Can't extract subclasses from %s" % type(t))
130
131 def type_parameter(self, unknown, base_class, item):
132 """This generates the type parameter when matching against a generic type.
133
134 For example, when we match ~unknown1 against list[T], we need an additional
135 type to model the T in "~unknown1[T]". This type would have the name
136 "~unknown1.list.T".
137
138 Args:
139 unknown: An unknown type. This is the type that's matched against
140 base_class[T]
141 base_class: The base class of the generic we're matching the unknown
142 against. E.g. "list".
143 item: The pytd.TemplateItem, i.e., the actual type parameter. ("T" in
144 the examples above)
145 Returns:
146 A type (pytd.Node) to represent this type parameter.
147 """
148 assert is_unknown(unknown)
149 assert isinstance(base_class, pytd.Class)
150 name = unknown.name + "." + base_class.name + "." + item.type_param.name
151 # We do *not* consider subclasses or superclasses when matching type
152 # parameters.
153 # So for example, if we pass list[int] to f(x: list[T]), we assume that
154 # T can only be "int", not "int + object". This might be considered
155 # incorrect, but typically gives us more intuitive results.
156 # Note that this only happens if we match ~unknown against generic types,
157 # not for matching of "known" types against each other.
158 return StrictType(name)
159
160 def _get_parameters(self, t1, t2):
161 if isinstance(t1, pytd.TupleType) and isinstance(t2, pytd.TupleType):
162 # No change needed; the parameters will be compared element-wise.
163 return t1.parameters, t2.parameters
164 elif isinstance(t2, pytd.TupleType):
165 # Since we call _get_parameters after confirming that t1 and t2 have
166 # compatible base types, t1 is a homogeneous tuple here.
167 return (t1.element_type,) * len(t2.parameters), t2.parameters
168 elif isinstance(t1, pytd.TupleType):
169 return (pytd_utils.JoinTypes(t1.parameters),), t2.parameters
170 elif (isinstance(t1, pytd.CallableType) and
171 isinstance(t2, pytd.CallableType)):
172 # Flip the arguments, since argument types are contravariant.
173 return t2.args + (t1.ret,), t1.args + (t2.ret,)
174 elif (t1.base_type.cls.name == "builtins.type" and
175 t2.base_type.cls.name == "typing.Callable"):
176 # We'll only check the return type, since getting the argument types for
177 # initializing a class is tricky.
178 return t1.parameters, (t2.parameters[-1],)
179 elif (t1.base_type.cls.name == "typing.Callable" and
180 t2.base_type.cls.name == "builtins.type"):
181 return (t1.parameters[-1],), t2.parameters
182 elif isinstance(t1, pytd.CallableType):
183 # We're matching against GenericType(Callable, (Any, _RET)), so we don't
184 # need the argument types.
185 return (pytd.AnythingType(), t1.ret), t2.parameters
186 elif isinstance(t2, pytd.CallableType):
187 return t1.parameters, (pytd.AnythingType(), t2.ret)
188 else:
189 num_extra_params = len(t1.parameters) - len(t2.parameters)
190 # Matching, e.g., Dict[str, int] against Iterable[K] is legitimate.
191 assert num_extra_params >= 0, (t1.base_type.cls.name,
192 t2.base_type.cls.name)
193 t2_parameters = t2.parameters + (pytd.AnythingType(),) * num_extra_params
194 return t1.parameters, t2_parameters
195
196 def match_Generic_against_Generic(self, t1, t2, subst): # pylint: disable=invalid-name
197 """Match a pytd.GenericType against another pytd.GenericType."""
198 assert isinstance(t1.base_type, pytd.ClassType), type(t1.base_type)
199 assert isinstance(t2.base_type, pytd.ClassType), type(t2.base_type)
200 base1 = pytd.ClassType(t1.base_type.cls.name, t1.base_type.cls)
201 base2 = pytd.ClassType(t2.base_type.cls.name, t2.base_type.cls)
202 base_type_cmp = self.match_type_against_type(base1, base2, subst)
203 if base_type_cmp is booleq.FALSE:
204 return booleq.FALSE
205 t1_parameters, t2_parameters = self._get_parameters(t1, t2)
206 if len(t1_parameters) != len(t2_parameters):
207 return booleq.FALSE
208 # Type parameters are covariant:
209 # E.g. passing list[int] as argument for list[object] succeeds.
210 param_cmp = [self.match_type_against_type(p1, p2, subst)
211 for p1, p2 in zip(t1_parameters, t2_parameters)]
212 return booleq.And([base_type_cmp] + param_cmp)
213
214 def match_Unknown_against_Generic(self, t1, t2, subst): # pylint: disable=invalid-name
215 assert isinstance(t2.base_type, pytd.ClassType)
216 # No inheritance for base classes - you can only inherit from an
217 # instantiated template, but not from a template itself.
218 base_match = booleq.Eq(t1.name, t2.base_type.cls.name)
219 type_params = [self.type_parameter(t1, t2.base_type.cls, item)
220 for item in t2.base_type.cls.template]
221 for type_param in type_params:
222 self.solver.register_variable(type_param.name)
223 if isinstance(t2, pytd.TupleType):
224 t2_parameters = (pytd_utils.JoinTypes(t2.parameters),)
225 else:
226 t2_parameters = t2.parameters
227 params = [self.match_type_against_type(p1, p2, subst)
228 for p1, p2 in zip(type_params, t2_parameters)]
229 return booleq.And([base_match] + params)
230
231 def match_Generic_against_Unknown(self, t1, t2, subst): # pylint: disable=invalid-name
232 # Note: This flips p1 and p2 above.
233 return self.match_Unknown_against_Generic(t2, t1, subst) # pylint: disable=arguments-out-of-order
234
235 def maybe_lookup_type_param(self, t, subst):
236 while isinstance(t, pytd.TypeParameter):
237 # We can only have type parameters in a class, and if so, we should have
238 # added them to the type parameter substitution map (subst) beforehand:
239 assert t in subst
240 if subst[t] is None:
241 # Function type parameter. Can be anything.
242 t = pytd.AnythingType()
243 else:
244 assert subst[t] != t, "Cyclic type parameter."
245 t = subst[t]
246 return t
247
248 def unclass(self, t):
249 """Prevent further subclass or superclass expansion for this type."""
250 if isinstance(t, pytd.ClassType):
251 # When t.name and t.cls.name differ (e.g., int vs. builtins.int), the
252 # latter is the complete name.
253 return pytd.NamedType(t.cls.name)
254 else:
255 return t
256
257 def expand_superclasses(self, t):
258 class_and_superclasses = self.get_superclasses(t)
259 return [self.unclass(t) for t in class_and_superclasses]
260
261 def expand_subclasses(self, t):
262 class_and_subclasses = self.get_subclasses(t)
263 return [self.unclass(t) for t in class_and_subclasses]
264
265 def match_type_against_type(self, t1, t2, subst):
266 types = (t1, t2, frozenset(subst.items()))
267 if types in self._implications:
268 return self._implications[types]
269 implication = self._implications[types] = self._match_type_against_type(
270 t1, t2, subst)
271 return implication
272
273 def _full_name(self, t):
274 return t.name
275
276 def _match_type_against_type(self, t1, t2, subst):
277 """Match a pytd.Type against another pytd.Type."""
278 t1 = self.maybe_lookup_type_param(t1, subst)
279 t2 = self.maybe_lookup_type_param(t2, subst)
280 # TODO(b/159058933): Use utils:TypeMatcher to simplify this?
281 if isinstance(t2, pytd.AnythingType):
282 # We can match anything against AnythingType. (It's like top)
283 return booleq.TRUE
284 elif isinstance(t1, pytd.AnythingType):
285 if self.any_also_is_bottom:
286 # We can match AnythingType against everything. (It's like bottom)
287 return booleq.TRUE
288 else:
289 return booleq.FALSE
290 elif isinstance(t1, pytd.NothingType):
291 # nothing as an actual type matches against everything, since it
292 # represents an empty value.
293 return booleq.TRUE
294 elif isinstance(t2, pytd.NothingType):
295 # We can't match anything against nothing as an expected type (except
296 # nothing itself, above).
297 return booleq.FALSE
298 elif isinstance(t1, pytd.UnionType):
299 return booleq.And(self.match_type_against_type(u, t2, subst)
300 for u in t1.type_list)
301 elif isinstance(t2, pytd.UnionType):
302 return booleq.Or(self.match_type_against_type(t1, u, subst)
303 for u in t2.type_list)
304 elif (isinstance(t1, pytd.ClassType) and isinstance(t2, StrictType) or
305 isinstance(t1, StrictType) and isinstance(t2, pytd.ClassType)):
306 # For strict types, avoid subclasses of the left side.
307 return booleq.Eq(self._full_name(t1), self._full_name(t2))
308 elif isinstance(t1, pytd.ClassType) and t2.name == "builtins.object":
309 return booleq.TRUE
310 elif (t1.name in ("builtins.type", "typing.Callable") and
311 t2.name in ("builtins.type", "typing.Callable")):
312 return booleq.TRUE
313 elif isinstance(t1, pytd.ClassType):
314 # ClassTypes are similar to Unions, except they're disjunctions: We can
315 # match the type or any of its base classes against the formal parameter.
316 return booleq.Or(self.match_type_against_type(t, t2, subst)
317 for t in self.expand_superclasses(t1))
318 elif isinstance(t2, pytd.ClassType):
319 # ClassTypes on the right are exactly like Unions: We can match against
320 # this type or any of its subclasses.
321 return booleq.Or(self.match_type_against_type(t1, t, subst)
322 for t in self.expand_subclasses(t2))
323 assert not isinstance(t1, pytd.ClassType)
324 assert not isinstance(t2, pytd.ClassType)
325 if is_unknown(t1) and isinstance(t2, pytd.GenericType):
326 return self.match_Unknown_against_Generic(t1, t2, subst)
327 elif isinstance(t1, pytd.GenericType) and is_unknown(t2):
328 return self.match_Generic_against_Unknown(t1, t2, subst)
329 elif isinstance(t1, pytd.GenericType) and isinstance(t2, pytd.GenericType):
330 return self.match_Generic_against_Generic(t1, t2, subst)
331 elif isinstance(t1, pytd.GenericType):
332 # E.g. list[...] matches against list, or even object.
333 return self.match_type_against_type(t1.base_type, t2, subst)
334 elif isinstance(t2, pytd.GenericType):
335 if self.any_also_is_bottom:
336 # E.g. list (a.k.a. list[Any]) matches against list[str]
337 return self.match_type_against_type(t1, t2.base_type, subst)
338 else:
339 return booleq.FALSE
340 elif is_unknown(t1) and is_unknown(t2):
341 return booleq.Eq(t1.name, t2.name)
342 elif (isinstance(t1, (pytd.NamedType, StrictType)) and
343 isinstance(t2, (pytd.NamedType, StrictType))):
344 if is_complete(t1) and is_complete(t2) and t1.name != t2.name:
345 # Optimization: If we know these two can never be equal, just return
346 # false right away.
347 return booleq.FALSE
348 else:
349 return booleq.Eq(t1.name, t2.name)
350 elif isinstance(t1, pytd.LateType) or isinstance(t2, pytd.LateType):
351 # Unresolved types never match against anything.
352 return booleq.FALSE
353 else:
354 raise AssertionError("Don't know how to match %s against %s" % (
355 type(t1), type(t2)))
356
357 # pylint: disable=invalid-name
358 def match_Signature_against_Signature(self, sig1, sig2, subst,
359 skip_self=False):
360 """Match a pytd.Signature against another pytd.Signature.
361
362 Args:
363 sig1: The caller
364 sig2: The callee
365 subst: Current type parameters.
366 skip_self: If True, doesn't compare the first parameter, which is
367 considered (and verified) to be "self".
368 Returns:
369 An instance of booleq.BooleanTerm, i.e. a boolean formula.
370 """
371 assert not sig1.template
372 # Signatures have type parameters, too. We ignore them, since they can
373 # be anything. (See maybe_lookup_type_param())
374 subst.update({p.type_param: None for p in sig2.template})
375 params1 = sig1.params
376 params2 = sig2.params
377 if skip_self:
378 # Methods in an ~unknown need to declare their methods with "self"
379 assert params1 and params1[0].name == "self"
380 params1 = params1[1:]
381 if params2 and params2[0].name == "self":
382 params2 = params2[1:]
383 equalities = []
384 if len(params1) > len(params2) and not sig2.has_optional:
385 return booleq.FALSE # extra parameters
386 if sig1.starargs is not None and sig2.starargs is not None:
387 equalities.append(self.match_type_against_type(
388 sig1.starargs.type, sig2.starargs.type, subst))
389 if sig1.starstarargs is not None and sig2.starstarargs is not None:
390 equalities.append(self.match_type_against_type(
391 sig1.starstarargs.type, sig2.starstarargs.type, subst))
392 # TODO(b/159058933): Handle kwonly parameters (on either side). Presumably,
393 # a kwonly on the left side means that it was a keyword param.
394 for p1, p2 in zip(params1, params2):
395 if p1.optional and not p2.optional:
396 return booleq.FALSE # needed for optimize.py:RemoveRedundantSignatures
397 for i, p2 in enumerate(params2):
398 if i >= len(params1):
399 if not p2.optional:
400 return booleq.FALSE # missing parameter
401 else:
402 pass
403 else:
404 p1 = params1[i]
405 if p1.name != p2.name and not (
406 pytd_utils.ANON_PARAM.match(p1.name) or
407 pytd_utils.ANON_PARAM.match(p2.name)):
408 return booleq.FALSE
409 equalities.append(self.match_type_against_type(p1.type, p2.type, subst))
410 equalities.append(
411 self.match_type_against_type(
412 sig1.return_type, sig2.return_type, subst))
413 return booleq.And(equalities)
414
415 def match_Signature_against_Function(self, sig, f, subst, skip_self=False): # pylint: disable=invalid-name
416 def make_or(inner):
417 return booleq.Or(
418 self.match_Signature_against_Signature(inner, s, subst, skip_self)
419 for s in f.signatures)
420 return booleq.And(make_or(inner) for inner in visitors.ExpandSignature(sig))
421
422 def match_Function_against_Function(self, f1, f2, subst, skip_self=False): # pylint: disable=invalid-name
423 return booleq.And(
424 self.match_Signature_against_Function(s1, f2, subst, skip_self)
425 for s1 in f1.signatures)
426
427 def match_Function_against_Class(self, f1, cls2, subst, cache):
428 cls2_methods = cache.get(id(cls2))
429 if cls2_methods is None:
430 cls2_methods = cache[id(cls2)] = {f.name: f for f in cls2.methods}
431 if f1.name not in cls2_methods:
432 # The class itself doesn't have this method, but base classes might.
433 # TODO(b/159058933): This should do MRO order, not depth-first.
434 for base in cls2.parents:
435 if isinstance(base, pytd.AnythingType):
436 # AnythingType can contain any method. However, that would mean that
437 # a class that inherits from AnythingType contains any method
438 # imaginable, and hence is a match for anything. To prevent the bad
439 # results caused by that, return FALSE here.
440 return booleq.FALSE
441 elif isinstance(base, (pytd.ClassType, pytd.GenericType)):
442 if isinstance(base, pytd.ClassType):
443 cls = base.cls
444 values = tuple(pytd.AnythingType() for _ in cls.template)
445 elif isinstance(base, pytd.TupleType):
446 cls = base.base_type.cls
447 values = (pytd_utils.JoinTypes(base.parameters),)
448 else:
449 cls = base.base_type.cls
450 values = base.parameters
451 if values:
452 subst = subst.copy()
453 for param, value in zip(cls.template, values):
454 subst[param.type_param] = value
455 implication = self.match_Function_against_Class(f1, cls, subst, cache)
456 if implication is not booleq.FALSE:
457 return implication
458 else:
459 # Funky types like UnionType are hard to match against (and shouldn't
460 # appear as a base class) so we treat them as catch-all.
461 log.warning("Assuming that %s has method %s",
462 pytd_utils.Print(base), f1.name)
463 return booleq.TRUE
464 return booleq.FALSE
465 else:
466 f2 = cls2_methods[f1.name]
467 return self.match_Function_against_Function(
468 f1, f2, subst, skip_self=True)
469
470 def match_Class_against_Class(self, cls1, cls2, subst): # pylint: disable=invalid-name
471 """Match a pytd.Class against another pytd.Class."""
472 return self.match_Functions_against_Class(
473 cls1.methods, cls2, subst)
474
475 def match_Protocol_against_Unknown(self, protocol, unknown, subst): # pylint: disable=invalid-name
476 """Match a typing.Protocol against an unknown class."""
477 filtered_methods = [f for f in protocol.methods if f.is_abstract]
478 return self.match_Functions_against_Class(
479 filtered_methods, unknown, subst)
480
481 def match_Functions_against_Class(self, methods, cls2, subst):
482 implications = []
483 cache = {}
484 for f1 in methods:
485 implication = self.match_Function_against_Class(f1, cls2, subst, cache)
486 implications.append(implication)
487 if implication is booleq.FALSE:
488 break
489 # TODO(b/159058933): class attributes
490 return booleq.And(implications)
491
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pytype/pytd/type_match.py b/pytype/pytd/type_match.py
--- a/pytype/pytd/type_match.py
+++ b/pytype/pytd/type_match.py
@@ -347,6 +347,8 @@
return booleq.FALSE
else:
return booleq.Eq(t1.name, t2.name)
+ elif isinstance(t1, pytd.NamedType) and isinstance(t2, pytd.Literal):
+ return booleq.FALSE
elif isinstance(t1, pytd.LateType) or isinstance(t2, pytd.LateType):
# Unresolved types never match against anything.
return booleq.FALSE
| {"golden_diff": "diff --git a/pytype/pytd/type_match.py b/pytype/pytd/type_match.py\n--- a/pytype/pytd/type_match.py\n+++ b/pytype/pytd/type_match.py\n@@ -347,6 +347,8 @@\n return booleq.FALSE\n else:\n return booleq.Eq(t1.name, t2.name)\n+ elif isinstance(t1, pytd.NamedType) and isinstance(t2, pytd.Literal):\n+ return booleq.FALSE\n elif isinstance(t1, pytd.LateType) or isinstance(t2, pytd.LateType):\n # Unresolved types never match against anything.\n return booleq.FALSE\n", "issue": "AssertionError: Don't know how to match <class 'pytype.pytd.pytd.Name\u2026\n\u2026dType'> against <class 'pytype.pytd.pytd.Literal'>\r\n\r\nI opened an issue for this error. If this a possible fix for this issue, feel free to merge the fix. \r\nIssue = https://github.com/google/pytype/issues/802\n", "before_files": [{"content": "\"\"\"Match pytd types against each other.\n\n\"Matching\" x against y means roughly: If we have a function f(param: y) and\na type x, would we be able to pass (an instance of) x to f. (I.e.,\n\"execute f(x)\"). So for example, str would \"match\" against basestring, and\nlist[int] would match against list[Number].\n\nThis is used for converting structural types to nominal types during type\ninference, but could also be used when merging pytd files, to match existing\nsignatures against new inference results.\n\"\"\"\n\nimport logging\n\nfrom pytype import utils\nfrom pytype.pytd import booleq\nfrom pytype.pytd import escape\nfrom pytype.pytd import pytd\nfrom pytype.pytd import pytd_utils\nfrom pytype.pytd import visitors\nfrom pytype.pytd.parse import node\n\nlog = logging.getLogger(__name__)\n\n\nis_complete = escape.is_complete\n\n\n# Might not be needed anymore once pytd has builtin support for ~unknown.\ndef is_unknown(t):\n \"\"\"Return True if this is an ~unknown.\"\"\"\n if isinstance(t, (pytd.ClassType, pytd.NamedType, pytd.Class, StrictType)):\n return escape.is_unknown(t.name)\n elif isinstance(t, str):\n return escape.is_unknown(t)\n else:\n return False\n\n\ndef get_all_subclasses(asts):\n \"\"\"Compute a class->subclasses mapping.\n\n Args:\n asts: A list of ASTs.\n\n Returns:\n A dictionary, mapping instances of pytd.Type (types) to lists of\n pytd.Class (the derived classes).\n \"\"\"\n hierarchy = {}\n for ast in asts:\n hierarchy.update(ast.Visit(visitors.ExtractSuperClasses()))\n def filter_superclasses(superclasses):\n return [superclass for superclass in superclasses\n if is_complete(superclass)]\n hierarchy = {cls: filter_superclasses(superclasses)\n for cls, superclasses in hierarchy.items() if is_complete(cls)}\n # typically this is a fairly short list, e.g.:\n # [ClassType(basestring), ClassType(int), ClassType(object)]\n return utils.invert_dict(hierarchy)\n\n\nclass StrictType(node.Node(\"name\")):\n \"\"\"A type that doesn't allow sub- or superclasses to match.\n\n For example, \"int\" is considered a valid argument for a function that accepts\n \"object\", but StrictType(\"int\") is not.\n \"\"\"\n\n def __str__(self):\n return self.name\n\n\nclass TypeMatch(pytd_utils.TypeMatcher):\n \"\"\"Class for matching types against other types.\"\"\"\n\n def __init__(self, direct_subclasses=None, any_also_is_bottom=True):\n \"\"\"Construct.\n\n Args:\n direct_subclasses: A dictionary, mapping pytd.Type to lists of pytd.Type.\n any_also_is_bottom: Whether we should, (if True) consider\n pytd.AnythingType() to also be at the bottom of the type hierarchy,\n thus making it a subclass of everything, or (if False) to be only\n at the top.\n \"\"\"\n self.direct_subclasses = direct_subclasses or {}\n self.any_also_is_bottom = any_also_is_bottom\n self.solver = booleq.Solver()\n self._implications = {}\n\n def default_match(self, t1, t2, *unused_args, **unused_kwargs):\n # Don't allow pytd_utils.TypeMatcher to do default matching.\n raise AssertionError(\n \"Can't compare %s and %s\" % (type(t1).__name__, type(t2).__name__))\n\n def get_superclasses(self, t):\n \"\"\"Get all base classes of this type.\n\n Args:\n t: A pytd.Type\n Returns:\n A list of pytd.Type.\n \"\"\"\n if isinstance(t, pytd.ClassType):\n return sum((self.get_superclasses(c) for c in t.cls.parents), [t])\n elif isinstance(t, pytd.AnythingType):\n # All types, even \"?\", inherit from object.\n return [pytd.NamedType(\"builtins.object\")]\n elif isinstance(t, pytd.GenericType):\n return self.get_superclasses(t.base_type)\n else:\n log.warning(\"Can't extract superclasses from %s\", type(t))\n return [pytd.NamedType(\"builtins.object\")]\n\n def get_subclasses(self, t):\n \"\"\"Get all classes derived from this type.\n\n Args:\n t: A pytd.Type\n Returns:\n A list of pytd.Type.\n \"\"\"\n if isinstance(t, pytd.ClassType):\n subclasses = self.direct_subclasses.get(t, [])\n return sum((self.get_subclasses(pytd.ClassType(c.name, c))\n for c in subclasses), [t])\n else:\n raise NotImplementedError(\"Can't extract subclasses from %s\" % type(t))\n\n def type_parameter(self, unknown, base_class, item):\n \"\"\"This generates the type parameter when matching against a generic type.\n\n For example, when we match ~unknown1 against list[T], we need an additional\n type to model the T in \"~unknown1[T]\". This type would have the name\n \"~unknown1.list.T\".\n\n Args:\n unknown: An unknown type. This is the type that's matched against\n base_class[T]\n base_class: The base class of the generic we're matching the unknown\n against. E.g. \"list\".\n item: The pytd.TemplateItem, i.e., the actual type parameter. (\"T\" in\n the examples above)\n Returns:\n A type (pytd.Node) to represent this type parameter.\n \"\"\"\n assert is_unknown(unknown)\n assert isinstance(base_class, pytd.Class)\n name = unknown.name + \".\" + base_class.name + \".\" + item.type_param.name\n # We do *not* consider subclasses or superclasses when matching type\n # parameters.\n # So for example, if we pass list[int] to f(x: list[T]), we assume that\n # T can only be \"int\", not \"int + object\". This might be considered\n # incorrect, but typically gives us more intuitive results.\n # Note that this only happens if we match ~unknown against generic types,\n # not for matching of \"known\" types against each other.\n return StrictType(name)\n\n def _get_parameters(self, t1, t2):\n if isinstance(t1, pytd.TupleType) and isinstance(t2, pytd.TupleType):\n # No change needed; the parameters will be compared element-wise.\n return t1.parameters, t2.parameters\n elif isinstance(t2, pytd.TupleType):\n # Since we call _get_parameters after confirming that t1 and t2 have\n # compatible base types, t1 is a homogeneous tuple here.\n return (t1.element_type,) * len(t2.parameters), t2.parameters\n elif isinstance(t1, pytd.TupleType):\n return (pytd_utils.JoinTypes(t1.parameters),), t2.parameters\n elif (isinstance(t1, pytd.CallableType) and\n isinstance(t2, pytd.CallableType)):\n # Flip the arguments, since argument types are contravariant.\n return t2.args + (t1.ret,), t1.args + (t2.ret,)\n elif (t1.base_type.cls.name == \"builtins.type\" and\n t2.base_type.cls.name == \"typing.Callable\"):\n # We'll only check the return type, since getting the argument types for\n # initializing a class is tricky.\n return t1.parameters, (t2.parameters[-1],)\n elif (t1.base_type.cls.name == \"typing.Callable\" and\n t2.base_type.cls.name == \"builtins.type\"):\n return (t1.parameters[-1],), t2.parameters\n elif isinstance(t1, pytd.CallableType):\n # We're matching against GenericType(Callable, (Any, _RET)), so we don't\n # need the argument types.\n return (pytd.AnythingType(), t1.ret), t2.parameters\n elif isinstance(t2, pytd.CallableType):\n return t1.parameters, (pytd.AnythingType(), t2.ret)\n else:\n num_extra_params = len(t1.parameters) - len(t2.parameters)\n # Matching, e.g., Dict[str, int] against Iterable[K] is legitimate.\n assert num_extra_params >= 0, (t1.base_type.cls.name,\n t2.base_type.cls.name)\n t2_parameters = t2.parameters + (pytd.AnythingType(),) * num_extra_params\n return t1.parameters, t2_parameters\n\n def match_Generic_against_Generic(self, t1, t2, subst): # pylint: disable=invalid-name\n \"\"\"Match a pytd.GenericType against another pytd.GenericType.\"\"\"\n assert isinstance(t1.base_type, pytd.ClassType), type(t1.base_type)\n assert isinstance(t2.base_type, pytd.ClassType), type(t2.base_type)\n base1 = pytd.ClassType(t1.base_type.cls.name, t1.base_type.cls)\n base2 = pytd.ClassType(t2.base_type.cls.name, t2.base_type.cls)\n base_type_cmp = self.match_type_against_type(base1, base2, subst)\n if base_type_cmp is booleq.FALSE:\n return booleq.FALSE\n t1_parameters, t2_parameters = self._get_parameters(t1, t2)\n if len(t1_parameters) != len(t2_parameters):\n return booleq.FALSE\n # Type parameters are covariant:\n # E.g. passing list[int] as argument for list[object] succeeds.\n param_cmp = [self.match_type_against_type(p1, p2, subst)\n for p1, p2 in zip(t1_parameters, t2_parameters)]\n return booleq.And([base_type_cmp] + param_cmp)\n\n def match_Unknown_against_Generic(self, t1, t2, subst): # pylint: disable=invalid-name\n assert isinstance(t2.base_type, pytd.ClassType)\n # No inheritance for base classes - you can only inherit from an\n # instantiated template, but not from a template itself.\n base_match = booleq.Eq(t1.name, t2.base_type.cls.name)\n type_params = [self.type_parameter(t1, t2.base_type.cls, item)\n for item in t2.base_type.cls.template]\n for type_param in type_params:\n self.solver.register_variable(type_param.name)\n if isinstance(t2, pytd.TupleType):\n t2_parameters = (pytd_utils.JoinTypes(t2.parameters),)\n else:\n t2_parameters = t2.parameters\n params = [self.match_type_against_type(p1, p2, subst)\n for p1, p2 in zip(type_params, t2_parameters)]\n return booleq.And([base_match] + params)\n\n def match_Generic_against_Unknown(self, t1, t2, subst): # pylint: disable=invalid-name\n # Note: This flips p1 and p2 above.\n return self.match_Unknown_against_Generic(t2, t1, subst) # pylint: disable=arguments-out-of-order\n\n def maybe_lookup_type_param(self, t, subst):\n while isinstance(t, pytd.TypeParameter):\n # We can only have type parameters in a class, and if so, we should have\n # added them to the type parameter substitution map (subst) beforehand:\n assert t in subst\n if subst[t] is None:\n # Function type parameter. Can be anything.\n t = pytd.AnythingType()\n else:\n assert subst[t] != t, \"Cyclic type parameter.\"\n t = subst[t]\n return t\n\n def unclass(self, t):\n \"\"\"Prevent further subclass or superclass expansion for this type.\"\"\"\n if isinstance(t, pytd.ClassType):\n # When t.name and t.cls.name differ (e.g., int vs. builtins.int), the\n # latter is the complete name.\n return pytd.NamedType(t.cls.name)\n else:\n return t\n\n def expand_superclasses(self, t):\n class_and_superclasses = self.get_superclasses(t)\n return [self.unclass(t) for t in class_and_superclasses]\n\n def expand_subclasses(self, t):\n class_and_subclasses = self.get_subclasses(t)\n return [self.unclass(t) for t in class_and_subclasses]\n\n def match_type_against_type(self, t1, t2, subst):\n types = (t1, t2, frozenset(subst.items()))\n if types in self._implications:\n return self._implications[types]\n implication = self._implications[types] = self._match_type_against_type(\n t1, t2, subst)\n return implication\n\n def _full_name(self, t):\n return t.name\n\n def _match_type_against_type(self, t1, t2, subst):\n \"\"\"Match a pytd.Type against another pytd.Type.\"\"\"\n t1 = self.maybe_lookup_type_param(t1, subst)\n t2 = self.maybe_lookup_type_param(t2, subst)\n # TODO(b/159058933): Use utils:TypeMatcher to simplify this?\n if isinstance(t2, pytd.AnythingType):\n # We can match anything against AnythingType. (It's like top)\n return booleq.TRUE\n elif isinstance(t1, pytd.AnythingType):\n if self.any_also_is_bottom:\n # We can match AnythingType against everything. (It's like bottom)\n return booleq.TRUE\n else:\n return booleq.FALSE\n elif isinstance(t1, pytd.NothingType):\n # nothing as an actual type matches against everything, since it\n # represents an empty value.\n return booleq.TRUE\n elif isinstance(t2, pytd.NothingType):\n # We can't match anything against nothing as an expected type (except\n # nothing itself, above).\n return booleq.FALSE\n elif isinstance(t1, pytd.UnionType):\n return booleq.And(self.match_type_against_type(u, t2, subst)\n for u in t1.type_list)\n elif isinstance(t2, pytd.UnionType):\n return booleq.Or(self.match_type_against_type(t1, u, subst)\n for u in t2.type_list)\n elif (isinstance(t1, pytd.ClassType) and isinstance(t2, StrictType) or\n isinstance(t1, StrictType) and isinstance(t2, pytd.ClassType)):\n # For strict types, avoid subclasses of the left side.\n return booleq.Eq(self._full_name(t1), self._full_name(t2))\n elif isinstance(t1, pytd.ClassType) and t2.name == \"builtins.object\":\n return booleq.TRUE\n elif (t1.name in (\"builtins.type\", \"typing.Callable\") and\n t2.name in (\"builtins.type\", \"typing.Callable\")):\n return booleq.TRUE\n elif isinstance(t1, pytd.ClassType):\n # ClassTypes are similar to Unions, except they're disjunctions: We can\n # match the type or any of its base classes against the formal parameter.\n return booleq.Or(self.match_type_against_type(t, t2, subst)\n for t in self.expand_superclasses(t1))\n elif isinstance(t2, pytd.ClassType):\n # ClassTypes on the right are exactly like Unions: We can match against\n # this type or any of its subclasses.\n return booleq.Or(self.match_type_against_type(t1, t, subst)\n for t in self.expand_subclasses(t2))\n assert not isinstance(t1, pytd.ClassType)\n assert not isinstance(t2, pytd.ClassType)\n if is_unknown(t1) and isinstance(t2, pytd.GenericType):\n return self.match_Unknown_against_Generic(t1, t2, subst)\n elif isinstance(t1, pytd.GenericType) and is_unknown(t2):\n return self.match_Generic_against_Unknown(t1, t2, subst)\n elif isinstance(t1, pytd.GenericType) and isinstance(t2, pytd.GenericType):\n return self.match_Generic_against_Generic(t1, t2, subst)\n elif isinstance(t1, pytd.GenericType):\n # E.g. list[...] matches against list, or even object.\n return self.match_type_against_type(t1.base_type, t2, subst)\n elif isinstance(t2, pytd.GenericType):\n if self.any_also_is_bottom:\n # E.g. list (a.k.a. list[Any]) matches against list[str]\n return self.match_type_against_type(t1, t2.base_type, subst)\n else:\n return booleq.FALSE\n elif is_unknown(t1) and is_unknown(t2):\n return booleq.Eq(t1.name, t2.name)\n elif (isinstance(t1, (pytd.NamedType, StrictType)) and\n isinstance(t2, (pytd.NamedType, StrictType))):\n if is_complete(t1) and is_complete(t2) and t1.name != t2.name:\n # Optimization: If we know these two can never be equal, just return\n # false right away.\n return booleq.FALSE\n else:\n return booleq.Eq(t1.name, t2.name)\n elif isinstance(t1, pytd.LateType) or isinstance(t2, pytd.LateType):\n # Unresolved types never match against anything.\n return booleq.FALSE\n else:\n raise AssertionError(\"Don't know how to match %s against %s\" % (\n type(t1), type(t2)))\n\n # pylint: disable=invalid-name\n def match_Signature_against_Signature(self, sig1, sig2, subst,\n skip_self=False):\n \"\"\"Match a pytd.Signature against another pytd.Signature.\n\n Args:\n sig1: The caller\n sig2: The callee\n subst: Current type parameters.\n skip_self: If True, doesn't compare the first parameter, which is\n considered (and verified) to be \"self\".\n Returns:\n An instance of booleq.BooleanTerm, i.e. a boolean formula.\n \"\"\"\n assert not sig1.template\n # Signatures have type parameters, too. We ignore them, since they can\n # be anything. (See maybe_lookup_type_param())\n subst.update({p.type_param: None for p in sig2.template})\n params1 = sig1.params\n params2 = sig2.params\n if skip_self:\n # Methods in an ~unknown need to declare their methods with \"self\"\n assert params1 and params1[0].name == \"self\"\n params1 = params1[1:]\n if params2 and params2[0].name == \"self\":\n params2 = params2[1:]\n equalities = []\n if len(params1) > len(params2) and not sig2.has_optional:\n return booleq.FALSE # extra parameters\n if sig1.starargs is not None and sig2.starargs is not None:\n equalities.append(self.match_type_against_type(\n sig1.starargs.type, sig2.starargs.type, subst))\n if sig1.starstarargs is not None and sig2.starstarargs is not None:\n equalities.append(self.match_type_against_type(\n sig1.starstarargs.type, sig2.starstarargs.type, subst))\n # TODO(b/159058933): Handle kwonly parameters (on either side). Presumably,\n # a kwonly on the left side means that it was a keyword param.\n for p1, p2 in zip(params1, params2):\n if p1.optional and not p2.optional:\n return booleq.FALSE # needed for optimize.py:RemoveRedundantSignatures\n for i, p2 in enumerate(params2):\n if i >= len(params1):\n if not p2.optional:\n return booleq.FALSE # missing parameter\n else:\n pass\n else:\n p1 = params1[i]\n if p1.name != p2.name and not (\n pytd_utils.ANON_PARAM.match(p1.name) or\n pytd_utils.ANON_PARAM.match(p2.name)):\n return booleq.FALSE\n equalities.append(self.match_type_against_type(p1.type, p2.type, subst))\n equalities.append(\n self.match_type_against_type(\n sig1.return_type, sig2.return_type, subst))\n return booleq.And(equalities)\n\n def match_Signature_against_Function(self, sig, f, subst, skip_self=False): # pylint: disable=invalid-name\n def make_or(inner):\n return booleq.Or(\n self.match_Signature_against_Signature(inner, s, subst, skip_self)\n for s in f.signatures)\n return booleq.And(make_or(inner) for inner in visitors.ExpandSignature(sig))\n\n def match_Function_against_Function(self, f1, f2, subst, skip_self=False): # pylint: disable=invalid-name\n return booleq.And(\n self.match_Signature_against_Function(s1, f2, subst, skip_self)\n for s1 in f1.signatures)\n\n def match_Function_against_Class(self, f1, cls2, subst, cache):\n cls2_methods = cache.get(id(cls2))\n if cls2_methods is None:\n cls2_methods = cache[id(cls2)] = {f.name: f for f in cls2.methods}\n if f1.name not in cls2_methods:\n # The class itself doesn't have this method, but base classes might.\n # TODO(b/159058933): This should do MRO order, not depth-first.\n for base in cls2.parents:\n if isinstance(base, pytd.AnythingType):\n # AnythingType can contain any method. However, that would mean that\n # a class that inherits from AnythingType contains any method\n # imaginable, and hence is a match for anything. To prevent the bad\n # results caused by that, return FALSE here.\n return booleq.FALSE\n elif isinstance(base, (pytd.ClassType, pytd.GenericType)):\n if isinstance(base, pytd.ClassType):\n cls = base.cls\n values = tuple(pytd.AnythingType() for _ in cls.template)\n elif isinstance(base, pytd.TupleType):\n cls = base.base_type.cls\n values = (pytd_utils.JoinTypes(base.parameters),)\n else:\n cls = base.base_type.cls\n values = base.parameters\n if values:\n subst = subst.copy()\n for param, value in zip(cls.template, values):\n subst[param.type_param] = value\n implication = self.match_Function_against_Class(f1, cls, subst, cache)\n if implication is not booleq.FALSE:\n return implication\n else:\n # Funky types like UnionType are hard to match against (and shouldn't\n # appear as a base class) so we treat them as catch-all.\n log.warning(\"Assuming that %s has method %s\",\n pytd_utils.Print(base), f1.name)\n return booleq.TRUE\n return booleq.FALSE\n else:\n f2 = cls2_methods[f1.name]\n return self.match_Function_against_Function(\n f1, f2, subst, skip_self=True)\n\n def match_Class_against_Class(self, cls1, cls2, subst): # pylint: disable=invalid-name\n \"\"\"Match a pytd.Class against another pytd.Class.\"\"\"\n return self.match_Functions_against_Class(\n cls1.methods, cls2, subst)\n\n def match_Protocol_against_Unknown(self, protocol, unknown, subst): # pylint: disable=invalid-name\n \"\"\"Match a typing.Protocol against an unknown class.\"\"\"\n filtered_methods = [f for f in protocol.methods if f.is_abstract]\n return self.match_Functions_against_Class(\n filtered_methods, unknown, subst)\n\n def match_Functions_against_Class(self, methods, cls2, subst):\n implications = []\n cache = {}\n for f1 in methods:\n implication = self.match_Function_against_Class(f1, cls2, subst, cache)\n implications.append(implication)\n if implication is booleq.FALSE:\n break\n # TODO(b/159058933): class attributes\n return booleq.And(implications)\n", "path": "pytype/pytd/type_match.py"}], "after_files": [{"content": "\"\"\"Match pytd types against each other.\n\n\"Matching\" x against y means roughly: If we have a function f(param: y) and\na type x, would we be able to pass (an instance of) x to f. (I.e.,\n\"execute f(x)\"). So for example, str would \"match\" against basestring, and\nlist[int] would match against list[Number].\n\nThis is used for converting structural types to nominal types during type\ninference, but could also be used when merging pytd files, to match existing\nsignatures against new inference results.\n\"\"\"\n\nimport logging\n\nfrom pytype import utils\nfrom pytype.pytd import booleq\nfrom pytype.pytd import escape\nfrom pytype.pytd import pytd\nfrom pytype.pytd import pytd_utils\nfrom pytype.pytd import visitors\nfrom pytype.pytd.parse import node\n\nlog = logging.getLogger(__name__)\n\n\nis_complete = escape.is_complete\n\n\n# Might not be needed anymore once pytd has builtin support for ~unknown.\ndef is_unknown(t):\n \"\"\"Return True if this is an ~unknown.\"\"\"\n if isinstance(t, (pytd.ClassType, pytd.NamedType, pytd.Class, StrictType)):\n return escape.is_unknown(t.name)\n elif isinstance(t, str):\n return escape.is_unknown(t)\n else:\n return False\n\n\ndef get_all_subclasses(asts):\n \"\"\"Compute a class->subclasses mapping.\n\n Args:\n asts: A list of ASTs.\n\n Returns:\n A dictionary, mapping instances of pytd.Type (types) to lists of\n pytd.Class (the derived classes).\n \"\"\"\n hierarchy = {}\n for ast in asts:\n hierarchy.update(ast.Visit(visitors.ExtractSuperClasses()))\n def filter_superclasses(superclasses):\n return [superclass for superclass in superclasses\n if is_complete(superclass)]\n hierarchy = {cls: filter_superclasses(superclasses)\n for cls, superclasses in hierarchy.items() if is_complete(cls)}\n # typically this is a fairly short list, e.g.:\n # [ClassType(basestring), ClassType(int), ClassType(object)]\n return utils.invert_dict(hierarchy)\n\n\nclass StrictType(node.Node(\"name\")):\n \"\"\"A type that doesn't allow sub- or superclasses to match.\n\n For example, \"int\" is considered a valid argument for a function that accepts\n \"object\", but StrictType(\"int\") is not.\n \"\"\"\n\n def __str__(self):\n return self.name\n\n\nclass TypeMatch(pytd_utils.TypeMatcher):\n \"\"\"Class for matching types against other types.\"\"\"\n\n def __init__(self, direct_subclasses=None, any_also_is_bottom=True):\n \"\"\"Construct.\n\n Args:\n direct_subclasses: A dictionary, mapping pytd.Type to lists of pytd.Type.\n any_also_is_bottom: Whether we should, (if True) consider\n pytd.AnythingType() to also be at the bottom of the type hierarchy,\n thus making it a subclass of everything, or (if False) to be only\n at the top.\n \"\"\"\n self.direct_subclasses = direct_subclasses or {}\n self.any_also_is_bottom = any_also_is_bottom\n self.solver = booleq.Solver()\n self._implications = {}\n\n def default_match(self, t1, t2, *unused_args, **unused_kwargs):\n # Don't allow pytd_utils.TypeMatcher to do default matching.\n raise AssertionError(\n \"Can't compare %s and %s\" % (type(t1).__name__, type(t2).__name__))\n\n def get_superclasses(self, t):\n \"\"\"Get all base classes of this type.\n\n Args:\n t: A pytd.Type\n Returns:\n A list of pytd.Type.\n \"\"\"\n if isinstance(t, pytd.ClassType):\n return sum((self.get_superclasses(c) for c in t.cls.parents), [t])\n elif isinstance(t, pytd.AnythingType):\n # All types, even \"?\", inherit from object.\n return [pytd.NamedType(\"builtins.object\")]\n elif isinstance(t, pytd.GenericType):\n return self.get_superclasses(t.base_type)\n else:\n log.warning(\"Can't extract superclasses from %s\", type(t))\n return [pytd.NamedType(\"builtins.object\")]\n\n def get_subclasses(self, t):\n \"\"\"Get all classes derived from this type.\n\n Args:\n t: A pytd.Type\n Returns:\n A list of pytd.Type.\n \"\"\"\n if isinstance(t, pytd.ClassType):\n subclasses = self.direct_subclasses.get(t, [])\n return sum((self.get_subclasses(pytd.ClassType(c.name, c))\n for c in subclasses), [t])\n else:\n raise NotImplementedError(\"Can't extract subclasses from %s\" % type(t))\n\n def type_parameter(self, unknown, base_class, item):\n \"\"\"This generates the type parameter when matching against a generic type.\n\n For example, when we match ~unknown1 against list[T], we need an additional\n type to model the T in \"~unknown1[T]\". This type would have the name\n \"~unknown1.list.T\".\n\n Args:\n unknown: An unknown type. This is the type that's matched against\n base_class[T]\n base_class: The base class of the generic we're matching the unknown\n against. E.g. \"list\".\n item: The pytd.TemplateItem, i.e., the actual type parameter. (\"T\" in\n the examples above)\n Returns:\n A type (pytd.Node) to represent this type parameter.\n \"\"\"\n assert is_unknown(unknown)\n assert isinstance(base_class, pytd.Class)\n name = unknown.name + \".\" + base_class.name + \".\" + item.type_param.name\n # We do *not* consider subclasses or superclasses when matching type\n # parameters.\n # So for example, if we pass list[int] to f(x: list[T]), we assume that\n # T can only be \"int\", not \"int + object\". This might be considered\n # incorrect, but typically gives us more intuitive results.\n # Note that this only happens if we match ~unknown against generic types,\n # not for matching of \"known\" types against each other.\n return StrictType(name)\n\n def _get_parameters(self, t1, t2):\n if isinstance(t1, pytd.TupleType) and isinstance(t2, pytd.TupleType):\n # No change needed; the parameters will be compared element-wise.\n return t1.parameters, t2.parameters\n elif isinstance(t2, pytd.TupleType):\n # Since we call _get_parameters after confirming that t1 and t2 have\n # compatible base types, t1 is a homogeneous tuple here.\n return (t1.element_type,) * len(t2.parameters), t2.parameters\n elif isinstance(t1, pytd.TupleType):\n return (pytd_utils.JoinTypes(t1.parameters),), t2.parameters\n elif (isinstance(t1, pytd.CallableType) and\n isinstance(t2, pytd.CallableType)):\n # Flip the arguments, since argument types are contravariant.\n return t2.args + (t1.ret,), t1.args + (t2.ret,)\n elif (t1.base_type.cls.name == \"builtins.type\" and\n t2.base_type.cls.name == \"typing.Callable\"):\n # We'll only check the return type, since getting the argument types for\n # initializing a class is tricky.\n return t1.parameters, (t2.parameters[-1],)\n elif (t1.base_type.cls.name == \"typing.Callable\" and\n t2.base_type.cls.name == \"builtins.type\"):\n return (t1.parameters[-1],), t2.parameters\n elif isinstance(t1, pytd.CallableType):\n # We're matching against GenericType(Callable, (Any, _RET)), so we don't\n # need the argument types.\n return (pytd.AnythingType(), t1.ret), t2.parameters\n elif isinstance(t2, pytd.CallableType):\n return t1.parameters, (pytd.AnythingType(), t2.ret)\n else:\n num_extra_params = len(t1.parameters) - len(t2.parameters)\n # Matching, e.g., Dict[str, int] against Iterable[K] is legitimate.\n assert num_extra_params >= 0, (t1.base_type.cls.name,\n t2.base_type.cls.name)\n t2_parameters = t2.parameters + (pytd.AnythingType(),) * num_extra_params\n return t1.parameters, t2_parameters\n\n def match_Generic_against_Generic(self, t1, t2, subst): # pylint: disable=invalid-name\n \"\"\"Match a pytd.GenericType against another pytd.GenericType.\"\"\"\n assert isinstance(t1.base_type, pytd.ClassType), type(t1.base_type)\n assert isinstance(t2.base_type, pytd.ClassType), type(t2.base_type)\n base1 = pytd.ClassType(t1.base_type.cls.name, t1.base_type.cls)\n base2 = pytd.ClassType(t2.base_type.cls.name, t2.base_type.cls)\n base_type_cmp = self.match_type_against_type(base1, base2, subst)\n if base_type_cmp is booleq.FALSE:\n return booleq.FALSE\n t1_parameters, t2_parameters = self._get_parameters(t1, t2)\n if len(t1_parameters) != len(t2_parameters):\n return booleq.FALSE\n # Type parameters are covariant:\n # E.g. passing list[int] as argument for list[object] succeeds.\n param_cmp = [self.match_type_against_type(p1, p2, subst)\n for p1, p2 in zip(t1_parameters, t2_parameters)]\n return booleq.And([base_type_cmp] + param_cmp)\n\n def match_Unknown_against_Generic(self, t1, t2, subst): # pylint: disable=invalid-name\n assert isinstance(t2.base_type, pytd.ClassType)\n # No inheritance for base classes - you can only inherit from an\n # instantiated template, but not from a template itself.\n base_match = booleq.Eq(t1.name, t2.base_type.cls.name)\n type_params = [self.type_parameter(t1, t2.base_type.cls, item)\n for item in t2.base_type.cls.template]\n for type_param in type_params:\n self.solver.register_variable(type_param.name)\n if isinstance(t2, pytd.TupleType):\n t2_parameters = (pytd_utils.JoinTypes(t2.parameters),)\n else:\n t2_parameters = t2.parameters\n params = [self.match_type_against_type(p1, p2, subst)\n for p1, p2 in zip(type_params, t2_parameters)]\n return booleq.And([base_match] + params)\n\n def match_Generic_against_Unknown(self, t1, t2, subst): # pylint: disable=invalid-name\n # Note: This flips p1 and p2 above.\n return self.match_Unknown_against_Generic(t2, t1, subst) # pylint: disable=arguments-out-of-order\n\n def maybe_lookup_type_param(self, t, subst):\n while isinstance(t, pytd.TypeParameter):\n # We can only have type parameters in a class, and if so, we should have\n # added them to the type parameter substitution map (subst) beforehand:\n assert t in subst\n if subst[t] is None:\n # Function type parameter. Can be anything.\n t = pytd.AnythingType()\n else:\n assert subst[t] != t, \"Cyclic type parameter.\"\n t = subst[t]\n return t\n\n def unclass(self, t):\n \"\"\"Prevent further subclass or superclass expansion for this type.\"\"\"\n if isinstance(t, pytd.ClassType):\n # When t.name and t.cls.name differ (e.g., int vs. builtins.int), the\n # latter is the complete name.\n return pytd.NamedType(t.cls.name)\n else:\n return t\n\n def expand_superclasses(self, t):\n class_and_superclasses = self.get_superclasses(t)\n return [self.unclass(t) for t in class_and_superclasses]\n\n def expand_subclasses(self, t):\n class_and_subclasses = self.get_subclasses(t)\n return [self.unclass(t) for t in class_and_subclasses]\n\n def match_type_against_type(self, t1, t2, subst):\n types = (t1, t2, frozenset(subst.items()))\n if types in self._implications:\n return self._implications[types]\n implication = self._implications[types] = self._match_type_against_type(\n t1, t2, subst)\n return implication\n\n def _full_name(self, t):\n return t.name\n\n def _match_type_against_type(self, t1, t2, subst):\n \"\"\"Match a pytd.Type against another pytd.Type.\"\"\"\n t1 = self.maybe_lookup_type_param(t1, subst)\n t2 = self.maybe_lookup_type_param(t2, subst)\n # TODO(b/159058933): Use utils:TypeMatcher to simplify this?\n if isinstance(t2, pytd.AnythingType):\n # We can match anything against AnythingType. (It's like top)\n return booleq.TRUE\n elif isinstance(t1, pytd.AnythingType):\n if self.any_also_is_bottom:\n # We can match AnythingType against everything. (It's like bottom)\n return booleq.TRUE\n else:\n return booleq.FALSE\n elif isinstance(t1, pytd.NothingType):\n # nothing as an actual type matches against everything, since it\n # represents an empty value.\n return booleq.TRUE\n elif isinstance(t2, pytd.NothingType):\n # We can't match anything against nothing as an expected type (except\n # nothing itself, above).\n return booleq.FALSE\n elif isinstance(t1, pytd.UnionType):\n return booleq.And(self.match_type_against_type(u, t2, subst)\n for u in t1.type_list)\n elif isinstance(t2, pytd.UnionType):\n return booleq.Or(self.match_type_against_type(t1, u, subst)\n for u in t2.type_list)\n elif (isinstance(t1, pytd.ClassType) and isinstance(t2, StrictType) or\n isinstance(t1, StrictType) and isinstance(t2, pytd.ClassType)):\n # For strict types, avoid subclasses of the left side.\n return booleq.Eq(self._full_name(t1), self._full_name(t2))\n elif isinstance(t1, pytd.ClassType) and t2.name == \"builtins.object\":\n return booleq.TRUE\n elif (t1.name in (\"builtins.type\", \"typing.Callable\") and\n t2.name in (\"builtins.type\", \"typing.Callable\")):\n return booleq.TRUE\n elif isinstance(t1, pytd.ClassType):\n # ClassTypes are similar to Unions, except they're disjunctions: We can\n # match the type or any of its base classes against the formal parameter.\n return booleq.Or(self.match_type_against_type(t, t2, subst)\n for t in self.expand_superclasses(t1))\n elif isinstance(t2, pytd.ClassType):\n # ClassTypes on the right are exactly like Unions: We can match against\n # this type or any of its subclasses.\n return booleq.Or(self.match_type_against_type(t1, t, subst)\n for t in self.expand_subclasses(t2))\n assert not isinstance(t1, pytd.ClassType)\n assert not isinstance(t2, pytd.ClassType)\n if is_unknown(t1) and isinstance(t2, pytd.GenericType):\n return self.match_Unknown_against_Generic(t1, t2, subst)\n elif isinstance(t1, pytd.GenericType) and is_unknown(t2):\n return self.match_Generic_against_Unknown(t1, t2, subst)\n elif isinstance(t1, pytd.GenericType) and isinstance(t2, pytd.GenericType):\n return self.match_Generic_against_Generic(t1, t2, subst)\n elif isinstance(t1, pytd.GenericType):\n # E.g. list[...] matches against list, or even object.\n return self.match_type_against_type(t1.base_type, t2, subst)\n elif isinstance(t2, pytd.GenericType):\n if self.any_also_is_bottom:\n # E.g. list (a.k.a. list[Any]) matches against list[str]\n return self.match_type_against_type(t1, t2.base_type, subst)\n else:\n return booleq.FALSE\n elif is_unknown(t1) and is_unknown(t2):\n return booleq.Eq(t1.name, t2.name)\n elif (isinstance(t1, (pytd.NamedType, StrictType)) and\n isinstance(t2, (pytd.NamedType, StrictType))):\n if is_complete(t1) and is_complete(t2) and t1.name != t2.name:\n # Optimization: If we know these two can never be equal, just return\n # false right away.\n return booleq.FALSE\n else:\n return booleq.Eq(t1.name, t2.name)\n elif isinstance(t1, pytd.NamedType) and isinstance(t2, pytd.Literal):\n return booleq.FALSE\n elif isinstance(t1, pytd.LateType) or isinstance(t2, pytd.LateType):\n # Unresolved types never match against anything.\n return booleq.FALSE\n else:\n raise AssertionError(\"Don't know how to match %s against %s\" % (\n type(t1), type(t2)))\n\n # pylint: disable=invalid-name\n def match_Signature_against_Signature(self, sig1, sig2, subst,\n skip_self=False):\n \"\"\"Match a pytd.Signature against another pytd.Signature.\n\n Args:\n sig1: The caller\n sig2: The callee\n subst: Current type parameters.\n skip_self: If True, doesn't compare the first parameter, which is\n considered (and verified) to be \"self\".\n Returns:\n An instance of booleq.BooleanTerm, i.e. a boolean formula.\n \"\"\"\n assert not sig1.template\n # Signatures have type parameters, too. We ignore them, since they can\n # be anything. (See maybe_lookup_type_param())\n subst.update({p.type_param: None for p in sig2.template})\n params1 = sig1.params\n params2 = sig2.params\n if skip_self:\n # Methods in an ~unknown need to declare their methods with \"self\"\n assert params1 and params1[0].name == \"self\"\n params1 = params1[1:]\n if params2 and params2[0].name == \"self\":\n params2 = params2[1:]\n equalities = []\n if len(params1) > len(params2) and not sig2.has_optional:\n return booleq.FALSE # extra parameters\n if sig1.starargs is not None and sig2.starargs is not None:\n equalities.append(self.match_type_against_type(\n sig1.starargs.type, sig2.starargs.type, subst))\n if sig1.starstarargs is not None and sig2.starstarargs is not None:\n equalities.append(self.match_type_against_type(\n sig1.starstarargs.type, sig2.starstarargs.type, subst))\n # TODO(b/159058933): Handle kwonly parameters (on either side). Presumably,\n # a kwonly on the left side means that it was a keyword param.\n for p1, p2 in zip(params1, params2):\n if p1.optional and not p2.optional:\n return booleq.FALSE # needed for optimize.py:RemoveRedundantSignatures\n for i, p2 in enumerate(params2):\n if i >= len(params1):\n if not p2.optional:\n return booleq.FALSE # missing parameter\n else:\n pass\n else:\n p1 = params1[i]\n if p1.name != p2.name and not (\n pytd_utils.ANON_PARAM.match(p1.name) or\n pytd_utils.ANON_PARAM.match(p2.name)):\n return booleq.FALSE\n equalities.append(self.match_type_against_type(p1.type, p2.type, subst))\n equalities.append(\n self.match_type_against_type(\n sig1.return_type, sig2.return_type, subst))\n return booleq.And(equalities)\n\n def match_Signature_against_Function(self, sig, f, subst, skip_self=False): # pylint: disable=invalid-name\n def make_or(inner):\n return booleq.Or(\n self.match_Signature_against_Signature(inner, s, subst, skip_self)\n for s in f.signatures)\n return booleq.And(make_or(inner) for inner in visitors.ExpandSignature(sig))\n\n def match_Function_against_Function(self, f1, f2, subst, skip_self=False): # pylint: disable=invalid-name\n return booleq.And(\n self.match_Signature_against_Function(s1, f2, subst, skip_self)\n for s1 in f1.signatures)\n\n def match_Function_against_Class(self, f1, cls2, subst, cache):\n cls2_methods = cache.get(id(cls2))\n if cls2_methods is None:\n cls2_methods = cache[id(cls2)] = {f.name: f for f in cls2.methods}\n if f1.name not in cls2_methods:\n # The class itself doesn't have this method, but base classes might.\n # TODO(b/159058933): This should do MRO order, not depth-first.\n for base in cls2.parents:\n if isinstance(base, pytd.AnythingType):\n # AnythingType can contain any method. However, that would mean that\n # a class that inherits from AnythingType contains any method\n # imaginable, and hence is a match for anything. To prevent the bad\n # results caused by that, return FALSE here.\n return booleq.FALSE\n elif isinstance(base, (pytd.ClassType, pytd.GenericType)):\n if isinstance(base, pytd.ClassType):\n cls = base.cls\n values = tuple(pytd.AnythingType() for _ in cls.template)\n elif isinstance(base, pytd.TupleType):\n cls = base.base_type.cls\n values = (pytd_utils.JoinTypes(base.parameters),)\n else:\n cls = base.base_type.cls\n values = base.parameters\n if values:\n subst = subst.copy()\n for param, value in zip(cls.template, values):\n subst[param.type_param] = value\n implication = self.match_Function_against_Class(f1, cls, subst, cache)\n if implication is not booleq.FALSE:\n return implication\n else:\n # Funky types like UnionType are hard to match against (and shouldn't\n # appear as a base class) so we treat them as catch-all.\n log.warning(\"Assuming that %s has method %s\",\n pytd_utils.Print(base), f1.name)\n return booleq.TRUE\n return booleq.FALSE\n else:\n f2 = cls2_methods[f1.name]\n return self.match_Function_against_Function(\n f1, f2, subst, skip_self=True)\n\n def match_Class_against_Class(self, cls1, cls2, subst): # pylint: disable=invalid-name\n \"\"\"Match a pytd.Class against another pytd.Class.\"\"\"\n return self.match_Functions_against_Class(\n cls1.methods, cls2, subst)\n\n def match_Protocol_against_Unknown(self, protocol, unknown, subst): # pylint: disable=invalid-name\n \"\"\"Match a typing.Protocol against an unknown class.\"\"\"\n filtered_methods = [f for f in protocol.methods if f.is_abstract]\n return self.match_Functions_against_Class(\n filtered_methods, unknown, subst)\n\n def match_Functions_against_Class(self, methods, cls2, subst):\n implications = []\n cache = {}\n for f1 in methods:\n implication = self.match_Function_against_Class(f1, cls2, subst, cache)\n implications.append(implication)\n if implication is booleq.FALSE:\n break\n # TODO(b/159058933): class attributes\n return booleq.And(implications)\n", "path": "pytype/pytd/type_match.py"}]} |
gh_patches_debug_1570 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-2656 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The import/export between Menorca and Mallorca appears reversed
Our data is showing the export going one way, but our data source is showing the reverse according to one user. See screenshots:


--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/ES_IB.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import logging
4 from arrow import get
5 from requests import Session
6 from ree import (Formentera, Ibiza,
7 Mallorca, Menorca,
8 BalearicIslands)
9 # package "ree" is used to parse data from www.ree.es // maintained on github by @hectorespert
10
11 from .lib.exceptions import ParserException
12 from .lib.validation import validate, validate_production_diffs
13
14 ## Guess we'll need to figure these out later?! Adapted from ES-CN:
15
16 # Minimum valid zone demand. This is used to eliminate some cases
17 # where generation for one or more modes is obviously missing.
18 FLOORS = {
19 'ES-IB': 0,
20 'ES-IB-FO': 0,
21 'ES-IB-IZ': 0,
22 'ES-IB-MA': 0,
23 'ES-IB-ME': 0,
24 }
25
26
27 def fetch_island_data(zone_key, session):
28 if zone_key == 'ES-IB-FO':
29 formentera_data = Formentera(session, verify=False).get_all()
30 if not formentera_data:
31 raise ParserException(zone_key, "Formentera doesn't respond")
32 else:
33 return formentera_data
34 elif zone_key == 'ES-IB-IZ':
35 ibiza_data = Ibiza(session, verify=False).get_all()
36 if not ibiza_data:
37 raise ParserException(zone_key, "Party is over, Ibiza doesn't respond")
38 else:
39 return ibiza_data
40 elif zone_key == 'ES-IB-MA':
41 mallorca_data = Mallorca(session, verify=False).get_all()
42 if not mallorca_data:
43 raise ParserException(zone_key, "Mallorca doesn't respond")
44 else:
45 return mallorca_data
46 elif zone_key == 'ES-IB-ME':
47 menorca_data = Menorca(session, verify=False).get_all()
48 if not menorca_data:
49 raise ParserException(zone_key, "Menorca doesn't respond")
50 else:
51 return menorca_data
52 elif zone_key == 'ES-IB':
53 balearic_islands = BalearicIslands(session, verify=False).get_all()
54 if not balearic_islands:
55 raise ParserException(zone_key, "Balearic Islands doesn't respond")
56 else:
57 return balearic_islands
58 else:
59 raise ParserException(zone_key, 'Can\'t read this country code {0}'.format(zone_key))
60
61
62 def fetch_consumption(zone_key, session=None, target_datetime=None, logger=None):
63 if target_datetime:
64 raise NotImplementedError('This parser is not yet able to parse past dates')
65
66 ses = session or Session()
67 island_data = fetch_island_data(zone_key, ses)
68 data = []
69 for response in island_data:
70 response_data = {
71 'zoneKey': zone_key,
72 'datetime': get(response.timestamp).datetime,
73 'consumption': response.demand,
74 'source': 'demanda.ree.es'
75 }
76
77 data.append(response_data)
78
79 return data
80
81
82 def fetch_production(zone_key, session=None, target_datetime=None,
83 logger=logging.getLogger(__name__)):
84 if target_datetime:
85 raise NotImplementedError('This parser is not yet able to parse past dates')
86
87 ses = session or Session()
88 island_data = fetch_island_data(zone_key, ses)
89 data = []
90
91 if zone_key == 'ES-IB':
92 expected_range = {'coal': (50,600)}
93 else:
94 expected_range = None
95
96 for response in island_data:
97 if response.production() >= 0:
98 response_data = {
99 'zoneKey': zone_key,
100 'datetime': get(response.timestamp).datetime,
101 'production': {
102 'coal': response.carbon,
103 'gas': round(response.gas + response.combined, 2),
104 'solar': response.solar,
105 'oil': round(response.vapor + response.diesel, 2),
106 'wind': response.wind,
107 'hydro': response.hydraulic,
108 'biomass': response.waste,
109 'nuclear': 0.0,
110 'geothermal': 0.0,
111 'unknown': response.other
112 },
113 'storage': {
114 'hydro': 0.0,
115 'battery': 0.0
116 },
117 'source': 'demanda.ree.es',
118 }
119
120 response_data = validate(response_data, logger,
121 floor=FLOORS[zone_key],
122 expected_range = expected_range)
123
124 if response_data:
125 # append if valid
126 data.append(response_data)
127
128 if len(data) > 1:
129 # granularity is 10 minutes, drops points with change in coal > 100MW
130 data = validate_production_diffs(data, {'coal': 150}, logger)
131
132 return data
133
134
135 def fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):
136
137 if target_datetime:
138 raise NotImplementedError('This parser is not yet able to parse past dates')
139
140 sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))
141
142 ses = session or Session()
143
144 if sorted_zone_keys == 'ES->ES-IB':
145 responses = BalearicIslands(ses, verify=False).get_all()
146 if not responses:
147 raise ParserException("ES-IB", "No responses")
148 elif sorted_zone_keys == 'ES->ES-IB-MA' or sorted_zone_keys == 'ES-IB-MA->ES-IB-ME' or sorted_zone_keys == 'ES-IB-IZ->ES-IB-MA':
149 responses = Mallorca(ses, verify=False).get_all()
150 if not responses:
151 raise ParserException("ES-IB-MA", "No responses")
152 elif sorted_zone_keys == 'ES-IB-FO->ES-IB-IZ':
153 responses = Formentera(ses, verify=False).get_all()
154 if not responses:
155 raise ParserException("ES-IB-FO", "No responses")
156 else:
157 raise NotImplementedError('This exchange pair is not implemented')
158
159 exchanges = []
160 for response in responses:
161
162 if sorted_zone_keys == 'ES-IB-MA->ES-IB-ME':
163 net_flow = response.link['ma_me']
164 elif sorted_zone_keys == 'ES-IB-IZ->ES-IB-MA':
165 net_flow = response.link['ma_ib']
166 elif sorted_zone_keys == 'ES-IB-FO->ES-IB-IZ':
167 net_flow = -1 * response.link['ib_fo']
168 else:
169 net_flow = response.link['pe_ma']
170
171 exchange = {
172 'sortedZoneKeys': sorted_zone_keys,
173 'datetime': get(response.timestamp).datetime,
174 'netFlow': net_flow,
175 'source': 'demanda.ree.es',
176 }
177
178 exchanges.append(exchange)
179
180 return exchanges
181
182
183 if __name__ == '__main__':
184 session = Session
185 print("fetch_consumption(ES-IB)")
186 print(fetch_consumption('ES-IB', session))
187
188 print("fetch_production(ES-IB)")
189 print(fetch_production('ES-IB', session))
190
191 print("fetch_exchange(ES, ES-IB)")
192 print(fetch_exchange('ES', 'ES-IB', session))
193
194 print("fetch_consumption(ES-IB-FO)")
195 print(fetch_consumption('ES-IB-FO'))
196 print("fetch_production(ES-IB-FO)")
197 print(fetch_production('ES-IB-FO'))
198 print("fetch_consumption(ES-IB-IZ)")
199 print(fetch_consumption('ES-IB-IZ'))
200 print("fetch_production(ES-IB-IZ)")
201 print(fetch_production('ES-IB-IZ'))
202 print("fetch_consumption(ES-IB-MA)")
203 print(fetch_consumption('ES-IB-MA'))
204 print("fetch_production(ES-IB-MA)")
205 print(fetch_production('ES-IB-MA'))
206 print("fetch_consumption(ES-IB-ME)")
207 print(fetch_consumption('ES-IB-ME'))
208 print("fetch_production(ES-IB-ME)")
209 print(fetch_production('ES-IB-ME'))
210 print("fetch_exchange(ES, ES-IB-MA)")
211 print(fetch_exchange('ES', 'ES-IB-MA'))
212 print("fetch_exchange(ES-IB-MA, ES-IB-ME)")
213 print(fetch_exchange('ES-IB-MA', 'ES-IB-ME'))
214 print("fetch_exchange(ES-IB-MA, ES-IB-IZ)")
215 print(fetch_exchange('ES-IB-MA', 'ES-IB-IZ'))
216 print("fetch_exchange(ES-IB-IZ, ES-IB-FO)")
217 print(fetch_exchange('ES-IB-IZ', 'ES-IB-FO'))
218
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsers/ES_IB.py b/parsers/ES_IB.py
--- a/parsers/ES_IB.py
+++ b/parsers/ES_IB.py
@@ -160,7 +160,7 @@
for response in responses:
if sorted_zone_keys == 'ES-IB-MA->ES-IB-ME':
- net_flow = response.link['ma_me']
+ net_flow = -1 * response.link['ma_me']
elif sorted_zone_keys == 'ES-IB-IZ->ES-IB-MA':
net_flow = response.link['ma_ib']
elif sorted_zone_keys == 'ES-IB-FO->ES-IB-IZ':
| {"golden_diff": "diff --git a/parsers/ES_IB.py b/parsers/ES_IB.py\n--- a/parsers/ES_IB.py\n+++ b/parsers/ES_IB.py\n@@ -160,7 +160,7 @@\n for response in responses:\n \n if sorted_zone_keys == 'ES-IB-MA->ES-IB-ME':\n- net_flow = response.link['ma_me']\n+ net_flow = -1 * response.link['ma_me']\n elif sorted_zone_keys == 'ES-IB-IZ->ES-IB-MA':\n net_flow = response.link['ma_ib']\n elif sorted_zone_keys == 'ES-IB-FO->ES-IB-IZ':\n", "issue": "The import/export between Menorca and Mallorca appears reversed\nOur data is showing the export going one way, but our data source is showing the reverse according to one user. See screenshots:\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport logging\nfrom arrow import get\nfrom requests import Session\nfrom ree import (Formentera, Ibiza,\n Mallorca, Menorca,\n BalearicIslands)\n# package \"ree\" is used to parse data from www.ree.es // maintained on github by @hectorespert\n\nfrom .lib.exceptions import ParserException\nfrom .lib.validation import validate, validate_production_diffs\n\n## Guess we'll need to figure these out later?! Adapted from ES-CN:\n\n# Minimum valid zone demand. This is used to eliminate some cases\n# where generation for one or more modes is obviously missing.\nFLOORS = {\n 'ES-IB': 0,\n 'ES-IB-FO': 0,\n 'ES-IB-IZ': 0,\n 'ES-IB-MA': 0,\n 'ES-IB-ME': 0,\n}\n\n\ndef fetch_island_data(zone_key, session):\n if zone_key == 'ES-IB-FO':\n formentera_data = Formentera(session, verify=False).get_all()\n if not formentera_data:\n raise ParserException(zone_key, \"Formentera doesn't respond\")\n else:\n return formentera_data\n elif zone_key == 'ES-IB-IZ':\n ibiza_data = Ibiza(session, verify=False).get_all()\n if not ibiza_data:\n raise ParserException(zone_key, \"Party is over, Ibiza doesn't respond\")\n else:\n return ibiza_data\n elif zone_key == 'ES-IB-MA':\n mallorca_data = Mallorca(session, verify=False).get_all()\n if not mallorca_data:\n raise ParserException(zone_key, \"Mallorca doesn't respond\")\n else:\n return mallorca_data\n elif zone_key == 'ES-IB-ME':\n menorca_data = Menorca(session, verify=False).get_all()\n if not menorca_data:\n raise ParserException(zone_key, \"Menorca doesn't respond\")\n else:\n return menorca_data\n elif zone_key == 'ES-IB':\n balearic_islands = BalearicIslands(session, verify=False).get_all()\n if not balearic_islands:\n raise ParserException(zone_key, \"Balearic Islands doesn't respond\")\n else:\n return balearic_islands\n else:\n raise ParserException(zone_key, 'Can\\'t read this country code {0}'.format(zone_key))\n\n\ndef fetch_consumption(zone_key, session=None, target_datetime=None, logger=None):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n ses = session or Session()\n island_data = fetch_island_data(zone_key, ses)\n data = []\n for response in island_data:\n response_data = {\n 'zoneKey': zone_key,\n 'datetime': get(response.timestamp).datetime,\n 'consumption': response.demand,\n 'source': 'demanda.ree.es'\n }\n\n data.append(response_data)\n\n return data\n\n\ndef fetch_production(zone_key, session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n ses = session or Session()\n island_data = fetch_island_data(zone_key, ses)\n data = []\n\n if zone_key == 'ES-IB':\n expected_range = {'coal': (50,600)}\n else:\n expected_range = None\n\n for response in island_data:\n if response.production() >= 0:\n response_data = {\n 'zoneKey': zone_key,\n 'datetime': get(response.timestamp).datetime,\n 'production': {\n 'coal': response.carbon,\n 'gas': round(response.gas + response.combined, 2),\n 'solar': response.solar,\n 'oil': round(response.vapor + response.diesel, 2),\n 'wind': response.wind,\n 'hydro': response.hydraulic,\n 'biomass': response.waste,\n 'nuclear': 0.0,\n 'geothermal': 0.0,\n 'unknown': response.other\n },\n 'storage': {\n 'hydro': 0.0,\n 'battery': 0.0\n },\n 'source': 'demanda.ree.es',\n }\n\n response_data = validate(response_data, logger,\n floor=FLOORS[zone_key],\n expected_range = expected_range)\n\n if response_data:\n # append if valid\n data.append(response_data)\n\n if len(data) > 1:\n # granularity is 10 minutes, drops points with change in coal > 100MW\n data = validate_production_diffs(data, {'coal': 150}, logger)\n\n return data\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):\n\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))\n\n ses = session or Session()\n\n if sorted_zone_keys == 'ES->ES-IB':\n responses = BalearicIslands(ses, verify=False).get_all()\n if not responses:\n raise ParserException(\"ES-IB\", \"No responses\")\n elif sorted_zone_keys == 'ES->ES-IB-MA' or sorted_zone_keys == 'ES-IB-MA->ES-IB-ME' or sorted_zone_keys == 'ES-IB-IZ->ES-IB-MA':\n responses = Mallorca(ses, verify=False).get_all()\n if not responses:\n raise ParserException(\"ES-IB-MA\", \"No responses\")\n elif sorted_zone_keys == 'ES-IB-FO->ES-IB-IZ':\n responses = Formentera(ses, verify=False).get_all()\n if not responses:\n raise ParserException(\"ES-IB-FO\", \"No responses\")\n else:\n raise NotImplementedError('This exchange pair is not implemented')\n\n exchanges = []\n for response in responses:\n\n if sorted_zone_keys == 'ES-IB-MA->ES-IB-ME':\n net_flow = response.link['ma_me']\n elif sorted_zone_keys == 'ES-IB-IZ->ES-IB-MA':\n net_flow = response.link['ma_ib']\n elif sorted_zone_keys == 'ES-IB-FO->ES-IB-IZ':\n net_flow = -1 * response.link['ib_fo']\n else:\n net_flow = response.link['pe_ma']\n\n exchange = {\n 'sortedZoneKeys': sorted_zone_keys,\n 'datetime': get(response.timestamp).datetime,\n 'netFlow': net_flow,\n 'source': 'demanda.ree.es',\n }\n\n exchanges.append(exchange)\n\n return exchanges\n\n\nif __name__ == '__main__':\n session = Session\n print(\"fetch_consumption(ES-IB)\")\n print(fetch_consumption('ES-IB', session))\n\n print(\"fetch_production(ES-IB)\")\n print(fetch_production('ES-IB', session))\n\n print(\"fetch_exchange(ES, ES-IB)\")\n print(fetch_exchange('ES', 'ES-IB', session))\n\n print(\"fetch_consumption(ES-IB-FO)\")\n print(fetch_consumption('ES-IB-FO'))\n print(\"fetch_production(ES-IB-FO)\")\n print(fetch_production('ES-IB-FO'))\n print(\"fetch_consumption(ES-IB-IZ)\")\n print(fetch_consumption('ES-IB-IZ'))\n print(\"fetch_production(ES-IB-IZ)\")\n print(fetch_production('ES-IB-IZ'))\n print(\"fetch_consumption(ES-IB-MA)\")\n print(fetch_consumption('ES-IB-MA'))\n print(\"fetch_production(ES-IB-MA)\")\n print(fetch_production('ES-IB-MA'))\n print(\"fetch_consumption(ES-IB-ME)\")\n print(fetch_consumption('ES-IB-ME'))\n print(\"fetch_production(ES-IB-ME)\")\n print(fetch_production('ES-IB-ME'))\n print(\"fetch_exchange(ES, ES-IB-MA)\")\n print(fetch_exchange('ES', 'ES-IB-MA'))\n print(\"fetch_exchange(ES-IB-MA, ES-IB-ME)\")\n print(fetch_exchange('ES-IB-MA', 'ES-IB-ME'))\n print(\"fetch_exchange(ES-IB-MA, ES-IB-IZ)\")\n print(fetch_exchange('ES-IB-MA', 'ES-IB-IZ'))\n print(\"fetch_exchange(ES-IB-IZ, ES-IB-FO)\")\n print(fetch_exchange('ES-IB-IZ', 'ES-IB-FO'))\n", "path": "parsers/ES_IB.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport logging\nfrom arrow import get\nfrom requests import Session\nfrom ree import (Formentera, Ibiza,\n Mallorca, Menorca,\n BalearicIslands)\n# package \"ree\" is used to parse data from www.ree.es // maintained on github by @hectorespert\n\nfrom .lib.exceptions import ParserException\nfrom .lib.validation import validate, validate_production_diffs\n\n## Guess we'll need to figure these out later?! Adapted from ES-CN:\n\n# Minimum valid zone demand. This is used to eliminate some cases\n# where generation for one or more modes is obviously missing.\nFLOORS = {\n 'ES-IB': 0,\n 'ES-IB-FO': 0,\n 'ES-IB-IZ': 0,\n 'ES-IB-MA': 0,\n 'ES-IB-ME': 0,\n}\n\n\ndef fetch_island_data(zone_key, session):\n if zone_key == 'ES-IB-FO':\n formentera_data = Formentera(session, verify=False).get_all()\n if not formentera_data:\n raise ParserException(zone_key, \"Formentera doesn't respond\")\n else:\n return formentera_data\n elif zone_key == 'ES-IB-IZ':\n ibiza_data = Ibiza(session, verify=False).get_all()\n if not ibiza_data:\n raise ParserException(zone_key, \"Party is over, Ibiza doesn't respond\")\n else:\n return ibiza_data\n elif zone_key == 'ES-IB-MA':\n mallorca_data = Mallorca(session, verify=False).get_all()\n if not mallorca_data:\n raise ParserException(zone_key, \"Mallorca doesn't respond\")\n else:\n return mallorca_data\n elif zone_key == 'ES-IB-ME':\n menorca_data = Menorca(session, verify=False).get_all()\n if not menorca_data:\n raise ParserException(zone_key, \"Menorca doesn't respond\")\n else:\n return menorca_data\n elif zone_key == 'ES-IB':\n balearic_islands = BalearicIslands(session, verify=False).get_all()\n if not balearic_islands:\n raise ParserException(zone_key, \"Balearic Islands doesn't respond\")\n else:\n return balearic_islands\n else:\n raise ParserException(zone_key, 'Can\\'t read this country code {0}'.format(zone_key))\n\n\ndef fetch_consumption(zone_key, session=None, target_datetime=None, logger=None):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n ses = session or Session()\n island_data = fetch_island_data(zone_key, ses)\n data = []\n for response in island_data:\n response_data = {\n 'zoneKey': zone_key,\n 'datetime': get(response.timestamp).datetime,\n 'consumption': response.demand,\n 'source': 'demanda.ree.es'\n }\n\n data.append(response_data)\n\n return data\n\n\ndef fetch_production(zone_key, session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n ses = session or Session()\n island_data = fetch_island_data(zone_key, ses)\n data = []\n\n if zone_key == 'ES-IB':\n expected_range = {'coal': (50,600)}\n else:\n expected_range = None\n\n for response in island_data:\n if response.production() >= 0:\n response_data = {\n 'zoneKey': zone_key,\n 'datetime': get(response.timestamp).datetime,\n 'production': {\n 'coal': response.carbon,\n 'gas': round(response.gas + response.combined, 2),\n 'solar': response.solar,\n 'oil': round(response.vapor + response.diesel, 2),\n 'wind': response.wind,\n 'hydro': response.hydraulic,\n 'biomass': response.waste,\n 'nuclear': 0.0,\n 'geothermal': 0.0,\n 'unknown': response.other\n },\n 'storage': {\n 'hydro': 0.0,\n 'battery': 0.0\n },\n 'source': 'demanda.ree.es',\n }\n\n response_data = validate(response_data, logger,\n floor=FLOORS[zone_key],\n expected_range = expected_range)\n\n if response_data:\n # append if valid\n data.append(response_data)\n\n if len(data) > 1:\n # granularity is 10 minutes, drops points with change in coal > 100MW\n data = validate_production_diffs(data, {'coal': 150}, logger)\n\n return data\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):\n\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))\n\n ses = session or Session()\n\n if sorted_zone_keys == 'ES->ES-IB':\n responses = BalearicIslands(ses, verify=False).get_all()\n if not responses:\n raise ParserException(\"ES-IB\", \"No responses\")\n elif sorted_zone_keys == 'ES->ES-IB-MA' or sorted_zone_keys == 'ES-IB-MA->ES-IB-ME' or sorted_zone_keys == 'ES-IB-IZ->ES-IB-MA':\n responses = Mallorca(ses, verify=False).get_all()\n if not responses:\n raise ParserException(\"ES-IB-MA\", \"No responses\")\n elif sorted_zone_keys == 'ES-IB-FO->ES-IB-IZ':\n responses = Formentera(ses, verify=False).get_all()\n if not responses:\n raise ParserException(\"ES-IB-FO\", \"No responses\")\n else:\n raise NotImplementedError('This exchange pair is not implemented')\n\n exchanges = []\n for response in responses:\n\n if sorted_zone_keys == 'ES-IB-MA->ES-IB-ME':\n net_flow = -1 * response.link['ma_me']\n elif sorted_zone_keys == 'ES-IB-IZ->ES-IB-MA':\n net_flow = response.link['ma_ib']\n elif sorted_zone_keys == 'ES-IB-FO->ES-IB-IZ':\n net_flow = -1 * response.link['ib_fo']\n else:\n net_flow = response.link['pe_ma']\n\n exchange = {\n 'sortedZoneKeys': sorted_zone_keys,\n 'datetime': get(response.timestamp).datetime,\n 'netFlow': net_flow,\n 'source': 'demanda.ree.es',\n }\n\n exchanges.append(exchange)\n\n return exchanges\n\n\nif __name__ == '__main__':\n session = Session\n print(\"fetch_consumption(ES-IB)\")\n print(fetch_consumption('ES-IB', session))\n\n print(\"fetch_production(ES-IB)\")\n print(fetch_production('ES-IB', session))\n\n print(\"fetch_exchange(ES, ES-IB)\")\n print(fetch_exchange('ES', 'ES-IB', session))\n\n print(\"fetch_consumption(ES-IB-FO)\")\n print(fetch_consumption('ES-IB-FO'))\n print(\"fetch_production(ES-IB-FO)\")\n print(fetch_production('ES-IB-FO'))\n print(\"fetch_consumption(ES-IB-IZ)\")\n print(fetch_consumption('ES-IB-IZ'))\n print(\"fetch_production(ES-IB-IZ)\")\n print(fetch_production('ES-IB-IZ'))\n print(\"fetch_consumption(ES-IB-MA)\")\n print(fetch_consumption('ES-IB-MA'))\n print(\"fetch_production(ES-IB-MA)\")\n print(fetch_production('ES-IB-MA'))\n print(\"fetch_consumption(ES-IB-ME)\")\n print(fetch_consumption('ES-IB-ME'))\n print(\"fetch_production(ES-IB-ME)\")\n print(fetch_production('ES-IB-ME'))\n print(\"fetch_exchange(ES, ES-IB-MA)\")\n print(fetch_exchange('ES', 'ES-IB-MA'))\n print(\"fetch_exchange(ES-IB-MA, ES-IB-ME)\")\n print(fetch_exchange('ES-IB-MA', 'ES-IB-ME'))\n print(\"fetch_exchange(ES-IB-MA, ES-IB-IZ)\")\n print(fetch_exchange('ES-IB-MA', 'ES-IB-IZ'))\n print(\"fetch_exchange(ES-IB-IZ, ES-IB-FO)\")\n print(fetch_exchange('ES-IB-IZ', 'ES-IB-FO'))\n", "path": "parsers/ES_IB.py"}]} |
gh_patches_debug_1571 | rasdani/github-patches | git_diff | pypi__warehouse-6337 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Upload to PyPI fails when dependency version contains *
**Describe the bug**
PyPI raises `400 Client error` when uploading a package that specifies requirement using `== 2.*`. This is a valid version specifier referred to [PEP 440](https://www.python.org/dev/peps/pep-0440/#compatible-release).
The whole error is:
```
HTTPError: 400 Client Error: Invalid value for requires_dist. Error: Invalid version: '2.*' for url: https://test.pypi.org/legacy/
```
**Expected behavior**
Upload should pass with no errors. (Note that in the example below the expected behaviour would be to fail with authentication error, as you don't have sufficient permissions on the project.)
**To Reproduce**
This is a minimal reproducer: https://github.com/dblenkus/warehouse-requirements-issue
Install twine with `pip install twine` and try to upload the package with `twine upload -r testpypi dist/*`.
**My Platform**
MacOS 10.14.6
twine 1.13.0
**Additional context**
This has worked few days ago on July 26th 2019.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/forklift/legacy.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import email
14 import hashlib
15 import hmac
16 import os.path
17 import re
18 import tarfile
19 import tempfile
20 import zipfile
21
22 from cgi import FieldStorage, parse_header
23 from itertools import chain
24
25 import packaging.requirements
26 import packaging.specifiers
27 import packaging.utils
28 import packaging.version
29 import pkg_resources
30 import requests
31 import stdlib_list
32 import wtforms
33 import wtforms.validators
34
35 from pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPGone
36 from pyramid.response import Response
37 from pyramid.view import view_config
38 from sqlalchemy import exists, func, orm
39 from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
40
41 from warehouse import forms
42 from warehouse.admin.squats import Squat
43 from warehouse.classifiers.models import Classifier
44 from warehouse.metrics import IMetricsService
45 from warehouse.packaging.interfaces import IFileStorage
46 from warehouse.packaging.models import (
47 BlacklistedProject,
48 Dependency,
49 DependencyKind,
50 Description,
51 File,
52 Filename,
53 JournalEntry,
54 Project,
55 Release,
56 Role,
57 )
58 from warehouse.utils import http, readme
59
60 MAX_FILESIZE = 60 * 1024 * 1024 # 60M
61 MAX_SIGSIZE = 8 * 1024 # 8K
62
63 PATH_HASHER = "blake2_256"
64
65
66 def namespace_stdlib_list(module_list):
67 for module_name in module_list:
68 parts = module_name.split(".")
69 for i, part in enumerate(parts):
70 yield ".".join(parts[: i + 1])
71
72
73 STDLIB_PROHIBITTED = {
74 packaging.utils.canonicalize_name(s.rstrip("-_.").lstrip("-_."))
75 for s in chain.from_iterable(
76 namespace_stdlib_list(stdlib_list.stdlib_list(version))
77 for version in stdlib_list.short_versions
78 )
79 }
80
81 # Wheel platform checking
82
83 # Note: defining new platform ABI compatibility tags that don't
84 # have a python.org binary release to anchor them is a
85 # complex task that needs more than just OS+architecture info.
86 # For Linux specifically, the platform ABI is defined by each
87 # individual distro version, so wheels built on one version may
88 # not even work on older versions of the same distro, let alone
89 # a completely different distro.
90 #
91 # That means new entries should only be added given an
92 # accompanying ABI spec that explains how to build a
93 # compatible binary (see the manylinux specs as examples).
94
95 # These platforms can be handled by a simple static list:
96 _allowed_platforms = {
97 "any",
98 "win32",
99 "win_amd64",
100 "win_ia64",
101 "manylinux1_x86_64",
102 "manylinux1_i686",
103 "manylinux2010_x86_64",
104 "manylinux2010_i686",
105 "linux_armv6l",
106 "linux_armv7l",
107 }
108 # macosx is a little more complicated:
109 _macosx_platform_re = re.compile(r"macosx_10_(\d+)+_(?P<arch>.*)")
110 _macosx_arches = {
111 "ppc",
112 "ppc64",
113 "i386",
114 "x86_64",
115 "intel",
116 "fat",
117 "fat32",
118 "fat64",
119 "universal",
120 }
121
122
123 # Actual checking code;
124 def _valid_platform_tag(platform_tag):
125 if platform_tag in _allowed_platforms:
126 return True
127 m = _macosx_platform_re.match(platform_tag)
128 if m and m.group("arch") in _macosx_arches:
129 return True
130 return False
131
132
133 _error_message_order = ["metadata_version", "name", "version"]
134
135
136 _dist_file_regexes = {
137 # True/False is for legacy or not.
138 True: re.compile(r".+?\.(exe|tar\.gz|bz2|rpm|deb|zip|tgz|egg|dmg|msi|whl)$", re.I),
139 False: re.compile(r".+?\.(tar\.gz|zip|whl|egg)$", re.I),
140 }
141
142
143 _wheel_file_re = re.compile(
144 r"""
145 ^
146 (?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
147 (
148 (-(?P<build>\d.*?))?
149 -(?P<pyver>.+?)
150 -(?P<abi>.+?)
151 -(?P<plat>.+?)
152 (?:\.whl|\.dist-info)
153 )
154 $
155 """,
156 re.VERBOSE,
157 )
158
159
160 _project_name_re = re.compile(
161 r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE
162 )
163
164
165 _legacy_specifier_re = re.compile(r"^(?P<name>\S+)(?: \((?P<specifier>\S+)\))?$")
166
167
168 _valid_description_content_types = {"text/plain", "text/x-rst", "text/markdown"}
169
170 _valid_markdown_variants = {"CommonMark", "GFM"}
171
172
173 def _exc_with_message(exc, message):
174 # The crappy old API that PyPI offered uses the status to pass down
175 # messages to the client. So this function will make that easier to do.
176 resp = exc(message)
177 resp.status = "{} {}".format(resp.status_code, message)
178 return resp
179
180
181 def _validate_pep440_version(form, field):
182 parsed = packaging.version.parse(field.data)
183
184 # Check that this version is a valid PEP 440 version at all.
185 if not isinstance(parsed, packaging.version.Version):
186 raise wtforms.validators.ValidationError(
187 "Start and end with a letter or numeral containing only "
188 "ASCII numeric and '.', '_' and '-'."
189 )
190
191 # Check that this version does not have a PEP 440 local segment attached
192 # to it.
193 if parsed.local is not None:
194 raise wtforms.validators.ValidationError("Can't use PEP 440 local versions.")
195
196
197 def _parse_legacy_requirement(requirement):
198 parsed = _legacy_specifier_re.search(requirement)
199 if parsed is None:
200 raise ValueError("Invalid requirement.")
201 return parsed.groupdict()["name"], parsed.groupdict()["specifier"]
202
203
204 def _validate_pep440_specifier(specifier):
205 try:
206 packaging.specifiers.SpecifierSet(specifier)
207 except packaging.specifiers.InvalidSpecifier:
208 raise wtforms.validators.ValidationError(
209 "Invalid specifier in requirement."
210 ) from None
211
212
213 def _validate_pep440_specifier_field(form, field):
214 return _validate_pep440_specifier(field.data)
215
216
217 def _validate_legacy_non_dist_req(requirement):
218 try:
219 req = packaging.requirements.Requirement(requirement.replace("_", ""))
220 except packaging.requirements.InvalidRequirement:
221 raise wtforms.validators.ValidationError(
222 "Invalid requirement: {!r}".format(requirement)
223 ) from None
224
225 if req.url is not None:
226 raise wtforms.validators.ValidationError(
227 "Can't direct dependency: {!r}".format(requirement)
228 )
229
230 if any(
231 not identifier.isalnum() or identifier[0].isdigit()
232 for identifier in req.name.split(".")
233 ):
234 raise wtforms.validators.ValidationError("Use a valid Python identifier.")
235
236
237 def _validate_legacy_non_dist_req_list(form, field):
238 for datum in field.data:
239 _validate_legacy_non_dist_req(datum)
240
241
242 def _validate_legacy_dist_req(requirement):
243 try:
244 req = packaging.requirements.Requirement(requirement)
245 except packaging.requirements.InvalidRequirement:
246 raise wtforms.validators.ValidationError(
247 "Invalid requirement: {!r}.".format(requirement)
248 ) from None
249
250 if req.url is not None:
251 raise wtforms.validators.ValidationError(
252 "Can't have direct dependency: {!r}".format(requirement)
253 )
254
255 if any(packaging.version.Version(spec.version).local for spec in req.specifier):
256 raise wtforms.validators.ValidationError(
257 "Can't have dependency with local version: {!r}".format(requirement)
258 )
259
260
261 def _validate_legacy_dist_req_list(form, field):
262 for datum in field.data:
263 _validate_legacy_dist_req(datum)
264
265
266 def _validate_requires_external(requirement):
267 name, specifier = _parse_legacy_requirement(requirement)
268
269 # TODO: Is it really reasonable to parse the specifier using PEP 440?
270 if specifier is not None:
271 _validate_pep440_specifier(specifier)
272
273
274 def _validate_requires_external_list(form, field):
275 for datum in field.data:
276 _validate_requires_external(datum)
277
278
279 def _validate_project_url(value):
280 try:
281 label, url = value.split(", ", 1)
282 except ValueError:
283 raise wtforms.validators.ValidationError(
284 "Use both a label and an URL."
285 ) from None
286
287 if not label:
288 raise wtforms.validators.ValidationError("Use a label.")
289
290 if len(label) > 32:
291 raise wtforms.validators.ValidationError("Use 32 characters or less.")
292
293 if not url:
294 raise wtforms.validators.ValidationError("Use an URL.")
295
296 if not http.is_valid_uri(url, require_authority=False):
297 raise wtforms.validators.ValidationError("Use valid URL.")
298
299
300 def _validate_project_url_list(form, field):
301 for datum in field.data:
302 _validate_project_url(datum)
303
304
305 def _validate_rfc822_email_field(form, field):
306 email_validator = wtforms.validators.Email(message="Use a valid email address")
307 addresses = email.utils.getaddresses([field.data])
308
309 for real_name, address in addresses:
310 email_validator(form, type("field", (), {"data": address}))
311
312
313 def _validate_description_content_type(form, field):
314 def _raise(message):
315 raise wtforms.validators.ValidationError(
316 f"Invalid description content type: {message}"
317 )
318
319 content_type, parameters = parse_header(field.data)
320 if content_type not in _valid_description_content_types:
321 _raise("type/subtype is not valid")
322
323 charset = parameters.get("charset")
324 if charset and charset != "UTF-8":
325 _raise("Use a valid charset")
326
327 variant = parameters.get("variant")
328 if (
329 content_type == "text/markdown"
330 and variant
331 and variant not in _valid_markdown_variants
332 ):
333 _raise(
334 "Use a valid variant, expected one of {}".format(
335 ", ".join(_valid_markdown_variants)
336 )
337 )
338
339
340 def _construct_dependencies(form, types):
341 for name, kind in types.items():
342 for item in getattr(form, name).data:
343 yield Dependency(kind=kind.value, specifier=item)
344
345
346 class ListField(wtforms.Field):
347 def process_formdata(self, valuelist):
348 self.data = [v.strip() for v in valuelist if v.strip()]
349
350
351 # TODO: Eventually this whole validation thing should move to the packaging
352 # library and we should just call that. However until PEP 426 is done
353 # that library won't have an API for this.
354 class MetadataForm(forms.Form):
355
356 # Metadata version
357 metadata_version = wtforms.StringField(
358 description="Metadata-Version",
359 validators=[
360 wtforms.validators.DataRequired(),
361 wtforms.validators.AnyOf(
362 # Note: This isn't really Metadata 2.0, however bdist_wheel
363 # claims it is producing a Metadata 2.0 metadata when in
364 # reality it's more like 1.2 with some extensions.
365 ["1.0", "1.1", "1.2", "2.0", "2.1"],
366 message="Use a known metadata version.",
367 ),
368 ],
369 )
370
371 # Identity Project and Release
372 name = wtforms.StringField(
373 description="Name",
374 validators=[
375 wtforms.validators.DataRequired(),
376 wtforms.validators.Regexp(
377 _project_name_re,
378 re.IGNORECASE,
379 message=(
380 "Start and end with a letter or numeral containing "
381 "only ASCII numeric and '.', '_' and '-'."
382 ),
383 ),
384 ],
385 )
386 version = wtforms.StringField(
387 description="Version",
388 validators=[
389 wtforms.validators.DataRequired(),
390 wtforms.validators.Regexp(
391 r"^(?!\s).*(?<!\s)$",
392 message="Can't have leading or trailing whitespace.",
393 ),
394 _validate_pep440_version,
395 ],
396 )
397
398 # Additional Release metadata
399 summary = wtforms.StringField(
400 description="Summary",
401 validators=[
402 wtforms.validators.Optional(),
403 wtforms.validators.Length(max=512),
404 wtforms.validators.Regexp(
405 r"^.+$", # Rely on the fact that . doesn't match a newline.
406 message="Use a single line only.",
407 ),
408 ],
409 )
410 description = wtforms.StringField(
411 description="Description", validators=[wtforms.validators.Optional()]
412 )
413 author = wtforms.StringField(
414 description="Author", validators=[wtforms.validators.Optional()]
415 )
416 description_content_type = wtforms.StringField(
417 description="Description-Content-Type",
418 validators=[wtforms.validators.Optional(), _validate_description_content_type],
419 )
420 author_email = wtforms.StringField(
421 description="Author-email",
422 validators=[wtforms.validators.Optional(), _validate_rfc822_email_field],
423 )
424 maintainer = wtforms.StringField(
425 description="Maintainer", validators=[wtforms.validators.Optional()]
426 )
427 maintainer_email = wtforms.StringField(
428 description="Maintainer-email",
429 validators=[wtforms.validators.Optional(), _validate_rfc822_email_field],
430 )
431 license = wtforms.StringField(
432 description="License", validators=[wtforms.validators.Optional()]
433 )
434 keywords = wtforms.StringField(
435 description="Keywords", validators=[wtforms.validators.Optional()]
436 )
437 classifiers = wtforms.fields.SelectMultipleField(description="Classifier")
438 platform = wtforms.StringField(
439 description="Platform", validators=[wtforms.validators.Optional()]
440 )
441
442 # URLs
443 home_page = wtforms.StringField(
444 description="Home-Page",
445 validators=[wtforms.validators.Optional(), forms.URIValidator()],
446 )
447 download_url = wtforms.StringField(
448 description="Download-URL",
449 validators=[wtforms.validators.Optional(), forms.URIValidator()],
450 )
451
452 # Dependency Information
453 requires_python = wtforms.StringField(
454 description="Requires-Python",
455 validators=[wtforms.validators.Optional(), _validate_pep440_specifier_field],
456 )
457
458 # File information
459 pyversion = wtforms.StringField(validators=[wtforms.validators.Optional()])
460 filetype = wtforms.StringField(
461 validators=[
462 wtforms.validators.DataRequired(),
463 wtforms.validators.AnyOf(
464 [
465 "bdist_dmg",
466 "bdist_dumb",
467 "bdist_egg",
468 "bdist_msi",
469 "bdist_rpm",
470 "bdist_wheel",
471 "bdist_wininst",
472 "sdist",
473 ],
474 message="Use a known file type.",
475 ),
476 ]
477 )
478 comment = wtforms.StringField(validators=[wtforms.validators.Optional()])
479 md5_digest = wtforms.StringField(validators=[wtforms.validators.Optional()])
480 sha256_digest = wtforms.StringField(
481 validators=[
482 wtforms.validators.Optional(),
483 wtforms.validators.Regexp(
484 r"^[A-F0-9]{64}$",
485 re.IGNORECASE,
486 message="Use a valid, hex-encoded, SHA256 message digest.",
487 ),
488 ]
489 )
490 blake2_256_digest = wtforms.StringField(
491 validators=[
492 wtforms.validators.Optional(),
493 wtforms.validators.Regexp(
494 r"^[A-F0-9]{64}$",
495 re.IGNORECASE,
496 message="Use a valid, hex-encoded, BLAKE2 message digest.",
497 ),
498 ]
499 )
500
501 # Legacy dependency information
502 requires = ListField(
503 validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]
504 )
505 provides = ListField(
506 validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]
507 )
508 obsoletes = ListField(
509 validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]
510 )
511
512 # Newer dependency information
513 requires_dist = ListField(
514 description="Requires-Dist",
515 validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],
516 )
517 provides_dist = ListField(
518 description="Provides-Dist",
519 validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],
520 )
521 obsoletes_dist = ListField(
522 description="Obsoletes-Dist",
523 validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],
524 )
525 requires_external = ListField(
526 description="Requires-External",
527 validators=[wtforms.validators.Optional(), _validate_requires_external_list],
528 )
529
530 # Newer metadata information
531 project_urls = ListField(
532 description="Project-URL",
533 validators=[wtforms.validators.Optional(), _validate_project_url_list],
534 )
535
536 def full_validate(self):
537 # All non source releases *must* have a pyversion
538 if (
539 self.filetype.data
540 and self.filetype.data != "sdist"
541 and not self.pyversion.data
542 ):
543 raise wtforms.validators.ValidationError(
544 "Python version is required for binary distribution uploads."
545 )
546
547 # All source releases *must* have a pyversion of "source"
548 if self.filetype.data == "sdist":
549 if not self.pyversion.data:
550 self.pyversion.data = "source"
551 elif self.pyversion.data != "source":
552 raise wtforms.validators.ValidationError(
553 "Use 'source' as Python version for an sdist."
554 )
555
556 # We *must* have at least one digest to verify against.
557 if not self.md5_digest.data and not self.sha256_digest.data:
558 raise wtforms.validators.ValidationError(
559 "Include at least one message digest."
560 )
561
562
563 _safe_zipnames = re.compile(r"(purelib|platlib|headers|scripts|data).+", re.I)
564 # .tar uncompressed, .tar.gz .tgz, .tar.bz2 .tbz2
565 _tar_filenames_re = re.compile(r"\.(?:tar$|t(?:ar\.)?(?P<z_type>gz|bz2)$)")
566
567
568 def _is_valid_dist_file(filename, filetype):
569 """
570 Perform some basic checks to see whether the indicated file could be
571 a valid distribution file.
572 """
573
574 # If our file is a zipfile, then ensure that it's members are only
575 # compressed with supported compression methods.
576 if zipfile.is_zipfile(filename):
577 with zipfile.ZipFile(filename) as zfp:
578 for zinfo in zfp.infolist():
579 if zinfo.compress_type not in {
580 zipfile.ZIP_STORED,
581 zipfile.ZIP_DEFLATED,
582 }:
583 return False
584
585 tar_fn_match = _tar_filenames_re.search(filename)
586 if tar_fn_match:
587 # Ensure that this is a valid tar file, and that it contains PKG-INFO.
588 z_type = tar_fn_match.group("z_type") or ""
589 try:
590 with tarfile.open(filename, f"r:{z_type}") as tar:
591 # This decompresses the entire stream to validate it and the
592 # tar within. Easy CPU DoS attack. :/
593 bad_tar = True
594 member = tar.next()
595 while member:
596 parts = os.path.split(member.name)
597 if len(parts) == 2 and parts[1] == "PKG-INFO":
598 bad_tar = False
599 member = tar.next()
600 if bad_tar:
601 return False
602 except tarfile.ReadError:
603 return False
604 elif filename.endswith(".exe"):
605 # The only valid filetype for a .exe file is "bdist_wininst".
606 if filetype != "bdist_wininst":
607 return False
608
609 # Ensure that the .exe is a valid zip file, and that all of the files
610 # contained within it have safe filenames.
611 try:
612 with zipfile.ZipFile(filename, "r") as zfp:
613 # We need the no branch below to work around a bug in
614 # coverage.py where it's detecting a missed branch where there
615 # isn't one.
616 for zipname in zfp.namelist(): # pragma: no branch
617 if not _safe_zipnames.match(zipname):
618 return False
619 except zipfile.BadZipFile:
620 return False
621 elif filename.endswith(".msi"):
622 # The only valid filetype for a .msi is "bdist_msi"
623 if filetype != "bdist_msi":
624 return False
625
626 # Check the first 8 bytes of the MSI file. This was taken from the
627 # legacy implementation of PyPI which itself took it from the
628 # implementation of `file` I believe.
629 with open(filename, "rb") as fp:
630 if fp.read(8) != b"\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1":
631 return False
632 elif filename.endswith(".zip") or filename.endswith(".egg"):
633 # Ensure that the .zip/.egg is a valid zip file, and that it has a
634 # PKG-INFO file.
635 try:
636 with zipfile.ZipFile(filename, "r") as zfp:
637 for zipname in zfp.namelist():
638 parts = os.path.split(zipname)
639 if len(parts) == 2 and parts[1] == "PKG-INFO":
640 # We need the no branch below to work around a bug in
641 # coverage.py where it's detecting a missed branch
642 # where there isn't one.
643 break # pragma: no branch
644 else:
645 return False
646 except zipfile.BadZipFile:
647 return False
648 elif filename.endswith(".whl"):
649 # Ensure that the .whl is a valid zip file, and that it has a WHEEL
650 # file.
651 try:
652 with zipfile.ZipFile(filename, "r") as zfp:
653 for zipname in zfp.namelist():
654 parts = os.path.split(zipname)
655 if len(parts) == 2 and parts[1] == "WHEEL":
656 # We need the no branch below to work around a bug in
657 # coverage.py where it's detecting a missed branch
658 # where there isn't one.
659 break # pragma: no branch
660 else:
661 return False
662 except zipfile.BadZipFile:
663 return False
664
665 # If we haven't yet decided it's not valid, then we'll assume it is and
666 # allow it.
667 return True
668
669
670 def _is_duplicate_file(db_session, filename, hashes):
671 """
672 Check to see if file already exists, and if it's content matches.
673 A file is considered to exist if its filename *or* blake2 digest are
674 present in a file row in the database.
675
676 Returns:
677 - True: This file is a duplicate and all further processing should halt.
678 - False: This file exists, but it is not a duplicate.
679 - None: This file does not exist.
680 """
681
682 file_ = (
683 db_session.query(File)
684 .filter(
685 (File.filename == filename)
686 | (File.blake2_256_digest == hashes["blake2_256"])
687 )
688 .first()
689 )
690
691 if file_ is not None:
692 return (
693 file_.filename == filename
694 and file_.sha256_digest == hashes["sha256"]
695 and file_.md5_digest == hashes["md5"]
696 and file_.blake2_256_digest == hashes["blake2_256"]
697 )
698
699 return None
700
701
702 def _no_deprecated_classifiers(request):
703 deprecated_classifiers = {
704 classifier.classifier
705 for classifier in (
706 request.db.query(Classifier.classifier)
707 .filter(Classifier.deprecated.is_(True))
708 .all()
709 )
710 }
711
712 def validate_no_deprecated_classifiers(form, field):
713 invalid_classifiers = set(field.data or []) & deprecated_classifiers
714 if invalid_classifiers:
715 first_invalid_classifier = sorted(invalid_classifiers)[0]
716 host = request.registry.settings.get("warehouse.domain")
717 classifiers_url = request.route_url("classifiers", _host=host)
718
719 raise wtforms.validators.ValidationError(
720 f"Classifier {first_invalid_classifier!r} has been "
721 f"deprecated, see {classifiers_url} for a list of valid "
722 "classifiers."
723 )
724
725 return validate_no_deprecated_classifiers
726
727
728 @view_config(
729 route_name="forklift.legacy.file_upload",
730 uses_session=True,
731 require_csrf=False,
732 require_methods=["POST"],
733 )
734 def file_upload(request):
735 # If we're in read-only mode, let upload clients know
736 if request.flags.enabled("read-only"):
737 raise _exc_with_message(
738 HTTPForbidden, "Read-only mode: Uploads are temporarily disabled"
739 )
740
741 # Log an attempt to upload
742 metrics = request.find_service(IMetricsService, context=None)
743 metrics.increment("warehouse.upload.attempt")
744
745 # Before we do anything, if there isn't an authenticated user with this
746 # request, then we'll go ahead and bomb out.
747 if request.authenticated_userid is None:
748 raise _exc_with_message(
749 HTTPForbidden, "Invalid or non-existent authentication information."
750 )
751
752 # Ensure that user has a verified, primary email address. This should both
753 # reduce the ease of spam account creation and activity, as well as act as
754 # a forcing function for https://github.com/pypa/warehouse/issues/3632.
755 # TODO: Once https://github.com/pypa/warehouse/issues/3632 has been solved,
756 # we might consider a different condition, possibly looking at
757 # User.is_active instead.
758 if not (request.user.primary_email and request.user.primary_email.verified):
759 raise _exc_with_message(
760 HTTPBadRequest,
761 (
762 "User {!r} does not have a verified primary email address. "
763 "Please add a verified primary email before attempting to "
764 "upload to PyPI. See {project_help} for more information."
765 "for more information."
766 ).format(
767 request.user.username,
768 project_help=request.help_url(_anchor="verified-email"),
769 ),
770 ) from None
771
772 # Do some cleanup of the various form fields
773 for key in list(request.POST):
774 value = request.POST.get(key)
775 if isinstance(value, str):
776 # distutils "helpfully" substitutes unknown, but "required" values
777 # with the string "UNKNOWN". This is basically never what anyone
778 # actually wants so we'll just go ahead and delete anything whose
779 # value is UNKNOWN.
780 if value.strip() == "UNKNOWN":
781 del request.POST[key]
782
783 # Escape NUL characters, which psycopg doesn't like
784 if "\x00" in value:
785 request.POST[key] = value.replace("\x00", "\\x00")
786
787 # We require protocol_version 1, it's the only supported version however
788 # passing a different version should raise an error.
789 if request.POST.get("protocol_version", "1") != "1":
790 raise _exc_with_message(HTTPBadRequest, "Unknown protocol version.")
791
792 # Check if any fields were supplied as a tuple and have become a
793 # FieldStorage. The 'content' and 'gpg_signature' fields _should_ be a
794 # FieldStorage, however.
795 # ref: https://github.com/pypa/warehouse/issues/2185
796 # ref: https://github.com/pypa/warehouse/issues/2491
797 for field in set(request.POST) - {"content", "gpg_signature"}:
798 values = request.POST.getall(field)
799 if any(isinstance(value, FieldStorage) for value in values):
800 raise _exc_with_message(HTTPBadRequest, f"{field}: Should not be a tuple.")
801
802 # Look up all of the valid classifiers
803 all_classifiers = request.db.query(Classifier).all()
804
805 # Validate and process the incoming metadata.
806 form = MetadataForm(request.POST)
807
808 # Add a validator for deprecated classifiers
809 form.classifiers.validators.append(_no_deprecated_classifiers(request))
810
811 form.classifiers.choices = [(c.classifier, c.classifier) for c in all_classifiers]
812 if not form.validate():
813 for field_name in _error_message_order:
814 if field_name in form.errors:
815 break
816 else:
817 field_name = sorted(form.errors.keys())[0]
818
819 if field_name in form:
820 field = form[field_name]
821 if field.description and isinstance(field, wtforms.StringField):
822 error_message = (
823 "{value!r} is an invalid value for {field}. ".format(
824 value=field.data, field=field.description
825 )
826 + "Error: {} ".format(form.errors[field_name][0])
827 + "See "
828 "https://packaging.python.org/specifications/core-metadata"
829 )
830 else:
831 error_message = "Invalid value for {field}. Error: {msgs[0]}".format(
832 field=field_name, msgs=form.errors[field_name]
833 )
834 else:
835 error_message = "Error: {}".format(form.errors[field_name][0])
836
837 raise _exc_with_message(HTTPBadRequest, error_message)
838
839 # Ensure that we have file data in the request.
840 if "content" not in request.POST:
841 raise _exc_with_message(HTTPBadRequest, "Upload payload does not have a file.")
842
843 # Look up the project first before doing anything else, this is so we can
844 # automatically register it if we need to and can check permissions before
845 # going any further.
846 try:
847 project = (
848 request.db.query(Project)
849 .filter(
850 Project.normalized_name == func.normalize_pep426_name(form.name.data)
851 )
852 .one()
853 )
854 except NoResultFound:
855 # Check for AdminFlag set by a PyPI Administrator disabling new project
856 # registration, reasons for this include Spammers, security
857 # vulnerabilities, or just wanting to be lazy and not worry ;)
858 if request.flags.enabled("disallow-new-project-registration"):
859 raise _exc_with_message(
860 HTTPForbidden,
861 (
862 "New project registration temporarily disabled. "
863 "See {projecthelp} for details"
864 ).format(projecthelp=request.help_url(_anchor="admin-intervention")),
865 ) from None
866
867 # Before we create the project, we're going to check our blacklist to
868 # see if this project is even allowed to be registered. If it is not,
869 # then we're going to deny the request to create this project.
870 if request.db.query(
871 exists().where(
872 BlacklistedProject.name == func.normalize_pep426_name(form.name.data)
873 )
874 ).scalar():
875 raise _exc_with_message(
876 HTTPBadRequest,
877 (
878 "The name {name!r} isn't allowed. "
879 "See {projecthelp} "
880 "for more information."
881 ).format(
882 name=form.name.data,
883 projecthelp=request.help_url(_anchor="project-name"),
884 ),
885 ) from None
886
887 # Also check for collisions with Python Standard Library modules.
888 if packaging.utils.canonicalize_name(form.name.data) in STDLIB_PROHIBITTED:
889 raise _exc_with_message(
890 HTTPBadRequest,
891 (
892 "The name {name!r} isn't allowed (conflict with Python "
893 "Standard Library module name). See "
894 "{projecthelp} for more information."
895 ).format(
896 name=form.name.data,
897 projecthelp=request.help_url(_anchor="project-name"),
898 ),
899 ) from None
900
901 # The project doesn't exist in our database, so first we'll check for
902 # projects with a similar name
903 squattees = (
904 request.db.query(Project)
905 .filter(
906 func.levenshtein(
907 Project.normalized_name, func.normalize_pep426_name(form.name.data)
908 )
909 <= 2
910 )
911 .all()
912 )
913
914 # Next we'll create the project
915 project = Project(name=form.name.data)
916 request.db.add(project)
917
918 # Now that the project exists, add any squats which it is the squatter for
919 for squattee in squattees:
920 request.db.add(Squat(squatter=project, squattee=squattee))
921
922 # Then we'll add a role setting the current user as the "Owner" of the
923 # project.
924 request.db.add(Role(user=request.user, project=project, role_name="Owner"))
925 # TODO: This should be handled by some sort of database trigger or a
926 # SQLAlchemy hook or the like instead of doing it inline in this
927 # view.
928 request.db.add(
929 JournalEntry(
930 name=project.name,
931 action="create",
932 submitted_by=request.user,
933 submitted_from=request.remote_addr,
934 )
935 )
936 request.db.add(
937 JournalEntry(
938 name=project.name,
939 action="add Owner {}".format(request.user.username),
940 submitted_by=request.user,
941 submitted_from=request.remote_addr,
942 )
943 )
944
945 # Check that the user has permission to do things to this project, if this
946 # is a new project this will act as a sanity check for the role we just
947 # added above.
948 if not request.has_permission("upload", project):
949 raise _exc_with_message(
950 HTTPForbidden,
951 (
952 "The credential associated with user '{0}' "
953 "isn't allowed to upload to project '{1}'. "
954 "See {2} for more information."
955 ).format(
956 request.user.username,
957 project.name,
958 request.help_url(_anchor="project-name"),
959 ),
960 )
961
962 # Update name if it differs but is still equivalent. We don't need to check if
963 # they are equivalent when normalized because that's already been done when we
964 # queried for the project.
965 if project.name != form.name.data:
966 project.name = form.name.data
967
968 # Render our description so we can save from having to render this data every time
969 # we load a project description page.
970 rendered = None
971 if form.description.data:
972 description_content_type = form.description_content_type.data
973 if not description_content_type:
974 description_content_type = "text/x-rst"
975
976 rendered = readme.render(
977 form.description.data, description_content_type, use_fallback=False
978 )
979
980 # Uploading should prevent broken rendered descriptions.
981 if rendered is None:
982 if form.description_content_type.data:
983 message = (
984 "The description failed to render "
985 "for '{description_content_type}'."
986 ).format(description_content_type=description_content_type)
987 else:
988 message = (
989 "The description failed to render "
990 "in the default format of reStructuredText."
991 )
992 raise _exc_with_message(
993 HTTPBadRequest,
994 "{message} See {projecthelp} for more information.".format(
995 message=message,
996 projecthelp=request.help_url(_anchor="description-content-type"),
997 ),
998 ) from None
999
1000 try:
1001 canonical_version = packaging.utils.canonicalize_version(form.version.data)
1002 release = (
1003 request.db.query(Release)
1004 .filter(
1005 (Release.project == project)
1006 & (Release.canonical_version == canonical_version)
1007 )
1008 .one()
1009 )
1010 except MultipleResultsFound:
1011 # There are multiple releases of this project which have the same
1012 # canonical version that were uploaded before we checked for
1013 # canonical version equivalence, so return the exact match instead
1014 release = (
1015 request.db.query(Release)
1016 .filter(
1017 (Release.project == project) & (Release.version == form.version.data)
1018 )
1019 .one()
1020 )
1021 except NoResultFound:
1022 release = Release(
1023 project=project,
1024 _classifiers=[
1025 c for c in all_classifiers if c.classifier in form.classifiers.data
1026 ],
1027 dependencies=list(
1028 _construct_dependencies(
1029 form,
1030 {
1031 "requires": DependencyKind.requires,
1032 "provides": DependencyKind.provides,
1033 "obsoletes": DependencyKind.obsoletes,
1034 "requires_dist": DependencyKind.requires_dist,
1035 "provides_dist": DependencyKind.provides_dist,
1036 "obsoletes_dist": DependencyKind.obsoletes_dist,
1037 "requires_external": DependencyKind.requires_external,
1038 "project_urls": DependencyKind.project_url,
1039 },
1040 )
1041 ),
1042 canonical_version=canonical_version,
1043 description=Description(
1044 content_type=form.description_content_type.data,
1045 raw=form.description.data or "",
1046 html=rendered or "",
1047 rendered_by=readme.renderer_version(),
1048 ),
1049 **{
1050 k: getattr(form, k).data
1051 for k in {
1052 # This is a list of all the fields in the form that we
1053 # should pull off and insert into our new release.
1054 "version",
1055 "summary",
1056 "license",
1057 "author",
1058 "author_email",
1059 "maintainer",
1060 "maintainer_email",
1061 "keywords",
1062 "platform",
1063 "home_page",
1064 "download_url",
1065 "requires_python",
1066 }
1067 },
1068 uploader=request.user,
1069 uploaded_via=request.user_agent,
1070 )
1071 request.db.add(release)
1072 # TODO: This should be handled by some sort of database trigger or
1073 # a SQLAlchemy hook or the like instead of doing it inline in
1074 # this view.
1075 request.db.add(
1076 JournalEntry(
1077 name=release.project.name,
1078 version=release.version,
1079 action="new release",
1080 submitted_by=request.user,
1081 submitted_from=request.remote_addr,
1082 )
1083 )
1084
1085 # TODO: We need a better solution to this than to just do it inline inside
1086 # this method. Ideally the version field would just be sortable, but
1087 # at least this should be some sort of hook or trigger.
1088 releases = (
1089 request.db.query(Release)
1090 .filter(Release.project == project)
1091 .options(orm.load_only(Release._pypi_ordering))
1092 .all()
1093 )
1094 for i, r in enumerate(
1095 sorted(releases, key=lambda x: packaging.version.parse(x.version))
1096 ):
1097 r._pypi_ordering = i
1098
1099 # Pull the filename out of our POST data.
1100 filename = request.POST["content"].filename
1101
1102 # Make sure that the filename does not contain any path separators.
1103 if "/" in filename or "\\" in filename:
1104 raise _exc_with_message(
1105 HTTPBadRequest, "Cannot upload a file with '/' or '\\' in the name."
1106 )
1107
1108 # Make sure the filename ends with an allowed extension.
1109 if _dist_file_regexes[project.allow_legacy_files].search(filename) is None:
1110 raise _exc_with_message(
1111 HTTPBadRequest,
1112 "Invalid file extension: Use .egg, .tar.gz, .whl or .zip "
1113 "extension. (https://www.python.org/dev/peps/pep-0527)",
1114 )
1115
1116 # Make sure that our filename matches the project that it is being uploaded
1117 # to.
1118 prefix = pkg_resources.safe_name(project.name).lower()
1119 if not pkg_resources.safe_name(filename).lower().startswith(prefix):
1120 raise _exc_with_message(
1121 HTTPBadRequest,
1122 "Start filename for {!r} with {!r}.".format(project.name, prefix),
1123 )
1124
1125 # Check the content type of what is being uploaded
1126 if not request.POST["content"].type or request.POST["content"].type.startswith(
1127 "image/"
1128 ):
1129 raise _exc_with_message(HTTPBadRequest, "Invalid distribution file.")
1130
1131 # Ensure that the package filetype is allowed.
1132 # TODO: Once PEP 527 is completely implemented we should be able to delete
1133 # this and just move it into the form itself.
1134 if not project.allow_legacy_files and form.filetype.data not in {
1135 "sdist",
1136 "bdist_wheel",
1137 "bdist_egg",
1138 }:
1139 raise _exc_with_message(HTTPBadRequest, "Unknown type of file.")
1140
1141 # The project may or may not have a file size specified on the project, if
1142 # it does then it may or may not be smaller or larger than our global file
1143 # size limits.
1144 file_size_limit = max(filter(None, [MAX_FILESIZE, project.upload_limit]))
1145
1146 with tempfile.TemporaryDirectory() as tmpdir:
1147 temporary_filename = os.path.join(tmpdir, filename)
1148
1149 # Buffer the entire file onto disk, checking the hash of the file as we
1150 # go along.
1151 with open(temporary_filename, "wb") as fp:
1152 file_size = 0
1153 file_hashes = {
1154 "md5": hashlib.md5(),
1155 "sha256": hashlib.sha256(),
1156 "blake2_256": hashlib.blake2b(digest_size=256 // 8),
1157 }
1158 for chunk in iter(lambda: request.POST["content"].file.read(8096), b""):
1159 file_size += len(chunk)
1160 if file_size > file_size_limit:
1161 raise _exc_with_message(
1162 HTTPBadRequest,
1163 "File too large. "
1164 + "Limit for project {name!r} is {limit} MB. ".format(
1165 name=project.name, limit=file_size_limit // (1024 * 1024)
1166 )
1167 + "See "
1168 + request.help_url(_anchor="file-size-limit"),
1169 )
1170 fp.write(chunk)
1171 for hasher in file_hashes.values():
1172 hasher.update(chunk)
1173
1174 # Take our hash functions and compute the final hashes for them now.
1175 file_hashes = {k: h.hexdigest().lower() for k, h in file_hashes.items()}
1176
1177 # Actually verify the digests that we've gotten. We're going to use
1178 # hmac.compare_digest even though we probably don't actually need to
1179 # because it's better safe than sorry. In the case of multiple digests
1180 # we expect them all to be given.
1181 if not all(
1182 [
1183 hmac.compare_digest(
1184 getattr(form, "{}_digest".format(digest_name)).data.lower(),
1185 digest_value,
1186 )
1187 for digest_name, digest_value in file_hashes.items()
1188 if getattr(form, "{}_digest".format(digest_name)).data
1189 ]
1190 ):
1191 raise _exc_with_message(
1192 HTTPBadRequest,
1193 "The digest supplied does not match a digest calculated "
1194 "from the uploaded file.",
1195 )
1196
1197 # Check to see if the file that was uploaded exists already or not.
1198 is_duplicate = _is_duplicate_file(request.db, filename, file_hashes)
1199 if is_duplicate:
1200 return Response()
1201 elif is_duplicate is not None:
1202 raise _exc_with_message(
1203 HTTPBadRequest,
1204 # Note: Changing this error message to something that doesn't
1205 # start with "File already exists" will break the
1206 # --skip-existing functionality in twine
1207 # ref: https://github.com/pypa/warehouse/issues/3482
1208 # ref: https://github.com/pypa/twine/issues/332
1209 "File already exists. See "
1210 + request.help_url(_anchor="file-name-reuse"),
1211 )
1212
1213 # Check to see if the file that was uploaded exists in our filename log
1214 if request.db.query(
1215 request.db.query(Filename).filter(Filename.filename == filename).exists()
1216 ).scalar():
1217 raise _exc_with_message(
1218 HTTPBadRequest,
1219 "This filename has already been used, use a "
1220 "different version. "
1221 "See " + request.help_url(_anchor="file-name-reuse"),
1222 )
1223
1224 # Check to see if uploading this file would create a duplicate sdist
1225 # for the current release.
1226 if (
1227 form.filetype.data == "sdist"
1228 and request.db.query(
1229 request.db.query(File)
1230 .filter((File.release == release) & (File.packagetype == "sdist"))
1231 .exists()
1232 ).scalar()
1233 ):
1234 raise _exc_with_message(
1235 HTTPBadRequest, "Only one sdist may be uploaded per release."
1236 )
1237
1238 # Check the file to make sure it is a valid distribution file.
1239 if not _is_valid_dist_file(temporary_filename, form.filetype.data):
1240 raise _exc_with_message(HTTPBadRequest, "Invalid distribution file.")
1241
1242 # Check that if it's a binary wheel, it's on a supported platform
1243 if filename.endswith(".whl"):
1244 wheel_info = _wheel_file_re.match(filename)
1245 plats = wheel_info.group("plat").split(".")
1246 for plat in plats:
1247 if not _valid_platform_tag(plat):
1248 raise _exc_with_message(
1249 HTTPBadRequest,
1250 "Binary wheel '{filename}' has an unsupported "
1251 "platform tag '{plat}'.".format(filename=filename, plat=plat),
1252 )
1253
1254 # Also buffer the entire signature file to disk.
1255 if "gpg_signature" in request.POST:
1256 has_signature = True
1257 with open(os.path.join(tmpdir, filename + ".asc"), "wb") as fp:
1258 signature_size = 0
1259 for chunk in iter(
1260 lambda: request.POST["gpg_signature"].file.read(8096), b""
1261 ):
1262 signature_size += len(chunk)
1263 if signature_size > MAX_SIGSIZE:
1264 raise _exc_with_message(HTTPBadRequest, "Signature too large.")
1265 fp.write(chunk)
1266
1267 # Check whether signature is ASCII armored
1268 with open(os.path.join(tmpdir, filename + ".asc"), "rb") as fp:
1269 if not fp.read().startswith(b"-----BEGIN PGP SIGNATURE-----"):
1270 raise _exc_with_message(
1271 HTTPBadRequest, "PGP signature isn't ASCII armored."
1272 )
1273 else:
1274 has_signature = False
1275
1276 # TODO: This should be handled by some sort of database trigger or a
1277 # SQLAlchemy hook or the like instead of doing it inline in this
1278 # view.
1279 request.db.add(Filename(filename=filename))
1280
1281 # Store the information about the file in the database.
1282 file_ = File(
1283 release=release,
1284 filename=filename,
1285 python_version=form.pyversion.data,
1286 packagetype=form.filetype.data,
1287 comment_text=form.comment.data,
1288 size=file_size,
1289 has_signature=bool(has_signature),
1290 md5_digest=file_hashes["md5"],
1291 sha256_digest=file_hashes["sha256"],
1292 blake2_256_digest=file_hashes["blake2_256"],
1293 # Figure out what our filepath is going to be, we're going to use a
1294 # directory structure based on the hash of the file contents. This
1295 # will ensure that the contents of the file cannot change without
1296 # it also changing the path that the file is saved too.
1297 path="/".join(
1298 [
1299 file_hashes[PATH_HASHER][:2],
1300 file_hashes[PATH_HASHER][2:4],
1301 file_hashes[PATH_HASHER][4:],
1302 filename,
1303 ]
1304 ),
1305 uploaded_via=request.user_agent,
1306 )
1307 request.db.add(file_)
1308
1309 # TODO: This should be handled by some sort of database trigger or a
1310 # SQLAlchemy hook or the like instead of doing it inline in this
1311 # view.
1312 request.db.add(
1313 JournalEntry(
1314 name=release.project.name,
1315 version=release.version,
1316 action="add {python_version} file {filename}".format(
1317 python_version=file_.python_version, filename=file_.filename
1318 ),
1319 submitted_by=request.user,
1320 submitted_from=request.remote_addr,
1321 )
1322 )
1323
1324 # TODO: We need a better answer about how to make this transactional so
1325 # this won't take affect until after a commit has happened, for
1326 # now we'll just ignore it and save it before the transaction is
1327 # committed.
1328 storage = request.find_service(IFileStorage)
1329 storage.store(
1330 file_.path,
1331 os.path.join(tmpdir, filename),
1332 meta={
1333 "project": file_.release.project.normalized_name,
1334 "version": file_.release.version,
1335 "package-type": file_.packagetype,
1336 "python-version": file_.python_version,
1337 },
1338 )
1339 if has_signature:
1340 storage.store(
1341 file_.pgp_path,
1342 os.path.join(tmpdir, filename + ".asc"),
1343 meta={
1344 "project": file_.release.project.normalized_name,
1345 "version": file_.release.version,
1346 "package-type": file_.packagetype,
1347 "python-version": file_.python_version,
1348 },
1349 )
1350
1351 # Log a successful upload
1352 metrics.increment("warehouse.upload.ok", tags=[f"filetype:{form.filetype.data}"])
1353
1354 return Response()
1355
1356
1357 def _legacy_purge(status, *args, **kwargs):
1358 if status:
1359 requests.post(*args, **kwargs)
1360
1361
1362 @view_config(
1363 route_name="forklift.legacy.submit", require_csrf=False, require_methods=["POST"]
1364 )
1365 @view_config(
1366 route_name="forklift.legacy.submit_pkg_info",
1367 require_csrf=False,
1368 require_methods=["POST"],
1369 )
1370 def submit(request):
1371 return _exc_with_message(
1372 HTTPGone,
1373 (
1374 "Project pre-registration is no longer required or supported, "
1375 "upload your files instead."
1376 ),
1377 )
1378
1379
1380 @view_config(
1381 route_name="forklift.legacy.doc_upload",
1382 require_csrf=False,
1383 require_methods=["POST"],
1384 )
1385 def doc_upload(request):
1386 return _exc_with_message(
1387 HTTPGone,
1388 "Uploading documentation is no longer supported, we recommend using "
1389 "https://readthedocs.org/.",
1390 )
1391
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/forklift/legacy.py b/warehouse/forklift/legacy.py
--- a/warehouse/forklift/legacy.py
+++ b/warehouse/forklift/legacy.py
@@ -252,11 +252,6 @@
"Can't have direct dependency: {!r}".format(requirement)
)
- if any(packaging.version.Version(spec.version).local for spec in req.specifier):
- raise wtforms.validators.ValidationError(
- "Can't have dependency with local version: {!r}".format(requirement)
- )
-
def _validate_legacy_dist_req_list(form, field):
for datum in field.data:
| {"golden_diff": "diff --git a/warehouse/forklift/legacy.py b/warehouse/forklift/legacy.py\n--- a/warehouse/forklift/legacy.py\n+++ b/warehouse/forklift/legacy.py\n@@ -252,11 +252,6 @@\n \"Can't have direct dependency: {!r}\".format(requirement)\n )\n \n- if any(packaging.version.Version(spec.version).local for spec in req.specifier):\n- raise wtforms.validators.ValidationError(\n- \"Can't have dependency with local version: {!r}\".format(requirement)\n- )\n-\n \n def _validate_legacy_dist_req_list(form, field):\n for datum in field.data:\n", "issue": "Upload to PyPI fails when dependency version contains *\n**Describe the bug**\r\nPyPI raises `400 Client error` when uploading a package that specifies requirement using `== 2.*`. This is a valid version specifier referred to [PEP 440](https://www.python.org/dev/peps/pep-0440/#compatible-release).\r\n\r\nThe whole error is:\r\n```\r\nHTTPError: 400 Client Error: Invalid value for requires_dist. Error: Invalid version: '2.*' for url: https://test.pypi.org/legacy/\r\n```\r\n\r\n**Expected behavior**\r\nUpload should pass with no errors. (Note that in the example below the expected behaviour would be to fail with authentication error, as you don't have sufficient permissions on the project.)\r\n\r\n**To Reproduce**\r\nThis is a minimal reproducer: https://github.com/dblenkus/warehouse-requirements-issue\r\n\r\nInstall twine with `pip install twine` and try to upload the package with `twine upload -r testpypi dist/*`.\r\n\r\n**My Platform**\r\nMacOS 10.14.6\r\ntwine 1.13.0\r\n\r\n**Additional context**\r\nThis has worked few days ago on July 26th 2019.\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport email\nimport hashlib\nimport hmac\nimport os.path\nimport re\nimport tarfile\nimport tempfile\nimport zipfile\n\nfrom cgi import FieldStorage, parse_header\nfrom itertools import chain\n\nimport packaging.requirements\nimport packaging.specifiers\nimport packaging.utils\nimport packaging.version\nimport pkg_resources\nimport requests\nimport stdlib_list\nimport wtforms\nimport wtforms.validators\n\nfrom pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPGone\nfrom pyramid.response import Response\nfrom pyramid.view import view_config\nfrom sqlalchemy import exists, func, orm\nfrom sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound\n\nfrom warehouse import forms\nfrom warehouse.admin.squats import Squat\nfrom warehouse.classifiers.models import Classifier\nfrom warehouse.metrics import IMetricsService\nfrom warehouse.packaging.interfaces import IFileStorage\nfrom warehouse.packaging.models import (\n BlacklistedProject,\n Dependency,\n DependencyKind,\n Description,\n File,\n Filename,\n JournalEntry,\n Project,\n Release,\n Role,\n)\nfrom warehouse.utils import http, readme\n\nMAX_FILESIZE = 60 * 1024 * 1024 # 60M\nMAX_SIGSIZE = 8 * 1024 # 8K\n\nPATH_HASHER = \"blake2_256\"\n\n\ndef namespace_stdlib_list(module_list):\n for module_name in module_list:\n parts = module_name.split(\".\")\n for i, part in enumerate(parts):\n yield \".\".join(parts[: i + 1])\n\n\nSTDLIB_PROHIBITTED = {\n packaging.utils.canonicalize_name(s.rstrip(\"-_.\").lstrip(\"-_.\"))\n for s in chain.from_iterable(\n namespace_stdlib_list(stdlib_list.stdlib_list(version))\n for version in stdlib_list.short_versions\n )\n}\n\n# Wheel platform checking\n\n# Note: defining new platform ABI compatibility tags that don't\n# have a python.org binary release to anchor them is a\n# complex task that needs more than just OS+architecture info.\n# For Linux specifically, the platform ABI is defined by each\n# individual distro version, so wheels built on one version may\n# not even work on older versions of the same distro, let alone\n# a completely different distro.\n#\n# That means new entries should only be added given an\n# accompanying ABI spec that explains how to build a\n# compatible binary (see the manylinux specs as examples).\n\n# These platforms can be handled by a simple static list:\n_allowed_platforms = {\n \"any\",\n \"win32\",\n \"win_amd64\",\n \"win_ia64\",\n \"manylinux1_x86_64\",\n \"manylinux1_i686\",\n \"manylinux2010_x86_64\",\n \"manylinux2010_i686\",\n \"linux_armv6l\",\n \"linux_armv7l\",\n}\n# macosx is a little more complicated:\n_macosx_platform_re = re.compile(r\"macosx_10_(\\d+)+_(?P<arch>.*)\")\n_macosx_arches = {\n \"ppc\",\n \"ppc64\",\n \"i386\",\n \"x86_64\",\n \"intel\",\n \"fat\",\n \"fat32\",\n \"fat64\",\n \"universal\",\n}\n\n\n# Actual checking code;\ndef _valid_platform_tag(platform_tag):\n if platform_tag in _allowed_platforms:\n return True\n m = _macosx_platform_re.match(platform_tag)\n if m and m.group(\"arch\") in _macosx_arches:\n return True\n return False\n\n\n_error_message_order = [\"metadata_version\", \"name\", \"version\"]\n\n\n_dist_file_regexes = {\n # True/False is for legacy or not.\n True: re.compile(r\".+?\\.(exe|tar\\.gz|bz2|rpm|deb|zip|tgz|egg|dmg|msi|whl)$\", re.I),\n False: re.compile(r\".+?\\.(tar\\.gz|zip|whl|egg)$\", re.I),\n}\n\n\n_wheel_file_re = re.compile(\n r\"\"\"\n ^\n (?P<namever>(?P<name>.+?)(-(?P<ver>\\d.+?))?)\n (\n (-(?P<build>\\d.*?))?\n -(?P<pyver>.+?)\n -(?P<abi>.+?)\n -(?P<plat>.+?)\n (?:\\.whl|\\.dist-info)\n )\n $\n \"\"\",\n re.VERBOSE,\n)\n\n\n_project_name_re = re.compile(\n r\"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$\", re.IGNORECASE\n)\n\n\n_legacy_specifier_re = re.compile(r\"^(?P<name>\\S+)(?: \\((?P<specifier>\\S+)\\))?$\")\n\n\n_valid_description_content_types = {\"text/plain\", \"text/x-rst\", \"text/markdown\"}\n\n_valid_markdown_variants = {\"CommonMark\", \"GFM\"}\n\n\ndef _exc_with_message(exc, message):\n # The crappy old API that PyPI offered uses the status to pass down\n # messages to the client. So this function will make that easier to do.\n resp = exc(message)\n resp.status = \"{} {}\".format(resp.status_code, message)\n return resp\n\n\ndef _validate_pep440_version(form, field):\n parsed = packaging.version.parse(field.data)\n\n # Check that this version is a valid PEP 440 version at all.\n if not isinstance(parsed, packaging.version.Version):\n raise wtforms.validators.ValidationError(\n \"Start and end with a letter or numeral containing only \"\n \"ASCII numeric and '.', '_' and '-'.\"\n )\n\n # Check that this version does not have a PEP 440 local segment attached\n # to it.\n if parsed.local is not None:\n raise wtforms.validators.ValidationError(\"Can't use PEP 440 local versions.\")\n\n\ndef _parse_legacy_requirement(requirement):\n parsed = _legacy_specifier_re.search(requirement)\n if parsed is None:\n raise ValueError(\"Invalid requirement.\")\n return parsed.groupdict()[\"name\"], parsed.groupdict()[\"specifier\"]\n\n\ndef _validate_pep440_specifier(specifier):\n try:\n packaging.specifiers.SpecifierSet(specifier)\n except packaging.specifiers.InvalidSpecifier:\n raise wtforms.validators.ValidationError(\n \"Invalid specifier in requirement.\"\n ) from None\n\n\ndef _validate_pep440_specifier_field(form, field):\n return _validate_pep440_specifier(field.data)\n\n\ndef _validate_legacy_non_dist_req(requirement):\n try:\n req = packaging.requirements.Requirement(requirement.replace(\"_\", \"\"))\n except packaging.requirements.InvalidRequirement:\n raise wtforms.validators.ValidationError(\n \"Invalid requirement: {!r}\".format(requirement)\n ) from None\n\n if req.url is not None:\n raise wtforms.validators.ValidationError(\n \"Can't direct dependency: {!r}\".format(requirement)\n )\n\n if any(\n not identifier.isalnum() or identifier[0].isdigit()\n for identifier in req.name.split(\".\")\n ):\n raise wtforms.validators.ValidationError(\"Use a valid Python identifier.\")\n\n\ndef _validate_legacy_non_dist_req_list(form, field):\n for datum in field.data:\n _validate_legacy_non_dist_req(datum)\n\n\ndef _validate_legacy_dist_req(requirement):\n try:\n req = packaging.requirements.Requirement(requirement)\n except packaging.requirements.InvalidRequirement:\n raise wtforms.validators.ValidationError(\n \"Invalid requirement: {!r}.\".format(requirement)\n ) from None\n\n if req.url is not None:\n raise wtforms.validators.ValidationError(\n \"Can't have direct dependency: {!r}\".format(requirement)\n )\n\n if any(packaging.version.Version(spec.version).local for spec in req.specifier):\n raise wtforms.validators.ValidationError(\n \"Can't have dependency with local version: {!r}\".format(requirement)\n )\n\n\ndef _validate_legacy_dist_req_list(form, field):\n for datum in field.data:\n _validate_legacy_dist_req(datum)\n\n\ndef _validate_requires_external(requirement):\n name, specifier = _parse_legacy_requirement(requirement)\n\n # TODO: Is it really reasonable to parse the specifier using PEP 440?\n if specifier is not None:\n _validate_pep440_specifier(specifier)\n\n\ndef _validate_requires_external_list(form, field):\n for datum in field.data:\n _validate_requires_external(datum)\n\n\ndef _validate_project_url(value):\n try:\n label, url = value.split(\", \", 1)\n except ValueError:\n raise wtforms.validators.ValidationError(\n \"Use both a label and an URL.\"\n ) from None\n\n if not label:\n raise wtforms.validators.ValidationError(\"Use a label.\")\n\n if len(label) > 32:\n raise wtforms.validators.ValidationError(\"Use 32 characters or less.\")\n\n if not url:\n raise wtforms.validators.ValidationError(\"Use an URL.\")\n\n if not http.is_valid_uri(url, require_authority=False):\n raise wtforms.validators.ValidationError(\"Use valid URL.\")\n\n\ndef _validate_project_url_list(form, field):\n for datum in field.data:\n _validate_project_url(datum)\n\n\ndef _validate_rfc822_email_field(form, field):\n email_validator = wtforms.validators.Email(message=\"Use a valid email address\")\n addresses = email.utils.getaddresses([field.data])\n\n for real_name, address in addresses:\n email_validator(form, type(\"field\", (), {\"data\": address}))\n\n\ndef _validate_description_content_type(form, field):\n def _raise(message):\n raise wtforms.validators.ValidationError(\n f\"Invalid description content type: {message}\"\n )\n\n content_type, parameters = parse_header(field.data)\n if content_type not in _valid_description_content_types:\n _raise(\"type/subtype is not valid\")\n\n charset = parameters.get(\"charset\")\n if charset and charset != \"UTF-8\":\n _raise(\"Use a valid charset\")\n\n variant = parameters.get(\"variant\")\n if (\n content_type == \"text/markdown\"\n and variant\n and variant not in _valid_markdown_variants\n ):\n _raise(\n \"Use a valid variant, expected one of {}\".format(\n \", \".join(_valid_markdown_variants)\n )\n )\n\n\ndef _construct_dependencies(form, types):\n for name, kind in types.items():\n for item in getattr(form, name).data:\n yield Dependency(kind=kind.value, specifier=item)\n\n\nclass ListField(wtforms.Field):\n def process_formdata(self, valuelist):\n self.data = [v.strip() for v in valuelist if v.strip()]\n\n\n# TODO: Eventually this whole validation thing should move to the packaging\n# library and we should just call that. However until PEP 426 is done\n# that library won't have an API for this.\nclass MetadataForm(forms.Form):\n\n # Metadata version\n metadata_version = wtforms.StringField(\n description=\"Metadata-Version\",\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.AnyOf(\n # Note: This isn't really Metadata 2.0, however bdist_wheel\n # claims it is producing a Metadata 2.0 metadata when in\n # reality it's more like 1.2 with some extensions.\n [\"1.0\", \"1.1\", \"1.2\", \"2.0\", \"2.1\"],\n message=\"Use a known metadata version.\",\n ),\n ],\n )\n\n # Identity Project and Release\n name = wtforms.StringField(\n description=\"Name\",\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.Regexp(\n _project_name_re,\n re.IGNORECASE,\n message=(\n \"Start and end with a letter or numeral containing \"\n \"only ASCII numeric and '.', '_' and '-'.\"\n ),\n ),\n ],\n )\n version = wtforms.StringField(\n description=\"Version\",\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.Regexp(\n r\"^(?!\\s).*(?<!\\s)$\",\n message=\"Can't have leading or trailing whitespace.\",\n ),\n _validate_pep440_version,\n ],\n )\n\n # Additional Release metadata\n summary = wtforms.StringField(\n description=\"Summary\",\n validators=[\n wtforms.validators.Optional(),\n wtforms.validators.Length(max=512),\n wtforms.validators.Regexp(\n r\"^.+$\", # Rely on the fact that . doesn't match a newline.\n message=\"Use a single line only.\",\n ),\n ],\n )\n description = wtforms.StringField(\n description=\"Description\", validators=[wtforms.validators.Optional()]\n )\n author = wtforms.StringField(\n description=\"Author\", validators=[wtforms.validators.Optional()]\n )\n description_content_type = wtforms.StringField(\n description=\"Description-Content-Type\",\n validators=[wtforms.validators.Optional(), _validate_description_content_type],\n )\n author_email = wtforms.StringField(\n description=\"Author-email\",\n validators=[wtforms.validators.Optional(), _validate_rfc822_email_field],\n )\n maintainer = wtforms.StringField(\n description=\"Maintainer\", validators=[wtforms.validators.Optional()]\n )\n maintainer_email = wtforms.StringField(\n description=\"Maintainer-email\",\n validators=[wtforms.validators.Optional(), _validate_rfc822_email_field],\n )\n license = wtforms.StringField(\n description=\"License\", validators=[wtforms.validators.Optional()]\n )\n keywords = wtforms.StringField(\n description=\"Keywords\", validators=[wtforms.validators.Optional()]\n )\n classifiers = wtforms.fields.SelectMultipleField(description=\"Classifier\")\n platform = wtforms.StringField(\n description=\"Platform\", validators=[wtforms.validators.Optional()]\n )\n\n # URLs\n home_page = wtforms.StringField(\n description=\"Home-Page\",\n validators=[wtforms.validators.Optional(), forms.URIValidator()],\n )\n download_url = wtforms.StringField(\n description=\"Download-URL\",\n validators=[wtforms.validators.Optional(), forms.URIValidator()],\n )\n\n # Dependency Information\n requires_python = wtforms.StringField(\n description=\"Requires-Python\",\n validators=[wtforms.validators.Optional(), _validate_pep440_specifier_field],\n )\n\n # File information\n pyversion = wtforms.StringField(validators=[wtforms.validators.Optional()])\n filetype = wtforms.StringField(\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.AnyOf(\n [\n \"bdist_dmg\",\n \"bdist_dumb\",\n \"bdist_egg\",\n \"bdist_msi\",\n \"bdist_rpm\",\n \"bdist_wheel\",\n \"bdist_wininst\",\n \"sdist\",\n ],\n message=\"Use a known file type.\",\n ),\n ]\n )\n comment = wtforms.StringField(validators=[wtforms.validators.Optional()])\n md5_digest = wtforms.StringField(validators=[wtforms.validators.Optional()])\n sha256_digest = wtforms.StringField(\n validators=[\n wtforms.validators.Optional(),\n wtforms.validators.Regexp(\n r\"^[A-F0-9]{64}$\",\n re.IGNORECASE,\n message=\"Use a valid, hex-encoded, SHA256 message digest.\",\n ),\n ]\n )\n blake2_256_digest = wtforms.StringField(\n validators=[\n wtforms.validators.Optional(),\n wtforms.validators.Regexp(\n r\"^[A-F0-9]{64}$\",\n re.IGNORECASE,\n message=\"Use a valid, hex-encoded, BLAKE2 message digest.\",\n ),\n ]\n )\n\n # Legacy dependency information\n requires = ListField(\n validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]\n )\n provides = ListField(\n validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]\n )\n obsoletes = ListField(\n validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]\n )\n\n # Newer dependency information\n requires_dist = ListField(\n description=\"Requires-Dist\",\n validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],\n )\n provides_dist = ListField(\n description=\"Provides-Dist\",\n validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],\n )\n obsoletes_dist = ListField(\n description=\"Obsoletes-Dist\",\n validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],\n )\n requires_external = ListField(\n description=\"Requires-External\",\n validators=[wtforms.validators.Optional(), _validate_requires_external_list],\n )\n\n # Newer metadata information\n project_urls = ListField(\n description=\"Project-URL\",\n validators=[wtforms.validators.Optional(), _validate_project_url_list],\n )\n\n def full_validate(self):\n # All non source releases *must* have a pyversion\n if (\n self.filetype.data\n and self.filetype.data != \"sdist\"\n and not self.pyversion.data\n ):\n raise wtforms.validators.ValidationError(\n \"Python version is required for binary distribution uploads.\"\n )\n\n # All source releases *must* have a pyversion of \"source\"\n if self.filetype.data == \"sdist\":\n if not self.pyversion.data:\n self.pyversion.data = \"source\"\n elif self.pyversion.data != \"source\":\n raise wtforms.validators.ValidationError(\n \"Use 'source' as Python version for an sdist.\"\n )\n\n # We *must* have at least one digest to verify against.\n if not self.md5_digest.data and not self.sha256_digest.data:\n raise wtforms.validators.ValidationError(\n \"Include at least one message digest.\"\n )\n\n\n_safe_zipnames = re.compile(r\"(purelib|platlib|headers|scripts|data).+\", re.I)\n# .tar uncompressed, .tar.gz .tgz, .tar.bz2 .tbz2\n_tar_filenames_re = re.compile(r\"\\.(?:tar$|t(?:ar\\.)?(?P<z_type>gz|bz2)$)\")\n\n\ndef _is_valid_dist_file(filename, filetype):\n \"\"\"\n Perform some basic checks to see whether the indicated file could be\n a valid distribution file.\n \"\"\"\n\n # If our file is a zipfile, then ensure that it's members are only\n # compressed with supported compression methods.\n if zipfile.is_zipfile(filename):\n with zipfile.ZipFile(filename) as zfp:\n for zinfo in zfp.infolist():\n if zinfo.compress_type not in {\n zipfile.ZIP_STORED,\n zipfile.ZIP_DEFLATED,\n }:\n return False\n\n tar_fn_match = _tar_filenames_re.search(filename)\n if tar_fn_match:\n # Ensure that this is a valid tar file, and that it contains PKG-INFO.\n z_type = tar_fn_match.group(\"z_type\") or \"\"\n try:\n with tarfile.open(filename, f\"r:{z_type}\") as tar:\n # This decompresses the entire stream to validate it and the\n # tar within. Easy CPU DoS attack. :/\n bad_tar = True\n member = tar.next()\n while member:\n parts = os.path.split(member.name)\n if len(parts) == 2 and parts[1] == \"PKG-INFO\":\n bad_tar = False\n member = tar.next()\n if bad_tar:\n return False\n except tarfile.ReadError:\n return False\n elif filename.endswith(\".exe\"):\n # The only valid filetype for a .exe file is \"bdist_wininst\".\n if filetype != \"bdist_wininst\":\n return False\n\n # Ensure that the .exe is a valid zip file, and that all of the files\n # contained within it have safe filenames.\n try:\n with zipfile.ZipFile(filename, \"r\") as zfp:\n # We need the no branch below to work around a bug in\n # coverage.py where it's detecting a missed branch where there\n # isn't one.\n for zipname in zfp.namelist(): # pragma: no branch\n if not _safe_zipnames.match(zipname):\n return False\n except zipfile.BadZipFile:\n return False\n elif filename.endswith(\".msi\"):\n # The only valid filetype for a .msi is \"bdist_msi\"\n if filetype != \"bdist_msi\":\n return False\n\n # Check the first 8 bytes of the MSI file. This was taken from the\n # legacy implementation of PyPI which itself took it from the\n # implementation of `file` I believe.\n with open(filename, \"rb\") as fp:\n if fp.read(8) != b\"\\xD0\\xCF\\x11\\xE0\\xA1\\xB1\\x1A\\xE1\":\n return False\n elif filename.endswith(\".zip\") or filename.endswith(\".egg\"):\n # Ensure that the .zip/.egg is a valid zip file, and that it has a\n # PKG-INFO file.\n try:\n with zipfile.ZipFile(filename, \"r\") as zfp:\n for zipname in zfp.namelist():\n parts = os.path.split(zipname)\n if len(parts) == 2 and parts[1] == \"PKG-INFO\":\n # We need the no branch below to work around a bug in\n # coverage.py where it's detecting a missed branch\n # where there isn't one.\n break # pragma: no branch\n else:\n return False\n except zipfile.BadZipFile:\n return False\n elif filename.endswith(\".whl\"):\n # Ensure that the .whl is a valid zip file, and that it has a WHEEL\n # file.\n try:\n with zipfile.ZipFile(filename, \"r\") as zfp:\n for zipname in zfp.namelist():\n parts = os.path.split(zipname)\n if len(parts) == 2 and parts[1] == \"WHEEL\":\n # We need the no branch below to work around a bug in\n # coverage.py where it's detecting a missed branch\n # where there isn't one.\n break # pragma: no branch\n else:\n return False\n except zipfile.BadZipFile:\n return False\n\n # If we haven't yet decided it's not valid, then we'll assume it is and\n # allow it.\n return True\n\n\ndef _is_duplicate_file(db_session, filename, hashes):\n \"\"\"\n Check to see if file already exists, and if it's content matches.\n A file is considered to exist if its filename *or* blake2 digest are\n present in a file row in the database.\n\n Returns:\n - True: This file is a duplicate and all further processing should halt.\n - False: This file exists, but it is not a duplicate.\n - None: This file does not exist.\n \"\"\"\n\n file_ = (\n db_session.query(File)\n .filter(\n (File.filename == filename)\n | (File.blake2_256_digest == hashes[\"blake2_256\"])\n )\n .first()\n )\n\n if file_ is not None:\n return (\n file_.filename == filename\n and file_.sha256_digest == hashes[\"sha256\"]\n and file_.md5_digest == hashes[\"md5\"]\n and file_.blake2_256_digest == hashes[\"blake2_256\"]\n )\n\n return None\n\n\ndef _no_deprecated_classifiers(request):\n deprecated_classifiers = {\n classifier.classifier\n for classifier in (\n request.db.query(Classifier.classifier)\n .filter(Classifier.deprecated.is_(True))\n .all()\n )\n }\n\n def validate_no_deprecated_classifiers(form, field):\n invalid_classifiers = set(field.data or []) & deprecated_classifiers\n if invalid_classifiers:\n first_invalid_classifier = sorted(invalid_classifiers)[0]\n host = request.registry.settings.get(\"warehouse.domain\")\n classifiers_url = request.route_url(\"classifiers\", _host=host)\n\n raise wtforms.validators.ValidationError(\n f\"Classifier {first_invalid_classifier!r} has been \"\n f\"deprecated, see {classifiers_url} for a list of valid \"\n \"classifiers.\"\n )\n\n return validate_no_deprecated_classifiers\n\n\n@view_config(\n route_name=\"forklift.legacy.file_upload\",\n uses_session=True,\n require_csrf=False,\n require_methods=[\"POST\"],\n)\ndef file_upload(request):\n # If we're in read-only mode, let upload clients know\n if request.flags.enabled(\"read-only\"):\n raise _exc_with_message(\n HTTPForbidden, \"Read-only mode: Uploads are temporarily disabled\"\n )\n\n # Log an attempt to upload\n metrics = request.find_service(IMetricsService, context=None)\n metrics.increment(\"warehouse.upload.attempt\")\n\n # Before we do anything, if there isn't an authenticated user with this\n # request, then we'll go ahead and bomb out.\n if request.authenticated_userid is None:\n raise _exc_with_message(\n HTTPForbidden, \"Invalid or non-existent authentication information.\"\n )\n\n # Ensure that user has a verified, primary email address. This should both\n # reduce the ease of spam account creation and activity, as well as act as\n # a forcing function for https://github.com/pypa/warehouse/issues/3632.\n # TODO: Once https://github.com/pypa/warehouse/issues/3632 has been solved,\n # we might consider a different condition, possibly looking at\n # User.is_active instead.\n if not (request.user.primary_email and request.user.primary_email.verified):\n raise _exc_with_message(\n HTTPBadRequest,\n (\n \"User {!r} does not have a verified primary email address. \"\n \"Please add a verified primary email before attempting to \"\n \"upload to PyPI. See {project_help} for more information.\"\n \"for more information.\"\n ).format(\n request.user.username,\n project_help=request.help_url(_anchor=\"verified-email\"),\n ),\n ) from None\n\n # Do some cleanup of the various form fields\n for key in list(request.POST):\n value = request.POST.get(key)\n if isinstance(value, str):\n # distutils \"helpfully\" substitutes unknown, but \"required\" values\n # with the string \"UNKNOWN\". This is basically never what anyone\n # actually wants so we'll just go ahead and delete anything whose\n # value is UNKNOWN.\n if value.strip() == \"UNKNOWN\":\n del request.POST[key]\n\n # Escape NUL characters, which psycopg doesn't like\n if \"\\x00\" in value:\n request.POST[key] = value.replace(\"\\x00\", \"\\\\x00\")\n\n # We require protocol_version 1, it's the only supported version however\n # passing a different version should raise an error.\n if request.POST.get(\"protocol_version\", \"1\") != \"1\":\n raise _exc_with_message(HTTPBadRequest, \"Unknown protocol version.\")\n\n # Check if any fields were supplied as a tuple and have become a\n # FieldStorage. The 'content' and 'gpg_signature' fields _should_ be a\n # FieldStorage, however.\n # ref: https://github.com/pypa/warehouse/issues/2185\n # ref: https://github.com/pypa/warehouse/issues/2491\n for field in set(request.POST) - {\"content\", \"gpg_signature\"}:\n values = request.POST.getall(field)\n if any(isinstance(value, FieldStorage) for value in values):\n raise _exc_with_message(HTTPBadRequest, f\"{field}: Should not be a tuple.\")\n\n # Look up all of the valid classifiers\n all_classifiers = request.db.query(Classifier).all()\n\n # Validate and process the incoming metadata.\n form = MetadataForm(request.POST)\n\n # Add a validator for deprecated classifiers\n form.classifiers.validators.append(_no_deprecated_classifiers(request))\n\n form.classifiers.choices = [(c.classifier, c.classifier) for c in all_classifiers]\n if not form.validate():\n for field_name in _error_message_order:\n if field_name in form.errors:\n break\n else:\n field_name = sorted(form.errors.keys())[0]\n\n if field_name in form:\n field = form[field_name]\n if field.description and isinstance(field, wtforms.StringField):\n error_message = (\n \"{value!r} is an invalid value for {field}. \".format(\n value=field.data, field=field.description\n )\n + \"Error: {} \".format(form.errors[field_name][0])\n + \"See \"\n \"https://packaging.python.org/specifications/core-metadata\"\n )\n else:\n error_message = \"Invalid value for {field}. Error: {msgs[0]}\".format(\n field=field_name, msgs=form.errors[field_name]\n )\n else:\n error_message = \"Error: {}\".format(form.errors[field_name][0])\n\n raise _exc_with_message(HTTPBadRequest, error_message)\n\n # Ensure that we have file data in the request.\n if \"content\" not in request.POST:\n raise _exc_with_message(HTTPBadRequest, \"Upload payload does not have a file.\")\n\n # Look up the project first before doing anything else, this is so we can\n # automatically register it if we need to and can check permissions before\n # going any further.\n try:\n project = (\n request.db.query(Project)\n .filter(\n Project.normalized_name == func.normalize_pep426_name(form.name.data)\n )\n .one()\n )\n except NoResultFound:\n # Check for AdminFlag set by a PyPI Administrator disabling new project\n # registration, reasons for this include Spammers, security\n # vulnerabilities, or just wanting to be lazy and not worry ;)\n if request.flags.enabled(\"disallow-new-project-registration\"):\n raise _exc_with_message(\n HTTPForbidden,\n (\n \"New project registration temporarily disabled. \"\n \"See {projecthelp} for details\"\n ).format(projecthelp=request.help_url(_anchor=\"admin-intervention\")),\n ) from None\n\n # Before we create the project, we're going to check our blacklist to\n # see if this project is even allowed to be registered. If it is not,\n # then we're going to deny the request to create this project.\n if request.db.query(\n exists().where(\n BlacklistedProject.name == func.normalize_pep426_name(form.name.data)\n )\n ).scalar():\n raise _exc_with_message(\n HTTPBadRequest,\n (\n \"The name {name!r} isn't allowed. \"\n \"See {projecthelp} \"\n \"for more information.\"\n ).format(\n name=form.name.data,\n projecthelp=request.help_url(_anchor=\"project-name\"),\n ),\n ) from None\n\n # Also check for collisions with Python Standard Library modules.\n if packaging.utils.canonicalize_name(form.name.data) in STDLIB_PROHIBITTED:\n raise _exc_with_message(\n HTTPBadRequest,\n (\n \"The name {name!r} isn't allowed (conflict with Python \"\n \"Standard Library module name). See \"\n \"{projecthelp} for more information.\"\n ).format(\n name=form.name.data,\n projecthelp=request.help_url(_anchor=\"project-name\"),\n ),\n ) from None\n\n # The project doesn't exist in our database, so first we'll check for\n # projects with a similar name\n squattees = (\n request.db.query(Project)\n .filter(\n func.levenshtein(\n Project.normalized_name, func.normalize_pep426_name(form.name.data)\n )\n <= 2\n )\n .all()\n )\n\n # Next we'll create the project\n project = Project(name=form.name.data)\n request.db.add(project)\n\n # Now that the project exists, add any squats which it is the squatter for\n for squattee in squattees:\n request.db.add(Squat(squatter=project, squattee=squattee))\n\n # Then we'll add a role setting the current user as the \"Owner\" of the\n # project.\n request.db.add(Role(user=request.user, project=project, role_name=\"Owner\"))\n # TODO: This should be handled by some sort of database trigger or a\n # SQLAlchemy hook or the like instead of doing it inline in this\n # view.\n request.db.add(\n JournalEntry(\n name=project.name,\n action=\"create\",\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n request.db.add(\n JournalEntry(\n name=project.name,\n action=\"add Owner {}\".format(request.user.username),\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n\n # Check that the user has permission to do things to this project, if this\n # is a new project this will act as a sanity check for the role we just\n # added above.\n if not request.has_permission(\"upload\", project):\n raise _exc_with_message(\n HTTPForbidden,\n (\n \"The credential associated with user '{0}' \"\n \"isn't allowed to upload to project '{1}'. \"\n \"See {2} for more information.\"\n ).format(\n request.user.username,\n project.name,\n request.help_url(_anchor=\"project-name\"),\n ),\n )\n\n # Update name if it differs but is still equivalent. We don't need to check if\n # they are equivalent when normalized because that's already been done when we\n # queried for the project.\n if project.name != form.name.data:\n project.name = form.name.data\n\n # Render our description so we can save from having to render this data every time\n # we load a project description page.\n rendered = None\n if form.description.data:\n description_content_type = form.description_content_type.data\n if not description_content_type:\n description_content_type = \"text/x-rst\"\n\n rendered = readme.render(\n form.description.data, description_content_type, use_fallback=False\n )\n\n # Uploading should prevent broken rendered descriptions.\n if rendered is None:\n if form.description_content_type.data:\n message = (\n \"The description failed to render \"\n \"for '{description_content_type}'.\"\n ).format(description_content_type=description_content_type)\n else:\n message = (\n \"The description failed to render \"\n \"in the default format of reStructuredText.\"\n )\n raise _exc_with_message(\n HTTPBadRequest,\n \"{message} See {projecthelp} for more information.\".format(\n message=message,\n projecthelp=request.help_url(_anchor=\"description-content-type\"),\n ),\n ) from None\n\n try:\n canonical_version = packaging.utils.canonicalize_version(form.version.data)\n release = (\n request.db.query(Release)\n .filter(\n (Release.project == project)\n & (Release.canonical_version == canonical_version)\n )\n .one()\n )\n except MultipleResultsFound:\n # There are multiple releases of this project which have the same\n # canonical version that were uploaded before we checked for\n # canonical version equivalence, so return the exact match instead\n release = (\n request.db.query(Release)\n .filter(\n (Release.project == project) & (Release.version == form.version.data)\n )\n .one()\n )\n except NoResultFound:\n release = Release(\n project=project,\n _classifiers=[\n c for c in all_classifiers if c.classifier in form.classifiers.data\n ],\n dependencies=list(\n _construct_dependencies(\n form,\n {\n \"requires\": DependencyKind.requires,\n \"provides\": DependencyKind.provides,\n \"obsoletes\": DependencyKind.obsoletes,\n \"requires_dist\": DependencyKind.requires_dist,\n \"provides_dist\": DependencyKind.provides_dist,\n \"obsoletes_dist\": DependencyKind.obsoletes_dist,\n \"requires_external\": DependencyKind.requires_external,\n \"project_urls\": DependencyKind.project_url,\n },\n )\n ),\n canonical_version=canonical_version,\n description=Description(\n content_type=form.description_content_type.data,\n raw=form.description.data or \"\",\n html=rendered or \"\",\n rendered_by=readme.renderer_version(),\n ),\n **{\n k: getattr(form, k).data\n for k in {\n # This is a list of all the fields in the form that we\n # should pull off and insert into our new release.\n \"version\",\n \"summary\",\n \"license\",\n \"author\",\n \"author_email\",\n \"maintainer\",\n \"maintainer_email\",\n \"keywords\",\n \"platform\",\n \"home_page\",\n \"download_url\",\n \"requires_python\",\n }\n },\n uploader=request.user,\n uploaded_via=request.user_agent,\n )\n request.db.add(release)\n # TODO: This should be handled by some sort of database trigger or\n # a SQLAlchemy hook or the like instead of doing it inline in\n # this view.\n request.db.add(\n JournalEntry(\n name=release.project.name,\n version=release.version,\n action=\"new release\",\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n\n # TODO: We need a better solution to this than to just do it inline inside\n # this method. Ideally the version field would just be sortable, but\n # at least this should be some sort of hook or trigger.\n releases = (\n request.db.query(Release)\n .filter(Release.project == project)\n .options(orm.load_only(Release._pypi_ordering))\n .all()\n )\n for i, r in enumerate(\n sorted(releases, key=lambda x: packaging.version.parse(x.version))\n ):\n r._pypi_ordering = i\n\n # Pull the filename out of our POST data.\n filename = request.POST[\"content\"].filename\n\n # Make sure that the filename does not contain any path separators.\n if \"/\" in filename or \"\\\\\" in filename:\n raise _exc_with_message(\n HTTPBadRequest, \"Cannot upload a file with '/' or '\\\\' in the name.\"\n )\n\n # Make sure the filename ends with an allowed extension.\n if _dist_file_regexes[project.allow_legacy_files].search(filename) is None:\n raise _exc_with_message(\n HTTPBadRequest,\n \"Invalid file extension: Use .egg, .tar.gz, .whl or .zip \"\n \"extension. (https://www.python.org/dev/peps/pep-0527)\",\n )\n\n # Make sure that our filename matches the project that it is being uploaded\n # to.\n prefix = pkg_resources.safe_name(project.name).lower()\n if not pkg_resources.safe_name(filename).lower().startswith(prefix):\n raise _exc_with_message(\n HTTPBadRequest,\n \"Start filename for {!r} with {!r}.\".format(project.name, prefix),\n )\n\n # Check the content type of what is being uploaded\n if not request.POST[\"content\"].type or request.POST[\"content\"].type.startswith(\n \"image/\"\n ):\n raise _exc_with_message(HTTPBadRequest, \"Invalid distribution file.\")\n\n # Ensure that the package filetype is allowed.\n # TODO: Once PEP 527 is completely implemented we should be able to delete\n # this and just move it into the form itself.\n if not project.allow_legacy_files and form.filetype.data not in {\n \"sdist\",\n \"bdist_wheel\",\n \"bdist_egg\",\n }:\n raise _exc_with_message(HTTPBadRequest, \"Unknown type of file.\")\n\n # The project may or may not have a file size specified on the project, if\n # it does then it may or may not be smaller or larger than our global file\n # size limits.\n file_size_limit = max(filter(None, [MAX_FILESIZE, project.upload_limit]))\n\n with tempfile.TemporaryDirectory() as tmpdir:\n temporary_filename = os.path.join(tmpdir, filename)\n\n # Buffer the entire file onto disk, checking the hash of the file as we\n # go along.\n with open(temporary_filename, \"wb\") as fp:\n file_size = 0\n file_hashes = {\n \"md5\": hashlib.md5(),\n \"sha256\": hashlib.sha256(),\n \"blake2_256\": hashlib.blake2b(digest_size=256 // 8),\n }\n for chunk in iter(lambda: request.POST[\"content\"].file.read(8096), b\"\"):\n file_size += len(chunk)\n if file_size > file_size_limit:\n raise _exc_with_message(\n HTTPBadRequest,\n \"File too large. \"\n + \"Limit for project {name!r} is {limit} MB. \".format(\n name=project.name, limit=file_size_limit // (1024 * 1024)\n )\n + \"See \"\n + request.help_url(_anchor=\"file-size-limit\"),\n )\n fp.write(chunk)\n for hasher in file_hashes.values():\n hasher.update(chunk)\n\n # Take our hash functions and compute the final hashes for them now.\n file_hashes = {k: h.hexdigest().lower() for k, h in file_hashes.items()}\n\n # Actually verify the digests that we've gotten. We're going to use\n # hmac.compare_digest even though we probably don't actually need to\n # because it's better safe than sorry. In the case of multiple digests\n # we expect them all to be given.\n if not all(\n [\n hmac.compare_digest(\n getattr(form, \"{}_digest\".format(digest_name)).data.lower(),\n digest_value,\n )\n for digest_name, digest_value in file_hashes.items()\n if getattr(form, \"{}_digest\".format(digest_name)).data\n ]\n ):\n raise _exc_with_message(\n HTTPBadRequest,\n \"The digest supplied does not match a digest calculated \"\n \"from the uploaded file.\",\n )\n\n # Check to see if the file that was uploaded exists already or not.\n is_duplicate = _is_duplicate_file(request.db, filename, file_hashes)\n if is_duplicate:\n return Response()\n elif is_duplicate is not None:\n raise _exc_with_message(\n HTTPBadRequest,\n # Note: Changing this error message to something that doesn't\n # start with \"File already exists\" will break the\n # --skip-existing functionality in twine\n # ref: https://github.com/pypa/warehouse/issues/3482\n # ref: https://github.com/pypa/twine/issues/332\n \"File already exists. See \"\n + request.help_url(_anchor=\"file-name-reuse\"),\n )\n\n # Check to see if the file that was uploaded exists in our filename log\n if request.db.query(\n request.db.query(Filename).filter(Filename.filename == filename).exists()\n ).scalar():\n raise _exc_with_message(\n HTTPBadRequest,\n \"This filename has already been used, use a \"\n \"different version. \"\n \"See \" + request.help_url(_anchor=\"file-name-reuse\"),\n )\n\n # Check to see if uploading this file would create a duplicate sdist\n # for the current release.\n if (\n form.filetype.data == \"sdist\"\n and request.db.query(\n request.db.query(File)\n .filter((File.release == release) & (File.packagetype == \"sdist\"))\n .exists()\n ).scalar()\n ):\n raise _exc_with_message(\n HTTPBadRequest, \"Only one sdist may be uploaded per release.\"\n )\n\n # Check the file to make sure it is a valid distribution file.\n if not _is_valid_dist_file(temporary_filename, form.filetype.data):\n raise _exc_with_message(HTTPBadRequest, \"Invalid distribution file.\")\n\n # Check that if it's a binary wheel, it's on a supported platform\n if filename.endswith(\".whl\"):\n wheel_info = _wheel_file_re.match(filename)\n plats = wheel_info.group(\"plat\").split(\".\")\n for plat in plats:\n if not _valid_platform_tag(plat):\n raise _exc_with_message(\n HTTPBadRequest,\n \"Binary wheel '{filename}' has an unsupported \"\n \"platform tag '{plat}'.\".format(filename=filename, plat=plat),\n )\n\n # Also buffer the entire signature file to disk.\n if \"gpg_signature\" in request.POST:\n has_signature = True\n with open(os.path.join(tmpdir, filename + \".asc\"), \"wb\") as fp:\n signature_size = 0\n for chunk in iter(\n lambda: request.POST[\"gpg_signature\"].file.read(8096), b\"\"\n ):\n signature_size += len(chunk)\n if signature_size > MAX_SIGSIZE:\n raise _exc_with_message(HTTPBadRequest, \"Signature too large.\")\n fp.write(chunk)\n\n # Check whether signature is ASCII armored\n with open(os.path.join(tmpdir, filename + \".asc\"), \"rb\") as fp:\n if not fp.read().startswith(b\"-----BEGIN PGP SIGNATURE-----\"):\n raise _exc_with_message(\n HTTPBadRequest, \"PGP signature isn't ASCII armored.\"\n )\n else:\n has_signature = False\n\n # TODO: This should be handled by some sort of database trigger or a\n # SQLAlchemy hook or the like instead of doing it inline in this\n # view.\n request.db.add(Filename(filename=filename))\n\n # Store the information about the file in the database.\n file_ = File(\n release=release,\n filename=filename,\n python_version=form.pyversion.data,\n packagetype=form.filetype.data,\n comment_text=form.comment.data,\n size=file_size,\n has_signature=bool(has_signature),\n md5_digest=file_hashes[\"md5\"],\n sha256_digest=file_hashes[\"sha256\"],\n blake2_256_digest=file_hashes[\"blake2_256\"],\n # Figure out what our filepath is going to be, we're going to use a\n # directory structure based on the hash of the file contents. This\n # will ensure that the contents of the file cannot change without\n # it also changing the path that the file is saved too.\n path=\"/\".join(\n [\n file_hashes[PATH_HASHER][:2],\n file_hashes[PATH_HASHER][2:4],\n file_hashes[PATH_HASHER][4:],\n filename,\n ]\n ),\n uploaded_via=request.user_agent,\n )\n request.db.add(file_)\n\n # TODO: This should be handled by some sort of database trigger or a\n # SQLAlchemy hook or the like instead of doing it inline in this\n # view.\n request.db.add(\n JournalEntry(\n name=release.project.name,\n version=release.version,\n action=\"add {python_version} file {filename}\".format(\n python_version=file_.python_version, filename=file_.filename\n ),\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n\n # TODO: We need a better answer about how to make this transactional so\n # this won't take affect until after a commit has happened, for\n # now we'll just ignore it and save it before the transaction is\n # committed.\n storage = request.find_service(IFileStorage)\n storage.store(\n file_.path,\n os.path.join(tmpdir, filename),\n meta={\n \"project\": file_.release.project.normalized_name,\n \"version\": file_.release.version,\n \"package-type\": file_.packagetype,\n \"python-version\": file_.python_version,\n },\n )\n if has_signature:\n storage.store(\n file_.pgp_path,\n os.path.join(tmpdir, filename + \".asc\"),\n meta={\n \"project\": file_.release.project.normalized_name,\n \"version\": file_.release.version,\n \"package-type\": file_.packagetype,\n \"python-version\": file_.python_version,\n },\n )\n\n # Log a successful upload\n metrics.increment(\"warehouse.upload.ok\", tags=[f\"filetype:{form.filetype.data}\"])\n\n return Response()\n\n\ndef _legacy_purge(status, *args, **kwargs):\n if status:\n requests.post(*args, **kwargs)\n\n\n@view_config(\n route_name=\"forklift.legacy.submit\", require_csrf=False, require_methods=[\"POST\"]\n)\n@view_config(\n route_name=\"forklift.legacy.submit_pkg_info\",\n require_csrf=False,\n require_methods=[\"POST\"],\n)\ndef submit(request):\n return _exc_with_message(\n HTTPGone,\n (\n \"Project pre-registration is no longer required or supported, \"\n \"upload your files instead.\"\n ),\n )\n\n\n@view_config(\n route_name=\"forklift.legacy.doc_upload\",\n require_csrf=False,\n require_methods=[\"POST\"],\n)\ndef doc_upload(request):\n return _exc_with_message(\n HTTPGone,\n \"Uploading documentation is no longer supported, we recommend using \"\n \"https://readthedocs.org/.\",\n )\n", "path": "warehouse/forklift/legacy.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport email\nimport hashlib\nimport hmac\nimport os.path\nimport re\nimport tarfile\nimport tempfile\nimport zipfile\n\nfrom cgi import FieldStorage, parse_header\nfrom itertools import chain\n\nimport packaging.requirements\nimport packaging.specifiers\nimport packaging.utils\nimport packaging.version\nimport pkg_resources\nimport requests\nimport stdlib_list\nimport wtforms\nimport wtforms.validators\n\nfrom pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPGone\nfrom pyramid.response import Response\nfrom pyramid.view import view_config\nfrom sqlalchemy import exists, func, orm\nfrom sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound\n\nfrom warehouse import forms\nfrom warehouse.admin.squats import Squat\nfrom warehouse.classifiers.models import Classifier\nfrom warehouse.metrics import IMetricsService\nfrom warehouse.packaging.interfaces import IFileStorage\nfrom warehouse.packaging.models import (\n BlacklistedProject,\n Dependency,\n DependencyKind,\n Description,\n File,\n Filename,\n JournalEntry,\n Project,\n Release,\n Role,\n)\nfrom warehouse.utils import http, readme\n\nMAX_FILESIZE = 60 * 1024 * 1024 # 60M\nMAX_SIGSIZE = 8 * 1024 # 8K\n\nPATH_HASHER = \"blake2_256\"\n\n\ndef namespace_stdlib_list(module_list):\n for module_name in module_list:\n parts = module_name.split(\".\")\n for i, part in enumerate(parts):\n yield \".\".join(parts[: i + 1])\n\n\nSTDLIB_PROHIBITTED = {\n packaging.utils.canonicalize_name(s.rstrip(\"-_.\").lstrip(\"-_.\"))\n for s in chain.from_iterable(\n namespace_stdlib_list(stdlib_list.stdlib_list(version))\n for version in stdlib_list.short_versions\n )\n}\n\n# Wheel platform checking\n\n# Note: defining new platform ABI compatibility tags that don't\n# have a python.org binary release to anchor them is a\n# complex task that needs more than just OS+architecture info.\n# For Linux specifically, the platform ABI is defined by each\n# individual distro version, so wheels built on one version may\n# not even work on older versions of the same distro, let alone\n# a completely different distro.\n#\n# That means new entries should only be added given an\n# accompanying ABI spec that explains how to build a\n# compatible binary (see the manylinux specs as examples).\n\n# These platforms can be handled by a simple static list:\n_allowed_platforms = {\n \"any\",\n \"win32\",\n \"win_amd64\",\n \"win_ia64\",\n \"manylinux1_x86_64\",\n \"manylinux1_i686\",\n \"manylinux2010_x86_64\",\n \"manylinux2010_i686\",\n \"linux_armv6l\",\n \"linux_armv7l\",\n}\n# macosx is a little more complicated:\n_macosx_platform_re = re.compile(r\"macosx_10_(\\d+)+_(?P<arch>.*)\")\n_macosx_arches = {\n \"ppc\",\n \"ppc64\",\n \"i386\",\n \"x86_64\",\n \"intel\",\n \"fat\",\n \"fat32\",\n \"fat64\",\n \"universal\",\n}\n\n\n# Actual checking code;\ndef _valid_platform_tag(platform_tag):\n if platform_tag in _allowed_platforms:\n return True\n m = _macosx_platform_re.match(platform_tag)\n if m and m.group(\"arch\") in _macosx_arches:\n return True\n return False\n\n\n_error_message_order = [\"metadata_version\", \"name\", \"version\"]\n\n\n_dist_file_regexes = {\n # True/False is for legacy or not.\n True: re.compile(r\".+?\\.(exe|tar\\.gz|bz2|rpm|deb|zip|tgz|egg|dmg|msi|whl)$\", re.I),\n False: re.compile(r\".+?\\.(tar\\.gz|zip|whl|egg)$\", re.I),\n}\n\n\n_wheel_file_re = re.compile(\n r\"\"\"\n ^\n (?P<namever>(?P<name>.+?)(-(?P<ver>\\d.+?))?)\n (\n (-(?P<build>\\d.*?))?\n -(?P<pyver>.+?)\n -(?P<abi>.+?)\n -(?P<plat>.+?)\n (?:\\.whl|\\.dist-info)\n )\n $\n \"\"\",\n re.VERBOSE,\n)\n\n\n_project_name_re = re.compile(\n r\"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$\", re.IGNORECASE\n)\n\n\n_legacy_specifier_re = re.compile(r\"^(?P<name>\\S+)(?: \\((?P<specifier>\\S+)\\))?$\")\n\n\n_valid_description_content_types = {\"text/plain\", \"text/x-rst\", \"text/markdown\"}\n\n_valid_markdown_variants = {\"CommonMark\", \"GFM\"}\n\n\ndef _exc_with_message(exc, message):\n # The crappy old API that PyPI offered uses the status to pass down\n # messages to the client. So this function will make that easier to do.\n resp = exc(message)\n resp.status = \"{} {}\".format(resp.status_code, message)\n return resp\n\n\ndef _validate_pep440_version(form, field):\n parsed = packaging.version.parse(field.data)\n\n # Check that this version is a valid PEP 440 version at all.\n if not isinstance(parsed, packaging.version.Version):\n raise wtforms.validators.ValidationError(\n \"Start and end with a letter or numeral containing only \"\n \"ASCII numeric and '.', '_' and '-'.\"\n )\n\n # Check that this version does not have a PEP 440 local segment attached\n # to it.\n if parsed.local is not None:\n raise wtforms.validators.ValidationError(\"Can't use PEP 440 local versions.\")\n\n\ndef _parse_legacy_requirement(requirement):\n parsed = _legacy_specifier_re.search(requirement)\n if parsed is None:\n raise ValueError(\"Invalid requirement.\")\n return parsed.groupdict()[\"name\"], parsed.groupdict()[\"specifier\"]\n\n\ndef _validate_pep440_specifier(specifier):\n try:\n packaging.specifiers.SpecifierSet(specifier)\n except packaging.specifiers.InvalidSpecifier:\n raise wtforms.validators.ValidationError(\n \"Invalid specifier in requirement.\"\n ) from None\n\n\ndef _validate_pep440_specifier_field(form, field):\n return _validate_pep440_specifier(field.data)\n\n\ndef _validate_legacy_non_dist_req(requirement):\n try:\n req = packaging.requirements.Requirement(requirement.replace(\"_\", \"\"))\n except packaging.requirements.InvalidRequirement:\n raise wtforms.validators.ValidationError(\n \"Invalid requirement: {!r}\".format(requirement)\n ) from None\n\n if req.url is not None:\n raise wtforms.validators.ValidationError(\n \"Can't direct dependency: {!r}\".format(requirement)\n )\n\n if any(\n not identifier.isalnum() or identifier[0].isdigit()\n for identifier in req.name.split(\".\")\n ):\n raise wtforms.validators.ValidationError(\"Use a valid Python identifier.\")\n\n\ndef _validate_legacy_non_dist_req_list(form, field):\n for datum in field.data:\n _validate_legacy_non_dist_req(datum)\n\n\ndef _validate_legacy_dist_req(requirement):\n try:\n req = packaging.requirements.Requirement(requirement)\n except packaging.requirements.InvalidRequirement:\n raise wtforms.validators.ValidationError(\n \"Invalid requirement: {!r}.\".format(requirement)\n ) from None\n\n if req.url is not None:\n raise wtforms.validators.ValidationError(\n \"Can't have direct dependency: {!r}\".format(requirement)\n )\n\n\ndef _validate_legacy_dist_req_list(form, field):\n for datum in field.data:\n _validate_legacy_dist_req(datum)\n\n\ndef _validate_requires_external(requirement):\n name, specifier = _parse_legacy_requirement(requirement)\n\n # TODO: Is it really reasonable to parse the specifier using PEP 440?\n if specifier is not None:\n _validate_pep440_specifier(specifier)\n\n\ndef _validate_requires_external_list(form, field):\n for datum in field.data:\n _validate_requires_external(datum)\n\n\ndef _validate_project_url(value):\n try:\n label, url = value.split(\", \", 1)\n except ValueError:\n raise wtforms.validators.ValidationError(\n \"Use both a label and an URL.\"\n ) from None\n\n if not label:\n raise wtforms.validators.ValidationError(\"Use a label.\")\n\n if len(label) > 32:\n raise wtforms.validators.ValidationError(\"Use 32 characters or less.\")\n\n if not url:\n raise wtforms.validators.ValidationError(\"Use an URL.\")\n\n if not http.is_valid_uri(url, require_authority=False):\n raise wtforms.validators.ValidationError(\"Use valid URL.\")\n\n\ndef _validate_project_url_list(form, field):\n for datum in field.data:\n _validate_project_url(datum)\n\n\ndef _validate_rfc822_email_field(form, field):\n email_validator = wtforms.validators.Email(message=\"Use a valid email address\")\n addresses = email.utils.getaddresses([field.data])\n\n for real_name, address in addresses:\n email_validator(form, type(\"field\", (), {\"data\": address}))\n\n\ndef _validate_description_content_type(form, field):\n def _raise(message):\n raise wtforms.validators.ValidationError(\n f\"Invalid description content type: {message}\"\n )\n\n content_type, parameters = parse_header(field.data)\n if content_type not in _valid_description_content_types:\n _raise(\"type/subtype is not valid\")\n\n charset = parameters.get(\"charset\")\n if charset and charset != \"UTF-8\":\n _raise(\"Use a valid charset\")\n\n variant = parameters.get(\"variant\")\n if (\n content_type == \"text/markdown\"\n and variant\n and variant not in _valid_markdown_variants\n ):\n _raise(\n \"Use a valid variant, expected one of {}\".format(\n \", \".join(_valid_markdown_variants)\n )\n )\n\n\ndef _construct_dependencies(form, types):\n for name, kind in types.items():\n for item in getattr(form, name).data:\n yield Dependency(kind=kind.value, specifier=item)\n\n\nclass ListField(wtforms.Field):\n def process_formdata(self, valuelist):\n self.data = [v.strip() for v in valuelist if v.strip()]\n\n\n# TODO: Eventually this whole validation thing should move to the packaging\n# library and we should just call that. However until PEP 426 is done\n# that library won't have an API for this.\nclass MetadataForm(forms.Form):\n\n # Metadata version\n metadata_version = wtforms.StringField(\n description=\"Metadata-Version\",\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.AnyOf(\n # Note: This isn't really Metadata 2.0, however bdist_wheel\n # claims it is producing a Metadata 2.0 metadata when in\n # reality it's more like 1.2 with some extensions.\n [\"1.0\", \"1.1\", \"1.2\", \"2.0\", \"2.1\"],\n message=\"Use a known metadata version.\",\n ),\n ],\n )\n\n # Identity Project and Release\n name = wtforms.StringField(\n description=\"Name\",\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.Regexp(\n _project_name_re,\n re.IGNORECASE,\n message=(\n \"Start and end with a letter or numeral containing \"\n \"only ASCII numeric and '.', '_' and '-'.\"\n ),\n ),\n ],\n )\n version = wtforms.StringField(\n description=\"Version\",\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.Regexp(\n r\"^(?!\\s).*(?<!\\s)$\",\n message=\"Can't have leading or trailing whitespace.\",\n ),\n _validate_pep440_version,\n ],\n )\n\n # Additional Release metadata\n summary = wtforms.StringField(\n description=\"Summary\",\n validators=[\n wtforms.validators.Optional(),\n wtforms.validators.Length(max=512),\n wtforms.validators.Regexp(\n r\"^.+$\", # Rely on the fact that . doesn't match a newline.\n message=\"Use a single line only.\",\n ),\n ],\n )\n description = wtforms.StringField(\n description=\"Description\", validators=[wtforms.validators.Optional()]\n )\n author = wtforms.StringField(\n description=\"Author\", validators=[wtforms.validators.Optional()]\n )\n description_content_type = wtforms.StringField(\n description=\"Description-Content-Type\",\n validators=[wtforms.validators.Optional(), _validate_description_content_type],\n )\n author_email = wtforms.StringField(\n description=\"Author-email\",\n validators=[wtforms.validators.Optional(), _validate_rfc822_email_field],\n )\n maintainer = wtforms.StringField(\n description=\"Maintainer\", validators=[wtforms.validators.Optional()]\n )\n maintainer_email = wtforms.StringField(\n description=\"Maintainer-email\",\n validators=[wtforms.validators.Optional(), _validate_rfc822_email_field],\n )\n license = wtforms.StringField(\n description=\"License\", validators=[wtforms.validators.Optional()]\n )\n keywords = wtforms.StringField(\n description=\"Keywords\", validators=[wtforms.validators.Optional()]\n )\n classifiers = wtforms.fields.SelectMultipleField(description=\"Classifier\")\n platform = wtforms.StringField(\n description=\"Platform\", validators=[wtforms.validators.Optional()]\n )\n\n # URLs\n home_page = wtforms.StringField(\n description=\"Home-Page\",\n validators=[wtforms.validators.Optional(), forms.URIValidator()],\n )\n download_url = wtforms.StringField(\n description=\"Download-URL\",\n validators=[wtforms.validators.Optional(), forms.URIValidator()],\n )\n\n # Dependency Information\n requires_python = wtforms.StringField(\n description=\"Requires-Python\",\n validators=[wtforms.validators.Optional(), _validate_pep440_specifier_field],\n )\n\n # File information\n pyversion = wtforms.StringField(validators=[wtforms.validators.Optional()])\n filetype = wtforms.StringField(\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.AnyOf(\n [\n \"bdist_dmg\",\n \"bdist_dumb\",\n \"bdist_egg\",\n \"bdist_msi\",\n \"bdist_rpm\",\n \"bdist_wheel\",\n \"bdist_wininst\",\n \"sdist\",\n ],\n message=\"Use a known file type.\",\n ),\n ]\n )\n comment = wtforms.StringField(validators=[wtforms.validators.Optional()])\n md5_digest = wtforms.StringField(validators=[wtforms.validators.Optional()])\n sha256_digest = wtforms.StringField(\n validators=[\n wtforms.validators.Optional(),\n wtforms.validators.Regexp(\n r\"^[A-F0-9]{64}$\",\n re.IGNORECASE,\n message=\"Use a valid, hex-encoded, SHA256 message digest.\",\n ),\n ]\n )\n blake2_256_digest = wtforms.StringField(\n validators=[\n wtforms.validators.Optional(),\n wtforms.validators.Regexp(\n r\"^[A-F0-9]{64}$\",\n re.IGNORECASE,\n message=\"Use a valid, hex-encoded, BLAKE2 message digest.\",\n ),\n ]\n )\n\n # Legacy dependency information\n requires = ListField(\n validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]\n )\n provides = ListField(\n validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]\n )\n obsoletes = ListField(\n validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]\n )\n\n # Newer dependency information\n requires_dist = ListField(\n description=\"Requires-Dist\",\n validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],\n )\n provides_dist = ListField(\n description=\"Provides-Dist\",\n validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],\n )\n obsoletes_dist = ListField(\n description=\"Obsoletes-Dist\",\n validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],\n )\n requires_external = ListField(\n description=\"Requires-External\",\n validators=[wtforms.validators.Optional(), _validate_requires_external_list],\n )\n\n # Newer metadata information\n project_urls = ListField(\n description=\"Project-URL\",\n validators=[wtforms.validators.Optional(), _validate_project_url_list],\n )\n\n def full_validate(self):\n # All non source releases *must* have a pyversion\n if (\n self.filetype.data\n and self.filetype.data != \"sdist\"\n and not self.pyversion.data\n ):\n raise wtforms.validators.ValidationError(\n \"Python version is required for binary distribution uploads.\"\n )\n\n # All source releases *must* have a pyversion of \"source\"\n if self.filetype.data == \"sdist\":\n if not self.pyversion.data:\n self.pyversion.data = \"source\"\n elif self.pyversion.data != \"source\":\n raise wtforms.validators.ValidationError(\n \"Use 'source' as Python version for an sdist.\"\n )\n\n # We *must* have at least one digest to verify against.\n if not self.md5_digest.data and not self.sha256_digest.data:\n raise wtforms.validators.ValidationError(\n \"Include at least one message digest.\"\n )\n\n\n_safe_zipnames = re.compile(r\"(purelib|platlib|headers|scripts|data).+\", re.I)\n# .tar uncompressed, .tar.gz .tgz, .tar.bz2 .tbz2\n_tar_filenames_re = re.compile(r\"\\.(?:tar$|t(?:ar\\.)?(?P<z_type>gz|bz2)$)\")\n\n\ndef _is_valid_dist_file(filename, filetype):\n \"\"\"\n Perform some basic checks to see whether the indicated file could be\n a valid distribution file.\n \"\"\"\n\n # If our file is a zipfile, then ensure that it's members are only\n # compressed with supported compression methods.\n if zipfile.is_zipfile(filename):\n with zipfile.ZipFile(filename) as zfp:\n for zinfo in zfp.infolist():\n if zinfo.compress_type not in {\n zipfile.ZIP_STORED,\n zipfile.ZIP_DEFLATED,\n }:\n return False\n\n tar_fn_match = _tar_filenames_re.search(filename)\n if tar_fn_match:\n # Ensure that this is a valid tar file, and that it contains PKG-INFO.\n z_type = tar_fn_match.group(\"z_type\") or \"\"\n try:\n with tarfile.open(filename, f\"r:{z_type}\") as tar:\n # This decompresses the entire stream to validate it and the\n # tar within. Easy CPU DoS attack. :/\n bad_tar = True\n member = tar.next()\n while member:\n parts = os.path.split(member.name)\n if len(parts) == 2 and parts[1] == \"PKG-INFO\":\n bad_tar = False\n member = tar.next()\n if bad_tar:\n return False\n except tarfile.ReadError:\n return False\n elif filename.endswith(\".exe\"):\n # The only valid filetype for a .exe file is \"bdist_wininst\".\n if filetype != \"bdist_wininst\":\n return False\n\n # Ensure that the .exe is a valid zip file, and that all of the files\n # contained within it have safe filenames.\n try:\n with zipfile.ZipFile(filename, \"r\") as zfp:\n # We need the no branch below to work around a bug in\n # coverage.py where it's detecting a missed branch where there\n # isn't one.\n for zipname in zfp.namelist(): # pragma: no branch\n if not _safe_zipnames.match(zipname):\n return False\n except zipfile.BadZipFile:\n return False\n elif filename.endswith(\".msi\"):\n # The only valid filetype for a .msi is \"bdist_msi\"\n if filetype != \"bdist_msi\":\n return False\n\n # Check the first 8 bytes of the MSI file. This was taken from the\n # legacy implementation of PyPI which itself took it from the\n # implementation of `file` I believe.\n with open(filename, \"rb\") as fp:\n if fp.read(8) != b\"\\xD0\\xCF\\x11\\xE0\\xA1\\xB1\\x1A\\xE1\":\n return False\n elif filename.endswith(\".zip\") or filename.endswith(\".egg\"):\n # Ensure that the .zip/.egg is a valid zip file, and that it has a\n # PKG-INFO file.\n try:\n with zipfile.ZipFile(filename, \"r\") as zfp:\n for zipname in zfp.namelist():\n parts = os.path.split(zipname)\n if len(parts) == 2 and parts[1] == \"PKG-INFO\":\n # We need the no branch below to work around a bug in\n # coverage.py where it's detecting a missed branch\n # where there isn't one.\n break # pragma: no branch\n else:\n return False\n except zipfile.BadZipFile:\n return False\n elif filename.endswith(\".whl\"):\n # Ensure that the .whl is a valid zip file, and that it has a WHEEL\n # file.\n try:\n with zipfile.ZipFile(filename, \"r\") as zfp:\n for zipname in zfp.namelist():\n parts = os.path.split(zipname)\n if len(parts) == 2 and parts[1] == \"WHEEL\":\n # We need the no branch below to work around a bug in\n # coverage.py where it's detecting a missed branch\n # where there isn't one.\n break # pragma: no branch\n else:\n return False\n except zipfile.BadZipFile:\n return False\n\n # If we haven't yet decided it's not valid, then we'll assume it is and\n # allow it.\n return True\n\n\ndef _is_duplicate_file(db_session, filename, hashes):\n \"\"\"\n Check to see if file already exists, and if it's content matches.\n A file is considered to exist if its filename *or* blake2 digest are\n present in a file row in the database.\n\n Returns:\n - True: This file is a duplicate and all further processing should halt.\n - False: This file exists, but it is not a duplicate.\n - None: This file does not exist.\n \"\"\"\n\n file_ = (\n db_session.query(File)\n .filter(\n (File.filename == filename)\n | (File.blake2_256_digest == hashes[\"blake2_256\"])\n )\n .first()\n )\n\n if file_ is not None:\n return (\n file_.filename == filename\n and file_.sha256_digest == hashes[\"sha256\"]\n and file_.md5_digest == hashes[\"md5\"]\n and file_.blake2_256_digest == hashes[\"blake2_256\"]\n )\n\n return None\n\n\ndef _no_deprecated_classifiers(request):\n deprecated_classifiers = {\n classifier.classifier\n for classifier in (\n request.db.query(Classifier.classifier)\n .filter(Classifier.deprecated.is_(True))\n .all()\n )\n }\n\n def validate_no_deprecated_classifiers(form, field):\n invalid_classifiers = set(field.data or []) & deprecated_classifiers\n if invalid_classifiers:\n first_invalid_classifier = sorted(invalid_classifiers)[0]\n host = request.registry.settings.get(\"warehouse.domain\")\n classifiers_url = request.route_url(\"classifiers\", _host=host)\n\n raise wtforms.validators.ValidationError(\n f\"Classifier {first_invalid_classifier!r} has been \"\n f\"deprecated, see {classifiers_url} for a list of valid \"\n \"classifiers.\"\n )\n\n return validate_no_deprecated_classifiers\n\n\n@view_config(\n route_name=\"forklift.legacy.file_upload\",\n uses_session=True,\n require_csrf=False,\n require_methods=[\"POST\"],\n)\ndef file_upload(request):\n # If we're in read-only mode, let upload clients know\n if request.flags.enabled(\"read-only\"):\n raise _exc_with_message(\n HTTPForbidden, \"Read-only mode: Uploads are temporarily disabled\"\n )\n\n # Log an attempt to upload\n metrics = request.find_service(IMetricsService, context=None)\n metrics.increment(\"warehouse.upload.attempt\")\n\n # Before we do anything, if there isn't an authenticated user with this\n # request, then we'll go ahead and bomb out.\n if request.authenticated_userid is None:\n raise _exc_with_message(\n HTTPForbidden, \"Invalid or non-existent authentication information.\"\n )\n\n # Ensure that user has a verified, primary email address. This should both\n # reduce the ease of spam account creation and activity, as well as act as\n # a forcing function for https://github.com/pypa/warehouse/issues/3632.\n # TODO: Once https://github.com/pypa/warehouse/issues/3632 has been solved,\n # we might consider a different condition, possibly looking at\n # User.is_active instead.\n if not (request.user.primary_email and request.user.primary_email.verified):\n raise _exc_with_message(\n HTTPBadRequest,\n (\n \"User {!r} does not have a verified primary email address. \"\n \"Please add a verified primary email before attempting to \"\n \"upload to PyPI. See {project_help} for more information.\"\n \"for more information.\"\n ).format(\n request.user.username,\n project_help=request.help_url(_anchor=\"verified-email\"),\n ),\n ) from None\n\n # Do some cleanup of the various form fields\n for key in list(request.POST):\n value = request.POST.get(key)\n if isinstance(value, str):\n # distutils \"helpfully\" substitutes unknown, but \"required\" values\n # with the string \"UNKNOWN\". This is basically never what anyone\n # actually wants so we'll just go ahead and delete anything whose\n # value is UNKNOWN.\n if value.strip() == \"UNKNOWN\":\n del request.POST[key]\n\n # Escape NUL characters, which psycopg doesn't like\n if \"\\x00\" in value:\n request.POST[key] = value.replace(\"\\x00\", \"\\\\x00\")\n\n # We require protocol_version 1, it's the only supported version however\n # passing a different version should raise an error.\n if request.POST.get(\"protocol_version\", \"1\") != \"1\":\n raise _exc_with_message(HTTPBadRequest, \"Unknown protocol version.\")\n\n # Check if any fields were supplied as a tuple and have become a\n # FieldStorage. The 'content' and 'gpg_signature' fields _should_ be a\n # FieldStorage, however.\n # ref: https://github.com/pypa/warehouse/issues/2185\n # ref: https://github.com/pypa/warehouse/issues/2491\n for field in set(request.POST) - {\"content\", \"gpg_signature\"}:\n values = request.POST.getall(field)\n if any(isinstance(value, FieldStorage) for value in values):\n raise _exc_with_message(HTTPBadRequest, f\"{field}: Should not be a tuple.\")\n\n # Look up all of the valid classifiers\n all_classifiers = request.db.query(Classifier).all()\n\n # Validate and process the incoming metadata.\n form = MetadataForm(request.POST)\n\n # Add a validator for deprecated classifiers\n form.classifiers.validators.append(_no_deprecated_classifiers(request))\n\n form.classifiers.choices = [(c.classifier, c.classifier) for c in all_classifiers]\n if not form.validate():\n for field_name in _error_message_order:\n if field_name in form.errors:\n break\n else:\n field_name = sorted(form.errors.keys())[0]\n\n if field_name in form:\n field = form[field_name]\n if field.description and isinstance(field, wtforms.StringField):\n error_message = (\n \"{value!r} is an invalid value for {field}. \".format(\n value=field.data, field=field.description\n )\n + \"Error: {} \".format(form.errors[field_name][0])\n + \"See \"\n \"https://packaging.python.org/specifications/core-metadata\"\n )\n else:\n error_message = \"Invalid value for {field}. Error: {msgs[0]}\".format(\n field=field_name, msgs=form.errors[field_name]\n )\n else:\n error_message = \"Error: {}\".format(form.errors[field_name][0])\n\n raise _exc_with_message(HTTPBadRequest, error_message)\n\n # Ensure that we have file data in the request.\n if \"content\" not in request.POST:\n raise _exc_with_message(HTTPBadRequest, \"Upload payload does not have a file.\")\n\n # Look up the project first before doing anything else, this is so we can\n # automatically register it if we need to and can check permissions before\n # going any further.\n try:\n project = (\n request.db.query(Project)\n .filter(\n Project.normalized_name == func.normalize_pep426_name(form.name.data)\n )\n .one()\n )\n except NoResultFound:\n # Check for AdminFlag set by a PyPI Administrator disabling new project\n # registration, reasons for this include Spammers, security\n # vulnerabilities, or just wanting to be lazy and not worry ;)\n if request.flags.enabled(\"disallow-new-project-registration\"):\n raise _exc_with_message(\n HTTPForbidden,\n (\n \"New project registration temporarily disabled. \"\n \"See {projecthelp} for details\"\n ).format(projecthelp=request.help_url(_anchor=\"admin-intervention\")),\n ) from None\n\n # Before we create the project, we're going to check our blacklist to\n # see if this project is even allowed to be registered. If it is not,\n # then we're going to deny the request to create this project.\n if request.db.query(\n exists().where(\n BlacklistedProject.name == func.normalize_pep426_name(form.name.data)\n )\n ).scalar():\n raise _exc_with_message(\n HTTPBadRequest,\n (\n \"The name {name!r} isn't allowed. \"\n \"See {projecthelp} \"\n \"for more information.\"\n ).format(\n name=form.name.data,\n projecthelp=request.help_url(_anchor=\"project-name\"),\n ),\n ) from None\n\n # Also check for collisions with Python Standard Library modules.\n if packaging.utils.canonicalize_name(form.name.data) in STDLIB_PROHIBITTED:\n raise _exc_with_message(\n HTTPBadRequest,\n (\n \"The name {name!r} isn't allowed (conflict with Python \"\n \"Standard Library module name). See \"\n \"{projecthelp} for more information.\"\n ).format(\n name=form.name.data,\n projecthelp=request.help_url(_anchor=\"project-name\"),\n ),\n ) from None\n\n # The project doesn't exist in our database, so first we'll check for\n # projects with a similar name\n squattees = (\n request.db.query(Project)\n .filter(\n func.levenshtein(\n Project.normalized_name, func.normalize_pep426_name(form.name.data)\n )\n <= 2\n )\n .all()\n )\n\n # Next we'll create the project\n project = Project(name=form.name.data)\n request.db.add(project)\n\n # Now that the project exists, add any squats which it is the squatter for\n for squattee in squattees:\n request.db.add(Squat(squatter=project, squattee=squattee))\n\n # Then we'll add a role setting the current user as the \"Owner\" of the\n # project.\n request.db.add(Role(user=request.user, project=project, role_name=\"Owner\"))\n # TODO: This should be handled by some sort of database trigger or a\n # SQLAlchemy hook or the like instead of doing it inline in this\n # view.\n request.db.add(\n JournalEntry(\n name=project.name,\n action=\"create\",\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n request.db.add(\n JournalEntry(\n name=project.name,\n action=\"add Owner {}\".format(request.user.username),\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n\n # Check that the user has permission to do things to this project, if this\n # is a new project this will act as a sanity check for the role we just\n # added above.\n if not request.has_permission(\"upload\", project):\n raise _exc_with_message(\n HTTPForbidden,\n (\n \"The credential associated with user '{0}' \"\n \"isn't allowed to upload to project '{1}'. \"\n \"See {2} for more information.\"\n ).format(\n request.user.username,\n project.name,\n request.help_url(_anchor=\"project-name\"),\n ),\n )\n\n # Update name if it differs but is still equivalent. We don't need to check if\n # they are equivalent when normalized because that's already been done when we\n # queried for the project.\n if project.name != form.name.data:\n project.name = form.name.data\n\n # Render our description so we can save from having to render this data every time\n # we load a project description page.\n rendered = None\n if form.description.data:\n description_content_type = form.description_content_type.data\n if not description_content_type:\n description_content_type = \"text/x-rst\"\n\n rendered = readme.render(\n form.description.data, description_content_type, use_fallback=False\n )\n\n # Uploading should prevent broken rendered descriptions.\n if rendered is None:\n if form.description_content_type.data:\n message = (\n \"The description failed to render \"\n \"for '{description_content_type}'.\"\n ).format(description_content_type=description_content_type)\n else:\n message = (\n \"The description failed to render \"\n \"in the default format of reStructuredText.\"\n )\n raise _exc_with_message(\n HTTPBadRequest,\n \"{message} See {projecthelp} for more information.\".format(\n message=message,\n projecthelp=request.help_url(_anchor=\"description-content-type\"),\n ),\n ) from None\n\n try:\n canonical_version = packaging.utils.canonicalize_version(form.version.data)\n release = (\n request.db.query(Release)\n .filter(\n (Release.project == project)\n & (Release.canonical_version == canonical_version)\n )\n .one()\n )\n except MultipleResultsFound:\n # There are multiple releases of this project which have the same\n # canonical version that were uploaded before we checked for\n # canonical version equivalence, so return the exact match instead\n release = (\n request.db.query(Release)\n .filter(\n (Release.project == project) & (Release.version == form.version.data)\n )\n .one()\n )\n except NoResultFound:\n release = Release(\n project=project,\n _classifiers=[\n c for c in all_classifiers if c.classifier in form.classifiers.data\n ],\n dependencies=list(\n _construct_dependencies(\n form,\n {\n \"requires\": DependencyKind.requires,\n \"provides\": DependencyKind.provides,\n \"obsoletes\": DependencyKind.obsoletes,\n \"requires_dist\": DependencyKind.requires_dist,\n \"provides_dist\": DependencyKind.provides_dist,\n \"obsoletes_dist\": DependencyKind.obsoletes_dist,\n \"requires_external\": DependencyKind.requires_external,\n \"project_urls\": DependencyKind.project_url,\n },\n )\n ),\n canonical_version=canonical_version,\n description=Description(\n content_type=form.description_content_type.data,\n raw=form.description.data or \"\",\n html=rendered or \"\",\n rendered_by=readme.renderer_version(),\n ),\n **{\n k: getattr(form, k).data\n for k in {\n # This is a list of all the fields in the form that we\n # should pull off and insert into our new release.\n \"version\",\n \"summary\",\n \"license\",\n \"author\",\n \"author_email\",\n \"maintainer\",\n \"maintainer_email\",\n \"keywords\",\n \"platform\",\n \"home_page\",\n \"download_url\",\n \"requires_python\",\n }\n },\n uploader=request.user,\n uploaded_via=request.user_agent,\n )\n request.db.add(release)\n # TODO: This should be handled by some sort of database trigger or\n # a SQLAlchemy hook or the like instead of doing it inline in\n # this view.\n request.db.add(\n JournalEntry(\n name=release.project.name,\n version=release.version,\n action=\"new release\",\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n\n # TODO: We need a better solution to this than to just do it inline inside\n # this method. Ideally the version field would just be sortable, but\n # at least this should be some sort of hook or trigger.\n releases = (\n request.db.query(Release)\n .filter(Release.project == project)\n .options(orm.load_only(Release._pypi_ordering))\n .all()\n )\n for i, r in enumerate(\n sorted(releases, key=lambda x: packaging.version.parse(x.version))\n ):\n r._pypi_ordering = i\n\n # Pull the filename out of our POST data.\n filename = request.POST[\"content\"].filename\n\n # Make sure that the filename does not contain any path separators.\n if \"/\" in filename or \"\\\\\" in filename:\n raise _exc_with_message(\n HTTPBadRequest, \"Cannot upload a file with '/' or '\\\\' in the name.\"\n )\n\n # Make sure the filename ends with an allowed extension.\n if _dist_file_regexes[project.allow_legacy_files].search(filename) is None:\n raise _exc_with_message(\n HTTPBadRequest,\n \"Invalid file extension: Use .egg, .tar.gz, .whl or .zip \"\n \"extension. (https://www.python.org/dev/peps/pep-0527)\",\n )\n\n # Make sure that our filename matches the project that it is being uploaded\n # to.\n prefix = pkg_resources.safe_name(project.name).lower()\n if not pkg_resources.safe_name(filename).lower().startswith(prefix):\n raise _exc_with_message(\n HTTPBadRequest,\n \"Start filename for {!r} with {!r}.\".format(project.name, prefix),\n )\n\n # Check the content type of what is being uploaded\n if not request.POST[\"content\"].type or request.POST[\"content\"].type.startswith(\n \"image/\"\n ):\n raise _exc_with_message(HTTPBadRequest, \"Invalid distribution file.\")\n\n # Ensure that the package filetype is allowed.\n # TODO: Once PEP 527 is completely implemented we should be able to delete\n # this and just move it into the form itself.\n if not project.allow_legacy_files and form.filetype.data not in {\n \"sdist\",\n \"bdist_wheel\",\n \"bdist_egg\",\n }:\n raise _exc_with_message(HTTPBadRequest, \"Unknown type of file.\")\n\n # The project may or may not have a file size specified on the project, if\n # it does then it may or may not be smaller or larger than our global file\n # size limits.\n file_size_limit = max(filter(None, [MAX_FILESIZE, project.upload_limit]))\n\n with tempfile.TemporaryDirectory() as tmpdir:\n temporary_filename = os.path.join(tmpdir, filename)\n\n # Buffer the entire file onto disk, checking the hash of the file as we\n # go along.\n with open(temporary_filename, \"wb\") as fp:\n file_size = 0\n file_hashes = {\n \"md5\": hashlib.md5(),\n \"sha256\": hashlib.sha256(),\n \"blake2_256\": hashlib.blake2b(digest_size=256 // 8),\n }\n for chunk in iter(lambda: request.POST[\"content\"].file.read(8096), b\"\"):\n file_size += len(chunk)\n if file_size > file_size_limit:\n raise _exc_with_message(\n HTTPBadRequest,\n \"File too large. \"\n + \"Limit for project {name!r} is {limit} MB. \".format(\n name=project.name, limit=file_size_limit // (1024 * 1024)\n )\n + \"See \"\n + request.help_url(_anchor=\"file-size-limit\"),\n )\n fp.write(chunk)\n for hasher in file_hashes.values():\n hasher.update(chunk)\n\n # Take our hash functions and compute the final hashes for them now.\n file_hashes = {k: h.hexdigest().lower() for k, h in file_hashes.items()}\n\n # Actually verify the digests that we've gotten. We're going to use\n # hmac.compare_digest even though we probably don't actually need to\n # because it's better safe than sorry. In the case of multiple digests\n # we expect them all to be given.\n if not all(\n [\n hmac.compare_digest(\n getattr(form, \"{}_digest\".format(digest_name)).data.lower(),\n digest_value,\n )\n for digest_name, digest_value in file_hashes.items()\n if getattr(form, \"{}_digest\".format(digest_name)).data\n ]\n ):\n raise _exc_with_message(\n HTTPBadRequest,\n \"The digest supplied does not match a digest calculated \"\n \"from the uploaded file.\",\n )\n\n # Check to see if the file that was uploaded exists already or not.\n is_duplicate = _is_duplicate_file(request.db, filename, file_hashes)\n if is_duplicate:\n return Response()\n elif is_duplicate is not None:\n raise _exc_with_message(\n HTTPBadRequest,\n # Note: Changing this error message to something that doesn't\n # start with \"File already exists\" will break the\n # --skip-existing functionality in twine\n # ref: https://github.com/pypa/warehouse/issues/3482\n # ref: https://github.com/pypa/twine/issues/332\n \"File already exists. See \"\n + request.help_url(_anchor=\"file-name-reuse\"),\n )\n\n # Check to see if the file that was uploaded exists in our filename log\n if request.db.query(\n request.db.query(Filename).filter(Filename.filename == filename).exists()\n ).scalar():\n raise _exc_with_message(\n HTTPBadRequest,\n \"This filename has already been used, use a \"\n \"different version. \"\n \"See \" + request.help_url(_anchor=\"file-name-reuse\"),\n )\n\n # Check to see if uploading this file would create a duplicate sdist\n # for the current release.\n if (\n form.filetype.data == \"sdist\"\n and request.db.query(\n request.db.query(File)\n .filter((File.release == release) & (File.packagetype == \"sdist\"))\n .exists()\n ).scalar()\n ):\n raise _exc_with_message(\n HTTPBadRequest, \"Only one sdist may be uploaded per release.\"\n )\n\n # Check the file to make sure it is a valid distribution file.\n if not _is_valid_dist_file(temporary_filename, form.filetype.data):\n raise _exc_with_message(HTTPBadRequest, \"Invalid distribution file.\")\n\n # Check that if it's a binary wheel, it's on a supported platform\n if filename.endswith(\".whl\"):\n wheel_info = _wheel_file_re.match(filename)\n plats = wheel_info.group(\"plat\").split(\".\")\n for plat in plats:\n if not _valid_platform_tag(plat):\n raise _exc_with_message(\n HTTPBadRequest,\n \"Binary wheel '{filename}' has an unsupported \"\n \"platform tag '{plat}'.\".format(filename=filename, plat=plat),\n )\n\n # Also buffer the entire signature file to disk.\n if \"gpg_signature\" in request.POST:\n has_signature = True\n with open(os.path.join(tmpdir, filename + \".asc\"), \"wb\") as fp:\n signature_size = 0\n for chunk in iter(\n lambda: request.POST[\"gpg_signature\"].file.read(8096), b\"\"\n ):\n signature_size += len(chunk)\n if signature_size > MAX_SIGSIZE:\n raise _exc_with_message(HTTPBadRequest, \"Signature too large.\")\n fp.write(chunk)\n\n # Check whether signature is ASCII armored\n with open(os.path.join(tmpdir, filename + \".asc\"), \"rb\") as fp:\n if not fp.read().startswith(b\"-----BEGIN PGP SIGNATURE-----\"):\n raise _exc_with_message(\n HTTPBadRequest, \"PGP signature isn't ASCII armored.\"\n )\n else:\n has_signature = False\n\n # TODO: This should be handled by some sort of database trigger or a\n # SQLAlchemy hook or the like instead of doing it inline in this\n # view.\n request.db.add(Filename(filename=filename))\n\n # Store the information about the file in the database.\n file_ = File(\n release=release,\n filename=filename,\n python_version=form.pyversion.data,\n packagetype=form.filetype.data,\n comment_text=form.comment.data,\n size=file_size,\n has_signature=bool(has_signature),\n md5_digest=file_hashes[\"md5\"],\n sha256_digest=file_hashes[\"sha256\"],\n blake2_256_digest=file_hashes[\"blake2_256\"],\n # Figure out what our filepath is going to be, we're going to use a\n # directory structure based on the hash of the file contents. This\n # will ensure that the contents of the file cannot change without\n # it also changing the path that the file is saved too.\n path=\"/\".join(\n [\n file_hashes[PATH_HASHER][:2],\n file_hashes[PATH_HASHER][2:4],\n file_hashes[PATH_HASHER][4:],\n filename,\n ]\n ),\n uploaded_via=request.user_agent,\n )\n request.db.add(file_)\n\n # TODO: This should be handled by some sort of database trigger or a\n # SQLAlchemy hook or the like instead of doing it inline in this\n # view.\n request.db.add(\n JournalEntry(\n name=release.project.name,\n version=release.version,\n action=\"add {python_version} file {filename}\".format(\n python_version=file_.python_version, filename=file_.filename\n ),\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n\n # TODO: We need a better answer about how to make this transactional so\n # this won't take affect until after a commit has happened, for\n # now we'll just ignore it and save it before the transaction is\n # committed.\n storage = request.find_service(IFileStorage)\n storage.store(\n file_.path,\n os.path.join(tmpdir, filename),\n meta={\n \"project\": file_.release.project.normalized_name,\n \"version\": file_.release.version,\n \"package-type\": file_.packagetype,\n \"python-version\": file_.python_version,\n },\n )\n if has_signature:\n storage.store(\n file_.pgp_path,\n os.path.join(tmpdir, filename + \".asc\"),\n meta={\n \"project\": file_.release.project.normalized_name,\n \"version\": file_.release.version,\n \"package-type\": file_.packagetype,\n \"python-version\": file_.python_version,\n },\n )\n\n # Log a successful upload\n metrics.increment(\"warehouse.upload.ok\", tags=[f\"filetype:{form.filetype.data}\"])\n\n return Response()\n\n\ndef _legacy_purge(status, *args, **kwargs):\n if status:\n requests.post(*args, **kwargs)\n\n\n@view_config(\n route_name=\"forklift.legacy.submit\", require_csrf=False, require_methods=[\"POST\"]\n)\n@view_config(\n route_name=\"forklift.legacy.submit_pkg_info\",\n require_csrf=False,\n require_methods=[\"POST\"],\n)\ndef submit(request):\n return _exc_with_message(\n HTTPGone,\n (\n \"Project pre-registration is no longer required or supported, \"\n \"upload your files instead.\"\n ),\n )\n\n\n@view_config(\n route_name=\"forklift.legacy.doc_upload\",\n require_csrf=False,\n require_methods=[\"POST\"],\n)\ndef doc_upload(request):\n return _exc_with_message(\n HTTPGone,\n \"Uploading documentation is no longer supported, we recommend using \"\n \"https://readthedocs.org/.\",\n )\n", "path": "warehouse/forklift/legacy.py"}]} |
gh_patches_debug_1572 | rasdani/github-patches | git_diff | pypa__pip-10029 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Vendoring of `tenacity` is leaky
### Description
Tenacity contains a [conditional import of tornado](https://github.com/pypa/pip/blob/88eb4f092e58f3aee1d389ad4f9047df46e44bb4/src/pip/_vendor/tenacity/__init__.py#L25-L28). This makes the behavior of `pip` sensitive to third-party packages outside of its vendor directory. Specifically, if a version of tornado that does not include the `tornado.gen.sleep` function is installed, `pip` will fail to start. (This is unlikely since this function has been around a long time and we have no plans of deprecating it or removing it. But we do have a report of this happening in https://github.com/tornadoweb/tornado/issues/3034)
### Expected behavior
Pip should not be affected by the presence or absence of any other third-party packages. Any conditional imports in its vendored dependencies should be modified to unconditionally fail (e.g. replace the above-linked block with `tornado = None`).
### pip version
21.1.2
### Python version
3.8
### OS
linux
### How to Reproduce
1. `pip install tornado==4.0.0`
2. `pip --version`
### Output
```sh-session
~/ImpressionableVoluminousCategories$ pip --version
pip 21.1.2 from /opt/virtualenvs/python3/lib/python3.8/site-packages/pip (python 3.8)
~/ImpressionableVoluminousCategories$ pip install tornado==4.0.0
Collecting tornado==4.0.0
Downloading tornado-4.0.tar.gz (313 kB)
|████████████████████████████████| 313 kB 4.5 MB/s
Requirement already satisfied: certifi in /opt/virtualenvs/python3/lib/python3.8/site-packages (from tornado==4.0.0) (2020.12.5)
Building wheels for collected packages: tornado
Building wheel for tornado (setup.py) ... done
Created wheel for tornado: filename=tornado-4.0-cp38-cp38-linux_x86_64.whl size=344556 sha256=d9c5e6911e5bdac5b90db4b33d01891562365e235396bd336380dd45cb61a9b7
Stored in directory: /home/runner/.cache/pip/wheels/9a/d7/93/a846246f95067512a78899329bdb84a695d693e67c28a4e71f
Successfully built tornado
Installing collected packages: tornado
Successfully installed tornado-4.0
~/ImpressionableVoluminousCategories$ pip --version
Traceback (most recent call last):
File "/opt/virtualenvs/python3/bin/pip", line 5, in <module>
from pip._internal.cli.main import main
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_internal/cli/main.py", line 9, in <module>
from pip._internal.cli.autocompletion import autocomplete
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_internal/cli/autocompletion.py", line 10, in <module>
from pip._internal.cli.main_parser import create_main_parser
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_internal/cli/main_parser.py", line 8, in <module>
from pip._internal.cli import cmdoptions
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_internal/cli/cmdoptions.py", line 23, in <module>
from pip._internal.cli.parser import ConfigOptionParser
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_internal/cli/parser.py", line 12, in <module>
from pip._internal.configuration import Configuration, ConfigurationError
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_internal/configuration.py", line 27, in <module>
from pip._internal.utils.misc import ensure_dir, enum
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_internal/utils/misc.py", line 38, in <module>
from pip._vendor.tenacity import retry, stop_after_delay, wait_fixed
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_vendor/tenacity/__init__.py", line 523, in <module>
from pip._vendor.tenacity.tornadoweb import TornadoRetrying
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_vendor/tenacity/tornadoweb.py", line 26, in <module>
class TornadoRetrying(BaseRetrying):
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_vendor/tenacity/tornadoweb.py", line 27, in TornadoRetrying
def __init__(self, sleep=gen.sleep, **kwargs):
AttributeError: module 'tornado.gen' has no attribute 'sleep'
~/ImpressionableVoluminousCategories$
```
```
### Code of Conduct
- [X] I agree to follow the [PSF Code of Conduct](https://www.python.org/psf/conduct/).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_vendor/tenacity/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2016-2018 Julien Danjou
3 # Copyright 2017 Elisey Zanko
4 # Copyright 2016 Étienne Bersac
5 # Copyright 2016 Joshua Harlow
6 # Copyright 2013-2014 Ray Holder
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License");
9 # you may not use this file except in compliance with the License.
10 # You may obtain a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS,
16 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 # See the License for the specific language governing permissions and
18 # limitations under the License.
19
20 try:
21 from inspect import iscoroutinefunction
22 except ImportError:
23 iscoroutinefunction = None
24
25 try:
26 import tornado
27 except ImportError:
28 tornado = None
29
30 import sys
31 import threading
32 import typing as t
33 import warnings
34 from abc import ABCMeta, abstractmethod
35 from concurrent import futures
36
37
38 from pip._vendor import six
39
40 from pip._vendor.tenacity import _utils
41
42 # Import all built-in retry strategies for easier usage.
43 from .retry import retry_base # noqa
44 from .retry import retry_all # noqa
45 from .retry import retry_always # noqa
46 from .retry import retry_any # noqa
47 from .retry import retry_if_exception # noqa
48 from .retry import retry_if_exception_type # noqa
49 from .retry import retry_if_not_result # noqa
50 from .retry import retry_if_result # noqa
51 from .retry import retry_never # noqa
52 from .retry import retry_unless_exception_type # noqa
53 from .retry import retry_if_exception_message # noqa
54 from .retry import retry_if_not_exception_message # noqa
55
56 # Import all nap strategies for easier usage.
57 from .nap import sleep # noqa
58 from .nap import sleep_using_event # noqa
59
60 # Import all built-in stop strategies for easier usage.
61 from .stop import stop_after_attempt # noqa
62 from .stop import stop_after_delay # noqa
63 from .stop import stop_all # noqa
64 from .stop import stop_any # noqa
65 from .stop import stop_never # noqa
66 from .stop import stop_when_event_set # noqa
67
68 # Import all built-in wait strategies for easier usage.
69 from .wait import wait_chain # noqa
70 from .wait import wait_combine # noqa
71 from .wait import wait_exponential # noqa
72 from .wait import wait_fixed # noqa
73 from .wait import wait_incrementing # noqa
74 from .wait import wait_none # noqa
75 from .wait import wait_random # noqa
76 from .wait import wait_random_exponential # noqa
77 from .wait import wait_random_exponential as wait_full_jitter # noqa
78
79 # Import all built-in before strategies for easier usage.
80 from .before import before_log # noqa
81 from .before import before_nothing # noqa
82
83 # Import all built-in after strategies for easier usage.
84 from .after import after_log # noqa
85 from .after import after_nothing # noqa
86
87 # Import all built-in after strategies for easier usage.
88 from .before_sleep import before_sleep_log # noqa
89 from .before_sleep import before_sleep_nothing # noqa
90
91
92 WrappedFn = t.TypeVar("WrappedFn", bound=t.Callable)
93
94
95 @t.overload
96 def retry(fn):
97 # type: (WrappedFn) -> WrappedFn
98 """Type signature for @retry as a raw decorator."""
99 pass
100
101
102 @t.overload
103 def retry(*dargs, **dkw): # noqa
104 # type: (...) -> t.Callable[[WrappedFn], WrappedFn]
105 """Type signature for the @retry() decorator constructor."""
106 pass
107
108
109 def retry(*dargs, **dkw): # noqa
110 """Wrap a function with a new `Retrying` object.
111
112 :param dargs: positional arguments passed to Retrying object
113 :param dkw: keyword arguments passed to the Retrying object
114 """
115 # support both @retry and @retry() as valid syntax
116 if len(dargs) == 1 and callable(dargs[0]):
117 return retry()(dargs[0])
118 else:
119
120 def wrap(f):
121 if isinstance(f, retry_base):
122 warnings.warn(
123 (
124 "Got retry_base instance ({cls}) as callable argument, "
125 + "this will probably hang indefinitely (did you mean "
126 + "retry={cls}(...)?)"
127 ).format(cls=f.__class__.__name__)
128 )
129 if iscoroutinefunction is not None and iscoroutinefunction(f):
130 r = AsyncRetrying(*dargs, **dkw)
131 elif (
132 tornado
133 and hasattr(tornado.gen, "is_coroutine_function")
134 and tornado.gen.is_coroutine_function(f)
135 ):
136 r = TornadoRetrying(*dargs, **dkw)
137 else:
138 r = Retrying(*dargs, **dkw)
139
140 return r.wraps(f)
141
142 return wrap
143
144
145 class TryAgain(Exception):
146 """Always retry the executed function when raised."""
147
148
149 NO_RESULT = object()
150
151
152 class DoAttempt(object):
153 pass
154
155
156 class DoSleep(float):
157 pass
158
159
160 class BaseAction(object):
161 """Base class for representing actions to take by retry object.
162
163 Concrete implementations must define:
164 - __init__: to initialize all necessary fields
165 - REPR_ATTRS: class variable specifying attributes to include in repr(self)
166 - NAME: for identification in retry object methods and callbacks
167 """
168
169 REPR_FIELDS = ()
170 NAME = None
171
172 def __repr__(self):
173 state_str = ", ".join(
174 "%s=%r" % (field, getattr(self, field)) for field in self.REPR_FIELDS
175 )
176 return "%s(%s)" % (type(self).__name__, state_str)
177
178 def __str__(self):
179 return repr(self)
180
181
182 class RetryAction(BaseAction):
183 REPR_FIELDS = ("sleep",)
184 NAME = "retry"
185
186 def __init__(self, sleep):
187 self.sleep = float(sleep)
188
189
190 _unset = object()
191
192
193 def _first_set(first, second):
194 return second if first is _unset else first
195
196
197 class RetryError(Exception):
198 """Encapsulates the last attempt instance right before giving up."""
199
200 def __init__(self, last_attempt):
201 self.last_attempt = last_attempt
202 super(RetryError, self).__init__(last_attempt)
203
204 def reraise(self):
205 if self.last_attempt.failed:
206 raise self.last_attempt.result()
207 raise self
208
209 def __str__(self):
210 return "{0}[{1}]".format(self.__class__.__name__, self.last_attempt)
211
212
213 class AttemptManager(object):
214 """Manage attempt context."""
215
216 def __init__(self, retry_state):
217 self.retry_state = retry_state
218
219 def __enter__(self):
220 pass
221
222 def __exit__(self, exc_type, exc_value, traceback):
223 if isinstance(exc_value, BaseException):
224 self.retry_state.set_exception((exc_type, exc_value, traceback))
225 return True # Swallow exception.
226 else:
227 # We don't have the result, actually.
228 self.retry_state.set_result(None)
229
230
231 class BaseRetrying(object):
232 __metaclass__ = ABCMeta
233
234 def __init__(
235 self,
236 sleep=sleep,
237 stop=stop_never,
238 wait=wait_none(),
239 retry=retry_if_exception_type(),
240 before=before_nothing,
241 after=after_nothing,
242 before_sleep=None,
243 reraise=False,
244 retry_error_cls=RetryError,
245 retry_error_callback=None,
246 ):
247 self.sleep = sleep
248 self.stop = stop
249 self.wait = wait
250 self.retry = retry
251 self.before = before
252 self.after = after
253 self.before_sleep = before_sleep
254 self.reraise = reraise
255 self._local = threading.local()
256 self.retry_error_cls = retry_error_cls
257 self.retry_error_callback = retry_error_callback
258
259 # This attribute was moved to RetryCallState and is deprecated on
260 # Retrying objects but kept for backward compatibility.
261 self.fn = None
262
263 def copy(
264 self,
265 sleep=_unset,
266 stop=_unset,
267 wait=_unset,
268 retry=_unset,
269 before=_unset,
270 after=_unset,
271 before_sleep=_unset,
272 reraise=_unset,
273 retry_error_cls=_unset,
274 retry_error_callback=_unset,
275 ):
276 """Copy this object with some parameters changed if needed."""
277 return self.__class__(
278 sleep=_first_set(sleep, self.sleep),
279 stop=_first_set(stop, self.stop),
280 wait=_first_set(wait, self.wait),
281 retry=_first_set(retry, self.retry),
282 before=_first_set(before, self.before),
283 after=_first_set(after, self.after),
284 before_sleep=_first_set(before_sleep, self.before_sleep),
285 reraise=_first_set(reraise, self.reraise),
286 retry_error_cls=_first_set(retry_error_cls, self.retry_error_cls),
287 retry_error_callback=_first_set(
288 retry_error_callback, self.retry_error_callback
289 ),
290 )
291
292 def __repr__(self):
293 attrs = dict(
294 _utils.visible_attrs(self, attrs={"me": id(self)}),
295 __class__=self.__class__.__name__,
296 )
297 return (
298 "<%(__class__)s object at 0x%(me)x (stop=%(stop)s, "
299 "wait=%(wait)s, sleep=%(sleep)s, retry=%(retry)s, "
300 "before=%(before)s, after=%(after)s)>"
301 ) % (attrs)
302
303 @property
304 def statistics(self):
305 """Return a dictionary of runtime statistics.
306
307 This dictionary will be empty when the controller has never been
308 ran. When it is running or has ran previously it should have (but
309 may not) have useful and/or informational keys and values when
310 running is underway and/or completed.
311
312 .. warning:: The keys in this dictionary **should** be some what
313 stable (not changing), but there existence **may**
314 change between major releases as new statistics are
315 gathered or removed so before accessing keys ensure that
316 they actually exist and handle when they do not.
317
318 .. note:: The values in this dictionary are local to the thread
319 running call (so if multiple threads share the same retrying
320 object - either directly or indirectly) they will each have
321 there own view of statistics they have collected (in the
322 future we may provide a way to aggregate the various
323 statistics from each thread).
324 """
325 try:
326 return self._local.statistics
327 except AttributeError:
328 self._local.statistics = {}
329 return self._local.statistics
330
331 def wraps(self, f):
332 """Wrap a function for retrying.
333
334 :param f: A function to wraps for retrying.
335 """
336
337 @_utils.wraps(f)
338 def wrapped_f(*args, **kw):
339 return self(f, *args, **kw)
340
341 def retry_with(*args, **kwargs):
342 return self.copy(*args, **kwargs).wraps(f)
343
344 wrapped_f.retry = self
345 wrapped_f.retry_with = retry_with
346
347 return wrapped_f
348
349 def begin(self, fn):
350 self.statistics.clear()
351 self.statistics["start_time"] = _utils.now()
352 self.statistics["attempt_number"] = 1
353 self.statistics["idle_for"] = 0
354 self.fn = fn
355
356 def iter(self, retry_state): # noqa
357 fut = retry_state.outcome
358 if fut is None:
359 if self.before is not None:
360 self.before(retry_state)
361 return DoAttempt()
362
363 is_explicit_retry = retry_state.outcome.failed and isinstance(
364 retry_state.outcome.exception(), TryAgain
365 )
366 if not (is_explicit_retry or self.retry(retry_state=retry_state)):
367 return fut.result()
368
369 if self.after is not None:
370 self.after(retry_state=retry_state)
371
372 self.statistics["delay_since_first_attempt"] = retry_state.seconds_since_start
373 if self.stop(retry_state=retry_state):
374 if self.retry_error_callback:
375 return self.retry_error_callback(retry_state=retry_state)
376 retry_exc = self.retry_error_cls(fut)
377 if self.reraise:
378 raise retry_exc.reraise()
379 six.raise_from(retry_exc, fut.exception())
380
381 if self.wait:
382 sleep = self.wait(retry_state=retry_state)
383 else:
384 sleep = 0.0
385 retry_state.next_action = RetryAction(sleep)
386 retry_state.idle_for += sleep
387 self.statistics["idle_for"] += sleep
388 self.statistics["attempt_number"] += 1
389
390 if self.before_sleep is not None:
391 self.before_sleep(retry_state=retry_state)
392
393 return DoSleep(sleep)
394
395 def __iter__(self):
396 self.begin(None)
397
398 retry_state = RetryCallState(self, fn=None, args=(), kwargs={})
399 while True:
400 do = self.iter(retry_state=retry_state)
401 if isinstance(do, DoAttempt):
402 yield AttemptManager(retry_state=retry_state)
403 elif isinstance(do, DoSleep):
404 retry_state.prepare_for_next_attempt()
405 self.sleep(do)
406 else:
407 break
408
409 @abstractmethod
410 def __call__(self, *args, **kwargs):
411 pass
412
413 def call(self, *args, **kwargs):
414 """Use ``__call__`` instead because this method is deprecated."""
415 warnings.warn(
416 "'call()' method is deprecated. " + "Use '__call__()' instead",
417 DeprecationWarning,
418 )
419 return self.__call__(*args, **kwargs)
420
421
422 class Retrying(BaseRetrying):
423 """Retrying controller."""
424
425 def __call__(self, fn, *args, **kwargs):
426 self.begin(fn)
427
428 retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
429 while True:
430 do = self.iter(retry_state=retry_state)
431 if isinstance(do, DoAttempt):
432 try:
433 result = fn(*args, **kwargs)
434 except BaseException: # noqa: B902
435 retry_state.set_exception(sys.exc_info())
436 else:
437 retry_state.set_result(result)
438 elif isinstance(do, DoSleep):
439 retry_state.prepare_for_next_attempt()
440 self.sleep(do)
441 else:
442 return do
443
444
445 class Future(futures.Future):
446 """Encapsulates a (future or past) attempted call to a target function."""
447
448 def __init__(self, attempt_number):
449 super(Future, self).__init__()
450 self.attempt_number = attempt_number
451
452 @property
453 def failed(self):
454 """Return whether a exception is being held in this future."""
455 return self.exception() is not None
456
457 @classmethod
458 def construct(cls, attempt_number, value, has_exception):
459 """Construct a new Future object."""
460 fut = cls(attempt_number)
461 if has_exception:
462 fut.set_exception(value)
463 else:
464 fut.set_result(value)
465 return fut
466
467
468 class RetryCallState(object):
469 """State related to a single call wrapped with Retrying."""
470
471 def __init__(self, retry_object, fn, args, kwargs):
472 #: Retry call start timestamp
473 self.start_time = _utils.now()
474 #: Retry manager object
475 self.retry_object = retry_object
476 #: Function wrapped by this retry call
477 self.fn = fn
478 #: Arguments of the function wrapped by this retry call
479 self.args = args
480 #: Keyword arguments of the function wrapped by this retry call
481 self.kwargs = kwargs
482
483 #: The number of the current attempt
484 self.attempt_number = 1
485 #: Last outcome (result or exception) produced by the function
486 self.outcome = None
487 #: Timestamp of the last outcome
488 self.outcome_timestamp = None
489 #: Time spent sleeping in retries
490 self.idle_for = 0
491 #: Next action as decided by the retry manager
492 self.next_action = None
493
494 @property
495 def seconds_since_start(self):
496 if self.outcome_timestamp is None:
497 return None
498 return self.outcome_timestamp - self.start_time
499
500 def prepare_for_next_attempt(self):
501 self.outcome = None
502 self.outcome_timestamp = None
503 self.attempt_number += 1
504 self.next_action = None
505
506 def set_result(self, val):
507 ts = _utils.now()
508 fut = Future(self.attempt_number)
509 fut.set_result(val)
510 self.outcome, self.outcome_timestamp = fut, ts
511
512 def set_exception(self, exc_info):
513 ts = _utils.now()
514 fut = Future(self.attempt_number)
515 _utils.capture(fut, exc_info)
516 self.outcome, self.outcome_timestamp = fut, ts
517
518
519 if iscoroutinefunction:
520 from pip._vendor.tenacity._asyncio import AsyncRetrying
521
522 if tornado:
523 from pip._vendor.tenacity.tornadoweb import TornadoRetrying
524
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pip/_vendor/tenacity/__init__.py b/src/pip/_vendor/tenacity/__init__.py
--- a/src/pip/_vendor/tenacity/__init__.py
+++ b/src/pip/_vendor/tenacity/__init__.py
@@ -22,10 +22,12 @@
except ImportError:
iscoroutinefunction = None
-try:
- import tornado
-except ImportError:
- tornado = None
+# Replace a conditional import with a hard-coded None so that pip does
+# not attempt to use tornado even if it is present in the environment.
+# If tornado is non-None, tenacity will attempt to execute some code
+# that is sensitive to the version of tornado, which could break pip
+# if an old version is found.
+tornado = None
import sys
import threading
| {"golden_diff": "diff --git a/src/pip/_vendor/tenacity/__init__.py b/src/pip/_vendor/tenacity/__init__.py\n--- a/src/pip/_vendor/tenacity/__init__.py\n+++ b/src/pip/_vendor/tenacity/__init__.py\n@@ -22,10 +22,12 @@\n except ImportError:\n iscoroutinefunction = None\n \n-try:\n- import tornado\n-except ImportError:\n- tornado = None\n+# Replace a conditional import with a hard-coded None so that pip does\n+# not attempt to use tornado even if it is present in the environment.\n+# If tornado is non-None, tenacity will attempt to execute some code\n+# that is sensitive to the version of tornado, which could break pip\n+# if an old version is found.\n+tornado = None\n \n import sys\n import threading\n", "issue": "Vendoring of `tenacity` is leaky\n### Description\n\nTenacity contains a [conditional import of tornado](https://github.com/pypa/pip/blob/88eb4f092e58f3aee1d389ad4f9047df46e44bb4/src/pip/_vendor/tenacity/__init__.py#L25-L28). This makes the behavior of `pip` sensitive to third-party packages outside of its vendor directory. Specifically, if a version of tornado that does not include the `tornado.gen.sleep` function is installed, `pip` will fail to start. (This is unlikely since this function has been around a long time and we have no plans of deprecating it or removing it. But we do have a report of this happening in https://github.com/tornadoweb/tornado/issues/3034)\n\n### Expected behavior\n\nPip should not be affected by the presence or absence of any other third-party packages. Any conditional imports in its vendored dependencies should be modified to unconditionally fail (e.g. replace the above-linked block with `tornado = None`). \n\n### pip version\n\n21.1.2\n\n### Python version\n\n3.8\n\n### OS\n\nlinux\n\n### How to Reproduce\n\n1. `pip install tornado==4.0.0`\r\n2. `pip --version`\n\n### Output\n\n```sh-session\n~/ImpressionableVoluminousCategories$ pip --version\r\npip 21.1.2 from /opt/virtualenvs/python3/lib/python3.8/site-packages/pip (python 3.8)\r\n~/ImpressionableVoluminousCategories$ pip install tornado==4.0.0\r\nCollecting tornado==4.0.0\r\n Downloading tornado-4.0.tar.gz (313 kB)\r\n |\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 313 kB 4.5 MB/s \r\nRequirement already satisfied: certifi in /opt/virtualenvs/python3/lib/python3.8/site-packages (from tornado==4.0.0) (2020.12.5)\r\nBuilding wheels for collected packages: tornado\r\n Building wheel for tornado (setup.py) ... done\r\n Created wheel for tornado: filename=tornado-4.0-cp38-cp38-linux_x86_64.whl size=344556 sha256=d9c5e6911e5bdac5b90db4b33d01891562365e235396bd336380dd45cb61a9b7\r\n Stored in directory: /home/runner/.cache/pip/wheels/9a/d7/93/a846246f95067512a78899329bdb84a695d693e67c28a4e71f\r\nSuccessfully built tornado\r\nInstalling collected packages: tornado\r\nSuccessfully installed tornado-4.0\r\n~/ImpressionableVoluminousCategories$ pip --version\r\nTraceback (most recent call last):\r\n File \"/opt/virtualenvs/python3/bin/pip\", line 5, in <module>\r\n from pip._internal.cli.main import main\r\n File \"/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_internal/cli/main.py\", line 9, in <module>\r\n from pip._internal.cli.autocompletion import autocomplete\r\n File \"/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_internal/cli/autocompletion.py\", line 10, in <module>\r\n from pip._internal.cli.main_parser import create_main_parser\r\n File \"/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_internal/cli/main_parser.py\", line 8, in <module>\r\n from pip._internal.cli import cmdoptions\r\n File \"/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_internal/cli/cmdoptions.py\", line 23, in <module>\r\n from pip._internal.cli.parser import ConfigOptionParser\r\n File \"/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_internal/cli/parser.py\", line 12, in <module>\r\n from pip._internal.configuration import Configuration, ConfigurationError\r\n File \"/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_internal/configuration.py\", line 27, in <module>\r\n from pip._internal.utils.misc import ensure_dir, enum\r\n File \"/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_internal/utils/misc.py\", line 38, in <module>\r\n from pip._vendor.tenacity import retry, stop_after_delay, wait_fixed\r\n File \"/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_vendor/tenacity/__init__.py\", line 523, in <module>\r\n from pip._vendor.tenacity.tornadoweb import TornadoRetrying\r\n File \"/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_vendor/tenacity/tornadoweb.py\", line 26, in <module>\r\n class TornadoRetrying(BaseRetrying):\r\n File \"/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_vendor/tenacity/tornadoweb.py\", line 27, in TornadoRetrying\r\n def __init__(self, sleep=gen.sleep, **kwargs):\r\nAttributeError: module 'tornado.gen' has no attribute 'sleep'\r\n~/ImpressionableVoluminousCategories$ \r\n```\n```\n\n\n### Code of Conduct\n\n- [X] I agree to follow the [PSF Code of Conduct](https://www.python.org/psf/conduct/).\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2016-2018 Julien Danjou\n# Copyright 2017 Elisey Zanko\n# Copyright 2016 \u00c9tienne Bersac\n# Copyright 2016 Joshua Harlow\n# Copyright 2013-2014 Ray Holder\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ntry:\n from inspect import iscoroutinefunction\nexcept ImportError:\n iscoroutinefunction = None\n\ntry:\n import tornado\nexcept ImportError:\n tornado = None\n\nimport sys\nimport threading\nimport typing as t\nimport warnings\nfrom abc import ABCMeta, abstractmethod\nfrom concurrent import futures\n\n\nfrom pip._vendor import six\n\nfrom pip._vendor.tenacity import _utils\n\n# Import all built-in retry strategies for easier usage.\nfrom .retry import retry_base # noqa\nfrom .retry import retry_all # noqa\nfrom .retry import retry_always # noqa\nfrom .retry import retry_any # noqa\nfrom .retry import retry_if_exception # noqa\nfrom .retry import retry_if_exception_type # noqa\nfrom .retry import retry_if_not_result # noqa\nfrom .retry import retry_if_result # noqa\nfrom .retry import retry_never # noqa\nfrom .retry import retry_unless_exception_type # noqa\nfrom .retry import retry_if_exception_message # noqa\nfrom .retry import retry_if_not_exception_message # noqa\n\n# Import all nap strategies for easier usage.\nfrom .nap import sleep # noqa\nfrom .nap import sleep_using_event # noqa\n\n# Import all built-in stop strategies for easier usage.\nfrom .stop import stop_after_attempt # noqa\nfrom .stop import stop_after_delay # noqa\nfrom .stop import stop_all # noqa\nfrom .stop import stop_any # noqa\nfrom .stop import stop_never # noqa\nfrom .stop import stop_when_event_set # noqa\n\n# Import all built-in wait strategies for easier usage.\nfrom .wait import wait_chain # noqa\nfrom .wait import wait_combine # noqa\nfrom .wait import wait_exponential # noqa\nfrom .wait import wait_fixed # noqa\nfrom .wait import wait_incrementing # noqa\nfrom .wait import wait_none # noqa\nfrom .wait import wait_random # noqa\nfrom .wait import wait_random_exponential # noqa\nfrom .wait import wait_random_exponential as wait_full_jitter # noqa\n\n# Import all built-in before strategies for easier usage.\nfrom .before import before_log # noqa\nfrom .before import before_nothing # noqa\n\n# Import all built-in after strategies for easier usage.\nfrom .after import after_log # noqa\nfrom .after import after_nothing # noqa\n\n# Import all built-in after strategies for easier usage.\nfrom .before_sleep import before_sleep_log # noqa\nfrom .before_sleep import before_sleep_nothing # noqa\n\n\nWrappedFn = t.TypeVar(\"WrappedFn\", bound=t.Callable)\n\n\[email protected]\ndef retry(fn):\n # type: (WrappedFn) -> WrappedFn\n \"\"\"Type signature for @retry as a raw decorator.\"\"\"\n pass\n\n\[email protected]\ndef retry(*dargs, **dkw): # noqa\n # type: (...) -> t.Callable[[WrappedFn], WrappedFn]\n \"\"\"Type signature for the @retry() decorator constructor.\"\"\"\n pass\n\n\ndef retry(*dargs, **dkw): # noqa\n \"\"\"Wrap a function with a new `Retrying` object.\n\n :param dargs: positional arguments passed to Retrying object\n :param dkw: keyword arguments passed to the Retrying object\n \"\"\"\n # support both @retry and @retry() as valid syntax\n if len(dargs) == 1 and callable(dargs[0]):\n return retry()(dargs[0])\n else:\n\n def wrap(f):\n if isinstance(f, retry_base):\n warnings.warn(\n (\n \"Got retry_base instance ({cls}) as callable argument, \"\n + \"this will probably hang indefinitely (did you mean \"\n + \"retry={cls}(...)?)\"\n ).format(cls=f.__class__.__name__)\n )\n if iscoroutinefunction is not None and iscoroutinefunction(f):\n r = AsyncRetrying(*dargs, **dkw)\n elif (\n tornado\n and hasattr(tornado.gen, \"is_coroutine_function\")\n and tornado.gen.is_coroutine_function(f)\n ):\n r = TornadoRetrying(*dargs, **dkw)\n else:\n r = Retrying(*dargs, **dkw)\n\n return r.wraps(f)\n\n return wrap\n\n\nclass TryAgain(Exception):\n \"\"\"Always retry the executed function when raised.\"\"\"\n\n\nNO_RESULT = object()\n\n\nclass DoAttempt(object):\n pass\n\n\nclass DoSleep(float):\n pass\n\n\nclass BaseAction(object):\n \"\"\"Base class for representing actions to take by retry object.\n\n Concrete implementations must define:\n - __init__: to initialize all necessary fields\n - REPR_ATTRS: class variable specifying attributes to include in repr(self)\n - NAME: for identification in retry object methods and callbacks\n \"\"\"\n\n REPR_FIELDS = ()\n NAME = None\n\n def __repr__(self):\n state_str = \", \".join(\n \"%s=%r\" % (field, getattr(self, field)) for field in self.REPR_FIELDS\n )\n return \"%s(%s)\" % (type(self).__name__, state_str)\n\n def __str__(self):\n return repr(self)\n\n\nclass RetryAction(BaseAction):\n REPR_FIELDS = (\"sleep\",)\n NAME = \"retry\"\n\n def __init__(self, sleep):\n self.sleep = float(sleep)\n\n\n_unset = object()\n\n\ndef _first_set(first, second):\n return second if first is _unset else first\n\n\nclass RetryError(Exception):\n \"\"\"Encapsulates the last attempt instance right before giving up.\"\"\"\n\n def __init__(self, last_attempt):\n self.last_attempt = last_attempt\n super(RetryError, self).__init__(last_attempt)\n\n def reraise(self):\n if self.last_attempt.failed:\n raise self.last_attempt.result()\n raise self\n\n def __str__(self):\n return \"{0}[{1}]\".format(self.__class__.__name__, self.last_attempt)\n\n\nclass AttemptManager(object):\n \"\"\"Manage attempt context.\"\"\"\n\n def __init__(self, retry_state):\n self.retry_state = retry_state\n\n def __enter__(self):\n pass\n\n def __exit__(self, exc_type, exc_value, traceback):\n if isinstance(exc_value, BaseException):\n self.retry_state.set_exception((exc_type, exc_value, traceback))\n return True # Swallow exception.\n else:\n # We don't have the result, actually.\n self.retry_state.set_result(None)\n\n\nclass BaseRetrying(object):\n __metaclass__ = ABCMeta\n\n def __init__(\n self,\n sleep=sleep,\n stop=stop_never,\n wait=wait_none(),\n retry=retry_if_exception_type(),\n before=before_nothing,\n after=after_nothing,\n before_sleep=None,\n reraise=False,\n retry_error_cls=RetryError,\n retry_error_callback=None,\n ):\n self.sleep = sleep\n self.stop = stop\n self.wait = wait\n self.retry = retry\n self.before = before\n self.after = after\n self.before_sleep = before_sleep\n self.reraise = reraise\n self._local = threading.local()\n self.retry_error_cls = retry_error_cls\n self.retry_error_callback = retry_error_callback\n\n # This attribute was moved to RetryCallState and is deprecated on\n # Retrying objects but kept for backward compatibility.\n self.fn = None\n\n def copy(\n self,\n sleep=_unset,\n stop=_unset,\n wait=_unset,\n retry=_unset,\n before=_unset,\n after=_unset,\n before_sleep=_unset,\n reraise=_unset,\n retry_error_cls=_unset,\n retry_error_callback=_unset,\n ):\n \"\"\"Copy this object with some parameters changed if needed.\"\"\"\n return self.__class__(\n sleep=_first_set(sleep, self.sleep),\n stop=_first_set(stop, self.stop),\n wait=_first_set(wait, self.wait),\n retry=_first_set(retry, self.retry),\n before=_first_set(before, self.before),\n after=_first_set(after, self.after),\n before_sleep=_first_set(before_sleep, self.before_sleep),\n reraise=_first_set(reraise, self.reraise),\n retry_error_cls=_first_set(retry_error_cls, self.retry_error_cls),\n retry_error_callback=_first_set(\n retry_error_callback, self.retry_error_callback\n ),\n )\n\n def __repr__(self):\n attrs = dict(\n _utils.visible_attrs(self, attrs={\"me\": id(self)}),\n __class__=self.__class__.__name__,\n )\n return (\n \"<%(__class__)s object at 0x%(me)x (stop=%(stop)s, \"\n \"wait=%(wait)s, sleep=%(sleep)s, retry=%(retry)s, \"\n \"before=%(before)s, after=%(after)s)>\"\n ) % (attrs)\n\n @property\n def statistics(self):\n \"\"\"Return a dictionary of runtime statistics.\n\n This dictionary will be empty when the controller has never been\n ran. When it is running or has ran previously it should have (but\n may not) have useful and/or informational keys and values when\n running is underway and/or completed.\n\n .. warning:: The keys in this dictionary **should** be some what\n stable (not changing), but there existence **may**\n change between major releases as new statistics are\n gathered or removed so before accessing keys ensure that\n they actually exist and handle when they do not.\n\n .. note:: The values in this dictionary are local to the thread\n running call (so if multiple threads share the same retrying\n object - either directly or indirectly) they will each have\n there own view of statistics they have collected (in the\n future we may provide a way to aggregate the various\n statistics from each thread).\n \"\"\"\n try:\n return self._local.statistics\n except AttributeError:\n self._local.statistics = {}\n return self._local.statistics\n\n def wraps(self, f):\n \"\"\"Wrap a function for retrying.\n\n :param f: A function to wraps for retrying.\n \"\"\"\n\n @_utils.wraps(f)\n def wrapped_f(*args, **kw):\n return self(f, *args, **kw)\n\n def retry_with(*args, **kwargs):\n return self.copy(*args, **kwargs).wraps(f)\n\n wrapped_f.retry = self\n wrapped_f.retry_with = retry_with\n\n return wrapped_f\n\n def begin(self, fn):\n self.statistics.clear()\n self.statistics[\"start_time\"] = _utils.now()\n self.statistics[\"attempt_number\"] = 1\n self.statistics[\"idle_for\"] = 0\n self.fn = fn\n\n def iter(self, retry_state): # noqa\n fut = retry_state.outcome\n if fut is None:\n if self.before is not None:\n self.before(retry_state)\n return DoAttempt()\n\n is_explicit_retry = retry_state.outcome.failed and isinstance(\n retry_state.outcome.exception(), TryAgain\n )\n if not (is_explicit_retry or self.retry(retry_state=retry_state)):\n return fut.result()\n\n if self.after is not None:\n self.after(retry_state=retry_state)\n\n self.statistics[\"delay_since_first_attempt\"] = retry_state.seconds_since_start\n if self.stop(retry_state=retry_state):\n if self.retry_error_callback:\n return self.retry_error_callback(retry_state=retry_state)\n retry_exc = self.retry_error_cls(fut)\n if self.reraise:\n raise retry_exc.reraise()\n six.raise_from(retry_exc, fut.exception())\n\n if self.wait:\n sleep = self.wait(retry_state=retry_state)\n else:\n sleep = 0.0\n retry_state.next_action = RetryAction(sleep)\n retry_state.idle_for += sleep\n self.statistics[\"idle_for\"] += sleep\n self.statistics[\"attempt_number\"] += 1\n\n if self.before_sleep is not None:\n self.before_sleep(retry_state=retry_state)\n\n return DoSleep(sleep)\n\n def __iter__(self):\n self.begin(None)\n\n retry_state = RetryCallState(self, fn=None, args=(), kwargs={})\n while True:\n do = self.iter(retry_state=retry_state)\n if isinstance(do, DoAttempt):\n yield AttemptManager(retry_state=retry_state)\n elif isinstance(do, DoSleep):\n retry_state.prepare_for_next_attempt()\n self.sleep(do)\n else:\n break\n\n @abstractmethod\n def __call__(self, *args, **kwargs):\n pass\n\n def call(self, *args, **kwargs):\n \"\"\"Use ``__call__`` instead because this method is deprecated.\"\"\"\n warnings.warn(\n \"'call()' method is deprecated. \" + \"Use '__call__()' instead\",\n DeprecationWarning,\n )\n return self.__call__(*args, **kwargs)\n\n\nclass Retrying(BaseRetrying):\n \"\"\"Retrying controller.\"\"\"\n\n def __call__(self, fn, *args, **kwargs):\n self.begin(fn)\n\n retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)\n while True:\n do = self.iter(retry_state=retry_state)\n if isinstance(do, DoAttempt):\n try:\n result = fn(*args, **kwargs)\n except BaseException: # noqa: B902\n retry_state.set_exception(sys.exc_info())\n else:\n retry_state.set_result(result)\n elif isinstance(do, DoSleep):\n retry_state.prepare_for_next_attempt()\n self.sleep(do)\n else:\n return do\n\n\nclass Future(futures.Future):\n \"\"\"Encapsulates a (future or past) attempted call to a target function.\"\"\"\n\n def __init__(self, attempt_number):\n super(Future, self).__init__()\n self.attempt_number = attempt_number\n\n @property\n def failed(self):\n \"\"\"Return whether a exception is being held in this future.\"\"\"\n return self.exception() is not None\n\n @classmethod\n def construct(cls, attempt_number, value, has_exception):\n \"\"\"Construct a new Future object.\"\"\"\n fut = cls(attempt_number)\n if has_exception:\n fut.set_exception(value)\n else:\n fut.set_result(value)\n return fut\n\n\nclass RetryCallState(object):\n \"\"\"State related to a single call wrapped with Retrying.\"\"\"\n\n def __init__(self, retry_object, fn, args, kwargs):\n #: Retry call start timestamp\n self.start_time = _utils.now()\n #: Retry manager object\n self.retry_object = retry_object\n #: Function wrapped by this retry call\n self.fn = fn\n #: Arguments of the function wrapped by this retry call\n self.args = args\n #: Keyword arguments of the function wrapped by this retry call\n self.kwargs = kwargs\n\n #: The number of the current attempt\n self.attempt_number = 1\n #: Last outcome (result or exception) produced by the function\n self.outcome = None\n #: Timestamp of the last outcome\n self.outcome_timestamp = None\n #: Time spent sleeping in retries\n self.idle_for = 0\n #: Next action as decided by the retry manager\n self.next_action = None\n\n @property\n def seconds_since_start(self):\n if self.outcome_timestamp is None:\n return None\n return self.outcome_timestamp - self.start_time\n\n def prepare_for_next_attempt(self):\n self.outcome = None\n self.outcome_timestamp = None\n self.attempt_number += 1\n self.next_action = None\n\n def set_result(self, val):\n ts = _utils.now()\n fut = Future(self.attempt_number)\n fut.set_result(val)\n self.outcome, self.outcome_timestamp = fut, ts\n\n def set_exception(self, exc_info):\n ts = _utils.now()\n fut = Future(self.attempt_number)\n _utils.capture(fut, exc_info)\n self.outcome, self.outcome_timestamp = fut, ts\n\n\nif iscoroutinefunction:\n from pip._vendor.tenacity._asyncio import AsyncRetrying\n\nif tornado:\n from pip._vendor.tenacity.tornadoweb import TornadoRetrying\n", "path": "src/pip/_vendor/tenacity/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2016-2018 Julien Danjou\n# Copyright 2017 Elisey Zanko\n# Copyright 2016 \u00c9tienne Bersac\n# Copyright 2016 Joshua Harlow\n# Copyright 2013-2014 Ray Holder\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ntry:\n from inspect import iscoroutinefunction\nexcept ImportError:\n iscoroutinefunction = None\n\n# Replace a conditional import with a hard-coded None so that pip does\n# not attempt to use tornado even if it is present in the environment.\n# If tornado is non-None, tenacity will attempt to execute some code\n# that is sensitive to the version of tornado, which could break pip\n# if an old version is found.\ntornado = None\n\nimport sys\nimport threading\nimport typing as t\nimport warnings\nfrom abc import ABCMeta, abstractmethod\nfrom concurrent import futures\n\n\nfrom pip._vendor import six\n\nfrom pip._vendor.tenacity import _utils\n\n# Import all built-in retry strategies for easier usage.\nfrom .retry import retry_base # noqa\nfrom .retry import retry_all # noqa\nfrom .retry import retry_always # noqa\nfrom .retry import retry_any # noqa\nfrom .retry import retry_if_exception # noqa\nfrom .retry import retry_if_exception_type # noqa\nfrom .retry import retry_if_not_result # noqa\nfrom .retry import retry_if_result # noqa\nfrom .retry import retry_never # noqa\nfrom .retry import retry_unless_exception_type # noqa\nfrom .retry import retry_if_exception_message # noqa\nfrom .retry import retry_if_not_exception_message # noqa\n\n# Import all nap strategies for easier usage.\nfrom .nap import sleep # noqa\nfrom .nap import sleep_using_event # noqa\n\n# Import all built-in stop strategies for easier usage.\nfrom .stop import stop_after_attempt # noqa\nfrom .stop import stop_after_delay # noqa\nfrom .stop import stop_all # noqa\nfrom .stop import stop_any # noqa\nfrom .stop import stop_never # noqa\nfrom .stop import stop_when_event_set # noqa\n\n# Import all built-in wait strategies for easier usage.\nfrom .wait import wait_chain # noqa\nfrom .wait import wait_combine # noqa\nfrom .wait import wait_exponential # noqa\nfrom .wait import wait_fixed # noqa\nfrom .wait import wait_incrementing # noqa\nfrom .wait import wait_none # noqa\nfrom .wait import wait_random # noqa\nfrom .wait import wait_random_exponential # noqa\nfrom .wait import wait_random_exponential as wait_full_jitter # noqa\n\n# Import all built-in before strategies for easier usage.\nfrom .before import before_log # noqa\nfrom .before import before_nothing # noqa\n\n# Import all built-in after strategies for easier usage.\nfrom .after import after_log # noqa\nfrom .after import after_nothing # noqa\n\n# Import all built-in after strategies for easier usage.\nfrom .before_sleep import before_sleep_log # noqa\nfrom .before_sleep import before_sleep_nothing # noqa\n\n\nWrappedFn = t.TypeVar(\"WrappedFn\", bound=t.Callable)\n\n\[email protected]\ndef retry(fn):\n # type: (WrappedFn) -> WrappedFn\n \"\"\"Type signature for @retry as a raw decorator.\"\"\"\n pass\n\n\[email protected]\ndef retry(*dargs, **dkw): # noqa\n # type: (...) -> t.Callable[[WrappedFn], WrappedFn]\n \"\"\"Type signature for the @retry() decorator constructor.\"\"\"\n pass\n\n\ndef retry(*dargs, **dkw): # noqa\n \"\"\"Wrap a function with a new `Retrying` object.\n\n :param dargs: positional arguments passed to Retrying object\n :param dkw: keyword arguments passed to the Retrying object\n \"\"\"\n # support both @retry and @retry() as valid syntax\n if len(dargs) == 1 and callable(dargs[0]):\n return retry()(dargs[0])\n else:\n\n def wrap(f):\n if isinstance(f, retry_base):\n warnings.warn(\n (\n \"Got retry_base instance ({cls}) as callable argument, \"\n + \"this will probably hang indefinitely (did you mean \"\n + \"retry={cls}(...)?)\"\n ).format(cls=f.__class__.__name__)\n )\n if iscoroutinefunction is not None and iscoroutinefunction(f):\n r = AsyncRetrying(*dargs, **dkw)\n elif (\n tornado\n and hasattr(tornado.gen, \"is_coroutine_function\")\n and tornado.gen.is_coroutine_function(f)\n ):\n r = TornadoRetrying(*dargs, **dkw)\n else:\n r = Retrying(*dargs, **dkw)\n\n return r.wraps(f)\n\n return wrap\n\n\nclass TryAgain(Exception):\n \"\"\"Always retry the executed function when raised.\"\"\"\n\n\nNO_RESULT = object()\n\n\nclass DoAttempt(object):\n pass\n\n\nclass DoSleep(float):\n pass\n\n\nclass BaseAction(object):\n \"\"\"Base class for representing actions to take by retry object.\n\n Concrete implementations must define:\n - __init__: to initialize all necessary fields\n - REPR_ATTRS: class variable specifying attributes to include in repr(self)\n - NAME: for identification in retry object methods and callbacks\n \"\"\"\n\n REPR_FIELDS = ()\n NAME = None\n\n def __repr__(self):\n state_str = \", \".join(\n \"%s=%r\" % (field, getattr(self, field)) for field in self.REPR_FIELDS\n )\n return \"%s(%s)\" % (type(self).__name__, state_str)\n\n def __str__(self):\n return repr(self)\n\n\nclass RetryAction(BaseAction):\n REPR_FIELDS = (\"sleep\",)\n NAME = \"retry\"\n\n def __init__(self, sleep):\n self.sleep = float(sleep)\n\n\n_unset = object()\n\n\ndef _first_set(first, second):\n return second if first is _unset else first\n\n\nclass RetryError(Exception):\n \"\"\"Encapsulates the last attempt instance right before giving up.\"\"\"\n\n def __init__(self, last_attempt):\n self.last_attempt = last_attempt\n super(RetryError, self).__init__(last_attempt)\n\n def reraise(self):\n if self.last_attempt.failed:\n raise self.last_attempt.result()\n raise self\n\n def __str__(self):\n return \"{0}[{1}]\".format(self.__class__.__name__, self.last_attempt)\n\n\nclass AttemptManager(object):\n \"\"\"Manage attempt context.\"\"\"\n\n def __init__(self, retry_state):\n self.retry_state = retry_state\n\n def __enter__(self):\n pass\n\n def __exit__(self, exc_type, exc_value, traceback):\n if isinstance(exc_value, BaseException):\n self.retry_state.set_exception((exc_type, exc_value, traceback))\n return True # Swallow exception.\n else:\n # We don't have the result, actually.\n self.retry_state.set_result(None)\n\n\nclass BaseRetrying(object):\n __metaclass__ = ABCMeta\n\n def __init__(\n self,\n sleep=sleep,\n stop=stop_never,\n wait=wait_none(),\n retry=retry_if_exception_type(),\n before=before_nothing,\n after=after_nothing,\n before_sleep=None,\n reraise=False,\n retry_error_cls=RetryError,\n retry_error_callback=None,\n ):\n self.sleep = sleep\n self.stop = stop\n self.wait = wait\n self.retry = retry\n self.before = before\n self.after = after\n self.before_sleep = before_sleep\n self.reraise = reraise\n self._local = threading.local()\n self.retry_error_cls = retry_error_cls\n self.retry_error_callback = retry_error_callback\n\n # This attribute was moved to RetryCallState and is deprecated on\n # Retrying objects but kept for backward compatibility.\n self.fn = None\n\n def copy(\n self,\n sleep=_unset,\n stop=_unset,\n wait=_unset,\n retry=_unset,\n before=_unset,\n after=_unset,\n before_sleep=_unset,\n reraise=_unset,\n retry_error_cls=_unset,\n retry_error_callback=_unset,\n ):\n \"\"\"Copy this object with some parameters changed if needed.\"\"\"\n return self.__class__(\n sleep=_first_set(sleep, self.sleep),\n stop=_first_set(stop, self.stop),\n wait=_first_set(wait, self.wait),\n retry=_first_set(retry, self.retry),\n before=_first_set(before, self.before),\n after=_first_set(after, self.after),\n before_sleep=_first_set(before_sleep, self.before_sleep),\n reraise=_first_set(reraise, self.reraise),\n retry_error_cls=_first_set(retry_error_cls, self.retry_error_cls),\n retry_error_callback=_first_set(\n retry_error_callback, self.retry_error_callback\n ),\n )\n\n def __repr__(self):\n attrs = dict(\n _utils.visible_attrs(self, attrs={\"me\": id(self)}),\n __class__=self.__class__.__name__,\n )\n return (\n \"<%(__class__)s object at 0x%(me)x (stop=%(stop)s, \"\n \"wait=%(wait)s, sleep=%(sleep)s, retry=%(retry)s, \"\n \"before=%(before)s, after=%(after)s)>\"\n ) % (attrs)\n\n @property\n def statistics(self):\n \"\"\"Return a dictionary of runtime statistics.\n\n This dictionary will be empty when the controller has never been\n ran. When it is running or has ran previously it should have (but\n may not) have useful and/or informational keys and values when\n running is underway and/or completed.\n\n .. warning:: The keys in this dictionary **should** be some what\n stable (not changing), but there existence **may**\n change between major releases as new statistics are\n gathered or removed so before accessing keys ensure that\n they actually exist and handle when they do not.\n\n .. note:: The values in this dictionary are local to the thread\n running call (so if multiple threads share the same retrying\n object - either directly or indirectly) they will each have\n there own view of statistics they have collected (in the\n future we may provide a way to aggregate the various\n statistics from each thread).\n \"\"\"\n try:\n return self._local.statistics\n except AttributeError:\n self._local.statistics = {}\n return self._local.statistics\n\n def wraps(self, f):\n \"\"\"Wrap a function for retrying.\n\n :param f: A function to wraps for retrying.\n \"\"\"\n\n @_utils.wraps(f)\n def wrapped_f(*args, **kw):\n return self(f, *args, **kw)\n\n def retry_with(*args, **kwargs):\n return self.copy(*args, **kwargs).wraps(f)\n\n wrapped_f.retry = self\n wrapped_f.retry_with = retry_with\n\n return wrapped_f\n\n def begin(self, fn):\n self.statistics.clear()\n self.statistics[\"start_time\"] = _utils.now()\n self.statistics[\"attempt_number\"] = 1\n self.statistics[\"idle_for\"] = 0\n self.fn = fn\n\n def iter(self, retry_state): # noqa\n fut = retry_state.outcome\n if fut is None:\n if self.before is not None:\n self.before(retry_state)\n return DoAttempt()\n\n is_explicit_retry = retry_state.outcome.failed and isinstance(\n retry_state.outcome.exception(), TryAgain\n )\n if not (is_explicit_retry or self.retry(retry_state=retry_state)):\n return fut.result()\n\n if self.after is not None:\n self.after(retry_state=retry_state)\n\n self.statistics[\"delay_since_first_attempt\"] = retry_state.seconds_since_start\n if self.stop(retry_state=retry_state):\n if self.retry_error_callback:\n return self.retry_error_callback(retry_state=retry_state)\n retry_exc = self.retry_error_cls(fut)\n if self.reraise:\n raise retry_exc.reraise()\n six.raise_from(retry_exc, fut.exception())\n\n if self.wait:\n sleep = self.wait(retry_state=retry_state)\n else:\n sleep = 0.0\n retry_state.next_action = RetryAction(sleep)\n retry_state.idle_for += sleep\n self.statistics[\"idle_for\"] += sleep\n self.statistics[\"attempt_number\"] += 1\n\n if self.before_sleep is not None:\n self.before_sleep(retry_state=retry_state)\n\n return DoSleep(sleep)\n\n def __iter__(self):\n self.begin(None)\n\n retry_state = RetryCallState(self, fn=None, args=(), kwargs={})\n while True:\n do = self.iter(retry_state=retry_state)\n if isinstance(do, DoAttempt):\n yield AttemptManager(retry_state=retry_state)\n elif isinstance(do, DoSleep):\n retry_state.prepare_for_next_attempt()\n self.sleep(do)\n else:\n break\n\n @abstractmethod\n def __call__(self, *args, **kwargs):\n pass\n\n def call(self, *args, **kwargs):\n \"\"\"Use ``__call__`` instead because this method is deprecated.\"\"\"\n warnings.warn(\n \"'call()' method is deprecated. \" + \"Use '__call__()' instead\",\n DeprecationWarning,\n )\n return self.__call__(*args, **kwargs)\n\n\nclass Retrying(BaseRetrying):\n \"\"\"Retrying controller.\"\"\"\n\n def __call__(self, fn, *args, **kwargs):\n self.begin(fn)\n\n retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)\n while True:\n do = self.iter(retry_state=retry_state)\n if isinstance(do, DoAttempt):\n try:\n result = fn(*args, **kwargs)\n except BaseException: # noqa: B902\n retry_state.set_exception(sys.exc_info())\n else:\n retry_state.set_result(result)\n elif isinstance(do, DoSleep):\n retry_state.prepare_for_next_attempt()\n self.sleep(do)\n else:\n return do\n\n\nclass Future(futures.Future):\n \"\"\"Encapsulates a (future or past) attempted call to a target function.\"\"\"\n\n def __init__(self, attempt_number):\n super(Future, self).__init__()\n self.attempt_number = attempt_number\n\n @property\n def failed(self):\n \"\"\"Return whether a exception is being held in this future.\"\"\"\n return self.exception() is not None\n\n @classmethod\n def construct(cls, attempt_number, value, has_exception):\n \"\"\"Construct a new Future object.\"\"\"\n fut = cls(attempt_number)\n if has_exception:\n fut.set_exception(value)\n else:\n fut.set_result(value)\n return fut\n\n\nclass RetryCallState(object):\n \"\"\"State related to a single call wrapped with Retrying.\"\"\"\n\n def __init__(self, retry_object, fn, args, kwargs):\n #: Retry call start timestamp\n self.start_time = _utils.now()\n #: Retry manager object\n self.retry_object = retry_object\n #: Function wrapped by this retry call\n self.fn = fn\n #: Arguments of the function wrapped by this retry call\n self.args = args\n #: Keyword arguments of the function wrapped by this retry call\n self.kwargs = kwargs\n\n #: The number of the current attempt\n self.attempt_number = 1\n #: Last outcome (result or exception) produced by the function\n self.outcome = None\n #: Timestamp of the last outcome\n self.outcome_timestamp = None\n #: Time spent sleeping in retries\n self.idle_for = 0\n #: Next action as decided by the retry manager\n self.next_action = None\n\n @property\n def seconds_since_start(self):\n if self.outcome_timestamp is None:\n return None\n return self.outcome_timestamp - self.start_time\n\n def prepare_for_next_attempt(self):\n self.outcome = None\n self.outcome_timestamp = None\n self.attempt_number += 1\n self.next_action = None\n\n def set_result(self, val):\n ts = _utils.now()\n fut = Future(self.attempt_number)\n fut.set_result(val)\n self.outcome, self.outcome_timestamp = fut, ts\n\n def set_exception(self, exc_info):\n ts = _utils.now()\n fut = Future(self.attempt_number)\n _utils.capture(fut, exc_info)\n self.outcome, self.outcome_timestamp = fut, ts\n\n\nif iscoroutinefunction:\n from pip._vendor.tenacity._asyncio import AsyncRetrying\n\nif tornado:\n from pip._vendor.tenacity.tornadoweb import TornadoRetrying\n", "path": "src/pip/_vendor/tenacity/__init__.py"}]} |
gh_patches_debug_1573 | rasdani/github-patches | git_diff | spotify__luigi-2323 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TypeError: 'str' does not support the buffer interface in luigi.contrib.hive
Hi,
I'm running luigi task with python3.4 and trying call print(HiveTableTarget(table = "tbl", database = "db").exists()).
I'm getting error the following stacktrace:
```
Traceback (most recent call last):
File "/usr/local/lib/python3.4/dist-packages/luigi/worker.py", line 137, in run
new_deps = self._run_get_new_deps()
File "/usr/local/lib/python3.4/dist-packages/luigi/worker.py", line 88, in _run_get_new_deps
task_gen = self.task.run()
File "hive.py", line 10, in run
print(str(target.exists()))
File "/usr/local/lib/python3.4/dist-packages/luigi/contrib/hive.py", line 376, in exists
return self.client.table_exists(self.table, self.database)
File "/usr/local/lib/python3.4/dist-packages/luigi/contrib/hive.py", line 141, in table_exists
return stdout and table.lower() in stdout
TypeError: 'str' does not support the buffer interface
```
I changed the last line in the stacktrace into
```
return stdout and table.lower() in str(stdout)
```
and it works now.
Is this a bug or do I use wrong python version or something else?
Thanks
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `luigi/contrib/hive.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright 2012-2015 Spotify AB
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 #
17
18 import abc
19 import logging
20 import operator
21 import os
22 import subprocess
23 import tempfile
24 import warnings
25
26 from luigi import six
27
28 import luigi
29 import luigi.contrib.hadoop
30 from luigi.target import FileAlreadyExists, FileSystemTarget
31 from luigi.task import flatten
32
33 if six.PY3:
34 unicode = str
35
36 logger = logging.getLogger('luigi-interface')
37
38
39 class HiveCommandError(RuntimeError):
40
41 def __init__(self, message, out=None, err=None):
42 super(HiveCommandError, self).__init__(message, out, err)
43 self.message = message
44 self.out = out
45 self.err = err
46
47
48 def load_hive_cmd():
49 return luigi.configuration.get_config().get('hive', 'command', 'hive').split(' ')
50
51
52 def get_hive_syntax():
53 return luigi.configuration.get_config().get('hive', 'release', 'cdh4')
54
55
56 def run_hive(args, check_return_code=True):
57 """
58 Runs the `hive` from the command line, passing in the given args, and
59 returning stdout.
60
61 With the apache release of Hive, so of the table existence checks
62 (which are done using DESCRIBE do not exit with a return code of 0
63 so we need an option to ignore the return code and just return stdout for parsing
64 """
65 cmd = load_hive_cmd() + args
66 p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
67 stdout, stderr = p.communicate()
68 if check_return_code and p.returncode != 0:
69 raise HiveCommandError("Hive command: {0} failed with error code: {1}".format(" ".join(cmd), p.returncode),
70 stdout, stderr)
71 return stdout
72
73
74 def run_hive_cmd(hivecmd, check_return_code=True):
75 """
76 Runs the given hive query and returns stdout.
77 """
78 return run_hive(['-e', hivecmd], check_return_code)
79
80
81 def run_hive_script(script):
82 """
83 Runs the contents of the given script in hive and returns stdout.
84 """
85 if not os.path.isfile(script):
86 raise RuntimeError("Hive script: {0} does not exist.".format(script))
87 return run_hive(['-f', script])
88
89
90 @six.add_metaclass(abc.ABCMeta)
91 class HiveClient(object): # interface
92
93 @abc.abstractmethod
94 def table_location(self, table, database='default', partition=None):
95 """
96 Returns location of db.table (or db.table.partition). partition is a dict of partition key to
97 value.
98 """
99 pass
100
101 @abc.abstractmethod
102 def table_schema(self, table, database='default'):
103 """
104 Returns list of [(name, type)] for each column in database.table.
105 """
106 pass
107
108 @abc.abstractmethod
109 def table_exists(self, table, database='default', partition=None):
110 """
111 Returns true if db.table (or db.table.partition) exists. partition is a dict of partition key to
112 value.
113 """
114 pass
115
116 @abc.abstractmethod
117 def partition_spec(self, partition):
118 """ Turn a dict into a string partition specification """
119 pass
120
121
122 class HiveCommandClient(HiveClient):
123 """
124 Uses `hive` invocations to find information.
125 """
126
127 def table_location(self, table, database='default', partition=None):
128 cmd = "use {0}; describe formatted {1}".format(database, table)
129 if partition is not None:
130 cmd += " PARTITION ({0})".format(self.partition_spec(partition))
131
132 stdout = run_hive_cmd(cmd)
133
134 for line in stdout.split("\n"):
135 if "Location:" in line:
136 return line.split("\t")[1]
137
138 def table_exists(self, table, database='default', partition=None):
139 if partition is None:
140 stdout = run_hive_cmd('use {0}; show tables like "{1}";'.format(database, table))
141
142 return stdout and table.lower() in stdout
143 else:
144 stdout = run_hive_cmd("""use %s; show partitions %s partition
145 (%s)""" % (database, table, self.partition_spec(partition)))
146
147 if stdout:
148 return True
149 else:
150 return False
151
152 def table_schema(self, table, database='default'):
153 describe = run_hive_cmd("use {0}; describe {1}".format(database, table))
154 if not describe or "does not exist" in describe:
155 return None
156 return [tuple([x.strip() for x in line.strip().split("\t")]) for line in describe.strip().split("\n")]
157
158 def partition_spec(self, partition):
159 """
160 Turns a dict into the a Hive partition specification string.
161 """
162 return ','.join(["`{0}`='{1}'".format(k, v) for (k, v) in
163 sorted(six.iteritems(partition), key=operator.itemgetter(0))])
164
165
166 class ApacheHiveCommandClient(HiveCommandClient):
167 """
168 A subclass for the HiveCommandClient to (in some cases) ignore the return code from
169 the hive command so that we can just parse the output.
170 """
171
172 def table_schema(self, table, database='default'):
173 describe = run_hive_cmd("use {0}; describe {1}".format(database, table), False)
174 if not describe or "Table not found" in describe:
175 return None
176 return [tuple([x.strip() for x in line.strip().split("\t")]) for line in describe.strip().split("\n")]
177
178
179 class MetastoreClient(HiveClient):
180
181 def table_location(self, table, database='default', partition=None):
182 with HiveThriftContext() as client:
183 if partition is not None:
184 try:
185 import hive_metastore.ttypes
186 partition_str = self.partition_spec(partition)
187 thrift_table = client.get_partition_by_name(database, table, partition_str)
188 except hive_metastore.ttypes.NoSuchObjectException:
189 return ''
190 else:
191 thrift_table = client.get_table(database, table)
192 return thrift_table.sd.location
193
194 def table_exists(self, table, database='default', partition=None):
195 with HiveThriftContext() as client:
196 if partition is None:
197 return table in client.get_all_tables(database)
198 else:
199 return partition in self._existing_partitions(table, database, client)
200
201 def _existing_partitions(self, table, database, client):
202 def _parse_partition_string(partition_string):
203 partition_def = {}
204 for part in partition_string.split("/"):
205 name, value = part.split("=")
206 partition_def[name] = value
207 return partition_def
208
209 # -1 is max_parts, the # of partition names to return (-1 = unlimited)
210 partition_strings = client.get_partition_names(database, table, -1)
211 return [_parse_partition_string(existing_partition) for existing_partition in partition_strings]
212
213 def table_schema(self, table, database='default'):
214 with HiveThriftContext() as client:
215 return [(field_schema.name, field_schema.type) for field_schema in client.get_schema(database, table)]
216
217 def partition_spec(self, partition):
218 return "/".join("%s=%s" % (k, v) for (k, v) in sorted(six.iteritems(partition), key=operator.itemgetter(0)))
219
220
221 class HiveThriftContext(object):
222 """
223 Context manager for hive metastore client.
224 """
225
226 def __enter__(self):
227 try:
228 from thrift.transport import TSocket
229 from thrift.transport import TTransport
230 from thrift.protocol import TBinaryProtocol
231 # Note that this will only work with a CDH release.
232 # This uses the thrift bindings generated by the ThriftHiveMetastore service in Beeswax.
233 # If using the Apache release of Hive this import will fail.
234 from hive_metastore import ThriftHiveMetastore
235 config = luigi.configuration.get_config()
236 host = config.get('hive', 'metastore_host')
237 port = config.getint('hive', 'metastore_port')
238 transport = TSocket.TSocket(host, port)
239 transport = TTransport.TBufferedTransport(transport)
240 protocol = TBinaryProtocol.TBinaryProtocol(transport)
241 transport.open()
242 self.transport = transport
243 return ThriftHiveMetastore.Client(protocol)
244 except ImportError as e:
245 raise Exception('Could not import Hive thrift library:' + str(e))
246
247 def __exit__(self, exc_type, exc_val, exc_tb):
248 self.transport.close()
249
250
251 def get_default_client():
252 syntax = get_hive_syntax()
253 if syntax == "apache":
254 return ApacheHiveCommandClient()
255 elif syntax == "metastore":
256 return MetastoreClient()
257 else:
258 return HiveCommandClient()
259
260
261 client = get_default_client()
262
263
264 class HiveQueryTask(luigi.contrib.hadoop.BaseHadoopJobTask):
265 """
266 Task to run a hive query.
267 """
268
269 # by default, we let hive figure these out.
270 n_reduce_tasks = None
271 bytes_per_reducer = None
272 reducers_max = None
273
274 @abc.abstractmethod
275 def query(self):
276 """ Text of query to run in hive """
277 raise RuntimeError("Must implement query!")
278
279 def hiverc(self):
280 """
281 Location of an rc file to run before the query
282 if hiverc-location key is specified in luigi.cfg, will default to the value there
283 otherwise returns None.
284
285 Returning a list of rc files will load all of them in order.
286 """
287 return luigi.configuration.get_config().get('hive', 'hiverc-location', default=None)
288
289 def hivevars(self):
290 """
291 Returns a dict of key=value settings to be passed along
292 to the hive command line via --hivevar.
293 This option can be used as a separated namespace for script local variables.
294 See https://cwiki.apache.org/confluence/display/Hive/LanguageManual+VariableSubstitution
295 """
296 return {}
297
298 def hiveconfs(self):
299 """
300 Returns a dict of key=value settings to be passed along
301 to the hive command line via --hiveconf. By default, sets
302 mapred.job.name to task_id and if not None, sets:
303
304 * mapred.reduce.tasks (n_reduce_tasks)
305 * mapred.fairscheduler.pool (pool) or mapred.job.queue.name (pool)
306 * hive.exec.reducers.bytes.per.reducer (bytes_per_reducer)
307 * hive.exec.reducers.max (reducers_max)
308 """
309 jcs = {}
310 jcs['mapred.job.name'] = "'" + self.task_id + "'"
311 if self.n_reduce_tasks is not None:
312 jcs['mapred.reduce.tasks'] = self.n_reduce_tasks
313 if self.pool is not None:
314 # Supporting two schedulers: fair (default) and capacity using the same option
315 scheduler_type = luigi.configuration.get_config().get('hadoop', 'scheduler', 'fair')
316 if scheduler_type == 'fair':
317 jcs['mapred.fairscheduler.pool'] = self.pool
318 elif scheduler_type == 'capacity':
319 jcs['mapred.job.queue.name'] = self.pool
320 if self.bytes_per_reducer is not None:
321 jcs['hive.exec.reducers.bytes.per.reducer'] = self.bytes_per_reducer
322 if self.reducers_max is not None:
323 jcs['hive.exec.reducers.max'] = self.reducers_max
324 return jcs
325
326 def job_runner(self):
327 return HiveQueryRunner()
328
329
330 class HiveQueryRunner(luigi.contrib.hadoop.JobRunner):
331 """
332 Runs a HiveQueryTask by shelling out to hive.
333 """
334
335 def prepare_outputs(self, job):
336 """
337 Called before job is started.
338
339 If output is a `FileSystemTarget`, create parent directories so the hive command won't fail
340 """
341 outputs = flatten(job.output())
342 for o in outputs:
343 if isinstance(o, FileSystemTarget):
344 parent_dir = os.path.dirname(o.path)
345 if parent_dir and not o.fs.exists(parent_dir):
346 logger.info("Creating parent directory %r", parent_dir)
347 try:
348 # there is a possible race condition
349 # which needs to be handled here
350 o.fs.mkdir(parent_dir)
351 except FileAlreadyExists:
352 pass
353
354 def get_arglist(self, f_name, job):
355 arglist = load_hive_cmd() + ['-f', f_name]
356 hiverc = job.hiverc()
357 if hiverc:
358 if isinstance(hiverc, str):
359 hiverc = [hiverc]
360 for rcfile in hiverc:
361 arglist += ['-i', rcfile]
362 hiveconfs = job.hiveconfs()
363 if hiveconfs:
364 for k, v in six.iteritems(hiveconfs):
365 arglist += ['--hiveconf', '{0}={1}'.format(k, v)]
366 hivevars = job.hivevars()
367 if hivevars:
368 for k, v in six.iteritems(hivevars):
369 arglist += ['--hivevar', '{0}={1}'.format(k, v)]
370 logger.info(arglist)
371 return arglist
372
373 def run_job(self, job, tracking_url_callback=None):
374 if tracking_url_callback is not None:
375 warnings.warn("tracking_url_callback argument is deprecated, task.set_tracking_url is "
376 "used instead.", DeprecationWarning)
377
378 self.prepare_outputs(job)
379 with tempfile.NamedTemporaryFile() as f:
380 query = job.query()
381 if isinstance(query, unicode):
382 query = query.encode('utf8')
383 f.write(query)
384 f.flush()
385 arglist = self.get_arglist(f.name, job)
386 return luigi.contrib.hadoop.run_and_track_hadoop_job(arglist, job.set_tracking_url)
387
388
389 class HiveTableTarget(luigi.Target):
390 """
391 exists returns true if the table exists.
392 """
393
394 def __init__(self, table, database='default', client=None):
395 self.database = database
396 self.table = table
397 self.client = client or get_default_client()
398
399 def exists(self):
400 logger.debug("Checking if Hive table '%s.%s' exists", self.database, self.table)
401 return self.client.table_exists(self.table, self.database)
402
403 @property
404 def path(self):
405 """
406 Returns the path to this table in HDFS.
407 """
408 location = self.client.table_location(self.table, self.database)
409 if not location:
410 raise Exception("Couldn't find location for table: {0}".format(str(self)))
411 return location
412
413 def open(self, mode):
414 return NotImplementedError("open() is not supported for HiveTableTarget")
415
416
417 class HivePartitionTarget(luigi.Target):
418 """
419 exists returns true if the table's partition exists.
420 """
421
422 def __init__(self, table, partition, database='default', fail_missing_table=True, client=None):
423 self.database = database
424 self.table = table
425 self.partition = partition
426 self.client = client or get_default_client()
427
428 self.fail_missing_table = fail_missing_table
429
430 def exists(self):
431 try:
432 logger.debug("Checking Hive table '{d}.{t}' for partition {p}".format(d=self.database, t=self.table, p=str(self.partition)))
433 return self.client.table_exists(self.table, self.database, self.partition)
434 except HiveCommandError:
435 if self.fail_missing_table:
436 raise
437 else:
438 if self.client.table_exists(self.table, self.database):
439 # a real error occurred
440 raise
441 else:
442 # oh the table just doesn't exist
443 return False
444
445 @property
446 def path(self):
447 """
448 Returns the path for this HiveTablePartitionTarget's data.
449 """
450 location = self.client.table_location(self.table, self.database, self.partition)
451 if not location:
452 raise Exception("Couldn't find location for table: {0}".format(str(self)))
453 return location
454
455 def open(self, mode):
456 return NotImplementedError("open() is not supported for HivePartitionTarget")
457
458
459 class ExternalHiveTask(luigi.ExternalTask):
460 """
461 External task that depends on a Hive table/partition.
462 """
463
464 database = luigi.Parameter(default='default')
465 table = luigi.Parameter()
466 partition = luigi.DictParameter(default={}, description='Python dictionary specifying the target partition e.g. {"date": "2013-01-25"}')
467
468 def output(self):
469 if len(self.partition) != 0:
470 assert self.partition, "partition required"
471 return HivePartitionTarget(table=self.table,
472 partition=self.partition,
473 database=self.database)
474 else:
475 return HiveTableTarget(self.table, self.database)
476
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/luigi/contrib/hive.py b/luigi/contrib/hive.py
--- a/luigi/contrib/hive.py
+++ b/luigi/contrib/hive.py
@@ -68,7 +68,7 @@
if check_return_code and p.returncode != 0:
raise HiveCommandError("Hive command: {0} failed with error code: {1}".format(" ".join(cmd), p.returncode),
stdout, stderr)
- return stdout
+ return stdout.decode('utf-8')
def run_hive_cmd(hivecmd, check_return_code=True):
| {"golden_diff": "diff --git a/luigi/contrib/hive.py b/luigi/contrib/hive.py\n--- a/luigi/contrib/hive.py\n+++ b/luigi/contrib/hive.py\n@@ -68,7 +68,7 @@\n if check_return_code and p.returncode != 0:\n raise HiveCommandError(\"Hive command: {0} failed with error code: {1}\".format(\" \".join(cmd), p.returncode),\n stdout, stderr)\n- return stdout\n+ return stdout.decode('utf-8')\n \n \n def run_hive_cmd(hivecmd, check_return_code=True):\n", "issue": "TypeError: 'str' does not support the buffer interface in luigi.contrib.hive\nHi,\n\nI'm running luigi task with python3.4 and trying call print(HiveTableTarget(table = \"tbl\", database = \"db\").exists()).\nI'm getting error the following stacktrace:\n\n```\nTraceback (most recent call last):\n File \"/usr/local/lib/python3.4/dist-packages/luigi/worker.py\", line 137, in run\n new_deps = self._run_get_new_deps()\n File \"/usr/local/lib/python3.4/dist-packages/luigi/worker.py\", line 88, in _run_get_new_deps\n task_gen = self.task.run()\n File \"hive.py\", line 10, in run\n print(str(target.exists()))\n File \"/usr/local/lib/python3.4/dist-packages/luigi/contrib/hive.py\", line 376, in exists\n return self.client.table_exists(self.table, self.database)\n File \"/usr/local/lib/python3.4/dist-packages/luigi/contrib/hive.py\", line 141, in table_exists\n return stdout and table.lower() in stdout\nTypeError: 'str' does not support the buffer interface\n```\n\nI changed the last line in the stacktrace into \n\n```\nreturn stdout and table.lower() in str(stdout)\n```\n\nand it works now.\n\nIs this a bug or do I use wrong python version or something else?\n\nThanks\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2012-2015 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport abc\nimport logging\nimport operator\nimport os\nimport subprocess\nimport tempfile\nimport warnings\n\nfrom luigi import six\n\nimport luigi\nimport luigi.contrib.hadoop\nfrom luigi.target import FileAlreadyExists, FileSystemTarget\nfrom luigi.task import flatten\n\nif six.PY3:\n unicode = str\n\nlogger = logging.getLogger('luigi-interface')\n\n\nclass HiveCommandError(RuntimeError):\n\n def __init__(self, message, out=None, err=None):\n super(HiveCommandError, self).__init__(message, out, err)\n self.message = message\n self.out = out\n self.err = err\n\n\ndef load_hive_cmd():\n return luigi.configuration.get_config().get('hive', 'command', 'hive').split(' ')\n\n\ndef get_hive_syntax():\n return luigi.configuration.get_config().get('hive', 'release', 'cdh4')\n\n\ndef run_hive(args, check_return_code=True):\n \"\"\"\n Runs the `hive` from the command line, passing in the given args, and\n returning stdout.\n\n With the apache release of Hive, so of the table existence checks\n (which are done using DESCRIBE do not exit with a return code of 0\n so we need an option to ignore the return code and just return stdout for parsing\n \"\"\"\n cmd = load_hive_cmd() + args\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = p.communicate()\n if check_return_code and p.returncode != 0:\n raise HiveCommandError(\"Hive command: {0} failed with error code: {1}\".format(\" \".join(cmd), p.returncode),\n stdout, stderr)\n return stdout\n\n\ndef run_hive_cmd(hivecmd, check_return_code=True):\n \"\"\"\n Runs the given hive query and returns stdout.\n \"\"\"\n return run_hive(['-e', hivecmd], check_return_code)\n\n\ndef run_hive_script(script):\n \"\"\"\n Runs the contents of the given script in hive and returns stdout.\n \"\"\"\n if not os.path.isfile(script):\n raise RuntimeError(\"Hive script: {0} does not exist.\".format(script))\n return run_hive(['-f', script])\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass HiveClient(object): # interface\n\n @abc.abstractmethod\n def table_location(self, table, database='default', partition=None):\n \"\"\"\n Returns location of db.table (or db.table.partition). partition is a dict of partition key to\n value.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def table_schema(self, table, database='default'):\n \"\"\"\n Returns list of [(name, type)] for each column in database.table.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def table_exists(self, table, database='default', partition=None):\n \"\"\"\n Returns true if db.table (or db.table.partition) exists. partition is a dict of partition key to\n value.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def partition_spec(self, partition):\n \"\"\" Turn a dict into a string partition specification \"\"\"\n pass\n\n\nclass HiveCommandClient(HiveClient):\n \"\"\"\n Uses `hive` invocations to find information.\n \"\"\"\n\n def table_location(self, table, database='default', partition=None):\n cmd = \"use {0}; describe formatted {1}\".format(database, table)\n if partition is not None:\n cmd += \" PARTITION ({0})\".format(self.partition_spec(partition))\n\n stdout = run_hive_cmd(cmd)\n\n for line in stdout.split(\"\\n\"):\n if \"Location:\" in line:\n return line.split(\"\\t\")[1]\n\n def table_exists(self, table, database='default', partition=None):\n if partition is None:\n stdout = run_hive_cmd('use {0}; show tables like \"{1}\";'.format(database, table))\n\n return stdout and table.lower() in stdout\n else:\n stdout = run_hive_cmd(\"\"\"use %s; show partitions %s partition\n (%s)\"\"\" % (database, table, self.partition_spec(partition)))\n\n if stdout:\n return True\n else:\n return False\n\n def table_schema(self, table, database='default'):\n describe = run_hive_cmd(\"use {0}; describe {1}\".format(database, table))\n if not describe or \"does not exist\" in describe:\n return None\n return [tuple([x.strip() for x in line.strip().split(\"\\t\")]) for line in describe.strip().split(\"\\n\")]\n\n def partition_spec(self, partition):\n \"\"\"\n Turns a dict into the a Hive partition specification string.\n \"\"\"\n return ','.join([\"`{0}`='{1}'\".format(k, v) for (k, v) in\n sorted(six.iteritems(partition), key=operator.itemgetter(0))])\n\n\nclass ApacheHiveCommandClient(HiveCommandClient):\n \"\"\"\n A subclass for the HiveCommandClient to (in some cases) ignore the return code from\n the hive command so that we can just parse the output.\n \"\"\"\n\n def table_schema(self, table, database='default'):\n describe = run_hive_cmd(\"use {0}; describe {1}\".format(database, table), False)\n if not describe or \"Table not found\" in describe:\n return None\n return [tuple([x.strip() for x in line.strip().split(\"\\t\")]) for line in describe.strip().split(\"\\n\")]\n\n\nclass MetastoreClient(HiveClient):\n\n def table_location(self, table, database='default', partition=None):\n with HiveThriftContext() as client:\n if partition is not None:\n try:\n import hive_metastore.ttypes\n partition_str = self.partition_spec(partition)\n thrift_table = client.get_partition_by_name(database, table, partition_str)\n except hive_metastore.ttypes.NoSuchObjectException:\n return ''\n else:\n thrift_table = client.get_table(database, table)\n return thrift_table.sd.location\n\n def table_exists(self, table, database='default', partition=None):\n with HiveThriftContext() as client:\n if partition is None:\n return table in client.get_all_tables(database)\n else:\n return partition in self._existing_partitions(table, database, client)\n\n def _existing_partitions(self, table, database, client):\n def _parse_partition_string(partition_string):\n partition_def = {}\n for part in partition_string.split(\"/\"):\n name, value = part.split(\"=\")\n partition_def[name] = value\n return partition_def\n\n # -1 is max_parts, the # of partition names to return (-1 = unlimited)\n partition_strings = client.get_partition_names(database, table, -1)\n return [_parse_partition_string(existing_partition) for existing_partition in partition_strings]\n\n def table_schema(self, table, database='default'):\n with HiveThriftContext() as client:\n return [(field_schema.name, field_schema.type) for field_schema in client.get_schema(database, table)]\n\n def partition_spec(self, partition):\n return \"/\".join(\"%s=%s\" % (k, v) for (k, v) in sorted(six.iteritems(partition), key=operator.itemgetter(0)))\n\n\nclass HiveThriftContext(object):\n \"\"\"\n Context manager for hive metastore client.\n \"\"\"\n\n def __enter__(self):\n try:\n from thrift.transport import TSocket\n from thrift.transport import TTransport\n from thrift.protocol import TBinaryProtocol\n # Note that this will only work with a CDH release.\n # This uses the thrift bindings generated by the ThriftHiveMetastore service in Beeswax.\n # If using the Apache release of Hive this import will fail.\n from hive_metastore import ThriftHiveMetastore\n config = luigi.configuration.get_config()\n host = config.get('hive', 'metastore_host')\n port = config.getint('hive', 'metastore_port')\n transport = TSocket.TSocket(host, port)\n transport = TTransport.TBufferedTransport(transport)\n protocol = TBinaryProtocol.TBinaryProtocol(transport)\n transport.open()\n self.transport = transport\n return ThriftHiveMetastore.Client(protocol)\n except ImportError as e:\n raise Exception('Could not import Hive thrift library:' + str(e))\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.transport.close()\n\n\ndef get_default_client():\n syntax = get_hive_syntax()\n if syntax == \"apache\":\n return ApacheHiveCommandClient()\n elif syntax == \"metastore\":\n return MetastoreClient()\n else:\n return HiveCommandClient()\n\n\nclient = get_default_client()\n\n\nclass HiveQueryTask(luigi.contrib.hadoop.BaseHadoopJobTask):\n \"\"\"\n Task to run a hive query.\n \"\"\"\n\n # by default, we let hive figure these out.\n n_reduce_tasks = None\n bytes_per_reducer = None\n reducers_max = None\n\n @abc.abstractmethod\n def query(self):\n \"\"\" Text of query to run in hive \"\"\"\n raise RuntimeError(\"Must implement query!\")\n\n def hiverc(self):\n \"\"\"\n Location of an rc file to run before the query\n if hiverc-location key is specified in luigi.cfg, will default to the value there\n otherwise returns None.\n\n Returning a list of rc files will load all of them in order.\n \"\"\"\n return luigi.configuration.get_config().get('hive', 'hiverc-location', default=None)\n\n def hivevars(self):\n \"\"\"\n Returns a dict of key=value settings to be passed along\n to the hive command line via --hivevar.\n This option can be used as a separated namespace for script local variables.\n See https://cwiki.apache.org/confluence/display/Hive/LanguageManual+VariableSubstitution\n \"\"\"\n return {}\n\n def hiveconfs(self):\n \"\"\"\n Returns a dict of key=value settings to be passed along\n to the hive command line via --hiveconf. By default, sets\n mapred.job.name to task_id and if not None, sets:\n\n * mapred.reduce.tasks (n_reduce_tasks)\n * mapred.fairscheduler.pool (pool) or mapred.job.queue.name (pool)\n * hive.exec.reducers.bytes.per.reducer (bytes_per_reducer)\n * hive.exec.reducers.max (reducers_max)\n \"\"\"\n jcs = {}\n jcs['mapred.job.name'] = \"'\" + self.task_id + \"'\"\n if self.n_reduce_tasks is not None:\n jcs['mapred.reduce.tasks'] = self.n_reduce_tasks\n if self.pool is not None:\n # Supporting two schedulers: fair (default) and capacity using the same option\n scheduler_type = luigi.configuration.get_config().get('hadoop', 'scheduler', 'fair')\n if scheduler_type == 'fair':\n jcs['mapred.fairscheduler.pool'] = self.pool\n elif scheduler_type == 'capacity':\n jcs['mapred.job.queue.name'] = self.pool\n if self.bytes_per_reducer is not None:\n jcs['hive.exec.reducers.bytes.per.reducer'] = self.bytes_per_reducer\n if self.reducers_max is not None:\n jcs['hive.exec.reducers.max'] = self.reducers_max\n return jcs\n\n def job_runner(self):\n return HiveQueryRunner()\n\n\nclass HiveQueryRunner(luigi.contrib.hadoop.JobRunner):\n \"\"\"\n Runs a HiveQueryTask by shelling out to hive.\n \"\"\"\n\n def prepare_outputs(self, job):\n \"\"\"\n Called before job is started.\n\n If output is a `FileSystemTarget`, create parent directories so the hive command won't fail\n \"\"\"\n outputs = flatten(job.output())\n for o in outputs:\n if isinstance(o, FileSystemTarget):\n parent_dir = os.path.dirname(o.path)\n if parent_dir and not o.fs.exists(parent_dir):\n logger.info(\"Creating parent directory %r\", parent_dir)\n try:\n # there is a possible race condition\n # which needs to be handled here\n o.fs.mkdir(parent_dir)\n except FileAlreadyExists:\n pass\n\n def get_arglist(self, f_name, job):\n arglist = load_hive_cmd() + ['-f', f_name]\n hiverc = job.hiverc()\n if hiverc:\n if isinstance(hiverc, str):\n hiverc = [hiverc]\n for rcfile in hiverc:\n arglist += ['-i', rcfile]\n hiveconfs = job.hiveconfs()\n if hiveconfs:\n for k, v in six.iteritems(hiveconfs):\n arglist += ['--hiveconf', '{0}={1}'.format(k, v)]\n hivevars = job.hivevars()\n if hivevars:\n for k, v in six.iteritems(hivevars):\n arglist += ['--hivevar', '{0}={1}'.format(k, v)]\n logger.info(arglist)\n return arglist\n\n def run_job(self, job, tracking_url_callback=None):\n if tracking_url_callback is not None:\n warnings.warn(\"tracking_url_callback argument is deprecated, task.set_tracking_url is \"\n \"used instead.\", DeprecationWarning)\n\n self.prepare_outputs(job)\n with tempfile.NamedTemporaryFile() as f:\n query = job.query()\n if isinstance(query, unicode):\n query = query.encode('utf8')\n f.write(query)\n f.flush()\n arglist = self.get_arglist(f.name, job)\n return luigi.contrib.hadoop.run_and_track_hadoop_job(arglist, job.set_tracking_url)\n\n\nclass HiveTableTarget(luigi.Target):\n \"\"\"\n exists returns true if the table exists.\n \"\"\"\n\n def __init__(self, table, database='default', client=None):\n self.database = database\n self.table = table\n self.client = client or get_default_client()\n\n def exists(self):\n logger.debug(\"Checking if Hive table '%s.%s' exists\", self.database, self.table)\n return self.client.table_exists(self.table, self.database)\n\n @property\n def path(self):\n \"\"\"\n Returns the path to this table in HDFS.\n \"\"\"\n location = self.client.table_location(self.table, self.database)\n if not location:\n raise Exception(\"Couldn't find location for table: {0}\".format(str(self)))\n return location\n\n def open(self, mode):\n return NotImplementedError(\"open() is not supported for HiveTableTarget\")\n\n\nclass HivePartitionTarget(luigi.Target):\n \"\"\"\n exists returns true if the table's partition exists.\n \"\"\"\n\n def __init__(self, table, partition, database='default', fail_missing_table=True, client=None):\n self.database = database\n self.table = table\n self.partition = partition\n self.client = client or get_default_client()\n\n self.fail_missing_table = fail_missing_table\n\n def exists(self):\n try:\n logger.debug(\"Checking Hive table '{d}.{t}' for partition {p}\".format(d=self.database, t=self.table, p=str(self.partition)))\n return self.client.table_exists(self.table, self.database, self.partition)\n except HiveCommandError:\n if self.fail_missing_table:\n raise\n else:\n if self.client.table_exists(self.table, self.database):\n # a real error occurred\n raise\n else:\n # oh the table just doesn't exist\n return False\n\n @property\n def path(self):\n \"\"\"\n Returns the path for this HiveTablePartitionTarget's data.\n \"\"\"\n location = self.client.table_location(self.table, self.database, self.partition)\n if not location:\n raise Exception(\"Couldn't find location for table: {0}\".format(str(self)))\n return location\n\n def open(self, mode):\n return NotImplementedError(\"open() is not supported for HivePartitionTarget\")\n\n\nclass ExternalHiveTask(luigi.ExternalTask):\n \"\"\"\n External task that depends on a Hive table/partition.\n \"\"\"\n\n database = luigi.Parameter(default='default')\n table = luigi.Parameter()\n partition = luigi.DictParameter(default={}, description='Python dictionary specifying the target partition e.g. {\"date\": \"2013-01-25\"}')\n\n def output(self):\n if len(self.partition) != 0:\n assert self.partition, \"partition required\"\n return HivePartitionTarget(table=self.table,\n partition=self.partition,\n database=self.database)\n else:\n return HiveTableTarget(self.table, self.database)\n", "path": "luigi/contrib/hive.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2012-2015 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport abc\nimport logging\nimport operator\nimport os\nimport subprocess\nimport tempfile\nimport warnings\n\nfrom luigi import six\n\nimport luigi\nimport luigi.contrib.hadoop\nfrom luigi.target import FileAlreadyExists, FileSystemTarget\nfrom luigi.task import flatten\n\nif six.PY3:\n unicode = str\n\nlogger = logging.getLogger('luigi-interface')\n\n\nclass HiveCommandError(RuntimeError):\n\n def __init__(self, message, out=None, err=None):\n super(HiveCommandError, self).__init__(message, out, err)\n self.message = message\n self.out = out\n self.err = err\n\n\ndef load_hive_cmd():\n return luigi.configuration.get_config().get('hive', 'command', 'hive').split(' ')\n\n\ndef get_hive_syntax():\n return luigi.configuration.get_config().get('hive', 'release', 'cdh4')\n\n\ndef run_hive(args, check_return_code=True):\n \"\"\"\n Runs the `hive` from the command line, passing in the given args, and\n returning stdout.\n\n With the apache release of Hive, so of the table existence checks\n (which are done using DESCRIBE do not exit with a return code of 0\n so we need an option to ignore the return code and just return stdout for parsing\n \"\"\"\n cmd = load_hive_cmd() + args\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = p.communicate()\n if check_return_code and p.returncode != 0:\n raise HiveCommandError(\"Hive command: {0} failed with error code: {1}\".format(\" \".join(cmd), p.returncode),\n stdout, stderr)\n return stdout.decode('utf-8')\n\n\ndef run_hive_cmd(hivecmd, check_return_code=True):\n \"\"\"\n Runs the given hive query and returns stdout.\n \"\"\"\n return run_hive(['-e', hivecmd], check_return_code)\n\n\ndef run_hive_script(script):\n \"\"\"\n Runs the contents of the given script in hive and returns stdout.\n \"\"\"\n if not os.path.isfile(script):\n raise RuntimeError(\"Hive script: {0} does not exist.\".format(script))\n return run_hive(['-f', script])\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass HiveClient(object): # interface\n\n @abc.abstractmethod\n def table_location(self, table, database='default', partition=None):\n \"\"\"\n Returns location of db.table (or db.table.partition). partition is a dict of partition key to\n value.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def table_schema(self, table, database='default'):\n \"\"\"\n Returns list of [(name, type)] for each column in database.table.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def table_exists(self, table, database='default', partition=None):\n \"\"\"\n Returns true if db.table (or db.table.partition) exists. partition is a dict of partition key to\n value.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def partition_spec(self, partition):\n \"\"\" Turn a dict into a string partition specification \"\"\"\n pass\n\n\nclass HiveCommandClient(HiveClient):\n \"\"\"\n Uses `hive` invocations to find information.\n \"\"\"\n\n def table_location(self, table, database='default', partition=None):\n cmd = \"use {0}; describe formatted {1}\".format(database, table)\n if partition is not None:\n cmd += \" PARTITION ({0})\".format(self.partition_spec(partition))\n\n stdout = run_hive_cmd(cmd)\n\n for line in stdout.split(\"\\n\"):\n if \"Location:\" in line:\n return line.split(\"\\t\")[1]\n\n def table_exists(self, table, database='default', partition=None):\n if partition is None:\n stdout = run_hive_cmd('use {0}; show tables like \"{1}\";'.format(database, table))\n\n return stdout and table.lower() in stdout\n else:\n stdout = run_hive_cmd(\"\"\"use %s; show partitions %s partition\n (%s)\"\"\" % (database, table, self.partition_spec(partition)))\n\n if stdout:\n return True\n else:\n return False\n\n def table_schema(self, table, database='default'):\n describe = run_hive_cmd(\"use {0}; describe {1}\".format(database, table))\n if not describe or \"does not exist\" in describe:\n return None\n return [tuple([x.strip() for x in line.strip().split(\"\\t\")]) for line in describe.strip().split(\"\\n\")]\n\n def partition_spec(self, partition):\n \"\"\"\n Turns a dict into the a Hive partition specification string.\n \"\"\"\n return ','.join([\"`{0}`='{1}'\".format(k, v) for (k, v) in\n sorted(six.iteritems(partition), key=operator.itemgetter(0))])\n\n\nclass ApacheHiveCommandClient(HiveCommandClient):\n \"\"\"\n A subclass for the HiveCommandClient to (in some cases) ignore the return code from\n the hive command so that we can just parse the output.\n \"\"\"\n\n def table_schema(self, table, database='default'):\n describe = run_hive_cmd(\"use {0}; describe {1}\".format(database, table), False)\n if not describe or \"Table not found\" in describe:\n return None\n return [tuple([x.strip() for x in line.strip().split(\"\\t\")]) for line in describe.strip().split(\"\\n\")]\n\n\nclass MetastoreClient(HiveClient):\n\n def table_location(self, table, database='default', partition=None):\n with HiveThriftContext() as client:\n if partition is not None:\n try:\n import hive_metastore.ttypes\n partition_str = self.partition_spec(partition)\n thrift_table = client.get_partition_by_name(database, table, partition_str)\n except hive_metastore.ttypes.NoSuchObjectException:\n return ''\n else:\n thrift_table = client.get_table(database, table)\n return thrift_table.sd.location\n\n def table_exists(self, table, database='default', partition=None):\n with HiveThriftContext() as client:\n if partition is None:\n return table in client.get_all_tables(database)\n else:\n return partition in self._existing_partitions(table, database, client)\n\n def _existing_partitions(self, table, database, client):\n def _parse_partition_string(partition_string):\n partition_def = {}\n for part in partition_string.split(\"/\"):\n name, value = part.split(\"=\")\n partition_def[name] = value\n return partition_def\n\n # -1 is max_parts, the # of partition names to return (-1 = unlimited)\n partition_strings = client.get_partition_names(database, table, -1)\n return [_parse_partition_string(existing_partition) for existing_partition in partition_strings]\n\n def table_schema(self, table, database='default'):\n with HiveThriftContext() as client:\n return [(field_schema.name, field_schema.type) for field_schema in client.get_schema(database, table)]\n\n def partition_spec(self, partition):\n return \"/\".join(\"%s=%s\" % (k, v) for (k, v) in sorted(six.iteritems(partition), key=operator.itemgetter(0)))\n\n\nclass HiveThriftContext(object):\n \"\"\"\n Context manager for hive metastore client.\n \"\"\"\n\n def __enter__(self):\n try:\n from thrift.transport import TSocket\n from thrift.transport import TTransport\n from thrift.protocol import TBinaryProtocol\n # Note that this will only work with a CDH release.\n # This uses the thrift bindings generated by the ThriftHiveMetastore service in Beeswax.\n # If using the Apache release of Hive this import will fail.\n from hive_metastore import ThriftHiveMetastore\n config = luigi.configuration.get_config()\n host = config.get('hive', 'metastore_host')\n port = config.getint('hive', 'metastore_port')\n transport = TSocket.TSocket(host, port)\n transport = TTransport.TBufferedTransport(transport)\n protocol = TBinaryProtocol.TBinaryProtocol(transport)\n transport.open()\n self.transport = transport\n return ThriftHiveMetastore.Client(protocol)\n except ImportError as e:\n raise Exception('Could not import Hive thrift library:' + str(e))\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.transport.close()\n\n\ndef get_default_client():\n syntax = get_hive_syntax()\n if syntax == \"apache\":\n return ApacheHiveCommandClient()\n elif syntax == \"metastore\":\n return MetastoreClient()\n else:\n return HiveCommandClient()\n\n\nclient = get_default_client()\n\n\nclass HiveQueryTask(luigi.contrib.hadoop.BaseHadoopJobTask):\n \"\"\"\n Task to run a hive query.\n \"\"\"\n\n # by default, we let hive figure these out.\n n_reduce_tasks = None\n bytes_per_reducer = None\n reducers_max = None\n\n @abc.abstractmethod\n def query(self):\n \"\"\" Text of query to run in hive \"\"\"\n raise RuntimeError(\"Must implement query!\")\n\n def hiverc(self):\n \"\"\"\n Location of an rc file to run before the query\n if hiverc-location key is specified in luigi.cfg, will default to the value there\n otherwise returns None.\n\n Returning a list of rc files will load all of them in order.\n \"\"\"\n return luigi.configuration.get_config().get('hive', 'hiverc-location', default=None)\n\n def hivevars(self):\n \"\"\"\n Returns a dict of key=value settings to be passed along\n to the hive command line via --hivevar.\n This option can be used as a separated namespace for script local variables.\n See https://cwiki.apache.org/confluence/display/Hive/LanguageManual+VariableSubstitution\n \"\"\"\n return {}\n\n def hiveconfs(self):\n \"\"\"\n Returns a dict of key=value settings to be passed along\n to the hive command line via --hiveconf. By default, sets\n mapred.job.name to task_id and if not None, sets:\n\n * mapred.reduce.tasks (n_reduce_tasks)\n * mapred.fairscheduler.pool (pool) or mapred.job.queue.name (pool)\n * hive.exec.reducers.bytes.per.reducer (bytes_per_reducer)\n * hive.exec.reducers.max (reducers_max)\n \"\"\"\n jcs = {}\n jcs['mapred.job.name'] = \"'\" + self.task_id + \"'\"\n if self.n_reduce_tasks is not None:\n jcs['mapred.reduce.tasks'] = self.n_reduce_tasks\n if self.pool is not None:\n # Supporting two schedulers: fair (default) and capacity using the same option\n scheduler_type = luigi.configuration.get_config().get('hadoop', 'scheduler', 'fair')\n if scheduler_type == 'fair':\n jcs['mapred.fairscheduler.pool'] = self.pool\n elif scheduler_type == 'capacity':\n jcs['mapred.job.queue.name'] = self.pool\n if self.bytes_per_reducer is not None:\n jcs['hive.exec.reducers.bytes.per.reducer'] = self.bytes_per_reducer\n if self.reducers_max is not None:\n jcs['hive.exec.reducers.max'] = self.reducers_max\n return jcs\n\n def job_runner(self):\n return HiveQueryRunner()\n\n\nclass HiveQueryRunner(luigi.contrib.hadoop.JobRunner):\n \"\"\"\n Runs a HiveQueryTask by shelling out to hive.\n \"\"\"\n\n def prepare_outputs(self, job):\n \"\"\"\n Called before job is started.\n\n If output is a `FileSystemTarget`, create parent directories so the hive command won't fail\n \"\"\"\n outputs = flatten(job.output())\n for o in outputs:\n if isinstance(o, FileSystemTarget):\n parent_dir = os.path.dirname(o.path)\n if parent_dir and not o.fs.exists(parent_dir):\n logger.info(\"Creating parent directory %r\", parent_dir)\n try:\n # there is a possible race condition\n # which needs to be handled here\n o.fs.mkdir(parent_dir)\n except FileAlreadyExists:\n pass\n\n def get_arglist(self, f_name, job):\n arglist = load_hive_cmd() + ['-f', f_name]\n hiverc = job.hiverc()\n if hiverc:\n if isinstance(hiverc, str):\n hiverc = [hiverc]\n for rcfile in hiverc:\n arglist += ['-i', rcfile]\n hiveconfs = job.hiveconfs()\n if hiveconfs:\n for k, v in six.iteritems(hiveconfs):\n arglist += ['--hiveconf', '{0}={1}'.format(k, v)]\n hivevars = job.hivevars()\n if hivevars:\n for k, v in six.iteritems(hivevars):\n arglist += ['--hivevar', '{0}={1}'.format(k, v)]\n logger.info(arglist)\n return arglist\n\n def run_job(self, job, tracking_url_callback=None):\n if tracking_url_callback is not None:\n warnings.warn(\"tracking_url_callback argument is deprecated, task.set_tracking_url is \"\n \"used instead.\", DeprecationWarning)\n\n self.prepare_outputs(job)\n with tempfile.NamedTemporaryFile() as f:\n query = job.query()\n if isinstance(query, unicode):\n query = query.encode('utf8')\n f.write(query)\n f.flush()\n arglist = self.get_arglist(f.name, job)\n return luigi.contrib.hadoop.run_and_track_hadoop_job(arglist, job.set_tracking_url)\n\n\nclass HiveTableTarget(luigi.Target):\n \"\"\"\n exists returns true if the table exists.\n \"\"\"\n\n def __init__(self, table, database='default', client=None):\n self.database = database\n self.table = table\n self.client = client or get_default_client()\n\n def exists(self):\n logger.debug(\"Checking if Hive table '%s.%s' exists\", self.database, self.table)\n return self.client.table_exists(self.table, self.database)\n\n @property\n def path(self):\n \"\"\"\n Returns the path to this table in HDFS.\n \"\"\"\n location = self.client.table_location(self.table, self.database)\n if not location:\n raise Exception(\"Couldn't find location for table: {0}\".format(str(self)))\n return location\n\n def open(self, mode):\n return NotImplementedError(\"open() is not supported for HiveTableTarget\")\n\n\nclass HivePartitionTarget(luigi.Target):\n \"\"\"\n exists returns true if the table's partition exists.\n \"\"\"\n\n def __init__(self, table, partition, database='default', fail_missing_table=True, client=None):\n self.database = database\n self.table = table\n self.partition = partition\n self.client = client or get_default_client()\n\n self.fail_missing_table = fail_missing_table\n\n def exists(self):\n try:\n logger.debug(\"Checking Hive table '{d}.{t}' for partition {p}\".format(d=self.database, t=self.table, p=str(self.partition)))\n return self.client.table_exists(self.table, self.database, self.partition)\n except HiveCommandError:\n if self.fail_missing_table:\n raise\n else:\n if self.client.table_exists(self.table, self.database):\n # a real error occurred\n raise\n else:\n # oh the table just doesn't exist\n return False\n\n @property\n def path(self):\n \"\"\"\n Returns the path for this HiveTablePartitionTarget's data.\n \"\"\"\n location = self.client.table_location(self.table, self.database, self.partition)\n if not location:\n raise Exception(\"Couldn't find location for table: {0}\".format(str(self)))\n return location\n\n def open(self, mode):\n return NotImplementedError(\"open() is not supported for HivePartitionTarget\")\n\n\nclass ExternalHiveTask(luigi.ExternalTask):\n \"\"\"\n External task that depends on a Hive table/partition.\n \"\"\"\n\n database = luigi.Parameter(default='default')\n table = luigi.Parameter()\n partition = luigi.DictParameter(default={}, description='Python dictionary specifying the target partition e.g. {\"date\": \"2013-01-25\"}')\n\n def output(self):\n if len(self.partition) != 0:\n assert self.partition, \"partition required\"\n return HivePartitionTarget(table=self.table,\n partition=self.partition,\n database=self.database)\n else:\n return HiveTableTarget(self.table, self.database)\n", "path": "luigi/contrib/hive.py"}]} |
gh_patches_debug_1574 | rasdani/github-patches | git_diff | kivy__kivy-3066 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SDL2 window crash on (atleast) Windows
Run the following
``` python
from kivy.app import App
from kivy.uix.label import Label
class TestApp(App):
def build(self):
return Label(text='Use scroll wheel without having the mouse touch the kivy window first')
TestApp().run()
```
The new window gets focus, without touching the window with the mousepointer, scroll the scrollwheel.
Result:
```
Traceback (most recent call last):
File "dbg.py", line 11, in <module>
TestApp().run()
File "C:\dev\python\kivy\kivy\kivy\app.py", line 824, in run
runTouchApp()
File "C:\dev\python\kivy\kivy\kivy\base.py", line 484, in runTouchApp
EventLoop.window.mainloop()
File "C:\dev\python\kivy\kivy\kivy\core\window\window_sdl2.py", line 478, in mainloop
self._mainloop()
File "C:\dev\python\kivy\kivy\kivy\core\window\window_sdl2.py", line 315, in _mainloop
self._mouse_x, self._mouse_y, btn, self.modifiers)
AttributeError: 'WindowSDL' object has no attribute '_mouse_x'
```
_mouse_x and _mouse_y aren't set, should probably just be set to
self._mouse_x = x
self._mouse_y = y
as in the mousebutton\* or mousemotion cases.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kivy/core/window/window_sdl2.py`
Content:
```
1 # found a way to include it more easily.
2 '''
3 SDL2 Window
4 ===========
5
6 Windowing provider directly based on our own wrapped version of SDL.
7
8 TODO:
9 - fix keys
10 - support scrolling
11 - clean code
12 - manage correctly all sdl events
13
14 '''
15
16 __all__ = ('WindowSDL2', )
17
18 from os.path import join
19 from kivy import kivy_data_dir
20 from kivy.logger import Logger
21 from kivy.base import EventLoop, ExceptionManager, stopTouchApp
22 from kivy.clock import Clock
23 from kivy.config import Config
24 from kivy.core.window import WindowBase
25 from kivy.core.window._window_sdl2 import _WindowSDL2Storage
26 from kivy.input.provider import MotionEventProvider
27 from kivy.input.motionevent import MotionEvent
28 from kivy.resources import resource_find
29 from kivy.utils import platform, deprecated
30 from kivy.compat import unichr
31 from collections import deque
32
33 KMOD_LCTRL = 64
34 KMOD_RCTRL = 128
35 KMOD_RSHIFT = 2
36 KMOD_LSHIFT = 1
37 KMOD_RALT = 512
38 KMOD_LALT = 256
39 KMOD_LMETA = 1024
40 KMOD_RMETA = 2048
41
42 SDLK_SHIFTL = 1073742049
43 SDLK_SHIFTR = 1073742053
44 SDLK_LCTRL = 1073742048
45 SDLK_RCTRL = 1073742052
46 SDLK_LALT = 1073742050
47 SDLK_RALT = 1073742054
48 SDLK_LEFT = 1073741904
49 SDLK_RIGHT = 1073741903
50 SDLK_UP = 1073741906
51 SDLK_DOWN = 1073741905
52 SDLK_HOME = 1073741898
53 SDLK_END = 1073741901
54 SDLK_PAGEUP = 1073741899
55 SDLK_PAGEDOWN = 1073741902
56
57
58 class SDL2MotionEvent(MotionEvent):
59 def depack(self, args):
60 self.is_touch = True
61 self.profile = ('pos', )
62 self.sx, self.sy = args
63 super(SDL2MotionEvent, self).depack(args)
64
65
66 class SDL2MotionEventProvider(MotionEventProvider):
67 win = None
68 q = deque()
69 touchmap = {}
70
71 def update(self, dispatch_fn):
72 touchmap = self.touchmap
73 while True:
74 try:
75 value = self.q.pop()
76 except IndexError:
77 return
78
79 action, fid, x, y = value
80 x = x / 32768.
81 y = 1 - (y / 32768.)
82 if fid not in touchmap:
83 touchmap[fid] = me = SDL2MotionEvent('sdl', fid, (x, y))
84 else:
85 me = touchmap[fid]
86 me.move((x, y))
87 if action == 'fingerdown':
88 dispatch_fn('begin', me)
89 elif action == 'fingerup':
90 me.update_time_end()
91 dispatch_fn('end', me)
92 del touchmap[fid]
93 else:
94 dispatch_fn('update', me)
95
96
97 class WindowSDL(WindowBase):
98
99 def __init__(self, **kwargs):
100 self._win = _WindowSDL2Storage()
101 super(WindowSDL, self).__init__()
102 self._meta_keys = (KMOD_LCTRL, KMOD_RCTRL, KMOD_RSHIFT,
103 KMOD_LSHIFT, KMOD_RALT, KMOD_LALT, KMOD_LMETA,
104 KMOD_RMETA)
105 self.command_keys = {
106 27: 'escape',
107 9: 'tab',
108 8: 'backspace',
109 13: 'enter',
110 127: 'del',
111 271: 'enter',
112 273: 'up',
113 274: 'down',
114 275: 'right',
115 276: 'left',
116 278: 'home',
117 279: 'end',
118 280: 'pgup',
119 281: 'pgdown'}
120 self._mouse_buttons_down = set()
121
122 def create_window(self, *largs):
123
124 if self._fake_fullscreen:
125 if not self.borderless:
126 self.fullscreen = self._fake_fullscreen = False
127 elif not self.fullscreen or self.fullscreen == 'auto':
128 self.borderless = self._fake_fullscreen = False
129
130 if self.fullscreen == 'fake':
131 self.borderless = self._fake_fullscreen = True
132 Logger.warning("The 'fake' fullscreen option has been "
133 "deprecated, use Window.borderless or the "
134 "borderless Config option instead.")
135
136 if not self.initialized:
137
138 if self.position == 'auto':
139 pos = None, None
140 elif self.position == 'custom':
141 pos = self.left, self.top
142
143 # setup !
144 w, h = self._size
145 resizable = Config.getboolean('graphics', 'resizable')
146 gl_size = self._win.setup_window(pos[0], pos[1], w, h,
147 self.borderless, self.fullscreen,
148 resizable)
149 # never stay with a None pos, application using w.center
150 # will be fired.
151 self._pos = (0, 0)
152 else:
153 w, h = self._size
154 self._win.resize_window(w, h)
155 self._win.set_border_state(self.borderless)
156 self._win.set_fullscreen_mode(self.fullscreen)
157
158 super(WindowSDL, self).create_window()
159
160 # auto add input provider
161 Logger.info('Window: auto add sdl input provider')
162 from kivy.base import EventLoop
163 SDL2MotionEventProvider.win = self
164 EventLoop.add_input_provider(SDL2MotionEventProvider('sdl', ''))
165
166 # set window icon before calling set_mode
167 try:
168 filename_icon = self.icon or Config.get('kivy', 'window_icon')
169 if filename_icon == '':
170 logo_size = 32
171 if platform == 'macosx':
172 logo_size = 512
173 elif platform == 'win':
174 logo_size = 64
175 filename_icon = 'kivy-icon-{}.png'.format(logo_size)
176 filename_icon = resource_find(
177 join(kivy_data_dir, 'logo', filename_icon))
178 self.set_icon(filename_icon)
179 except:
180 Logger.exception('Window: cannot set icon')
181
182 def close(self):
183 self._win.teardown_window()
184 self.dispatch('on_close')
185
186 def maximize(self):
187 if self._is_desktop:
188 self._win.maximize_window()
189 else:
190 Logger.warning('Window: maximize() is used only on desktop OSes.')
191
192 def minimize(self):
193 if self._is_desktop:
194 self._win.minimize_window()
195 else:
196 Logger.warning('Window: minimize() is used only on desktop OSes.')
197
198 def restore(self):
199 if self._is_desktop:
200 self._win.restore_window()
201 else:
202 Logger.warning('Window: restore() is used only on desktop OSes.')
203
204 def hide(self):
205 if self._is_desktop:
206 self._win.hide_window()
207 else:
208 Logger.warning('Window: hide() is used only on desktop OSes.')
209
210 def show(self):
211 if self._is_desktop:
212 self._win.show_window()
213 else:
214 Logger.warning('Window: show() is used only on desktop OSes.')
215
216 @deprecated
217 def toggle_fullscreen(self):
218 if self.fullscreen in (True, 'auto'):
219 self.fullscreen = False
220 else:
221 self.fullscreen = 'auto'
222
223 def set_title(self, title):
224 self._win.set_window_title(title)
225
226 def set_icon(self, filename):
227 self._win.set_window_icon(str(filename))
228
229 def screenshot(self, *largs, **kwargs):
230 filename = super(WindowSDL, self).screenshot(*largs, **kwargs)
231 if filename is None:
232 return
233
234 from kivy.graphics.opengl import glReadPixels, GL_RGB, GL_UNSIGNED_BYTE
235 width, height = self.size
236 data = glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE)
237 self._win.save_bytes_in_png(filename, data, width, height)
238 Logger.debug('Window: Screenshot saved at <%s>' % filename)
239 return filename
240
241 def flip(self):
242 self._win.flip()
243 super(WindowSDL, self).flip()
244
245 def _mainloop(self):
246 EventLoop.idle()
247
248 while True:
249 event = self._win.poll()
250 if event is False:
251 break
252 if event is None:
253 continue
254
255 action, args = event[0], event[1:]
256 if action == 'quit':
257 EventLoop.quit = True
258 self.close()
259 break
260
261 elif action in ('fingermotion', 'fingerdown', 'fingerup'):
262 # for finger, pass the raw event to SDL motion event provider
263 # XXX this is problematic. On OSX, it generates touches with 0,
264 # 0 coordinates, at the same times as mouse. But it works.
265 # We have a conflict of using either the mouse or the finger.
266 # Right now, we have no mechanism that we could use to know
267 # which is the preferred one for the application.
268 #SDL2MotionEventProvider.q.appendleft(event)
269 pass
270
271 elif action == 'mousemotion':
272 x, y = args
273 self.mouse_pos = x, self.system_size[1] - y
274 self._mouse_x = x
275 self._mouse_y = y
276 # don't dispatch motion if no button are pressed
277 if len(self._mouse_buttons_down) == 0:
278 continue
279 self._mouse_meta = self.modifiers
280 self.dispatch('on_mouse_move', x, y, self.modifiers)
281
282 elif action in ('mousebuttondown', 'mousebuttonup'):
283 x, y, button = args
284 btn = 'left'
285 if button == 3:
286 btn = 'right'
287 elif button == 2:
288 btn = 'middle'
289 eventname = 'on_mouse_down'
290 self._mouse_buttons_down.add(button)
291 if action == 'mousebuttonup':
292 eventname = 'on_mouse_up'
293 self._mouse_buttons_down.remove(button)
294 self._mouse_x = x
295 self._mouse_y = y
296 self.dispatch(eventname, x, y, btn, self.modifiers)
297 elif action.startswith('mousewheel'):
298 self._update_modifiers()
299 x, y, button = args
300 btn = 'scrolldown'
301 if action.endswith('up'):
302 btn = 'scrollup'
303 elif action.endswith('right'):
304 btn = 'scrollright'
305 elif action.endswith('left'):
306 btn = 'scrollleft'
307
308 self._mouse_meta = self.modifiers
309 self._mouse_btn = btn
310 #times = x if y == 0 else y
311 #times = min(abs(times), 100)
312 #for k in range(times):
313 self._mouse_down = True
314 self.dispatch('on_mouse_down',
315 self._mouse_x, self._mouse_y, btn, self.modifiers)
316 self._mouse_down = False
317 self.dispatch('on_mouse_up',
318 self._mouse_x, self._mouse_y, btn, self.modifiers)
319
320 elif action == 'dropfile':
321 dropfile = args
322 self.dispatch('on_dropfile', dropfile[0])
323 # video resize
324 elif action == 'windowresized':
325 self._size = args
326 # don't use trigger here, we want to delay the resize event
327 cb = self._do_resize
328 Clock.unschedule(cb)
329 Clock.schedule_once(cb, .1)
330
331 elif action == 'windowresized':
332 self.canvas.ask_update()
333
334 elif action == 'windowrestored':
335 self.canvas.ask_update()
336
337 elif action == 'windowexposed':
338 self.canvas.ask_update()
339
340 elif action == 'windowminimized':
341 if Config.getboolean('kivy', 'pause_on_minimize'):
342 self.do_pause()
343
344 elif action == 'joyaxismotion':
345 stickid, axisid, value = args
346 self.dispatch('on_joy_axis', stickid, axisid, value)
347 elif action == 'joyhatmotion':
348 stickid, hatid, value = args
349 self.dispatch('on_joy_hat', stickid, hatid, value)
350 elif action == 'joyballmotion':
351 stickid, ballid, xrel, yrel = args
352 self.dispatch('on_joy_ball', stickid, ballid, xrel, yrel)
353 elif action == 'joybuttondown':
354 stickid, buttonid = args
355 self.dispatch('on_joy_button_down', stickid, buttonid)
356 elif action == 'joybuttonup':
357 stickid, buttonid = args
358 self.dispatch('on_joy_button_up', stickid, buttonid)
359
360 elif action in ('keydown', 'keyup'):
361 mod, key, scancode, kstr = args
362 if mod in self._meta_keys:
363 try:
364 kstr = unichr(key)
365 except ValueError:
366 pass
367
368 key_swap = {
369 SDLK_LEFT: 276,
370 SDLK_RIGHT: 275,
371 SDLK_UP: 273,
372 SDLK_DOWN: 274,
373 SDLK_HOME: 278,
374 SDLK_END: 279,
375 SDLK_PAGEDOWN: 281,
376 SDLK_PAGEUP: 280,
377 SDLK_SHIFTL: 303,
378 SDLK_SHIFTR: 304,
379 SDLK_LCTRL: KMOD_LCTRL,
380 SDLK_RCTRL: KMOD_RCTRL,
381 SDLK_LALT: KMOD_LALT,
382 SDLK_RALT: KMOD_RALT}
383
384 if platform == 'ios':
385 # XXX ios keyboard suck, when backspace is hit, the delete
386 # keycode is sent. fix it.
387 key_swap[127] = 8 # back
388
389 try:
390 key = key_swap[key]
391 except KeyError:
392 pass
393
394 if action == 'keydown':
395 self._update_modifiers(mod, key)
396 else:
397 self._update_modifiers(mod) # ignore the key, it
398 # has been released
399 if 'shift' in self._modifiers and key\
400 not in self.command_keys.keys():
401 return
402
403 if action == 'keyup':
404 self.dispatch('on_key_up', key, scancode)
405 continue
406
407 # don't dispatch more key if down event is accepted
408 if self.dispatch('on_key_down', key,
409 scancode, kstr,
410 self.modifiers):
411 continue
412 self.dispatch('on_keyboard', key,
413 scancode, kstr,
414 self.modifiers)
415
416 elif action == 'textinput':
417 key = args[0][0]
418 # XXX on IOS, keydown/up don't send unicode anymore.
419 # With latest sdl, the text is sent over textinput
420 # Right now, redo keydown/up, but we need to seperate both call
421 # too. (and adapt on_key_* API.)
422 self.dispatch('on_key_down', key, None, args[0],
423 self.modifiers)
424 self.dispatch('on_keyboard', None, None, args[0],
425 self.modifiers)
426 self.dispatch('on_key_up', key, None, args[0],
427 self.modifiers)
428
429 # unhandled event !
430 else:
431 Logger.trace('WindowSDL: Unhandled event %s' % str(event))
432
433 def _do_resize(self, dt):
434 Logger.debug('Window: Resize window to %s' % str(self._size))
435 self._win.resize_display_mode(*self._size)
436 self.dispatch('on_resize', *self._size)
437
438 def do_pause(self):
439 # should go to app pause mode.
440 from kivy.app import App
441 from kivy.base import stopTouchApp
442 app = App.get_running_app()
443 if not app:
444 Logger.info('WindowSDL: No running App found, exit.')
445 stopTouchApp()
446 return
447
448 if not app.dispatch('on_pause'):
449 Logger.info('WindowSDL: App doesn\'t support pause mode, stop.')
450 stopTouchApp()
451 return
452
453 # XXX FIXME wait for sdl resume
454 while True:
455 event = self._win.poll()
456 if event is False:
457 continue
458 if event is None:
459 continue
460
461 action, args = event[0], event[1:]
462 if action == 'quit':
463 EventLoop.quit = True
464 self.close()
465 break
466 elif action == 'windowrestored':
467 break
468
469 app.dispatch('on_resume')
470
471 def mainloop(self):
472 # don't known why, but pygame required a resize event
473 # for opengl, before mainloop... window reinit ?
474 self.dispatch('on_resize', *self.size)
475
476 while not EventLoop.quit and EventLoop.status == 'started':
477 try:
478 self._mainloop()
479 except BaseException as inst:
480 # use exception manager first
481 r = ExceptionManager.handle_exception(inst)
482 if r == ExceptionManager.RAISE:
483 stopTouchApp()
484 raise
485 else:
486 pass
487
488 # force deletion of window
489 self._win.teardown_window()
490
491 #
492 # Pygame wrapper
493 #
494 def _update_modifiers(self, mods=None, key=None):
495 # Available mod, from dir(pygame)
496 # 'KMOD_ALT', 'KMOD_CAPS', 'KMOD_CTRL', 'KMOD_LALT',
497 # 'KMOD_LCTRL', 'KMOD_LMETA', 'KMOD_LSHIFT', 'KMOD_META',
498 # 'KMOD_MODE', 'KMOD_NONE'
499 if mods is None and key is None:
500 return
501 modifiers = set()
502
503 if mods is not None:
504 if mods & (KMOD_RSHIFT | KMOD_LSHIFT):
505 modifiers.add('shift')
506 if mods & (KMOD_RALT | KMOD_LALT):
507 modifiers.add('alt')
508 if mods & (KMOD_RCTRL | KMOD_LCTRL):
509 modifiers.add('ctrl')
510 if mods & (KMOD_RMETA | KMOD_LMETA):
511 modifiers.add('meta')
512
513 if key is not None:
514 if key in (KMOD_RSHIFT, KMOD_LSHIFT):
515 modifiers.add('shift')
516 if key in (KMOD_RALT, KMOD_LALT):
517 modifiers.add('alt')
518 if key in (KMOD_RCTRL, KMOD_LCTRL):
519 modifiers.add('ctrl')
520 if key in (KMOD_RMETA, KMOD_LMETA):
521 modifiers.add('meta')
522
523 self._modifiers = list(modifiers)
524 return
525
526 def request_keyboard(self, callback, target, input_type='text'):
527 self._sdl_keyboard = super(WindowSDL, self).\
528 request_keyboard(callback, target, input_type)
529 self._win.show_keyboard()
530 Clock.schedule_interval(self._check_keyboard_shown, 1 / 5.)
531 return self._sdl_keyboard
532
533 def release_keyboard(self, *largs):
534 super(WindowSDL, self).release_keyboard(*largs)
535 self._win.hide_keyboard()
536 self._sdl_keyboard = None
537 return True
538
539 def _check_keyboard_shown(self, dt):
540 if self._sdl_keyboard is None:
541 return False
542 if not self._win.is_keyboard_shown():
543 self._sdl_keyboard.release()
544
545
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kivy/core/window/window_sdl2.py b/kivy/core/window/window_sdl2.py
--- a/kivy/core/window/window_sdl2.py
+++ b/kivy/core/window/window_sdl2.py
@@ -99,6 +99,7 @@
def __init__(self, **kwargs):
self._win = _WindowSDL2Storage()
super(WindowSDL, self).__init__()
+ self._mouse_x = self._mouse_y = -1
self._meta_keys = (KMOD_LCTRL, KMOD_RCTRL, KMOD_RSHIFT,
KMOD_LSHIFT, KMOD_RALT, KMOD_LALT, KMOD_LMETA,
KMOD_RMETA)
| {"golden_diff": "diff --git a/kivy/core/window/window_sdl2.py b/kivy/core/window/window_sdl2.py\n--- a/kivy/core/window/window_sdl2.py\n+++ b/kivy/core/window/window_sdl2.py\n@@ -99,6 +99,7 @@\n def __init__(self, **kwargs):\n self._win = _WindowSDL2Storage()\n super(WindowSDL, self).__init__()\n+ self._mouse_x = self._mouse_y = -1\n self._meta_keys = (KMOD_LCTRL, KMOD_RCTRL, KMOD_RSHIFT,\n KMOD_LSHIFT, KMOD_RALT, KMOD_LALT, KMOD_LMETA,\n KMOD_RMETA)\n", "issue": "SDL2 window crash on (atleast) Windows\nRun the following\n\n``` python\nfrom kivy.app import App\nfrom kivy.uix.label import Label\nclass TestApp(App):\n def build(self):\n return Label(text='Use scroll wheel without having the mouse touch the kivy window first')\nTestApp().run()\n```\n\nThe new window gets focus, without touching the window with the mousepointer, scroll the scrollwheel. \nResult:\n\n```\n Traceback (most recent call last):\n File \"dbg.py\", line 11, in <module>\n TestApp().run()\n File \"C:\\dev\\python\\kivy\\kivy\\kivy\\app.py\", line 824, in run\n runTouchApp()\n File \"C:\\dev\\python\\kivy\\kivy\\kivy\\base.py\", line 484, in runTouchApp\n EventLoop.window.mainloop()\n File \"C:\\dev\\python\\kivy\\kivy\\kivy\\core\\window\\window_sdl2.py\", line 478, in mainloop\n self._mainloop()\n File \"C:\\dev\\python\\kivy\\kivy\\kivy\\core\\window\\window_sdl2.py\", line 315, in _mainloop\n self._mouse_x, self._mouse_y, btn, self.modifiers)\n AttributeError: 'WindowSDL' object has no attribute '_mouse_x'\n```\n\n_mouse_x and _mouse_y aren't set, should probably just be set to \nself._mouse_x = x\nself._mouse_y = y\nas in the mousebutton\\* or mousemotion cases.\n\n", "before_files": [{"content": "# found a way to include it more easily.\n'''\nSDL2 Window\n===========\n\nWindowing provider directly based on our own wrapped version of SDL.\n\nTODO:\n - fix keys\n - support scrolling\n - clean code\n - manage correctly all sdl events\n\n'''\n\n__all__ = ('WindowSDL2', )\n\nfrom os.path import join\nfrom kivy import kivy_data_dir\nfrom kivy.logger import Logger\nfrom kivy.base import EventLoop, ExceptionManager, stopTouchApp\nfrom kivy.clock import Clock\nfrom kivy.config import Config\nfrom kivy.core.window import WindowBase\nfrom kivy.core.window._window_sdl2 import _WindowSDL2Storage\nfrom kivy.input.provider import MotionEventProvider\nfrom kivy.input.motionevent import MotionEvent\nfrom kivy.resources import resource_find\nfrom kivy.utils import platform, deprecated\nfrom kivy.compat import unichr\nfrom collections import deque\n\nKMOD_LCTRL = 64\nKMOD_RCTRL = 128\nKMOD_RSHIFT = 2\nKMOD_LSHIFT = 1\nKMOD_RALT = 512\nKMOD_LALT = 256\nKMOD_LMETA = 1024\nKMOD_RMETA = 2048\n\nSDLK_SHIFTL = 1073742049\nSDLK_SHIFTR = 1073742053\nSDLK_LCTRL = 1073742048\nSDLK_RCTRL = 1073742052\nSDLK_LALT = 1073742050\nSDLK_RALT = 1073742054\nSDLK_LEFT = 1073741904\nSDLK_RIGHT = 1073741903\nSDLK_UP = 1073741906\nSDLK_DOWN = 1073741905\nSDLK_HOME = 1073741898\nSDLK_END = 1073741901\nSDLK_PAGEUP = 1073741899\nSDLK_PAGEDOWN = 1073741902\n\n\nclass SDL2MotionEvent(MotionEvent):\n def depack(self, args):\n self.is_touch = True\n self.profile = ('pos', )\n self.sx, self.sy = args\n super(SDL2MotionEvent, self).depack(args)\n\n\nclass SDL2MotionEventProvider(MotionEventProvider):\n win = None\n q = deque()\n touchmap = {}\n\n def update(self, dispatch_fn):\n touchmap = self.touchmap\n while True:\n try:\n value = self.q.pop()\n except IndexError:\n return\n\n action, fid, x, y = value\n x = x / 32768.\n y = 1 - (y / 32768.)\n if fid not in touchmap:\n touchmap[fid] = me = SDL2MotionEvent('sdl', fid, (x, y))\n else:\n me = touchmap[fid]\n me.move((x, y))\n if action == 'fingerdown':\n dispatch_fn('begin', me)\n elif action == 'fingerup':\n me.update_time_end()\n dispatch_fn('end', me)\n del touchmap[fid]\n else:\n dispatch_fn('update', me)\n\n\nclass WindowSDL(WindowBase):\n\n def __init__(self, **kwargs):\n self._win = _WindowSDL2Storage()\n super(WindowSDL, self).__init__()\n self._meta_keys = (KMOD_LCTRL, KMOD_RCTRL, KMOD_RSHIFT,\n KMOD_LSHIFT, KMOD_RALT, KMOD_LALT, KMOD_LMETA,\n KMOD_RMETA)\n self.command_keys = {\n 27: 'escape',\n 9: 'tab',\n 8: 'backspace',\n 13: 'enter',\n 127: 'del',\n 271: 'enter',\n 273: 'up',\n 274: 'down',\n 275: 'right',\n 276: 'left',\n 278: 'home',\n 279: 'end',\n 280: 'pgup',\n 281: 'pgdown'}\n self._mouse_buttons_down = set()\n\n def create_window(self, *largs):\n\n if self._fake_fullscreen:\n if not self.borderless:\n self.fullscreen = self._fake_fullscreen = False\n elif not self.fullscreen or self.fullscreen == 'auto':\n self.borderless = self._fake_fullscreen = False\n\n if self.fullscreen == 'fake':\n self.borderless = self._fake_fullscreen = True\n Logger.warning(\"The 'fake' fullscreen option has been \"\n \"deprecated, use Window.borderless or the \"\n \"borderless Config option instead.\")\n\n if not self.initialized:\n\n if self.position == 'auto':\n pos = None, None\n elif self.position == 'custom':\n pos = self.left, self.top\n\n # setup !\n w, h = self._size\n resizable = Config.getboolean('graphics', 'resizable')\n gl_size = self._win.setup_window(pos[0], pos[1], w, h,\n self.borderless, self.fullscreen,\n resizable)\n # never stay with a None pos, application using w.center\n # will be fired.\n self._pos = (0, 0)\n else:\n w, h = self._size\n self._win.resize_window(w, h)\n self._win.set_border_state(self.borderless)\n self._win.set_fullscreen_mode(self.fullscreen)\n\n super(WindowSDL, self).create_window()\n\n # auto add input provider\n Logger.info('Window: auto add sdl input provider')\n from kivy.base import EventLoop\n SDL2MotionEventProvider.win = self\n EventLoop.add_input_provider(SDL2MotionEventProvider('sdl', ''))\n\n # set window icon before calling set_mode\n try:\n filename_icon = self.icon or Config.get('kivy', 'window_icon')\n if filename_icon == '':\n logo_size = 32\n if platform == 'macosx':\n logo_size = 512\n elif platform == 'win':\n logo_size = 64\n filename_icon = 'kivy-icon-{}.png'.format(logo_size)\n filename_icon = resource_find(\n join(kivy_data_dir, 'logo', filename_icon))\n self.set_icon(filename_icon)\n except:\n Logger.exception('Window: cannot set icon')\n\n def close(self):\n self._win.teardown_window()\n self.dispatch('on_close')\n\n def maximize(self):\n if self._is_desktop:\n self._win.maximize_window()\n else:\n Logger.warning('Window: maximize() is used only on desktop OSes.')\n\n def minimize(self):\n if self._is_desktop:\n self._win.minimize_window()\n else:\n Logger.warning('Window: minimize() is used only on desktop OSes.')\n\n def restore(self):\n if self._is_desktop:\n self._win.restore_window()\n else:\n Logger.warning('Window: restore() is used only on desktop OSes.')\n\n def hide(self):\n if self._is_desktop:\n self._win.hide_window()\n else:\n Logger.warning('Window: hide() is used only on desktop OSes.')\n\n def show(self):\n if self._is_desktop:\n self._win.show_window()\n else:\n Logger.warning('Window: show() is used only on desktop OSes.')\n\n @deprecated\n def toggle_fullscreen(self):\n if self.fullscreen in (True, 'auto'):\n self.fullscreen = False\n else:\n self.fullscreen = 'auto'\n\n def set_title(self, title):\n self._win.set_window_title(title)\n\n def set_icon(self, filename):\n self._win.set_window_icon(str(filename))\n\n def screenshot(self, *largs, **kwargs):\n filename = super(WindowSDL, self).screenshot(*largs, **kwargs)\n if filename is None:\n return\n\n from kivy.graphics.opengl import glReadPixels, GL_RGB, GL_UNSIGNED_BYTE\n width, height = self.size\n data = glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE)\n self._win.save_bytes_in_png(filename, data, width, height)\n Logger.debug('Window: Screenshot saved at <%s>' % filename)\n return filename\n\n def flip(self):\n self._win.flip()\n super(WindowSDL, self).flip()\n\n def _mainloop(self):\n EventLoop.idle()\n\n while True:\n event = self._win.poll()\n if event is False:\n break\n if event is None:\n continue\n\n action, args = event[0], event[1:]\n if action == 'quit':\n EventLoop.quit = True\n self.close()\n break\n\n elif action in ('fingermotion', 'fingerdown', 'fingerup'):\n # for finger, pass the raw event to SDL motion event provider\n # XXX this is problematic. On OSX, it generates touches with 0,\n # 0 coordinates, at the same times as mouse. But it works.\n # We have a conflict of using either the mouse or the finger.\n # Right now, we have no mechanism that we could use to know\n # which is the preferred one for the application.\n #SDL2MotionEventProvider.q.appendleft(event)\n pass\n\n elif action == 'mousemotion':\n x, y = args\n self.mouse_pos = x, self.system_size[1] - y\n self._mouse_x = x\n self._mouse_y = y\n # don't dispatch motion if no button are pressed\n if len(self._mouse_buttons_down) == 0:\n continue\n self._mouse_meta = self.modifiers\n self.dispatch('on_mouse_move', x, y, self.modifiers)\n\n elif action in ('mousebuttondown', 'mousebuttonup'):\n x, y, button = args\n btn = 'left'\n if button == 3:\n btn = 'right'\n elif button == 2:\n btn = 'middle'\n eventname = 'on_mouse_down'\n self._mouse_buttons_down.add(button)\n if action == 'mousebuttonup':\n eventname = 'on_mouse_up'\n self._mouse_buttons_down.remove(button)\n self._mouse_x = x\n self._mouse_y = y\n self.dispatch(eventname, x, y, btn, self.modifiers)\n elif action.startswith('mousewheel'):\n self._update_modifiers()\n x, y, button = args\n btn = 'scrolldown'\n if action.endswith('up'):\n btn = 'scrollup'\n elif action.endswith('right'):\n btn = 'scrollright'\n elif action.endswith('left'):\n btn = 'scrollleft'\n\n self._mouse_meta = self.modifiers\n self._mouse_btn = btn\n #times = x if y == 0 else y\n #times = min(abs(times), 100)\n #for k in range(times):\n self._mouse_down = True\n self.dispatch('on_mouse_down',\n self._mouse_x, self._mouse_y, btn, self.modifiers)\n self._mouse_down = False\n self.dispatch('on_mouse_up',\n self._mouse_x, self._mouse_y, btn, self.modifiers)\n\n elif action == 'dropfile':\n dropfile = args\n self.dispatch('on_dropfile', dropfile[0])\n # video resize\n elif action == 'windowresized':\n self._size = args\n # don't use trigger here, we want to delay the resize event\n cb = self._do_resize\n Clock.unschedule(cb)\n Clock.schedule_once(cb, .1)\n\n elif action == 'windowresized':\n self.canvas.ask_update()\n\n elif action == 'windowrestored':\n self.canvas.ask_update()\n\n elif action == 'windowexposed':\n self.canvas.ask_update()\n\n elif action == 'windowminimized':\n if Config.getboolean('kivy', 'pause_on_minimize'):\n self.do_pause()\n\n elif action == 'joyaxismotion':\n stickid, axisid, value = args\n self.dispatch('on_joy_axis', stickid, axisid, value)\n elif action == 'joyhatmotion':\n stickid, hatid, value = args\n self.dispatch('on_joy_hat', stickid, hatid, value)\n elif action == 'joyballmotion':\n stickid, ballid, xrel, yrel = args\n self.dispatch('on_joy_ball', stickid, ballid, xrel, yrel)\n elif action == 'joybuttondown':\n stickid, buttonid = args\n self.dispatch('on_joy_button_down', stickid, buttonid)\n elif action == 'joybuttonup':\n stickid, buttonid = args\n self.dispatch('on_joy_button_up', stickid, buttonid)\n\n elif action in ('keydown', 'keyup'):\n mod, key, scancode, kstr = args\n if mod in self._meta_keys:\n try:\n kstr = unichr(key)\n except ValueError:\n pass\n\n key_swap = {\n SDLK_LEFT: 276,\n SDLK_RIGHT: 275,\n SDLK_UP: 273,\n SDLK_DOWN: 274,\n SDLK_HOME: 278,\n SDLK_END: 279,\n SDLK_PAGEDOWN: 281,\n SDLK_PAGEUP: 280,\n SDLK_SHIFTL: 303,\n SDLK_SHIFTR: 304,\n SDLK_LCTRL: KMOD_LCTRL,\n SDLK_RCTRL: KMOD_RCTRL,\n SDLK_LALT: KMOD_LALT,\n SDLK_RALT: KMOD_RALT}\n\n if platform == 'ios':\n # XXX ios keyboard suck, when backspace is hit, the delete\n # keycode is sent. fix it.\n key_swap[127] = 8 # back\n\n try:\n key = key_swap[key]\n except KeyError:\n pass\n\n if action == 'keydown':\n self._update_modifiers(mod, key)\n else:\n self._update_modifiers(mod) # ignore the key, it\n # has been released\n if 'shift' in self._modifiers and key\\\n not in self.command_keys.keys():\n return\n\n if action == 'keyup':\n self.dispatch('on_key_up', key, scancode)\n continue\n\n # don't dispatch more key if down event is accepted\n if self.dispatch('on_key_down', key,\n scancode, kstr,\n self.modifiers):\n continue\n self.dispatch('on_keyboard', key,\n scancode, kstr,\n self.modifiers)\n\n elif action == 'textinput':\n key = args[0][0]\n # XXX on IOS, keydown/up don't send unicode anymore.\n # With latest sdl, the text is sent over textinput\n # Right now, redo keydown/up, but we need to seperate both call\n # too. (and adapt on_key_* API.)\n self.dispatch('on_key_down', key, None, args[0],\n self.modifiers)\n self.dispatch('on_keyboard', None, None, args[0],\n self.modifiers)\n self.dispatch('on_key_up', key, None, args[0],\n self.modifiers)\n\n # unhandled event !\n else:\n Logger.trace('WindowSDL: Unhandled event %s' % str(event))\n\n def _do_resize(self, dt):\n Logger.debug('Window: Resize window to %s' % str(self._size))\n self._win.resize_display_mode(*self._size)\n self.dispatch('on_resize', *self._size)\n\n def do_pause(self):\n # should go to app pause mode.\n from kivy.app import App\n from kivy.base import stopTouchApp\n app = App.get_running_app()\n if not app:\n Logger.info('WindowSDL: No running App found, exit.')\n stopTouchApp()\n return\n\n if not app.dispatch('on_pause'):\n Logger.info('WindowSDL: App doesn\\'t support pause mode, stop.')\n stopTouchApp()\n return\n\n # XXX FIXME wait for sdl resume\n while True:\n event = self._win.poll()\n if event is False:\n continue\n if event is None:\n continue\n\n action, args = event[0], event[1:]\n if action == 'quit':\n EventLoop.quit = True\n self.close()\n break\n elif action == 'windowrestored':\n break\n\n app.dispatch('on_resume')\n\n def mainloop(self):\n # don't known why, but pygame required a resize event\n # for opengl, before mainloop... window reinit ?\n self.dispatch('on_resize', *self.size)\n\n while not EventLoop.quit and EventLoop.status == 'started':\n try:\n self._mainloop()\n except BaseException as inst:\n # use exception manager first\n r = ExceptionManager.handle_exception(inst)\n if r == ExceptionManager.RAISE:\n stopTouchApp()\n raise\n else:\n pass\n\n # force deletion of window\n self._win.teardown_window()\n\n #\n # Pygame wrapper\n #\n def _update_modifiers(self, mods=None, key=None):\n # Available mod, from dir(pygame)\n # 'KMOD_ALT', 'KMOD_CAPS', 'KMOD_CTRL', 'KMOD_LALT',\n # 'KMOD_LCTRL', 'KMOD_LMETA', 'KMOD_LSHIFT', 'KMOD_META',\n # 'KMOD_MODE', 'KMOD_NONE'\n if mods is None and key is None:\n return\n modifiers = set()\n\n if mods is not None:\n if mods & (KMOD_RSHIFT | KMOD_LSHIFT):\n modifiers.add('shift')\n if mods & (KMOD_RALT | KMOD_LALT):\n modifiers.add('alt')\n if mods & (KMOD_RCTRL | KMOD_LCTRL):\n modifiers.add('ctrl')\n if mods & (KMOD_RMETA | KMOD_LMETA):\n modifiers.add('meta')\n\n if key is not None:\n if key in (KMOD_RSHIFT, KMOD_LSHIFT):\n modifiers.add('shift')\n if key in (KMOD_RALT, KMOD_LALT):\n modifiers.add('alt')\n if key in (KMOD_RCTRL, KMOD_LCTRL):\n modifiers.add('ctrl')\n if key in (KMOD_RMETA, KMOD_LMETA):\n modifiers.add('meta')\n\n self._modifiers = list(modifiers)\n return\n\n def request_keyboard(self, callback, target, input_type='text'):\n self._sdl_keyboard = super(WindowSDL, self).\\\n request_keyboard(callback, target, input_type)\n self._win.show_keyboard()\n Clock.schedule_interval(self._check_keyboard_shown, 1 / 5.)\n return self._sdl_keyboard\n\n def release_keyboard(self, *largs):\n super(WindowSDL, self).release_keyboard(*largs)\n self._win.hide_keyboard()\n self._sdl_keyboard = None\n return True\n\n def _check_keyboard_shown(self, dt):\n if self._sdl_keyboard is None:\n return False\n if not self._win.is_keyboard_shown():\n self._sdl_keyboard.release()\n\n", "path": "kivy/core/window/window_sdl2.py"}], "after_files": [{"content": "# found a way to include it more easily.\n'''\nSDL2 Window\n===========\n\nWindowing provider directly based on our own wrapped version of SDL.\n\nTODO:\n - fix keys\n - support scrolling\n - clean code\n - manage correctly all sdl events\n\n'''\n\n__all__ = ('WindowSDL2', )\n\nfrom os.path import join\nfrom kivy import kivy_data_dir\nfrom kivy.logger import Logger\nfrom kivy.base import EventLoop, ExceptionManager, stopTouchApp\nfrom kivy.clock import Clock\nfrom kivy.config import Config\nfrom kivy.core.window import WindowBase\nfrom kivy.core.window._window_sdl2 import _WindowSDL2Storage\nfrom kivy.input.provider import MotionEventProvider\nfrom kivy.input.motionevent import MotionEvent\nfrom kivy.resources import resource_find\nfrom kivy.utils import platform, deprecated\nfrom kivy.compat import unichr\nfrom collections import deque\n\nKMOD_LCTRL = 64\nKMOD_RCTRL = 128\nKMOD_RSHIFT = 2\nKMOD_LSHIFT = 1\nKMOD_RALT = 512\nKMOD_LALT = 256\nKMOD_LMETA = 1024\nKMOD_RMETA = 2048\n\nSDLK_SHIFTL = 1073742049\nSDLK_SHIFTR = 1073742053\nSDLK_LCTRL = 1073742048\nSDLK_RCTRL = 1073742052\nSDLK_LALT = 1073742050\nSDLK_RALT = 1073742054\nSDLK_LEFT = 1073741904\nSDLK_RIGHT = 1073741903\nSDLK_UP = 1073741906\nSDLK_DOWN = 1073741905\nSDLK_HOME = 1073741898\nSDLK_END = 1073741901\nSDLK_PAGEUP = 1073741899\nSDLK_PAGEDOWN = 1073741902\n\n\nclass SDL2MotionEvent(MotionEvent):\n def depack(self, args):\n self.is_touch = True\n self.profile = ('pos', )\n self.sx, self.sy = args\n super(SDL2MotionEvent, self).depack(args)\n\n\nclass SDL2MotionEventProvider(MotionEventProvider):\n win = None\n q = deque()\n touchmap = {}\n\n def update(self, dispatch_fn):\n touchmap = self.touchmap\n while True:\n try:\n value = self.q.pop()\n except IndexError:\n return\n\n action, fid, x, y = value\n x = x / 32768.\n y = 1 - (y / 32768.)\n if fid not in touchmap:\n touchmap[fid] = me = SDL2MotionEvent('sdl', fid, (x, y))\n else:\n me = touchmap[fid]\n me.move((x, y))\n if action == 'fingerdown':\n dispatch_fn('begin', me)\n elif action == 'fingerup':\n me.update_time_end()\n dispatch_fn('end', me)\n del touchmap[fid]\n else:\n dispatch_fn('update', me)\n\n\nclass WindowSDL(WindowBase):\n\n def __init__(self, **kwargs):\n self._win = _WindowSDL2Storage()\n super(WindowSDL, self).__init__()\n self._mouse_x = self._mouse_y = -1\n self._meta_keys = (KMOD_LCTRL, KMOD_RCTRL, KMOD_RSHIFT,\n KMOD_LSHIFT, KMOD_RALT, KMOD_LALT, KMOD_LMETA,\n KMOD_RMETA)\n self.command_keys = {\n 27: 'escape',\n 9: 'tab',\n 8: 'backspace',\n 13: 'enter',\n 127: 'del',\n 271: 'enter',\n 273: 'up',\n 274: 'down',\n 275: 'right',\n 276: 'left',\n 278: 'home',\n 279: 'end',\n 280: 'pgup',\n 281: 'pgdown'}\n self._mouse_buttons_down = set()\n\n def create_window(self, *largs):\n\n if self._fake_fullscreen:\n if not self.borderless:\n self.fullscreen = self._fake_fullscreen = False\n elif not self.fullscreen or self.fullscreen == 'auto':\n self.borderless = self._fake_fullscreen = False\n\n if self.fullscreen == 'fake':\n self.borderless = self._fake_fullscreen = True\n Logger.warning(\"The 'fake' fullscreen option has been \"\n \"deprecated, use Window.borderless or the \"\n \"borderless Config option instead.\")\n\n if not self.initialized:\n\n if self.position == 'auto':\n pos = None, None\n elif self.position == 'custom':\n pos = self.left, self.top\n\n # setup !\n w, h = self._size\n resizable = Config.getboolean('graphics', 'resizable')\n gl_size = self._win.setup_window(pos[0], pos[1], w, h,\n self.borderless, self.fullscreen,\n resizable)\n # never stay with a None pos, application using w.center\n # will be fired.\n self._pos = (0, 0)\n else:\n w, h = self._size\n self._win.resize_window(w, h)\n self._win.set_border_state(self.borderless)\n self._win.set_fullscreen_mode(self.fullscreen)\n\n super(WindowSDL, self).create_window()\n\n # auto add input provider\n Logger.info('Window: auto add sdl input provider')\n from kivy.base import EventLoop\n SDL2MotionEventProvider.win = self\n EventLoop.add_input_provider(SDL2MotionEventProvider('sdl', ''))\n\n # set window icon before calling set_mode\n try:\n filename_icon = self.icon or Config.get('kivy', 'window_icon')\n if filename_icon == '':\n logo_size = 32\n if platform == 'macosx':\n logo_size = 512\n elif platform == 'win':\n logo_size = 64\n filename_icon = 'kivy-icon-{}.png'.format(logo_size)\n filename_icon = resource_find(\n join(kivy_data_dir, 'logo', filename_icon))\n self.set_icon(filename_icon)\n except:\n Logger.exception('Window: cannot set icon')\n\n def close(self):\n self._win.teardown_window()\n self.dispatch('on_close')\n\n def maximize(self):\n if self._is_desktop:\n self._win.maximize_window()\n else:\n Logger.warning('Window: maximize() is used only on desktop OSes.')\n\n def minimize(self):\n if self._is_desktop:\n self._win.minimize_window()\n else:\n Logger.warning('Window: minimize() is used only on desktop OSes.')\n\n def restore(self):\n if self._is_desktop:\n self._win.restore_window()\n else:\n Logger.warning('Window: restore() is used only on desktop OSes.')\n\n def hide(self):\n if self._is_desktop:\n self._win.hide_window()\n else:\n Logger.warning('Window: hide() is used only on desktop OSes.')\n\n def show(self):\n if self._is_desktop:\n self._win.show_window()\n else:\n Logger.warning('Window: show() is used only on desktop OSes.')\n\n @deprecated\n def toggle_fullscreen(self):\n if self.fullscreen in (True, 'auto'):\n self.fullscreen = False\n else:\n self.fullscreen = 'auto'\n\n def set_title(self, title):\n self._win.set_window_title(title)\n\n def set_icon(self, filename):\n self._win.set_window_icon(str(filename))\n\n def screenshot(self, *largs, **kwargs):\n filename = super(WindowSDL, self).screenshot(*largs, **kwargs)\n if filename is None:\n return\n\n from kivy.graphics.opengl import glReadPixels, GL_RGB, GL_UNSIGNED_BYTE\n width, height = self.size\n data = glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE)\n self._win.save_bytes_in_png(filename, data, width, height)\n Logger.debug('Window: Screenshot saved at <%s>' % filename)\n return filename\n\n def flip(self):\n self._win.flip()\n super(WindowSDL, self).flip()\n\n def _mainloop(self):\n EventLoop.idle()\n\n while True:\n event = self._win.poll()\n if event is False:\n break\n if event is None:\n continue\n\n action, args = event[0], event[1:]\n if action == 'quit':\n EventLoop.quit = True\n self.close()\n break\n\n elif action in ('fingermotion', 'fingerdown', 'fingerup'):\n # for finger, pass the raw event to SDL motion event provider\n # XXX this is problematic. On OSX, it generates touches with 0,\n # 0 coordinates, at the same times as mouse. But it works.\n # We have a conflict of using either the mouse or the finger.\n # Right now, we have no mechanism that we could use to know\n # which is the preferred one for the application.\n #SDL2MotionEventProvider.q.appendleft(event)\n pass\n\n elif action == 'mousemotion':\n x, y = args\n self.mouse_pos = x, self.system_size[1] - y\n self._mouse_x = x\n self._mouse_y = y\n # don't dispatch motion if no button are pressed\n if len(self._mouse_buttons_down) == 0:\n continue\n self._mouse_meta = self.modifiers\n self.dispatch('on_mouse_move', x, y, self.modifiers)\n\n elif action in ('mousebuttondown', 'mousebuttonup'):\n x, y, button = args\n btn = 'left'\n if button == 3:\n btn = 'right'\n elif button == 2:\n btn = 'middle'\n eventname = 'on_mouse_down'\n self._mouse_buttons_down.add(button)\n if action == 'mousebuttonup':\n eventname = 'on_mouse_up'\n self._mouse_buttons_down.remove(button)\n self._mouse_x = x\n self._mouse_y = y\n self.dispatch(eventname, x, y, btn, self.modifiers)\n elif action.startswith('mousewheel'):\n self._update_modifiers()\n x, y, button = args\n btn = 'scrolldown'\n if action.endswith('up'):\n btn = 'scrollup'\n elif action.endswith('right'):\n btn = 'scrollright'\n elif action.endswith('left'):\n btn = 'scrollleft'\n\n self._mouse_meta = self.modifiers\n self._mouse_btn = btn\n #times = x if y == 0 else y\n #times = min(abs(times), 100)\n #for k in range(times):\n self._mouse_down = True\n self.dispatch('on_mouse_down',\n self._mouse_x, self._mouse_y, btn, self.modifiers)\n self._mouse_down = False\n self.dispatch('on_mouse_up',\n self._mouse_x, self._mouse_y, btn, self.modifiers)\n\n elif action == 'dropfile':\n dropfile = args\n self.dispatch('on_dropfile', dropfile[0])\n # video resize\n elif action == 'windowresized':\n self._size = args\n # don't use trigger here, we want to delay the resize event\n cb = self._do_resize\n Clock.unschedule(cb)\n Clock.schedule_once(cb, .1)\n\n elif action == 'windowresized':\n self.canvas.ask_update()\n\n elif action == 'windowrestored':\n self.canvas.ask_update()\n\n elif action == 'windowexposed':\n self.canvas.ask_update()\n\n elif action == 'windowminimized':\n if Config.getboolean('kivy', 'pause_on_minimize'):\n self.do_pause()\n\n elif action == 'joyaxismotion':\n stickid, axisid, value = args\n self.dispatch('on_joy_axis', stickid, axisid, value)\n elif action == 'joyhatmotion':\n stickid, hatid, value = args\n self.dispatch('on_joy_hat', stickid, hatid, value)\n elif action == 'joyballmotion':\n stickid, ballid, xrel, yrel = args\n self.dispatch('on_joy_ball', stickid, ballid, xrel, yrel)\n elif action == 'joybuttondown':\n stickid, buttonid = args\n self.dispatch('on_joy_button_down', stickid, buttonid)\n elif action == 'joybuttonup':\n stickid, buttonid = args\n self.dispatch('on_joy_button_up', stickid, buttonid)\n\n elif action in ('keydown', 'keyup'):\n mod, key, scancode, kstr = args\n if mod in self._meta_keys:\n try:\n kstr = unichr(key)\n except ValueError:\n pass\n\n key_swap = {\n SDLK_LEFT: 276,\n SDLK_RIGHT: 275,\n SDLK_UP: 273,\n SDLK_DOWN: 274,\n SDLK_HOME: 278,\n SDLK_END: 279,\n SDLK_PAGEDOWN: 281,\n SDLK_PAGEUP: 280,\n SDLK_SHIFTL: 303,\n SDLK_SHIFTR: 304,\n SDLK_LCTRL: KMOD_LCTRL,\n SDLK_RCTRL: KMOD_RCTRL,\n SDLK_LALT: KMOD_LALT,\n SDLK_RALT: KMOD_RALT}\n\n if platform == 'ios':\n # XXX ios keyboard suck, when backspace is hit, the delete\n # keycode is sent. fix it.\n key_swap[127] = 8 # back\n\n try:\n key = key_swap[key]\n except KeyError:\n pass\n\n if action == 'keydown':\n self._update_modifiers(mod, key)\n else:\n self._update_modifiers(mod) # ignore the key, it\n # has been released\n if 'shift' in self._modifiers and key\\\n not in self.command_keys.keys():\n return\n\n if action == 'keyup':\n self.dispatch('on_key_up', key, scancode)\n continue\n\n # don't dispatch more key if down event is accepted\n if self.dispatch('on_key_down', key,\n scancode, kstr,\n self.modifiers):\n continue\n self.dispatch('on_keyboard', key,\n scancode, kstr,\n self.modifiers)\n\n elif action == 'textinput':\n key = args[0][0]\n # XXX on IOS, keydown/up don't send unicode anymore.\n # With latest sdl, the text is sent over textinput\n # Right now, redo keydown/up, but we need to seperate both call\n # too. (and adapt on_key_* API.)\n self.dispatch('on_key_down', key, None, args[0],\n self.modifiers)\n self.dispatch('on_keyboard', None, None, args[0],\n self.modifiers)\n self.dispatch('on_key_up', key, None, args[0],\n self.modifiers)\n\n # unhandled event !\n else:\n Logger.trace('WindowSDL: Unhandled event %s' % str(event))\n\n def _do_resize(self, dt):\n Logger.debug('Window: Resize window to %s' % str(self._size))\n self._win.resize_display_mode(*self._size)\n self.dispatch('on_resize', *self._size)\n\n def do_pause(self):\n # should go to app pause mode.\n from kivy.app import App\n from kivy.base import stopTouchApp\n app = App.get_running_app()\n if not app:\n Logger.info('WindowSDL: No running App found, exit.')\n stopTouchApp()\n return\n\n if not app.dispatch('on_pause'):\n Logger.info('WindowSDL: App doesn\\'t support pause mode, stop.')\n stopTouchApp()\n return\n\n # XXX FIXME wait for sdl resume\n while True:\n event = self._win.poll()\n if event is False:\n continue\n if event is None:\n continue\n\n action, args = event[0], event[1:]\n if action == 'quit':\n EventLoop.quit = True\n self.close()\n break\n elif action == 'windowrestored':\n break\n\n app.dispatch('on_resume')\n\n def mainloop(self):\n # don't known why, but pygame required a resize event\n # for opengl, before mainloop... window reinit ?\n self.dispatch('on_resize', *self.size)\n\n while not EventLoop.quit and EventLoop.status == 'started':\n try:\n self._mainloop()\n except BaseException as inst:\n # use exception manager first\n r = ExceptionManager.handle_exception(inst)\n if r == ExceptionManager.RAISE:\n stopTouchApp()\n raise\n else:\n pass\n\n # force deletion of window\n self._win.teardown_window()\n\n #\n # Pygame wrapper\n #\n def _update_modifiers(self, mods=None, key=None):\n # Available mod, from dir(pygame)\n # 'KMOD_ALT', 'KMOD_CAPS', 'KMOD_CTRL', 'KMOD_LALT',\n # 'KMOD_LCTRL', 'KMOD_LMETA', 'KMOD_LSHIFT', 'KMOD_META',\n # 'KMOD_MODE', 'KMOD_NONE'\n if mods is None and key is None:\n return\n modifiers = set()\n\n if mods is not None:\n if mods & (KMOD_RSHIFT | KMOD_LSHIFT):\n modifiers.add('shift')\n if mods & (KMOD_RALT | KMOD_LALT):\n modifiers.add('alt')\n if mods & (KMOD_RCTRL | KMOD_LCTRL):\n modifiers.add('ctrl')\n if mods & (KMOD_RMETA | KMOD_LMETA):\n modifiers.add('meta')\n\n if key is not None:\n if key in (KMOD_RSHIFT, KMOD_LSHIFT):\n modifiers.add('shift')\n if key in (KMOD_RALT, KMOD_LALT):\n modifiers.add('alt')\n if key in (KMOD_RCTRL, KMOD_LCTRL):\n modifiers.add('ctrl')\n if key in (KMOD_RMETA, KMOD_LMETA):\n modifiers.add('meta')\n\n self._modifiers = list(modifiers)\n return\n\n def request_keyboard(self, callback, target, input_type='text'):\n self._sdl_keyboard = super(WindowSDL, self).\\\n request_keyboard(callback, target, input_type)\n self._win.show_keyboard()\n Clock.schedule_interval(self._check_keyboard_shown, 1 / 5.)\n return self._sdl_keyboard\n\n def release_keyboard(self, *largs):\n super(WindowSDL, self).release_keyboard(*largs)\n self._win.hide_keyboard()\n self._sdl_keyboard = None\n return True\n\n def _check_keyboard_shown(self, dt):\n if self._sdl_keyboard is None:\n return False\n if not self._win.is_keyboard_shown():\n self._sdl_keyboard.release()\n\n", "path": "kivy/core/window/window_sdl2.py"}]} |
gh_patches_debug_1575 | rasdani/github-patches | git_diff | conda__conda-5426 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Launching navigator via prompt warnings appear
_From @RidaZubair on May 24, 2017 9:47_
**OS:** Windows
**Anaconda: 4.4.0**
**Actual:**
On launching navigator via prompt following warning appears on prompt

_Copied from original issue: ContinuumIO/navigator#1189_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda/common/platform.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 from collections import OrderedDict
5 from genericpath import exists
6 from glob import glob
7 from logging import getLogger
8 import sys
9
10 from .compat import iteritems, on_win
11 from .._vendor.auxlib.decorators import memoize
12
13 log = getLogger(__name__)
14
15
16 def is_admin_on_windows(): # pragma: unix no cover
17 # http://stackoverflow.com/a/1026626/2127762
18 if not on_win: # pragma: no cover
19 return False
20 try:
21 from ctypes import windll
22 return windll.shell32.IsUserAnAdmin()() != 0
23 except ImportError as e:
24 log.debug('%r', e)
25 return 'unknown'
26 except Exception as e:
27 log.warn('%r', e)
28 return 'unknown'
29
30
31 @memoize
32 def linux_get_libc_version():
33 """
34 If on linux, returns (libc_family, version), otherwise (None, None)
35 """
36
37 if not sys.platform.startswith('linux'):
38 return None, None
39
40 from os import confstr, confstr_names, readlink
41
42 # Python 2.7 does not have either of these keys in confstr_names, so provide
43 # hard-coded defaults and assert if the key is in confstr_names but differs.
44 # These are defined by POSIX anyway so should never change.
45 confstr_names_fallback = OrderedDict([('CS_GNU_LIBC_VERSION', 2),
46 ('CS_GNU_LIBPTHREAD_VERSION', 3)])
47
48 val = None
49 for k, v in iteritems(confstr_names_fallback):
50 assert k not in confstr_names or confstr_names[k] == v, (
51 "confstr_names_fallback for %s is %s yet in confstr_names it is %s"
52 "" % (k, confstr_names_fallback[k], confstr_names[k])
53 )
54 try:
55 val = str(confstr(v))
56 except:
57 pass
58 else:
59 if val:
60 break
61
62 if not val:
63 # Weird, play it safe and assume glibc 2.5
64 family, version = 'glibc', '2.5'
65 log.warning("Failed to detect libc family and version, assuming %s/%s", family, version)
66 return family, version
67 family, version = val.split(' ')
68
69 # NPTL is just the name of the threading library, even though the
70 # version refers to that of uClibc. readlink() can help to try to
71 # figure out a better name instead.
72 if family == 'NPTL':
73 clibs = glob('/lib/libc.so*')
74 for clib in clibs:
75 clib = readlink(clib)
76 if exists(clib):
77 if clib.startswith('libuClibc'):
78 if version.startswith('0.'):
79 family = 'uClibc'
80 else:
81 family = 'uClibc-ng'
82 return family, version
83 # This could be some other C library; it is unlikely though.
84 family = 'uClibc'
85 log.warning("Failed to detect non-glibc family, assuming %s (%s)", family, version)
86 return family, version
87 return family, version
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conda/common/platform.py b/conda/common/platform.py
--- a/conda/common/platform.py
+++ b/conda/common/platform.py
@@ -19,12 +19,12 @@
return False
try:
from ctypes import windll
- return windll.shell32.IsUserAnAdmin()() != 0
+ return windll.shell32.IsUserAnAdmin() != 0
except ImportError as e:
log.debug('%r', e)
return 'unknown'
except Exception as e:
- log.warn('%r', e)
+ log.info('%r', e)
return 'unknown'
| {"golden_diff": "diff --git a/conda/common/platform.py b/conda/common/platform.py\n--- a/conda/common/platform.py\n+++ b/conda/common/platform.py\n@@ -19,12 +19,12 @@\n return False\n try:\n from ctypes import windll\n- return windll.shell32.IsUserAnAdmin()() != 0\n+ return windll.shell32.IsUserAnAdmin() != 0\n except ImportError as e:\n log.debug('%r', e)\n return 'unknown'\n except Exception as e:\n- log.warn('%r', e)\n+ log.info('%r', e)\n return 'unknown'\n", "issue": "Launching navigator via prompt warnings appear\n_From @RidaZubair on May 24, 2017 9:47_\n\n**OS:** Windows\r\n**Anaconda: 4.4.0**\r\n\r\n**Actual:**\r\nOn launching navigator via prompt following warning appears on prompt\r\n\r\n\r\n\n\n_Copied from original issue: ContinuumIO/navigator#1189_\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom collections import OrderedDict\nfrom genericpath import exists\nfrom glob import glob\nfrom logging import getLogger\nimport sys\n\nfrom .compat import iteritems, on_win\nfrom .._vendor.auxlib.decorators import memoize\n\nlog = getLogger(__name__)\n\n\ndef is_admin_on_windows(): # pragma: unix no cover\n # http://stackoverflow.com/a/1026626/2127762\n if not on_win: # pragma: no cover\n return False\n try:\n from ctypes import windll\n return windll.shell32.IsUserAnAdmin()() != 0\n except ImportError as e:\n log.debug('%r', e)\n return 'unknown'\n except Exception as e:\n log.warn('%r', e)\n return 'unknown'\n\n\n@memoize\ndef linux_get_libc_version():\n \"\"\"\n If on linux, returns (libc_family, version), otherwise (None, None)\n \"\"\"\n\n if not sys.platform.startswith('linux'):\n return None, None\n\n from os import confstr, confstr_names, readlink\n\n # Python 2.7 does not have either of these keys in confstr_names, so provide\n # hard-coded defaults and assert if the key is in confstr_names but differs.\n # These are defined by POSIX anyway so should never change.\n confstr_names_fallback = OrderedDict([('CS_GNU_LIBC_VERSION', 2),\n ('CS_GNU_LIBPTHREAD_VERSION', 3)])\n\n val = None\n for k, v in iteritems(confstr_names_fallback):\n assert k not in confstr_names or confstr_names[k] == v, (\n \"confstr_names_fallback for %s is %s yet in confstr_names it is %s\"\n \"\" % (k, confstr_names_fallback[k], confstr_names[k])\n )\n try:\n val = str(confstr(v))\n except:\n pass\n else:\n if val:\n break\n\n if not val:\n # Weird, play it safe and assume glibc 2.5\n family, version = 'glibc', '2.5'\n log.warning(\"Failed to detect libc family and version, assuming %s/%s\", family, version)\n return family, version\n family, version = val.split(' ')\n\n # NPTL is just the name of the threading library, even though the\n # version refers to that of uClibc. readlink() can help to try to\n # figure out a better name instead.\n if family == 'NPTL':\n clibs = glob('/lib/libc.so*')\n for clib in clibs:\n clib = readlink(clib)\n if exists(clib):\n if clib.startswith('libuClibc'):\n if version.startswith('0.'):\n family = 'uClibc'\n else:\n family = 'uClibc-ng'\n return family, version\n # This could be some other C library; it is unlikely though.\n family = 'uClibc'\n log.warning(\"Failed to detect non-glibc family, assuming %s (%s)\", family, version)\n return family, version\n return family, version\n", "path": "conda/common/platform.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom collections import OrderedDict\nfrom genericpath import exists\nfrom glob import glob\nfrom logging import getLogger\nimport sys\n\nfrom .compat import iteritems, on_win\nfrom .._vendor.auxlib.decorators import memoize\n\nlog = getLogger(__name__)\n\n\ndef is_admin_on_windows(): # pragma: unix no cover\n # http://stackoverflow.com/a/1026626/2127762\n if not on_win: # pragma: no cover\n return False\n try:\n from ctypes import windll\n return windll.shell32.IsUserAnAdmin() != 0\n except ImportError as e:\n log.debug('%r', e)\n return 'unknown'\n except Exception as e:\n log.info('%r', e)\n return 'unknown'\n\n\n@memoize\ndef linux_get_libc_version():\n \"\"\"\n If on linux, returns (libc_family, version), otherwise (None, None)\n \"\"\"\n\n if not sys.platform.startswith('linux'):\n return None, None\n\n from os import confstr, confstr_names, readlink\n\n # Python 2.7 does not have either of these keys in confstr_names, so provide\n # hard-coded defaults and assert if the key is in confstr_names but differs.\n # These are defined by POSIX anyway so should never change.\n confstr_names_fallback = OrderedDict([('CS_GNU_LIBC_VERSION', 2),\n ('CS_GNU_LIBPTHREAD_VERSION', 3)])\n\n val = None\n for k, v in iteritems(confstr_names_fallback):\n assert k not in confstr_names or confstr_names[k] == v, (\n \"confstr_names_fallback for %s is %s yet in confstr_names it is %s\"\n \"\" % (k, confstr_names_fallback[k], confstr_names[k])\n )\n try:\n val = str(confstr(v))\n except:\n pass\n else:\n if val:\n break\n\n if not val:\n # Weird, play it safe and assume glibc 2.5\n family, version = 'glibc', '2.5'\n log.warning(\"Failed to detect libc family and version, assuming %s/%s\", family, version)\n return family, version\n family, version = val.split(' ')\n\n # NPTL is just the name of the threading library, even though the\n # version refers to that of uClibc. readlink() can help to try to\n # figure out a better name instead.\n if family == 'NPTL':\n clibs = glob('/lib/libc.so*')\n for clib in clibs:\n clib = readlink(clib)\n if exists(clib):\n if clib.startswith('libuClibc'):\n if version.startswith('0.'):\n family = 'uClibc'\n else:\n family = 'uClibc-ng'\n return family, version\n # This could be some other C library; it is unlikely though.\n family = 'uClibc'\n log.warning(\"Failed to detect non-glibc family, assuming %s (%s)\", family, version)\n return family, version\n return family, version\n", "path": "conda/common/platform.py"}]} |
gh_patches_debug_1576 | rasdani/github-patches | git_diff | modin-project__modin-3390 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Do not check ASV benchmarks on test data, where the number of rows is much less than the number of columns
These sizes can be removed because such cases are not used in benchmarking: https://github.com/modin-project/modin/blob/dd91a78ad3f4b8e3e569215e9c8e540ad099d4a8/asv_bench/benchmarks/utils/data_shapes.py#L33 and https://github.com/modin-project/modin/blob/dd91a78ad3f4b8e3e569215e9c8e540ad099d4a8/asv_bench/benchmarks/utils/data_shapes.py#L46
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `asv_bench/benchmarks/utils/data_shapes.py`
Content:
```
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14 """Define data shapes."""
15
16 import os
17 import json
18
19 from .compatibility import ASV_USE_BACKEND, ASV_DATASET_SIZE
20
21 RAND_LOW = 0
22 RAND_HIGH = 1_000_000_000 if ASV_USE_BACKEND == "omnisci" else 100
23
24 BINARY_OP_DATA_SIZE = {
25 "big": [
26 [[5000, 5000], [5000, 5000]],
27 # the case extremely inefficient
28 # [[20, 500_000], [10, 1_000_000]],
29 [[500_000, 20], [1_000_000, 10]],
30 ],
31 "small": [
32 [[250, 250], [250, 250]],
33 [[20, 10_000], [10, 25_000]],
34 [[10_000, 20], [25_000, 10]],
35 ],
36 }
37 UNARY_OP_DATA_SIZE = {
38 "big": [
39 [5000, 5000],
40 # the case extremely inefficient
41 # [10, 1_000_000],
42 [1_000_000, 10],
43 ],
44 "small": [
45 [250, 250],
46 [10, 10_000],
47 [10_000, 10],
48 ],
49 }
50 SERIES_DATA_SIZE = {
51 "big": [
52 (100_000, 1),
53 ],
54 "small": [
55 (10_000, 1),
56 ],
57 }
58
59
60 OMNISCI_BINARY_OP_DATA_SIZE = {
61 "big": [
62 [[500_000, 20], [1_000_000, 10]],
63 ],
64 "small": [
65 [[10_000, 20], [25_000, 10]],
66 ],
67 }
68 OMNISCI_UNARY_OP_DATA_SIZE = {
69 "big": [
70 [1_000_000, 10],
71 ],
72 "small": [
73 [10_000, 10],
74 ],
75 }
76 OMNISCI_SERIES_DATA_SIZE = {
77 "big": [
78 [10_000_000, 1],
79 ],
80 "small": [
81 [100_000, 1],
82 ],
83 }
84
85 BINARY_SHAPES = (
86 OMNISCI_BINARY_OP_DATA_SIZE[ASV_DATASET_SIZE]
87 if ASV_USE_BACKEND == "omnisci"
88 else BINARY_OP_DATA_SIZE[ASV_DATASET_SIZE]
89 )
90 UNARY_SHAPES = (
91 OMNISCI_UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE]
92 if ASV_USE_BACKEND == "omnisci"
93 else UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE]
94 )
95 SERIES_SHAPES = (
96 OMNISCI_SERIES_DATA_SIZE[ASV_DATASET_SIZE]
97 if ASV_USE_BACKEND == "omnisci"
98 else SERIES_DATA_SIZE[ASV_DATASET_SIZE]
99 )
100
101 DEFAULT_GROUPBY_NGROUPS = {
102 "big": [100, "huge_amount_groups"],
103 "small": [5],
104 }
105 GROUPBY_NGROUPS = DEFAULT_GROUPBY_NGROUPS[ASV_DATASET_SIZE]
106
107 _DEFAULT_CONFIG_T = [
108 (
109 UNARY_SHAPES,
110 [
111 # Pandas backend benchmarks
112 "TimeGroupByMultiColumn",
113 "TimeGroupByDefaultAggregations",
114 "TimeGroupByDictionaryAggregation",
115 "TimeSetItem",
116 "TimeInsert",
117 "TimeArithmetic",
118 "TimeSortValues",
119 "TimeDrop",
120 "TimeHead",
121 "TimeFillna",
122 "TimeFillnaDataFrame",
123 "TimeValueCountsFrame",
124 "TimeValueCountsSeries",
125 "TimeIndexing",
126 "TimeMultiIndexing",
127 "TimeResetIndex",
128 "TimeAstype",
129 "TimeDescribe",
130 "TimeProperties",
131 # IO benchmarks
132 "TimeReadCsvSkiprows",
133 "TimeReadCsvTrueFalseValues",
134 "TimeReadCsvNamesDtype",
135 # Scalability benchmarks
136 "TimeFromPandas",
137 "TimeToPandas",
138 # OmniSci backend benchmarks
139 "omnisci.TimeJoin",
140 "omnisci.TimeBinaryOpDataFrame",
141 "omnisci.TimeArithmetic",
142 "omnisci.TimeSortValues",
143 "omnisci.TimeDrop",
144 "omnisci.TimeHead",
145 "omnisci.TimeFillna",
146 "omnisci.TimeIndexing",
147 "omnisci.TimeResetIndex",
148 "omnisci.TimeAstype",
149 "omnisci.TimeDescribe",
150 "omnisci.TimeProperties",
151 "omnisci.TimeGroupByDefaultAggregations",
152 "omnisci.TimeGroupByMultiColumn",
153 # OmniSci backend IO benchmarks
154 "omnisci.TimeReadCsvNames",
155 ],
156 ),
157 (
158 BINARY_SHAPES,
159 [
160 # Pandas backend benchmarks
161 "TimeJoin",
162 "TimeMerge",
163 "TimeConcat",
164 "TimeAppend",
165 "TimeBinaryOp",
166 # OmniSci backend benchmarks
167 "omnisci.TimeMerge",
168 "omnisci.TimeAppend",
169 ],
170 ),
171 (
172 SERIES_SHAPES,
173 [
174 # Pandas backend benchmarks
175 "TimeFillnaSeries",
176 # OmniSci backend benchmarks
177 "omnisci.TimeBinaryOpSeries",
178 "omnisci.TimeValueCountsSeries",
179 ],
180 ),
181 ]
182 DEFAULT_CONFIG = {}
183 for _shape, _names in _DEFAULT_CONFIG_T:
184 DEFAULT_CONFIG.update({_name: _shape for _name in _names})
185
186 CONFIG_FROM_FILE = None
187
188
189 def get_benchmark_shapes(bench_id: str):
190 """
191 Get custom benchmark shapes from a json file stored in MODIN_ASV_DATASIZE_CONFIG.
192
193 If `bench_id` benchmark is not found in the file, then the default value will
194 be used.
195
196 Parameters
197 ----------
198 bench_id : str
199 Unique benchmark identifier that is used to get shapes.
200
201 Returns
202 -------
203 list
204 Benchmark shapes.
205 """
206 global CONFIG_FROM_FILE
207 if not CONFIG_FROM_FILE:
208 try:
209 from modin.config import AsvDataSizeConfig
210
211 filename = AsvDataSizeConfig.get()
212 except ImportError:
213 filename = os.environ.get("MODIN_ASV_DATASIZE_CONFIG", None)
214 if filename:
215 # should be json
216 with open(filename) as _f:
217 CONFIG_FROM_FILE = json.load(_f)
218
219 if CONFIG_FROM_FILE and bench_id in CONFIG_FROM_FILE:
220 # example: "omnisci.TimeReadCsvNames": [[5555, 55], [3333, 33]]
221 return CONFIG_FROM_FILE[bench_id]
222 return DEFAULT_CONFIG[bench_id]
223
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/asv_bench/benchmarks/utils/data_shapes.py b/asv_bench/benchmarks/utils/data_shapes.py
--- a/asv_bench/benchmarks/utils/data_shapes.py
+++ b/asv_bench/benchmarks/utils/data_shapes.py
@@ -30,7 +30,6 @@
],
"small": [
[[250, 250], [250, 250]],
- [[20, 10_000], [10, 25_000]],
[[10_000, 20], [25_000, 10]],
],
}
@@ -43,7 +42,6 @@
],
"small": [
[250, 250],
- [10, 10_000],
[10_000, 10],
],
}
| {"golden_diff": "diff --git a/asv_bench/benchmarks/utils/data_shapes.py b/asv_bench/benchmarks/utils/data_shapes.py\n--- a/asv_bench/benchmarks/utils/data_shapes.py\n+++ b/asv_bench/benchmarks/utils/data_shapes.py\n@@ -30,7 +30,6 @@\n ],\n \"small\": [\n [[250, 250], [250, 250]],\n- [[20, 10_000], [10, 25_000]],\n [[10_000, 20], [25_000, 10]],\n ],\n }\n@@ -43,7 +42,6 @@\n ],\n \"small\": [\n [250, 250],\n- [10, 10_000],\n [10_000, 10],\n ],\n }\n", "issue": "Do not check ASV benchmarks on test data, where the number of rows is much less than the number of columns\nThese sizes can be removed because such cases are not used in benchmarking: https://github.com/modin-project/modin/blob/dd91a78ad3f4b8e3e569215e9c8e540ad099d4a8/asv_bench/benchmarks/utils/data_shapes.py#L33 and https://github.com/modin-project/modin/blob/dd91a78ad3f4b8e3e569215e9c8e540ad099d4a8/asv_bench/benchmarks/utils/data_shapes.py#L46\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"Define data shapes.\"\"\"\n\nimport os\nimport json\n\nfrom .compatibility import ASV_USE_BACKEND, ASV_DATASET_SIZE\n\nRAND_LOW = 0\nRAND_HIGH = 1_000_000_000 if ASV_USE_BACKEND == \"omnisci\" else 100\n\nBINARY_OP_DATA_SIZE = {\n \"big\": [\n [[5000, 5000], [5000, 5000]],\n # the case extremely inefficient\n # [[20, 500_000], [10, 1_000_000]],\n [[500_000, 20], [1_000_000, 10]],\n ],\n \"small\": [\n [[250, 250], [250, 250]],\n [[20, 10_000], [10, 25_000]],\n [[10_000, 20], [25_000, 10]],\n ],\n}\nUNARY_OP_DATA_SIZE = {\n \"big\": [\n [5000, 5000],\n # the case extremely inefficient\n # [10, 1_000_000],\n [1_000_000, 10],\n ],\n \"small\": [\n [250, 250],\n [10, 10_000],\n [10_000, 10],\n ],\n}\nSERIES_DATA_SIZE = {\n \"big\": [\n (100_000, 1),\n ],\n \"small\": [\n (10_000, 1),\n ],\n}\n\n\nOMNISCI_BINARY_OP_DATA_SIZE = {\n \"big\": [\n [[500_000, 20], [1_000_000, 10]],\n ],\n \"small\": [\n [[10_000, 20], [25_000, 10]],\n ],\n}\nOMNISCI_UNARY_OP_DATA_SIZE = {\n \"big\": [\n [1_000_000, 10],\n ],\n \"small\": [\n [10_000, 10],\n ],\n}\nOMNISCI_SERIES_DATA_SIZE = {\n \"big\": [\n [10_000_000, 1],\n ],\n \"small\": [\n [100_000, 1],\n ],\n}\n\nBINARY_SHAPES = (\n OMNISCI_BINARY_OP_DATA_SIZE[ASV_DATASET_SIZE]\n if ASV_USE_BACKEND == \"omnisci\"\n else BINARY_OP_DATA_SIZE[ASV_DATASET_SIZE]\n)\nUNARY_SHAPES = (\n OMNISCI_UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE]\n if ASV_USE_BACKEND == \"omnisci\"\n else UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE]\n)\nSERIES_SHAPES = (\n OMNISCI_SERIES_DATA_SIZE[ASV_DATASET_SIZE]\n if ASV_USE_BACKEND == \"omnisci\"\n else SERIES_DATA_SIZE[ASV_DATASET_SIZE]\n)\n\nDEFAULT_GROUPBY_NGROUPS = {\n \"big\": [100, \"huge_amount_groups\"],\n \"small\": [5],\n}\nGROUPBY_NGROUPS = DEFAULT_GROUPBY_NGROUPS[ASV_DATASET_SIZE]\n\n_DEFAULT_CONFIG_T = [\n (\n UNARY_SHAPES,\n [\n # Pandas backend benchmarks\n \"TimeGroupByMultiColumn\",\n \"TimeGroupByDefaultAggregations\",\n \"TimeGroupByDictionaryAggregation\",\n \"TimeSetItem\",\n \"TimeInsert\",\n \"TimeArithmetic\",\n \"TimeSortValues\",\n \"TimeDrop\",\n \"TimeHead\",\n \"TimeFillna\",\n \"TimeFillnaDataFrame\",\n \"TimeValueCountsFrame\",\n \"TimeValueCountsSeries\",\n \"TimeIndexing\",\n \"TimeMultiIndexing\",\n \"TimeResetIndex\",\n \"TimeAstype\",\n \"TimeDescribe\",\n \"TimeProperties\",\n # IO benchmarks\n \"TimeReadCsvSkiprows\",\n \"TimeReadCsvTrueFalseValues\",\n \"TimeReadCsvNamesDtype\",\n # Scalability benchmarks\n \"TimeFromPandas\",\n \"TimeToPandas\",\n # OmniSci backend benchmarks\n \"omnisci.TimeJoin\",\n \"omnisci.TimeBinaryOpDataFrame\",\n \"omnisci.TimeArithmetic\",\n \"omnisci.TimeSortValues\",\n \"omnisci.TimeDrop\",\n \"omnisci.TimeHead\",\n \"omnisci.TimeFillna\",\n \"omnisci.TimeIndexing\",\n \"omnisci.TimeResetIndex\",\n \"omnisci.TimeAstype\",\n \"omnisci.TimeDescribe\",\n \"omnisci.TimeProperties\",\n \"omnisci.TimeGroupByDefaultAggregations\",\n \"omnisci.TimeGroupByMultiColumn\",\n # OmniSci backend IO benchmarks\n \"omnisci.TimeReadCsvNames\",\n ],\n ),\n (\n BINARY_SHAPES,\n [\n # Pandas backend benchmarks\n \"TimeJoin\",\n \"TimeMerge\",\n \"TimeConcat\",\n \"TimeAppend\",\n \"TimeBinaryOp\",\n # OmniSci backend benchmarks\n \"omnisci.TimeMerge\",\n \"omnisci.TimeAppend\",\n ],\n ),\n (\n SERIES_SHAPES,\n [\n # Pandas backend benchmarks\n \"TimeFillnaSeries\",\n # OmniSci backend benchmarks\n \"omnisci.TimeBinaryOpSeries\",\n \"omnisci.TimeValueCountsSeries\",\n ],\n ),\n]\nDEFAULT_CONFIG = {}\nfor _shape, _names in _DEFAULT_CONFIG_T:\n DEFAULT_CONFIG.update({_name: _shape for _name in _names})\n\nCONFIG_FROM_FILE = None\n\n\ndef get_benchmark_shapes(bench_id: str):\n \"\"\"\n Get custom benchmark shapes from a json file stored in MODIN_ASV_DATASIZE_CONFIG.\n\n If `bench_id` benchmark is not found in the file, then the default value will\n be used.\n\n Parameters\n ----------\n bench_id : str\n Unique benchmark identifier that is used to get shapes.\n\n Returns\n -------\n list\n Benchmark shapes.\n \"\"\"\n global CONFIG_FROM_FILE\n if not CONFIG_FROM_FILE:\n try:\n from modin.config import AsvDataSizeConfig\n\n filename = AsvDataSizeConfig.get()\n except ImportError:\n filename = os.environ.get(\"MODIN_ASV_DATASIZE_CONFIG\", None)\n if filename:\n # should be json\n with open(filename) as _f:\n CONFIG_FROM_FILE = json.load(_f)\n\n if CONFIG_FROM_FILE and bench_id in CONFIG_FROM_FILE:\n # example: \"omnisci.TimeReadCsvNames\": [[5555, 55], [3333, 33]]\n return CONFIG_FROM_FILE[bench_id]\n return DEFAULT_CONFIG[bench_id]\n", "path": "asv_bench/benchmarks/utils/data_shapes.py"}], "after_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"Define data shapes.\"\"\"\n\nimport os\nimport json\n\nfrom .compatibility import ASV_USE_BACKEND, ASV_DATASET_SIZE\n\nRAND_LOW = 0\nRAND_HIGH = 1_000_000_000 if ASV_USE_BACKEND == \"omnisci\" else 100\n\nBINARY_OP_DATA_SIZE = {\n \"big\": [\n [[5000, 5000], [5000, 5000]],\n # the case extremely inefficient\n # [[20, 500_000], [10, 1_000_000]],\n [[500_000, 20], [1_000_000, 10]],\n ],\n \"small\": [\n [[250, 250], [250, 250]],\n [[10_000, 20], [25_000, 10]],\n ],\n}\nUNARY_OP_DATA_SIZE = {\n \"big\": [\n [5000, 5000],\n # the case extremely inefficient\n # [10, 1_000_000],\n [1_000_000, 10],\n ],\n \"small\": [\n [250, 250],\n [10_000, 10],\n ],\n}\nSERIES_DATA_SIZE = {\n \"big\": [\n (100_000, 1),\n ],\n \"small\": [\n (10_000, 1),\n ],\n}\n\n\nOMNISCI_BINARY_OP_DATA_SIZE = {\n \"big\": [\n [[500_000, 20], [1_000_000, 10]],\n ],\n \"small\": [\n [[10_000, 20], [25_000, 10]],\n ],\n}\nOMNISCI_UNARY_OP_DATA_SIZE = {\n \"big\": [\n [1_000_000, 10],\n ],\n \"small\": [\n [10_000, 10],\n ],\n}\nOMNISCI_SERIES_DATA_SIZE = {\n \"big\": [\n [10_000_000, 1],\n ],\n \"small\": [\n [100_000, 1],\n ],\n}\n\nBINARY_SHAPES = (\n OMNISCI_BINARY_OP_DATA_SIZE[ASV_DATASET_SIZE]\n if ASV_USE_BACKEND == \"omnisci\"\n else BINARY_OP_DATA_SIZE[ASV_DATASET_SIZE]\n)\nUNARY_SHAPES = (\n OMNISCI_UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE]\n if ASV_USE_BACKEND == \"omnisci\"\n else UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE]\n)\nSERIES_SHAPES = (\n OMNISCI_SERIES_DATA_SIZE[ASV_DATASET_SIZE]\n if ASV_USE_BACKEND == \"omnisci\"\n else SERIES_DATA_SIZE[ASV_DATASET_SIZE]\n)\n\nDEFAULT_GROUPBY_NGROUPS = {\n \"big\": [100, \"huge_amount_groups\"],\n \"small\": [5],\n}\nGROUPBY_NGROUPS = DEFAULT_GROUPBY_NGROUPS[ASV_DATASET_SIZE]\n\n_DEFAULT_CONFIG_T = [\n (\n UNARY_SHAPES,\n [\n # Pandas backend benchmarks\n \"TimeGroupByMultiColumn\",\n \"TimeGroupByDefaultAggregations\",\n \"TimeGroupByDictionaryAggregation\",\n \"TimeSetItem\",\n \"TimeInsert\",\n \"TimeArithmetic\",\n \"TimeSortValues\",\n \"TimeDrop\",\n \"TimeHead\",\n \"TimeFillna\",\n \"TimeFillnaDataFrame\",\n \"TimeValueCountsFrame\",\n \"TimeValueCountsSeries\",\n \"TimeIndexing\",\n \"TimeMultiIndexing\",\n \"TimeResetIndex\",\n \"TimeAstype\",\n \"TimeDescribe\",\n \"TimeProperties\",\n # IO benchmarks\n \"TimeReadCsvSkiprows\",\n \"TimeReadCsvTrueFalseValues\",\n \"TimeReadCsvNamesDtype\",\n # Scalability benchmarks\n \"TimeFromPandas\",\n \"TimeToPandas\",\n # OmniSci backend benchmarks\n \"omnisci.TimeJoin\",\n \"omnisci.TimeBinaryOpDataFrame\",\n \"omnisci.TimeArithmetic\",\n \"omnisci.TimeSortValues\",\n \"omnisci.TimeDrop\",\n \"omnisci.TimeHead\",\n \"omnisci.TimeFillna\",\n \"omnisci.TimeIndexing\",\n \"omnisci.TimeResetIndex\",\n \"omnisci.TimeAstype\",\n \"omnisci.TimeDescribe\",\n \"omnisci.TimeProperties\",\n \"omnisci.TimeGroupByDefaultAggregations\",\n \"omnisci.TimeGroupByMultiColumn\",\n # OmniSci backend IO benchmarks\n \"omnisci.TimeReadCsvNames\",\n ],\n ),\n (\n BINARY_SHAPES,\n [\n # Pandas backend benchmarks\n \"TimeJoin\",\n \"TimeMerge\",\n \"TimeConcat\",\n \"TimeAppend\",\n \"TimeBinaryOp\",\n # OmniSci backend benchmarks\n \"omnisci.TimeMerge\",\n \"omnisci.TimeAppend\",\n ],\n ),\n (\n SERIES_SHAPES,\n [\n # Pandas backend benchmarks\n \"TimeFillnaSeries\",\n # OmniSci backend benchmarks\n \"omnisci.TimeBinaryOpSeries\",\n \"omnisci.TimeValueCountsSeries\",\n ],\n ),\n]\nDEFAULT_CONFIG = {}\nfor _shape, _names in _DEFAULT_CONFIG_T:\n DEFAULT_CONFIG.update({_name: _shape for _name in _names})\n\nCONFIG_FROM_FILE = None\n\n\ndef get_benchmark_shapes(bench_id: str):\n \"\"\"\n Get custom benchmark shapes from a json file stored in MODIN_ASV_DATASIZE_CONFIG.\n\n If `bench_id` benchmark is not found in the file, then the default value will\n be used.\n\n Parameters\n ----------\n bench_id : str\n Unique benchmark identifier that is used to get shapes.\n\n Returns\n -------\n list\n Benchmark shapes.\n \"\"\"\n global CONFIG_FROM_FILE\n if not CONFIG_FROM_FILE:\n try:\n from modin.config import AsvDataSizeConfig\n\n filename = AsvDataSizeConfig.get()\n except ImportError:\n filename = os.environ.get(\"MODIN_ASV_DATASIZE_CONFIG\", None)\n if filename:\n # should be json\n with open(filename) as _f:\n CONFIG_FROM_FILE = json.load(_f)\n\n if CONFIG_FROM_FILE and bench_id in CONFIG_FROM_FILE:\n # example: \"omnisci.TimeReadCsvNames\": [[5555, 55], [3333, 33]]\n return CONFIG_FROM_FILE[bench_id]\n return DEFAULT_CONFIG[bench_id]\n", "path": "asv_bench/benchmarks/utils/data_shapes.py"}]} |
gh_patches_debug_1577 | rasdani/github-patches | git_diff | gratipay__gratipay.com-1750 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
regression w/ unglobalizing of gittip.db
We fixed a bunch (!m). I'm seeing one more on community pages:
https://www.gittip.com/for/python/
```
Traceback (most recent call last):
File "aspen/website.py", line 81, in handle_safely
response = self.handle(request)
File "aspen/website.py", line 114, in handle
response = request.resource.respond(request)
File "aspen/resources/dynamic_resource.py", line 68, in respond
response = self.get_response(context)
File "aspen/resources/negotiated_resource.py", line 99, in get_response
response.body = render(context)
File "aspen/renderers/__init__.py", line 99, in __call__
return self.render_content(context)
File "site-packages/aspen_tornado_renderer.py", line 14, in render_content
return self.compiled.generate(**context)
File "tornado/template.py", line 129, in generate
return execute()
File "/app/www/for/%slug/index.html.spt", line 233, in _execute
{% if community.nmembers == 0 %}
File "gittip/models/_mixin_elsewhere.py", line 96, in get_img_src
self.get_accounts_elsewhere()
File "gittip/models/_mixin_elsewhere.py", line 60, in get_accounts_elsewhere
accounts = self.db.all(ACCOUNTS, (self.username,))
AttributeError: 'UtterHack' object has no attribute 'db'
```
https://app.getsentry.com/gittip/gittip/group/11624316/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gittip/models/_mixin_elsewhere.py`
Content:
```
1 import os
2
3 from gittip import NotSane
4 from aspen.utils import typecheck
5 from psycopg2 import IntegrityError
6
7
8 # Exceptions
9 # ==========
10
11 class UnknownPlatform(Exception): pass
12
13 class NeedConfirmation(Exception):
14 """Represent the case where we need user confirmation during a merge.
15
16 This is used in the workflow for merging one participant into another.
17
18 """
19
20 def __init__(self, a, b, c):
21 self.other_is_a_real_participant = a
22 self.this_is_others_last_account_elsewhere = b
23 self.we_already_have_that_kind_of_account = c
24 self._all = (a, b, c)
25
26 def __repr__(self):
27 return "<NeedConfirmation: %r %r %r>" % self._all
28 __str__ = __repr__
29
30 def __eq__(self, other):
31 return self._all == other._all
32
33 def __ne__(self, other):
34 return not self.__eq__(other)
35
36 def __nonzero__(self):
37 # bool(need_confirmation)
38 A, B, C = self._all
39 return A or C
40
41
42 # Mixin
43 # =====
44
45 class MixinElsewhere(object):
46 """We use this as a mixin for Participant, and in a hackish way on the
47 homepage and community pages.
48
49 """
50
51 def get_accounts_elsewhere(self):
52 """Return a four-tuple of elsewhere Records.
53 """
54 github_account = None
55 twitter_account = None
56 bitbucket_account = None
57 bountysource_account = None
58
59 ACCOUNTS = "SELECT * FROM elsewhere WHERE participant=%s"
60 accounts = self.db.all(ACCOUNTS, (self.username,))
61
62 for account in accounts:
63 if account.platform == "github":
64 github_account = account
65 elif account.platform == "twitter":
66 twitter_account = account
67 elif account.platform == "bitbucket":
68 bitbucket_account = account
69 elif account.platform == "bountysource":
70 bountysource_account = account
71 else:
72 raise UnknownPlatform(account.platform)
73
74 return ( github_account
75 , twitter_account
76 , bitbucket_account
77 , bountysource_account
78 )
79
80
81 def get_img_src(self, size=128):
82 """Return a value for <img src="..." />.
83
84 Until we have our own profile pics, delegate. XXX Is this an attack
85 vector? Can someone inject this value? Don't think so, but if you make
86 it happen, let me know, eh? Thanks. :)
87
88 https://www.gittip.com/security.txt
89
90 """
91 typecheck(size, int)
92
93 src = '/assets/%s/avatar-default.gif' % os.environ['__VERSION__']
94
95 github, twitter, bitbucket, bountysource = \
96 self.get_accounts_elsewhere()
97 if github is not None:
98 # GitHub -> Gravatar: http://en.gravatar.com/site/implement/images/
99 if 'gravatar_id' in github.user_info:
100 gravatar_hash = github.user_info['gravatar_id']
101 src = "https://www.gravatar.com/avatar/%s.jpg?s=%s"
102 src %= (gravatar_hash, size)
103
104 elif twitter is not None:
105 # https://dev.twitter.com/docs/api/1.1/get/users/show
106 if 'profile_image_url_https' in twitter.user_info:
107 src = twitter.user_info['profile_image_url_https']
108
109 # For Twitter, we don't have good control over size. The
110 # biggest option is 73px(?!), but that's too small. Let's go
111 # with the original: even though it may be huge, that's
112 # preferrable to guaranteed blurriness. :-/
113
114 src = src.replace('_normal.', '.')
115
116 return src
117
118
119 def take_over(self, account_elsewhere, have_confirmation=False):
120 """Given an AccountElsewhere and a bool, raise NeedConfirmation or return None.
121
122 This method associates an account on another platform (GitHub, Twitter,
123 etc.) with the given Gittip participant. Every account elsewhere has an
124 associated Gittip participant account, even if its only a stub
125 participant (it allows us to track pledges to that account should they
126 ever decide to join Gittip).
127
128 In certain circumstances, we want to present the user with a
129 confirmation before proceeding to reconnect the account elsewhere to
130 the new Gittip account; NeedConfirmation is the signal to request
131 confirmation. If it was the last account elsewhere connected to the old
132 Gittip account, then we absorb the old Gittip account into the new one,
133 effectively archiving the old account.
134
135 Here's what absorbing means:
136
137 - consolidated tips to and fro are set up for the new participant
138
139 Amounts are summed, so if alice tips bob $1 and carl $1, and
140 then bob absorbs carl, then alice tips bob $2(!) and carl $0.
141
142 And if bob tips alice $1 and carl tips alice $1, and then bob
143 absorbs carl, then bob tips alice $2(!) and carl tips alice $0.
144
145 The ctime of each new consolidated tip is the older of the two
146 tips that are being consolidated.
147
148 If alice tips bob $1, and alice absorbs bob, then alice tips
149 bob $0.
150
151 If alice tips bob $1, and bob absorbs alice, then alice tips
152 bob $0.
153
154 - all tips to and from the other participant are set to zero
155 - the absorbed username is released for reuse
156 - the absorption is recorded in an absorptions table
157
158 This is done in one transaction.
159
160 """
161 # Lazy imports to dodge circular imports.
162 from gittip.models.participant import reserve_a_random_username
163 from gittip.models.participant import gen_random_usernames
164
165 platform = account_elsewhere.platform
166 user_id = account_elsewhere.user_id
167
168 CREATE_TEMP_TABLE_FOR_UNIQUE_TIPS = """
169
170 CREATE TEMP TABLE __temp_unique_tips ON COMMIT drop AS
171
172 -- Get all the latest tips from everyone to everyone.
173
174 SELECT DISTINCT ON (tipper, tippee)
175 ctime, tipper, tippee, amount
176 FROM tips
177 ORDER BY tipper, tippee, mtime DESC;
178
179 """
180
181 CONSOLIDATE_TIPS_RECEIVING = """
182
183 -- Create a new set of tips, one for each current tip *to* either
184 -- the dead or the live account. If a user was tipping both the
185 -- dead and the live account, then we create one new combined tip
186 -- to the live account (via the GROUP BY and sum()).
187
188 INSERT INTO tips (ctime, tipper, tippee, amount)
189
190 SELECT min(ctime), tipper, %(live)s AS tippee, sum(amount)
191
192 FROM __temp_unique_tips
193
194 WHERE (tippee = %(dead)s OR tippee = %(live)s)
195 -- Include tips *to* either the dead or live account.
196
197 AND NOT (tipper = %(dead)s OR tipper = %(live)s)
198 -- Don't include tips *from* the dead or live account,
199 -- lest we convert cross-tipping to self-tipping.
200
201 AND amount > 0
202 -- Don't include zeroed out tips, so we avoid a no-op
203 -- zero tip entry.
204
205 GROUP BY tipper
206
207 """
208
209 CONSOLIDATE_TIPS_GIVING = """
210
211 -- Create a new set of tips, one for each current tip *from* either
212 -- the dead or the live account. If both the dead and the live
213 -- account were tipping a given user, then we create one new
214 -- combined tip from the live account (via the GROUP BY and sum()).
215
216 INSERT INTO tips (ctime, tipper, tippee, amount)
217
218 SELECT min(ctime), %(live)s AS tipper, tippee, sum(amount)
219
220 FROM __temp_unique_tips
221
222 WHERE (tipper = %(dead)s OR tipper = %(live)s)
223 -- Include tips *from* either the dead or live account.
224
225 AND NOT (tippee = %(dead)s OR tippee = %(live)s)
226 -- Don't include tips *to* the dead or live account,
227 -- lest we convert cross-tipping to self-tipping.
228
229 AND amount > 0
230 -- Don't include zeroed out tips, so we avoid a no-op
231 -- zero tip entry.
232
233 GROUP BY tippee
234
235 """
236
237 ZERO_OUT_OLD_TIPS_RECEIVING = """
238
239 INSERT INTO tips (ctime, tipper, tippee, amount)
240
241 SELECT ctime, tipper, tippee, 0 AS amount
242 FROM __temp_unique_tips
243 WHERE tippee=%s AND amount > 0
244
245 """
246
247 ZERO_OUT_OLD_TIPS_GIVING = """
248
249 INSERT INTO tips (ctime, tipper, tippee, amount)
250
251 SELECT ctime, tipper, tippee, 0 AS amount
252 FROM __temp_unique_tips
253 WHERE tipper=%s AND amount > 0
254
255 """
256
257 with self.db.get_cursor() as cursor:
258
259 # Load the existing connection.
260 # =============================
261 # Every account elsewhere has at least a stub participant account
262 # on Gittip.
263
264 rec = cursor.one("""
265
266 SELECT participant
267 , claimed_time IS NULL AS is_stub
268 FROM elsewhere
269 JOIN participants ON participant=participants.username
270 WHERE elsewhere.platform=%s AND elsewhere.user_id=%s
271
272 """, (platform, user_id), default=NotSane)
273
274 other_username = rec.participant
275
276
277 # Make sure we have user confirmation if needed.
278 # ==============================================
279 # We need confirmation in whatever combination of the following
280 # three cases:
281 #
282 # - the other participant is not a stub; we are taking the
283 # account elsewhere away from another viable Gittip
284 # participant
285 #
286 # - the other participant has no other accounts elsewhere; taking
287 # away the account elsewhere will leave the other Gittip
288 # participant without any means of logging in, and it will be
289 # archived and its tips absorbed by us
290 #
291 # - we already have an account elsewhere connected from the given
292 # platform, and it will be handed off to a new stub
293 # participant
294
295 # other_is_a_real_participant
296 other_is_a_real_participant = not rec.is_stub
297
298 # this_is_others_last_account_elsewhere
299 nelsewhere = cursor.one( "SELECT count(*) FROM elsewhere "
300 "WHERE participant=%s"
301 , (other_username,)
302 )
303 assert nelsewhere > 0 # sanity check
304 this_is_others_last_account_elsewhere = (nelsewhere == 1)
305
306 # we_already_have_that_kind_of_account
307 nparticipants = cursor.one( "SELECT count(*) FROM elsewhere "
308 "WHERE participant=%s AND platform=%s"
309 , (self.username, platform)
310 )
311 assert nparticipants in (0, 1) # sanity check
312 we_already_have_that_kind_of_account = nparticipants == 1
313
314 need_confirmation = NeedConfirmation( other_is_a_real_participant
315 , this_is_others_last_account_elsewhere
316 , we_already_have_that_kind_of_account
317 )
318 if need_confirmation and not have_confirmation:
319 raise need_confirmation
320
321
322 # We have user confirmation. Proceed.
323 # ===================================
324 # There is a race condition here. The last person to call this will
325 # win. XXX: I'm not sure what will happen to the DB and UI for the
326 # loser.
327
328
329 # Move any old account out of the way.
330 # ====================================
331
332 if we_already_have_that_kind_of_account:
333 new_stub_username = reserve_a_random_username(cursor)
334 cursor.run( "UPDATE elsewhere SET participant=%s "
335 "WHERE platform=%s AND participant=%s"
336 , (new_stub_username, platform, self.username)
337 )
338
339
340 # Do the deal.
341 # ============
342 # If other_is_not_a_stub, then other will have the account
343 # elsewhere taken away from them with this call. If there are other
344 # browsing sessions open from that account, they will stay open
345 # until they expire (XXX Is that okay?)
346
347 cursor.run( "UPDATE elsewhere SET participant=%s "
348 "WHERE platform=%s AND user_id=%s"
349 , (self.username, platform, user_id)
350 )
351
352
353 # Fold the old participant into the new as appropriate.
354 # =====================================================
355 # We want to do this whether or not other is a stub participant.
356
357 if this_is_others_last_account_elsewhere:
358
359 # Take over tips.
360 # ===============
361
362 x, y = self.username, other_username
363 cursor.run(CREATE_TEMP_TABLE_FOR_UNIQUE_TIPS)
364 cursor.run(CONSOLIDATE_TIPS_RECEIVING, dict(live=x, dead=y))
365 cursor.run(CONSOLIDATE_TIPS_GIVING, dict(live=x, dead=y))
366 cursor.run(ZERO_OUT_OLD_TIPS_RECEIVING, (other_username,))
367 cursor.run(ZERO_OUT_OLD_TIPS_GIVING, (other_username,))
368
369
370 # Archive the old participant.
371 # ============================
372 # We always give them a new, random username. We sign out
373 # the old participant.
374
375 for archive_username in gen_random_usernames():
376 try:
377 username = cursor.one("""
378
379 UPDATE participants
380 SET username=%s
381 , username_lower=%s
382 , session_token=NULL
383 , session_expires=now()
384 WHERE username=%s
385 RETURNING username
386
387 """, ( archive_username
388 , archive_username.lower()
389 , other_username
390 ), default=NotSane)
391 except IntegrityError:
392 continue # archive_username is already taken;
393 # extremely unlikely, but ...
394 # XXX But can the UPDATE fail in other ways?
395 else:
396 assert username == archive_username
397 break
398
399
400 # Record the absorption.
401 # ======================
402 # This is for preservation of history.
403
404 cursor.run( "INSERT INTO absorptions "
405 "(absorbed_was, absorbed_by, archived_as) "
406 "VALUES (%s, %s, %s)"
407 , ( other_username
408 , self.username
409 , archive_username
410 )
411 )
412
413 # Utter Hack
414 # ==========
415
416 def utter_hack(records):
417 for rec in records:
418 yield UtterHack(rec)
419
420 class UtterHack(MixinElsewhere):
421 def __init__(self, rec):
422 for name in rec._fields:
423 setattr(self, name, getattr(rec, name))
424
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gittip/models/_mixin_elsewhere.py b/gittip/models/_mixin_elsewhere.py
--- a/gittip/models/_mixin_elsewhere.py
+++ b/gittip/models/_mixin_elsewhere.py
@@ -413,11 +413,12 @@
# Utter Hack
# ==========
-def utter_hack(records):
+def utter_hack(db, records):
for rec in records:
- yield UtterHack(rec)
+ yield UtterHack(db, rec)
class UtterHack(MixinElsewhere):
- def __init__(self, rec):
+ def __init__(self, db, rec):
+ self.db = db
for name in rec._fields:
setattr(self, name, getattr(rec, name))
| {"golden_diff": "diff --git a/gittip/models/_mixin_elsewhere.py b/gittip/models/_mixin_elsewhere.py\n--- a/gittip/models/_mixin_elsewhere.py\n+++ b/gittip/models/_mixin_elsewhere.py\n@@ -413,11 +413,12 @@\n # Utter Hack\n # ==========\n \n-def utter_hack(records):\n+def utter_hack(db, records):\n for rec in records:\n- yield UtterHack(rec)\n+ yield UtterHack(db, rec)\n \n class UtterHack(MixinElsewhere):\n- def __init__(self, rec):\n+ def __init__(self, db, rec):\n+ self.db = db\n for name in rec._fields:\n setattr(self, name, getattr(rec, name))\n", "issue": "regression w/ unglobalizing of gittip.db\nWe fixed a bunch (!m). I'm seeing one more on community pages:\n\nhttps://www.gittip.com/for/python/\n\n```\nTraceback (most recent call last):\n File \"aspen/website.py\", line 81, in handle_safely\n response = self.handle(request)\n File \"aspen/website.py\", line 114, in handle\n response = request.resource.respond(request)\n File \"aspen/resources/dynamic_resource.py\", line 68, in respond\n response = self.get_response(context)\n File \"aspen/resources/negotiated_resource.py\", line 99, in get_response\n response.body = render(context)\n File \"aspen/renderers/__init__.py\", line 99, in __call__\n return self.render_content(context)\n File \"site-packages/aspen_tornado_renderer.py\", line 14, in render_content\n return self.compiled.generate(**context)\n File \"tornado/template.py\", line 129, in generate\n return execute()\n File \"/app/www/for/%slug/index.html.spt\", line 233, in _execute\n {% if community.nmembers == 0 %}\n File \"gittip/models/_mixin_elsewhere.py\", line 96, in get_img_src\n self.get_accounts_elsewhere()\n File \"gittip/models/_mixin_elsewhere.py\", line 60, in get_accounts_elsewhere\n accounts = self.db.all(ACCOUNTS, (self.username,))\nAttributeError: 'UtterHack' object has no attribute 'db'\n```\n\nhttps://app.getsentry.com/gittip/gittip/group/11624316/\n\n", "before_files": [{"content": "import os\n\nfrom gittip import NotSane\nfrom aspen.utils import typecheck\nfrom psycopg2 import IntegrityError\n\n\n# Exceptions\n# ==========\n\nclass UnknownPlatform(Exception): pass\n\nclass NeedConfirmation(Exception):\n \"\"\"Represent the case where we need user confirmation during a merge.\n\n This is used in the workflow for merging one participant into another.\n\n \"\"\"\n\n def __init__(self, a, b, c):\n self.other_is_a_real_participant = a\n self.this_is_others_last_account_elsewhere = b\n self.we_already_have_that_kind_of_account = c\n self._all = (a, b, c)\n\n def __repr__(self):\n return \"<NeedConfirmation: %r %r %r>\" % self._all\n __str__ = __repr__\n\n def __eq__(self, other):\n return self._all == other._all\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __nonzero__(self):\n # bool(need_confirmation)\n A, B, C = self._all\n return A or C\n\n\n# Mixin\n# =====\n\nclass MixinElsewhere(object):\n \"\"\"We use this as a mixin for Participant, and in a hackish way on the\n homepage and community pages.\n\n \"\"\"\n\n def get_accounts_elsewhere(self):\n \"\"\"Return a four-tuple of elsewhere Records.\n \"\"\"\n github_account = None\n twitter_account = None\n bitbucket_account = None\n bountysource_account = None\n\n ACCOUNTS = \"SELECT * FROM elsewhere WHERE participant=%s\"\n accounts = self.db.all(ACCOUNTS, (self.username,))\n\n for account in accounts:\n if account.platform == \"github\":\n github_account = account\n elif account.platform == \"twitter\":\n twitter_account = account\n elif account.platform == \"bitbucket\":\n bitbucket_account = account\n elif account.platform == \"bountysource\":\n bountysource_account = account\n else:\n raise UnknownPlatform(account.platform)\n\n return ( github_account\n , twitter_account\n , bitbucket_account\n , bountysource_account\n )\n\n\n def get_img_src(self, size=128):\n \"\"\"Return a value for <img src=\"...\" />.\n\n Until we have our own profile pics, delegate. XXX Is this an attack\n vector? Can someone inject this value? Don't think so, but if you make\n it happen, let me know, eh? Thanks. :)\n\n https://www.gittip.com/security.txt\n\n \"\"\"\n typecheck(size, int)\n\n src = '/assets/%s/avatar-default.gif' % os.environ['__VERSION__']\n\n github, twitter, bitbucket, bountysource = \\\n self.get_accounts_elsewhere()\n if github is not None:\n # GitHub -> Gravatar: http://en.gravatar.com/site/implement/images/\n if 'gravatar_id' in github.user_info:\n gravatar_hash = github.user_info['gravatar_id']\n src = \"https://www.gravatar.com/avatar/%s.jpg?s=%s\"\n src %= (gravatar_hash, size)\n\n elif twitter is not None:\n # https://dev.twitter.com/docs/api/1.1/get/users/show\n if 'profile_image_url_https' in twitter.user_info:\n src = twitter.user_info['profile_image_url_https']\n\n # For Twitter, we don't have good control over size. The\n # biggest option is 73px(?!), but that's too small. Let's go\n # with the original: even though it may be huge, that's\n # preferrable to guaranteed blurriness. :-/\n\n src = src.replace('_normal.', '.')\n\n return src\n\n\n def take_over(self, account_elsewhere, have_confirmation=False):\n \"\"\"Given an AccountElsewhere and a bool, raise NeedConfirmation or return None.\n\n This method associates an account on another platform (GitHub, Twitter,\n etc.) with the given Gittip participant. Every account elsewhere has an\n associated Gittip participant account, even if its only a stub\n participant (it allows us to track pledges to that account should they\n ever decide to join Gittip).\n\n In certain circumstances, we want to present the user with a\n confirmation before proceeding to reconnect the account elsewhere to\n the new Gittip account; NeedConfirmation is the signal to request\n confirmation. If it was the last account elsewhere connected to the old\n Gittip account, then we absorb the old Gittip account into the new one,\n effectively archiving the old account.\n\n Here's what absorbing means:\n\n - consolidated tips to and fro are set up for the new participant\n\n Amounts are summed, so if alice tips bob $1 and carl $1, and\n then bob absorbs carl, then alice tips bob $2(!) and carl $0.\n\n And if bob tips alice $1 and carl tips alice $1, and then bob\n absorbs carl, then bob tips alice $2(!) and carl tips alice $0.\n\n The ctime of each new consolidated tip is the older of the two\n tips that are being consolidated.\n\n If alice tips bob $1, and alice absorbs bob, then alice tips\n bob $0.\n\n If alice tips bob $1, and bob absorbs alice, then alice tips\n bob $0.\n\n - all tips to and from the other participant are set to zero\n - the absorbed username is released for reuse\n - the absorption is recorded in an absorptions table\n\n This is done in one transaction.\n\n \"\"\"\n # Lazy imports to dodge circular imports.\n from gittip.models.participant import reserve_a_random_username\n from gittip.models.participant import gen_random_usernames\n\n platform = account_elsewhere.platform\n user_id = account_elsewhere.user_id\n\n CREATE_TEMP_TABLE_FOR_UNIQUE_TIPS = \"\"\"\n\n CREATE TEMP TABLE __temp_unique_tips ON COMMIT drop AS\n\n -- Get all the latest tips from everyone to everyone.\n\n SELECT DISTINCT ON (tipper, tippee)\n ctime, tipper, tippee, amount\n FROM tips\n ORDER BY tipper, tippee, mtime DESC;\n\n \"\"\"\n\n CONSOLIDATE_TIPS_RECEIVING = \"\"\"\n\n -- Create a new set of tips, one for each current tip *to* either\n -- the dead or the live account. If a user was tipping both the\n -- dead and the live account, then we create one new combined tip\n -- to the live account (via the GROUP BY and sum()).\n\n INSERT INTO tips (ctime, tipper, tippee, amount)\n\n SELECT min(ctime), tipper, %(live)s AS tippee, sum(amount)\n\n FROM __temp_unique_tips\n\n WHERE (tippee = %(dead)s OR tippee = %(live)s)\n -- Include tips *to* either the dead or live account.\n\n AND NOT (tipper = %(dead)s OR tipper = %(live)s)\n -- Don't include tips *from* the dead or live account,\n -- lest we convert cross-tipping to self-tipping.\n\n AND amount > 0\n -- Don't include zeroed out tips, so we avoid a no-op\n -- zero tip entry.\n\n GROUP BY tipper\n\n \"\"\"\n\n CONSOLIDATE_TIPS_GIVING = \"\"\"\n\n -- Create a new set of tips, one for each current tip *from* either\n -- the dead or the live account. If both the dead and the live\n -- account were tipping a given user, then we create one new\n -- combined tip from the live account (via the GROUP BY and sum()).\n\n INSERT INTO tips (ctime, tipper, tippee, amount)\n\n SELECT min(ctime), %(live)s AS tipper, tippee, sum(amount)\n\n FROM __temp_unique_tips\n\n WHERE (tipper = %(dead)s OR tipper = %(live)s)\n -- Include tips *from* either the dead or live account.\n\n AND NOT (tippee = %(dead)s OR tippee = %(live)s)\n -- Don't include tips *to* the dead or live account,\n -- lest we convert cross-tipping to self-tipping.\n\n AND amount > 0\n -- Don't include zeroed out tips, so we avoid a no-op\n -- zero tip entry.\n\n GROUP BY tippee\n\n \"\"\"\n\n ZERO_OUT_OLD_TIPS_RECEIVING = \"\"\"\n\n INSERT INTO tips (ctime, tipper, tippee, amount)\n\n SELECT ctime, tipper, tippee, 0 AS amount\n FROM __temp_unique_tips\n WHERE tippee=%s AND amount > 0\n\n \"\"\"\n\n ZERO_OUT_OLD_TIPS_GIVING = \"\"\"\n\n INSERT INTO tips (ctime, tipper, tippee, amount)\n\n SELECT ctime, tipper, tippee, 0 AS amount\n FROM __temp_unique_tips\n WHERE tipper=%s AND amount > 0\n\n \"\"\"\n\n with self.db.get_cursor() as cursor:\n\n # Load the existing connection.\n # =============================\n # Every account elsewhere has at least a stub participant account\n # on Gittip.\n\n rec = cursor.one(\"\"\"\n\n SELECT participant\n , claimed_time IS NULL AS is_stub\n FROM elsewhere\n JOIN participants ON participant=participants.username\n WHERE elsewhere.platform=%s AND elsewhere.user_id=%s\n\n \"\"\", (platform, user_id), default=NotSane)\n\n other_username = rec.participant\n\n\n # Make sure we have user confirmation if needed.\n # ==============================================\n # We need confirmation in whatever combination of the following\n # three cases:\n #\n # - the other participant is not a stub; we are taking the\n # account elsewhere away from another viable Gittip\n # participant\n #\n # - the other participant has no other accounts elsewhere; taking\n # away the account elsewhere will leave the other Gittip\n # participant without any means of logging in, and it will be\n # archived and its tips absorbed by us\n #\n # - we already have an account elsewhere connected from the given\n # platform, and it will be handed off to a new stub\n # participant\n\n # other_is_a_real_participant\n other_is_a_real_participant = not rec.is_stub\n\n # this_is_others_last_account_elsewhere\n nelsewhere = cursor.one( \"SELECT count(*) FROM elsewhere \"\n \"WHERE participant=%s\"\n , (other_username,)\n )\n assert nelsewhere > 0 # sanity check\n this_is_others_last_account_elsewhere = (nelsewhere == 1)\n\n # we_already_have_that_kind_of_account\n nparticipants = cursor.one( \"SELECT count(*) FROM elsewhere \"\n \"WHERE participant=%s AND platform=%s\"\n , (self.username, platform)\n )\n assert nparticipants in (0, 1) # sanity check\n we_already_have_that_kind_of_account = nparticipants == 1\n\n need_confirmation = NeedConfirmation( other_is_a_real_participant\n , this_is_others_last_account_elsewhere\n , we_already_have_that_kind_of_account\n )\n if need_confirmation and not have_confirmation:\n raise need_confirmation\n\n\n # We have user confirmation. Proceed.\n # ===================================\n # There is a race condition here. The last person to call this will\n # win. XXX: I'm not sure what will happen to the DB and UI for the\n # loser.\n\n\n # Move any old account out of the way.\n # ====================================\n\n if we_already_have_that_kind_of_account:\n new_stub_username = reserve_a_random_username(cursor)\n cursor.run( \"UPDATE elsewhere SET participant=%s \"\n \"WHERE platform=%s AND participant=%s\"\n , (new_stub_username, platform, self.username)\n )\n\n\n # Do the deal.\n # ============\n # If other_is_not_a_stub, then other will have the account\n # elsewhere taken away from them with this call. If there are other\n # browsing sessions open from that account, they will stay open\n # until they expire (XXX Is that okay?)\n\n cursor.run( \"UPDATE elsewhere SET participant=%s \"\n \"WHERE platform=%s AND user_id=%s\"\n , (self.username, platform, user_id)\n )\n\n\n # Fold the old participant into the new as appropriate.\n # =====================================================\n # We want to do this whether or not other is a stub participant.\n\n if this_is_others_last_account_elsewhere:\n\n # Take over tips.\n # ===============\n\n x, y = self.username, other_username\n cursor.run(CREATE_TEMP_TABLE_FOR_UNIQUE_TIPS)\n cursor.run(CONSOLIDATE_TIPS_RECEIVING, dict(live=x, dead=y))\n cursor.run(CONSOLIDATE_TIPS_GIVING, dict(live=x, dead=y))\n cursor.run(ZERO_OUT_OLD_TIPS_RECEIVING, (other_username,))\n cursor.run(ZERO_OUT_OLD_TIPS_GIVING, (other_username,))\n\n\n # Archive the old participant.\n # ============================\n # We always give them a new, random username. We sign out\n # the old participant.\n\n for archive_username in gen_random_usernames():\n try:\n username = cursor.one(\"\"\"\n\n UPDATE participants\n SET username=%s\n , username_lower=%s\n , session_token=NULL\n , session_expires=now()\n WHERE username=%s\n RETURNING username\n\n \"\"\", ( archive_username\n , archive_username.lower()\n , other_username\n ), default=NotSane)\n except IntegrityError:\n continue # archive_username is already taken;\n # extremely unlikely, but ...\n # XXX But can the UPDATE fail in other ways?\n else:\n assert username == archive_username\n break\n\n\n # Record the absorption.\n # ======================\n # This is for preservation of history.\n\n cursor.run( \"INSERT INTO absorptions \"\n \"(absorbed_was, absorbed_by, archived_as) \"\n \"VALUES (%s, %s, %s)\"\n , ( other_username\n , self.username\n , archive_username\n )\n )\n\n# Utter Hack\n# ==========\n\ndef utter_hack(records):\n for rec in records:\n yield UtterHack(rec)\n\nclass UtterHack(MixinElsewhere):\n def __init__(self, rec):\n for name in rec._fields:\n setattr(self, name, getattr(rec, name))\n", "path": "gittip/models/_mixin_elsewhere.py"}], "after_files": [{"content": "import os\n\nfrom gittip import NotSane\nfrom aspen.utils import typecheck\nfrom psycopg2 import IntegrityError\n\n\n# Exceptions\n# ==========\n\nclass UnknownPlatform(Exception): pass\n\nclass NeedConfirmation(Exception):\n \"\"\"Represent the case where we need user confirmation during a merge.\n\n This is used in the workflow for merging one participant into another.\n\n \"\"\"\n\n def __init__(self, a, b, c):\n self.other_is_a_real_participant = a\n self.this_is_others_last_account_elsewhere = b\n self.we_already_have_that_kind_of_account = c\n self._all = (a, b, c)\n\n def __repr__(self):\n return \"<NeedConfirmation: %r %r %r>\" % self._all\n __str__ = __repr__\n\n def __eq__(self, other):\n return self._all == other._all\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __nonzero__(self):\n # bool(need_confirmation)\n A, B, C = self._all\n return A or C\n\n\n# Mixin\n# =====\n\nclass MixinElsewhere(object):\n \"\"\"We use this as a mixin for Participant, and in a hackish way on the\n homepage and community pages.\n\n \"\"\"\n\n def get_accounts_elsewhere(self):\n \"\"\"Return a four-tuple of elsewhere Records.\n \"\"\"\n github_account = None\n twitter_account = None\n bitbucket_account = None\n bountysource_account = None\n\n ACCOUNTS = \"SELECT * FROM elsewhere WHERE participant=%s\"\n accounts = self.db.all(ACCOUNTS, (self.username,))\n\n for account in accounts:\n if account.platform == \"github\":\n github_account = account\n elif account.platform == \"twitter\":\n twitter_account = account\n elif account.platform == \"bitbucket\":\n bitbucket_account = account\n elif account.platform == \"bountysource\":\n bountysource_account = account\n else:\n raise UnknownPlatform(account.platform)\n\n return ( github_account\n , twitter_account\n , bitbucket_account\n , bountysource_account\n )\n\n\n def get_img_src(self, size=128):\n \"\"\"Return a value for <img src=\"...\" />.\n\n Until we have our own profile pics, delegate. XXX Is this an attack\n vector? Can someone inject this value? Don't think so, but if you make\n it happen, let me know, eh? Thanks. :)\n\n https://www.gittip.com/security.txt\n\n \"\"\"\n typecheck(size, int)\n\n src = '/assets/%s/avatar-default.gif' % os.environ['__VERSION__']\n\n github, twitter, bitbucket, bountysource = \\\n self.get_accounts_elsewhere()\n if github is not None:\n # GitHub -> Gravatar: http://en.gravatar.com/site/implement/images/\n if 'gravatar_id' in github.user_info:\n gravatar_hash = github.user_info['gravatar_id']\n src = \"https://www.gravatar.com/avatar/%s.jpg?s=%s\"\n src %= (gravatar_hash, size)\n\n elif twitter is not None:\n # https://dev.twitter.com/docs/api/1.1/get/users/show\n if 'profile_image_url_https' in twitter.user_info:\n src = twitter.user_info['profile_image_url_https']\n\n # For Twitter, we don't have good control over size. The\n # biggest option is 73px(?!), but that's too small. Let's go\n # with the original: even though it may be huge, that's\n # preferrable to guaranteed blurriness. :-/\n\n src = src.replace('_normal.', '.')\n\n return src\n\n\n def take_over(self, account_elsewhere, have_confirmation=False):\n \"\"\"Given an AccountElsewhere and a bool, raise NeedConfirmation or return None.\n\n This method associates an account on another platform (GitHub, Twitter,\n etc.) with the given Gittip participant. Every account elsewhere has an\n associated Gittip participant account, even if its only a stub\n participant (it allows us to track pledges to that account should they\n ever decide to join Gittip).\n\n In certain circumstances, we want to present the user with a\n confirmation before proceeding to reconnect the account elsewhere to\n the new Gittip account; NeedConfirmation is the signal to request\n confirmation. If it was the last account elsewhere connected to the old\n Gittip account, then we absorb the old Gittip account into the new one,\n effectively archiving the old account.\n\n Here's what absorbing means:\n\n - consolidated tips to and fro are set up for the new participant\n\n Amounts are summed, so if alice tips bob $1 and carl $1, and\n then bob absorbs carl, then alice tips bob $2(!) and carl $0.\n\n And if bob tips alice $1 and carl tips alice $1, and then bob\n absorbs carl, then bob tips alice $2(!) and carl tips alice $0.\n\n The ctime of each new consolidated tip is the older of the two\n tips that are being consolidated.\n\n If alice tips bob $1, and alice absorbs bob, then alice tips\n bob $0.\n\n If alice tips bob $1, and bob absorbs alice, then alice tips\n bob $0.\n\n - all tips to and from the other participant are set to zero\n - the absorbed username is released for reuse\n - the absorption is recorded in an absorptions table\n\n This is done in one transaction.\n\n \"\"\"\n # Lazy imports to dodge circular imports.\n from gittip.models.participant import reserve_a_random_username\n from gittip.models.participant import gen_random_usernames\n\n platform = account_elsewhere.platform\n user_id = account_elsewhere.user_id\n\n CREATE_TEMP_TABLE_FOR_UNIQUE_TIPS = \"\"\"\n\n CREATE TEMP TABLE __temp_unique_tips ON COMMIT drop AS\n\n -- Get all the latest tips from everyone to everyone.\n\n SELECT DISTINCT ON (tipper, tippee)\n ctime, tipper, tippee, amount\n FROM tips\n ORDER BY tipper, tippee, mtime DESC;\n\n \"\"\"\n\n CONSOLIDATE_TIPS_RECEIVING = \"\"\"\n\n -- Create a new set of tips, one for each current tip *to* either\n -- the dead or the live account. If a user was tipping both the\n -- dead and the live account, then we create one new combined tip\n -- to the live account (via the GROUP BY and sum()).\n\n INSERT INTO tips (ctime, tipper, tippee, amount)\n\n SELECT min(ctime), tipper, %(live)s AS tippee, sum(amount)\n\n FROM __temp_unique_tips\n\n WHERE (tippee = %(dead)s OR tippee = %(live)s)\n -- Include tips *to* either the dead or live account.\n\n AND NOT (tipper = %(dead)s OR tipper = %(live)s)\n -- Don't include tips *from* the dead or live account,\n -- lest we convert cross-tipping to self-tipping.\n\n AND amount > 0\n -- Don't include zeroed out tips, so we avoid a no-op\n -- zero tip entry.\n\n GROUP BY tipper\n\n \"\"\"\n\n CONSOLIDATE_TIPS_GIVING = \"\"\"\n\n -- Create a new set of tips, one for each current tip *from* either\n -- the dead or the live account. If both the dead and the live\n -- account were tipping a given user, then we create one new\n -- combined tip from the live account (via the GROUP BY and sum()).\n\n INSERT INTO tips (ctime, tipper, tippee, amount)\n\n SELECT min(ctime), %(live)s AS tipper, tippee, sum(amount)\n\n FROM __temp_unique_tips\n\n WHERE (tipper = %(dead)s OR tipper = %(live)s)\n -- Include tips *from* either the dead or live account.\n\n AND NOT (tippee = %(dead)s OR tippee = %(live)s)\n -- Don't include tips *to* the dead or live account,\n -- lest we convert cross-tipping to self-tipping.\n\n AND amount > 0\n -- Don't include zeroed out tips, so we avoid a no-op\n -- zero tip entry.\n\n GROUP BY tippee\n\n \"\"\"\n\n ZERO_OUT_OLD_TIPS_RECEIVING = \"\"\"\n\n INSERT INTO tips (ctime, tipper, tippee, amount)\n\n SELECT ctime, tipper, tippee, 0 AS amount\n FROM __temp_unique_tips\n WHERE tippee=%s AND amount > 0\n\n \"\"\"\n\n ZERO_OUT_OLD_TIPS_GIVING = \"\"\"\n\n INSERT INTO tips (ctime, tipper, tippee, amount)\n\n SELECT ctime, tipper, tippee, 0 AS amount\n FROM __temp_unique_tips\n WHERE tipper=%s AND amount > 0\n\n \"\"\"\n\n with self.db.get_cursor() as cursor:\n\n # Load the existing connection.\n # =============================\n # Every account elsewhere has at least a stub participant account\n # on Gittip.\n\n rec = cursor.one(\"\"\"\n\n SELECT participant\n , claimed_time IS NULL AS is_stub\n FROM elsewhere\n JOIN participants ON participant=participants.username\n WHERE elsewhere.platform=%s AND elsewhere.user_id=%s\n\n \"\"\", (platform, user_id), default=NotSane)\n\n other_username = rec.participant\n\n\n # Make sure we have user confirmation if needed.\n # ==============================================\n # We need confirmation in whatever combination of the following\n # three cases:\n #\n # - the other participant is not a stub; we are taking the\n # account elsewhere away from another viable Gittip\n # participant\n #\n # - the other participant has no other accounts elsewhere; taking\n # away the account elsewhere will leave the other Gittip\n # participant without any means of logging in, and it will be\n # archived and its tips absorbed by us\n #\n # - we already have an account elsewhere connected from the given\n # platform, and it will be handed off to a new stub\n # participant\n\n # other_is_a_real_participant\n other_is_a_real_participant = not rec.is_stub\n\n # this_is_others_last_account_elsewhere\n nelsewhere = cursor.one( \"SELECT count(*) FROM elsewhere \"\n \"WHERE participant=%s\"\n , (other_username,)\n )\n assert nelsewhere > 0 # sanity check\n this_is_others_last_account_elsewhere = (nelsewhere == 1)\n\n # we_already_have_that_kind_of_account\n nparticipants = cursor.one( \"SELECT count(*) FROM elsewhere \"\n \"WHERE participant=%s AND platform=%s\"\n , (self.username, platform)\n )\n assert nparticipants in (0, 1) # sanity check\n we_already_have_that_kind_of_account = nparticipants == 1\n\n need_confirmation = NeedConfirmation( other_is_a_real_participant\n , this_is_others_last_account_elsewhere\n , we_already_have_that_kind_of_account\n )\n if need_confirmation and not have_confirmation:\n raise need_confirmation\n\n\n # We have user confirmation. Proceed.\n # ===================================\n # There is a race condition here. The last person to call this will\n # win. XXX: I'm not sure what will happen to the DB and UI for the\n # loser.\n\n\n # Move any old account out of the way.\n # ====================================\n\n if we_already_have_that_kind_of_account:\n new_stub_username = reserve_a_random_username(cursor)\n cursor.run( \"UPDATE elsewhere SET participant=%s \"\n \"WHERE platform=%s AND participant=%s\"\n , (new_stub_username, platform, self.username)\n )\n\n\n # Do the deal.\n # ============\n # If other_is_not_a_stub, then other will have the account\n # elsewhere taken away from them with this call. If there are other\n # browsing sessions open from that account, they will stay open\n # until they expire (XXX Is that okay?)\n\n cursor.run( \"UPDATE elsewhere SET participant=%s \"\n \"WHERE platform=%s AND user_id=%s\"\n , (self.username, platform, user_id)\n )\n\n\n # Fold the old participant into the new as appropriate.\n # =====================================================\n # We want to do this whether or not other is a stub participant.\n\n if this_is_others_last_account_elsewhere:\n\n # Take over tips.\n # ===============\n\n x, y = self.username, other_username\n cursor.run(CREATE_TEMP_TABLE_FOR_UNIQUE_TIPS)\n cursor.run(CONSOLIDATE_TIPS_RECEIVING, dict(live=x, dead=y))\n cursor.run(CONSOLIDATE_TIPS_GIVING, dict(live=x, dead=y))\n cursor.run(ZERO_OUT_OLD_TIPS_RECEIVING, (other_username,))\n cursor.run(ZERO_OUT_OLD_TIPS_GIVING, (other_username,))\n\n\n # Archive the old participant.\n # ============================\n # We always give them a new, random username. We sign out\n # the old participant.\n\n for archive_username in gen_random_usernames():\n try:\n username = cursor.one(\"\"\"\n\n UPDATE participants\n SET username=%s\n , username_lower=%s\n , session_token=NULL\n , session_expires=now()\n WHERE username=%s\n RETURNING username\n\n \"\"\", ( archive_username\n , archive_username.lower()\n , other_username\n ), default=NotSane)\n except IntegrityError:\n continue # archive_username is already taken;\n # extremely unlikely, but ...\n # XXX But can the UPDATE fail in other ways?\n else:\n assert username == archive_username\n break\n\n\n # Record the absorption.\n # ======================\n # This is for preservation of history.\n\n cursor.run( \"INSERT INTO absorptions \"\n \"(absorbed_was, absorbed_by, archived_as) \"\n \"VALUES (%s, %s, %s)\"\n , ( other_username\n , self.username\n , archive_username\n )\n )\n\n# Utter Hack\n# ==========\n\ndef utter_hack(db, records):\n for rec in records:\n yield UtterHack(db, rec)\n\nclass UtterHack(MixinElsewhere):\n def __init__(self, db, rec):\n self.db = db\n for name in rec._fields:\n setattr(self, name, getattr(rec, name))\n", "path": "gittip/models/_mixin_elsewhere.py"}]} |
gh_patches_debug_1578 | rasdani/github-patches | git_diff | cloudtools__troposphere-605 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: IAM Role title should not be validated
The title specified in troposphere is not the actual physical resource name and should be validated as such (https://github.com/cloudtools/troposphere/blob/fe72f7d3f7b0711a22173c1240134173aafef574/troposphere/iam.py#L75-L77).
The next snippet was created today using `troposphere==1.5.0`and `boto3==1.2.6`:
``` python
>>> import boto3
>>> import troposphere
>>> import troposphere.iam
>>> import time
>>>
>>> long_title = 'VeryLongName{}'.format('Z' * 100)
>>> print len(long_title)
112
>>>
>>> # create a role
...
>>> role = troposphere.iam.Role(
... long_title,
... AssumeRolePolicyDocument={
... "Statement": [{
... "Action": ["sts:AssumeRole"],
... "Effect": "Allow",
... "Principal": {"Service": ["ec2.amazonaws.com"]}
... }]
... })
Traceback (most recent call last):
File "<stdin>", line 8, in <module>
File "/Users/hugo/.virtualenvs/tmp-5ce4367de56b6bde/lib/python2.7/site-packages/troposphere/__init__.py", line 44, in __init__
self.validate_title()
File "/Users/hugo/.virtualenvs/tmp-5ce4367de56b6bde/lib/python2.7/site-packages/troposphere/iam.py", line 77, in validate_title
iam_role_name(self.title)
File "/Users/hugo/.virtualenvs/tmp-5ce4367de56b6bde/lib/python2.7/site-packages/troposphere/validators.py", line 98, in iam_role_name
raise ValueError('IAM Role Name may not exceed 64 characters')
ValueError: IAM Role Name may not exceed 64 characters
>>>
>>>
>>> # delete validator
...
>>> del troposphere.iam.Role.validate_title
>>> # try again
...
>>> role = troposphere.iam.Role(
... long_title,
... AssumeRolePolicyDocument={
... "Statement": [{
... "Action": ["sts:AssumeRole"],
... "Effect": "Allow",
... "Principal": {"Service": ["ec2.amazonaws.com"]}
... }]
... })
>>> template = troposphere.Template()
>>> template.add_resource(role)
<troposphere.iam.Role object at 0x10ee02990>
>>> print template.to_json()
{
"Resources": {
"VeryLongNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ": {
"Properties": {
"AssumeRolePolicyDocument": {
"Statement": [
{
"Action": [
"sts:AssumeRole"
],
"Effect": "Allow",
"Principal": {
"Service": [
"ec2.amazonaws.com"
]
}
}
]
}
},
"Type": "AWS::IAM::Role"
}
}
}
>>> client = boto3.client('cloudformation', 'us-east-1')
>>> stack = client.create_stack(
... StackName='testTroposphere',
... TemplateBody=template.to_json(),
... Capabilities=['CAPABILITY_IAM'])
>>>
>>> while client.describe_stacks(StackName=stack['StackId'])['Stacks'][0]['StackStatus'] != 'CREATE_COMPLETE':
... import time
... time.sleep(1)
...
>>> resources = client.describe_stack_resources(StackName=stack['StackId'])
>>> for r in resources['StackResources']:
... physical_id = r['PhysicalResourceId']
... print("{} ({} chars)".format(physical_id, len(physical_id)))
...
testTroposphere-VeryLongNameZZZZZZZZZZZZZZZZZZZZZZ-PTHEM9FPNX28 (63 chars)
```
The snippet above shows that the physical id was chosen by CloudFormation and isn't just a trimmed version of the title (it includes a random part too).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `troposphere/iam.py`
Content:
```
1 # Copyright (c) 2012-2013, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6 from . import AWSObject, AWSProperty
7 from .validators import integer, boolean, status
8 from .validators import iam_path, iam_role_name, iam_group_name, iam_user_name
9
10 try:
11 from awacs.aws import Policy
12 policytypes = (dict, Policy)
13 except ImportError:
14 policytypes = dict,
15
16
17 Active = "Active"
18 Inactive = "Inactive"
19
20
21 class AccessKey(AWSObject):
22 resource_type = "AWS::IAM::AccessKey"
23
24 props = {
25 'Serial': (integer, False),
26 'Status': (status, False),
27 'UserName': (basestring, True),
28 }
29
30
31 class PolicyType(AWSObject):
32 resource_type = "AWS::IAM::Policy"
33
34 props = {
35 'Groups': ([basestring], False),
36 'PolicyDocument': (policytypes, True),
37 'PolicyName': (basestring, True),
38 'Roles': ([basestring], False),
39 'Users': ([basestring], False),
40 }
41
42
43 class Policy(AWSProperty):
44 props = {
45 'PolicyDocument': (policytypes, True),
46 'PolicyName': (basestring, True),
47 }
48
49 PolicyProperty = Policy
50
51
52 class Group(AWSObject):
53 def validate_title(self):
54 iam_group_name(self.title)
55
56 resource_type = "AWS::IAM::Group"
57
58 props = {
59 'GroupName': (iam_group_name, False),
60 'ManagedPolicyArns': ([basestring], False),
61 'Path': (iam_path, False),
62 'Policies': ([Policy], False),
63 }
64
65
66 class InstanceProfile(AWSObject):
67 resource_type = "AWS::IAM::InstanceProfile"
68
69 props = {
70 'Path': (iam_path, False),
71 'Roles': (list, True),
72 }
73
74
75 class Role(AWSObject):
76 def validate_title(self):
77 iam_role_name(self.title)
78
79 resource_type = "AWS::IAM::Role"
80
81 props = {
82 'AssumeRolePolicyDocument': (policytypes, True),
83 'ManagedPolicyArns': ([basestring], False),
84 'Path': (iam_path, False),
85 'Policies': ([Policy], False),
86 'RoleName': (iam_role_name, False),
87 }
88
89
90 class LoginProfile(AWSProperty):
91 props = {
92 'Password': (basestring, True),
93 'PasswordResetRequired': (boolean, False),
94 }
95
96
97 class User(AWSObject):
98 resource_type = "AWS::IAM::User"
99
100 props = {
101 'Path': (iam_path, False),
102 'Groups': ([basestring], False),
103 'ManagedPolicyArns': ([basestring], False),
104 'LoginProfile': (LoginProfile, False),
105 'Policies': ([Policy], False),
106 'UserName': (iam_user_name, False),
107 }
108
109
110 class UserToGroupAddition(AWSObject):
111 resource_type = "AWS::IAM::UserToGroupAddition"
112
113 props = {
114 'GroupName': (basestring, True),
115 'Users': (list, True),
116 }
117
118
119 class ManagedPolicy(AWSObject):
120 resource_type = "AWS::IAM::ManagedPolicy"
121
122 props = {
123 'Description': (basestring, False),
124 'Groups': ([basestring], False),
125 'Path': (iam_path, False),
126 'PolicyDocument': (policytypes, True),
127 'Roles': ([basestring], False),
128 'Users': ([basestring], False),
129 }
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/troposphere/iam.py b/troposphere/iam.py
--- a/troposphere/iam.py
+++ b/troposphere/iam.py
@@ -50,9 +50,6 @@
class Group(AWSObject):
- def validate_title(self):
- iam_group_name(self.title)
-
resource_type = "AWS::IAM::Group"
props = {
@@ -73,9 +70,6 @@
class Role(AWSObject):
- def validate_title(self):
- iam_role_name(self.title)
-
resource_type = "AWS::IAM::Role"
props = {
| {"golden_diff": "diff --git a/troposphere/iam.py b/troposphere/iam.py\n--- a/troposphere/iam.py\n+++ b/troposphere/iam.py\n@@ -50,9 +50,6 @@\n \n \n class Group(AWSObject):\n- def validate_title(self):\n- iam_group_name(self.title)\n-\n resource_type = \"AWS::IAM::Group\"\n \n props = {\n@@ -73,9 +70,6 @@\n \n \n class Role(AWSObject):\n- def validate_title(self):\n- iam_role_name(self.title)\n-\n resource_type = \"AWS::IAM::Role\"\n \n props = {\n", "issue": "BUG: IAM Role title should not be validated\nThe title specified in troposphere is not the actual physical resource name and should be validated as such (https://github.com/cloudtools/troposphere/blob/fe72f7d3f7b0711a22173c1240134173aafef574/troposphere/iam.py#L75-L77).\n\nThe next snippet was created today using `troposphere==1.5.0`and `boto3==1.2.6`:\n\n``` python\n>>> import boto3\n>>> import troposphere\n>>> import troposphere.iam\n>>> import time\n>>>\n>>> long_title = 'VeryLongName{}'.format('Z' * 100)\n>>> print len(long_title)\n112\n>>>\n>>> # create a role\n...\n>>> role = troposphere.iam.Role(\n... long_title,\n... AssumeRolePolicyDocument={\n... \"Statement\": [{\n... \"Action\": [\"sts:AssumeRole\"],\n... \"Effect\": \"Allow\",\n... \"Principal\": {\"Service\": [\"ec2.amazonaws.com\"]}\n... }]\n... })\nTraceback (most recent call last):\n File \"<stdin>\", line 8, in <module>\n File \"/Users/hugo/.virtualenvs/tmp-5ce4367de56b6bde/lib/python2.7/site-packages/troposphere/__init__.py\", line 44, in __init__\n self.validate_title()\n File \"/Users/hugo/.virtualenvs/tmp-5ce4367de56b6bde/lib/python2.7/site-packages/troposphere/iam.py\", line 77, in validate_title\n iam_role_name(self.title)\n File \"/Users/hugo/.virtualenvs/tmp-5ce4367de56b6bde/lib/python2.7/site-packages/troposphere/validators.py\", line 98, in iam_role_name\n raise ValueError('IAM Role Name may not exceed 64 characters')\nValueError: IAM Role Name may not exceed 64 characters\n>>>\n>>>\n>>> # delete validator\n...\n>>> del troposphere.iam.Role.validate_title\n>>> # try again\n...\n>>> role = troposphere.iam.Role(\n... long_title,\n... AssumeRolePolicyDocument={\n... \"Statement\": [{\n... \"Action\": [\"sts:AssumeRole\"],\n... \"Effect\": \"Allow\",\n... \"Principal\": {\"Service\": [\"ec2.amazonaws.com\"]}\n... }]\n... })\n>>> template = troposphere.Template()\n>>> template.add_resource(role)\n<troposphere.iam.Role object at 0x10ee02990>\n>>> print template.to_json()\n{\n \"Resources\": {\n \"VeryLongNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ\": {\n \"Properties\": {\n \"AssumeRolePolicyDocument\": {\n \"Statement\": [\n {\n \"Action\": [\n \"sts:AssumeRole\"\n ],\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": [\n \"ec2.amazonaws.com\"\n ]\n }\n }\n ]\n }\n },\n \"Type\": \"AWS::IAM::Role\"\n }\n }\n}\n>>> client = boto3.client('cloudformation', 'us-east-1')\n>>> stack = client.create_stack(\n... StackName='testTroposphere',\n... TemplateBody=template.to_json(),\n... Capabilities=['CAPABILITY_IAM'])\n>>>\n>>> while client.describe_stacks(StackName=stack['StackId'])['Stacks'][0]['StackStatus'] != 'CREATE_COMPLETE':\n... import time\n... time.sleep(1)\n...\n>>> resources = client.describe_stack_resources(StackName=stack['StackId'])\n>>> for r in resources['StackResources']:\n... physical_id = r['PhysicalResourceId']\n... print(\"{} ({} chars)\".format(physical_id, len(physical_id)))\n...\ntestTroposphere-VeryLongNameZZZZZZZZZZZZZZZZZZZZZZ-PTHEM9FPNX28 (63 chars)\n```\n\nThe snippet above shows that the physical id was chosen by CloudFormation and isn't just a trimmed version of the title (it includes a random part too).\n\n", "before_files": [{"content": "# Copyright (c) 2012-2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty\nfrom .validators import integer, boolean, status\nfrom .validators import iam_path, iam_role_name, iam_group_name, iam_user_name\n\ntry:\n from awacs.aws import Policy\n policytypes = (dict, Policy)\nexcept ImportError:\n policytypes = dict,\n\n\nActive = \"Active\"\nInactive = \"Inactive\"\n\n\nclass AccessKey(AWSObject):\n resource_type = \"AWS::IAM::AccessKey\"\n\n props = {\n 'Serial': (integer, False),\n 'Status': (status, False),\n 'UserName': (basestring, True),\n }\n\n\nclass PolicyType(AWSObject):\n resource_type = \"AWS::IAM::Policy\"\n\n props = {\n 'Groups': ([basestring], False),\n 'PolicyDocument': (policytypes, True),\n 'PolicyName': (basestring, True),\n 'Roles': ([basestring], False),\n 'Users': ([basestring], False),\n }\n\n\nclass Policy(AWSProperty):\n props = {\n 'PolicyDocument': (policytypes, True),\n 'PolicyName': (basestring, True),\n }\n\nPolicyProperty = Policy\n\n\nclass Group(AWSObject):\n def validate_title(self):\n iam_group_name(self.title)\n\n resource_type = \"AWS::IAM::Group\"\n\n props = {\n 'GroupName': (iam_group_name, False),\n 'ManagedPolicyArns': ([basestring], False),\n 'Path': (iam_path, False),\n 'Policies': ([Policy], False),\n }\n\n\nclass InstanceProfile(AWSObject):\n resource_type = \"AWS::IAM::InstanceProfile\"\n\n props = {\n 'Path': (iam_path, False),\n 'Roles': (list, True),\n }\n\n\nclass Role(AWSObject):\n def validate_title(self):\n iam_role_name(self.title)\n\n resource_type = \"AWS::IAM::Role\"\n\n props = {\n 'AssumeRolePolicyDocument': (policytypes, True),\n 'ManagedPolicyArns': ([basestring], False),\n 'Path': (iam_path, False),\n 'Policies': ([Policy], False),\n 'RoleName': (iam_role_name, False),\n }\n\n\nclass LoginProfile(AWSProperty):\n props = {\n 'Password': (basestring, True),\n 'PasswordResetRequired': (boolean, False),\n }\n\n\nclass User(AWSObject):\n resource_type = \"AWS::IAM::User\"\n\n props = {\n 'Path': (iam_path, False),\n 'Groups': ([basestring], False),\n 'ManagedPolicyArns': ([basestring], False),\n 'LoginProfile': (LoginProfile, False),\n 'Policies': ([Policy], False),\n 'UserName': (iam_user_name, False),\n }\n\n\nclass UserToGroupAddition(AWSObject):\n resource_type = \"AWS::IAM::UserToGroupAddition\"\n\n props = {\n 'GroupName': (basestring, True),\n 'Users': (list, True),\n }\n\n\nclass ManagedPolicy(AWSObject):\n resource_type = \"AWS::IAM::ManagedPolicy\"\n\n props = {\n 'Description': (basestring, False),\n 'Groups': ([basestring], False),\n 'Path': (iam_path, False),\n 'PolicyDocument': (policytypes, True),\n 'Roles': ([basestring], False),\n 'Users': ([basestring], False),\n }\n", "path": "troposphere/iam.py"}], "after_files": [{"content": "# Copyright (c) 2012-2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty\nfrom .validators import integer, boolean, status\nfrom .validators import iam_path, iam_role_name, iam_group_name, iam_user_name\n\ntry:\n from awacs.aws import Policy\n policytypes = (dict, Policy)\nexcept ImportError:\n policytypes = dict,\n\n\nActive = \"Active\"\nInactive = \"Inactive\"\n\n\nclass AccessKey(AWSObject):\n resource_type = \"AWS::IAM::AccessKey\"\n\n props = {\n 'Serial': (integer, False),\n 'Status': (status, False),\n 'UserName': (basestring, True),\n }\n\n\nclass PolicyType(AWSObject):\n resource_type = \"AWS::IAM::Policy\"\n\n props = {\n 'Groups': ([basestring], False),\n 'PolicyDocument': (policytypes, True),\n 'PolicyName': (basestring, True),\n 'Roles': ([basestring], False),\n 'Users': ([basestring], False),\n }\n\n\nclass Policy(AWSProperty):\n props = {\n 'PolicyDocument': (policytypes, True),\n 'PolicyName': (basestring, True),\n }\n\nPolicyProperty = Policy\n\n\nclass Group(AWSObject):\n resource_type = \"AWS::IAM::Group\"\n\n props = {\n 'GroupName': (iam_group_name, False),\n 'ManagedPolicyArns': ([basestring], False),\n 'Path': (iam_path, False),\n 'Policies': ([Policy], False),\n }\n\n\nclass InstanceProfile(AWSObject):\n resource_type = \"AWS::IAM::InstanceProfile\"\n\n props = {\n 'Path': (iam_path, False),\n 'Roles': (list, True),\n }\n\n\nclass Role(AWSObject):\n resource_type = \"AWS::IAM::Role\"\n\n props = {\n 'AssumeRolePolicyDocument': (policytypes, True),\n 'ManagedPolicyArns': ([basestring], False),\n 'Path': (iam_path, False),\n 'Policies': ([Policy], False),\n 'RoleName': (iam_role_name, False),\n }\n\n\nclass LoginProfile(AWSProperty):\n props = {\n 'Password': (basestring, True),\n 'PasswordResetRequired': (boolean, False),\n }\n\n\nclass User(AWSObject):\n resource_type = \"AWS::IAM::User\"\n\n props = {\n 'Path': (iam_path, False),\n 'Groups': ([basestring], False),\n 'ManagedPolicyArns': ([basestring], False),\n 'LoginProfile': (LoginProfile, False),\n 'Policies': ([Policy], False),\n 'UserName': (iam_user_name, False),\n }\n\n\nclass UserToGroupAddition(AWSObject):\n resource_type = \"AWS::IAM::UserToGroupAddition\"\n\n props = {\n 'GroupName': (basestring, True),\n 'Users': (list, True),\n }\n\n\nclass ManagedPolicy(AWSObject):\n resource_type = \"AWS::IAM::ManagedPolicy\"\n\n props = {\n 'Description': (basestring, False),\n 'Groups': ([basestring], False),\n 'Path': (iam_path, False),\n 'PolicyDocument': (policytypes, True),\n 'Roles': ([basestring], False),\n 'Users': ([basestring], False),\n }\n", "path": "troposphere/iam.py"}]} |
gh_patches_debug_1579 | rasdani/github-patches | git_diff | mne-tools__mne-bids-pipeline-139 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
config.trigger_time_shift is currently unused
And also lacks clarification of what exactly it is supposed to do.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `config.py`
Content:
```
1 """Set the configuration parameters for the study.
2
3 You need to define an environment variable `BIDS_ROOT` to point to the root
4 of your BIDS dataset to be analyzed.
5
6 """
7 import importlib
8 import functools
9 import os
10 from collections import defaultdict
11 import copy
12 import coloredlogs
13 import logging
14
15 import numpy as np
16 import mne
17 from mne_bids.utils import get_entity_vals
18
19 # Name, version, and hosting location of the pipeline
20 PIPELINE_NAME = 'mne-study-template'
21 VERSION = '0.1.dev0'
22 CODE_URL = 'https://github.com/mne-tools/mne-study-template'
23
24
25 # ``study_name`` : str
26 # Specify the name of your study. It will be used to populate filenames for
27 # saving the analysis results.
28 #
29 # Example
30 # ~~~~~~~
31 # >>> study_name = 'my-study'
32
33 study_name = ''
34
35 # ``bids_root`` : str or None
36 # Speficy the BIDS root directory. Pass an empty string or ```None`` to use
37 # the value specified in the ``BIDS_ROOT`` environment variable instead.
38 # Raises an exception if the BIDS root has not been specified.
39 #
40 # Example
41 # ~~~~~~~
42 # >>> bids_root = '/path/to/your/bids_root' # Use this to specify a path here.
43 # or
44 # >>> bids_root = None # Make use of the ``BIDS_ROOT`` environment variable.
45
46 bids_root = None
47
48 # ``subjects_dir`` : str or None
49 # Path to the directory that contains the MRI data files and their
50 # derivativesfor all subjects. Specifically, the ``subjects_dir`` is the
51 # $SUBJECTS_DIR used by the Freesurfer software. If ``None``, will use
52 # ``'bids_root/derivatives/freesurfer/subjects'``.
53
54 subjects_dir = None
55
56 # ``daysback`` : int
57 # If not None apply a time shift to dates to adjust for limitateions
58 # of fif files
59
60 daysback = None
61
62 # ``interactive`` : boolean
63 # If True, the scripts will provide some interactive elements, such as
64 # figures. If running the scripts from a notebook or Spyder,
65 # run %matplotlib qt in the command line to open the figures in a separate
66 # window.
67
68 interactive = False
69
70 # ``crop`` : tuple or None
71 # If tuple, (tmin, tmax) to crop the raw data
72 # If None (default), do not crop.
73 crop = None
74
75 # BIDS params
76 # see: bids-specification.rtfd.io/en/latest/99-appendices/04-entity-table.html
77
78 # ``sessions`` : iterable or 'all'
79 # The sessions to process.
80 sessions = 'all'
81
82 # ``task`` : str
83 # The task to process.
84 task = ''
85
86 # ``runs`` : iterable or 'all'
87 # The runs to process.
88 runs = 'all'
89
90 acq = None
91
92 proc = None
93
94 rec = None
95
96 space = None
97
98 # ``subjects_list`` : 'all' | list of str
99 # Subjects to analyze. If ``'all``, include all subjects. To only
100 # include a subset of subjects, pass a list of their identifiers. Even
101 # if you plan on analyzing only a single subject, pass their identifier
102 # as a list.
103 #
104 # Please note that if you intend to EXCLUDE only a few subjects, you
105 # should consider setting ``subjects_list = 'all'`` and adding the
106 # identifiers of the excluded subjects to ``exclude_subjects`` (see next
107 # section).
108 #
109 # Example
110 # ~~~~~~~
111 # >>> subjects_list = 'all' # Include all subjects.
112 # >>> subjects_list = ['05'] # Only include subject 05.
113 # >>> subjects_list = ['01', '02'] # Only include subjects 01 and 02.
114
115 subjects_list = 'all'
116
117 # ``exclude_subjects`` : list of str
118 # Specify subjects to exclude from analysis. The MEG empty-room mock-subject
119 # is automatically excluded from regular analysis.
120 #
121 # Good Practice / Advice
122 # ~~~~~~~~~~~~~~~~~~~~~~
123 # Keep track of the criteria leading you to exclude
124 # a participant (e.g. too many movements, missing blocks, aborted experiment,
125 # did not understand the instructions, etc, ...)
126 # The ``emptyroom`` subject will be excluded automatically.
127
128 exclude_subjects = []
129
130 # ``ch_types`` : list of st
131 # The list of channel types to consider.
132 #
133 # Example
134 # ~~~~~~~
135 # >>> ch_types = ['meg', 'eeg'] # to use MEG and EEG channels
136 # or
137 # >>> ch_types = ['meg'] # to use only MEG
138 # or
139 # >>> ch_types = ['grad'] # to use only gradiometer MEG channels
140
141 # Note: If `kind` is 'eeg', EEG ch_types will be used regardless of whether
142 # specified here or not
143 ch_types = []
144
145 ###############################################################################
146 # DEFINE ADDITIONAL CHANNELS
147 # --------------------------
148 # needed for 01-import_and_maxfilter.py
149
150 # ``rename_channels`` : dict rename channels
151 # Here you name or replace extra channels that were recorded, for instance
152 # EOG, ECG.
153 #
154 # Example
155 # ~~~~~~~
156 # Here rename EEG061 to EOG061, EEG062 to EOG062, EEG063 to ECG063:
157 # >>> rename_channels = {'EEG061': 'EOG061', 'EEG062': 'EOG062',
158 # 'EEG063': 'ECG063'}
159
160 # XXX should be done automatically from BIDS ?
161 rename_channels = None
162
163 # ``set_channel_types``: dict
164 # Here you define types of channels to pick later.
165 #
166 # Example
167 # ~~~~~~~
168 # >>> set_channel_types = {'EEG061': 'eog', 'EEG062': 'eog',
169 # 'EEG063': 'ecg', 'EEG064': 'misc'}
170
171 # XXX should not be necessary
172 set_channel_types = None
173
174 ###############################################################################
175 # MAXWELL FILTER PARAMETERS
176 # -------------------------
177 # done in 01-import_and_maxfilter.py
178 #
179 # Note: For any of this to work, you must set ``mf_ctc_fname`` and
180 # ``mf_cal_fname`` above.
181 #
182 # "Bad", i.e. flat and overly noisy channels, can be automatically detected
183 # using a procedure inspired by the commercial MaxFilter by Elekta. First,
184 # a copy of the data is low-pass filtered at 40 Hz. Then, channels with
185 # unusually low variability are flagged as "flat", while channels with
186 # excessively high variability are flagged as "noisy". Flat and noisy channels
187 # are marked as "bad" and excluded from subsequent analysis. See
188 # :func:`mne.preprocssessing.find_bad_channels_maxwell` for more information
189 # on this procedure. The list of bad channels detected through this procedure
190 # will be merged with the list of bad channels already present in the dataset,
191 # if any.
192 #
193 # ``find_flat_channels_meg`` : bool
194 # Auto-detect "flat" channels and mark them as bad.
195 #
196 # ``find_noisy_channels_meg`` : bool
197 # Auto-detect "noisy" channels and mark them as bad.
198
199 find_flat_channels_meg = False
200 find_noisy_channels_meg = False
201
202 # ``use_maxwell_filter`` : bool
203 # Use or not maxwell filter to preprocess the data.
204 #
205 # Warning
206 # ~~~~~~~
207 # If the data were recorded with internal active compensation (MaxShield),
208 # they need to be run through Maxwell filter to avoid distortions.
209 # Bad channels need to be set through BIDS channels.tsv and / or via the
210 # ``find_flat_channels_meg`` and ``find_noisy_channels_meg`` options above
211 # before applying Maxwell filter.
212
213 use_maxwell_filter = False
214
215 # There are two kinds of maxfiltering: SSS and tSSS
216 # [SSS = signal space separation ; tSSS = temporal signal space separation]
217 # (Taulu et al, 2004): http://cds.cern.ch/record/709081/files/0401166.pdf
218 #
219 # ``mf_st_duration`` : float | None
220 # If not None, apply spatiotemporal SSS (tSSS) with specified buffer
221 # duration (in seconds). MaxFilter™'s default is 10.0 seconds in v2.2.
222 # Spatiotemporal SSS acts as implicitly as a high-pass filter where the
223 # cut-off frequency is 1/st_dur Hz. For this (and other) reasons, longer
224 # buffers are generally better as long as your system can handle the
225 # higher memory usage. To ensure that each window is processed
226 # identically, choose a buffer length that divides evenly into your data.
227 # Any data at the trailing edge that doesn't fit evenly into a whole
228 # buffer window will be lumped into the previous buffer.
229 #
230 # Good Practice / Advice
231 # ~~~~~~~~~~~~~~~~~~~~~~
232 # If you are interested in low frequency activity (<0.1Hz), avoid using tSSS
233 # and set mf_st_duration to None
234 #
235 # If you are interested in low frequency above 0.1 Hz, you can use the
236 # default mf_st_duration to 10 s meaning it acts like a 0.1 Hz highpass filter.
237 #
238 # Example
239 # ~~~~~~~
240 # >>> mf_st_duration = None
241 # or
242 # >>> mf_st_duration = 10. # to apply tSSS with 0.1Hz highpass filter.
243
244 mf_st_duration = None
245
246 # ``mf_head_origin`` : array-like, shape (3,) | 'auto'
247 # Origin of internal and external multipolar moment space in meters.
248 # If 'auto', it will be estimated from headshape points.
249 # If automatic fitting fails (e.g., due to having too few digitization
250 # points), consider separately calling the fitting function with different
251 # options or specifying the origin manually.
252 #
253 # Example
254 # ~~~~~~~
255 # >>> mf_head_origin = 'auto'
256
257 mf_head_origin = 'auto'
258
259 # ``cross talk`` : str
260 # Path to the cross talk file
261 #
262 #
263 # ``calibration`` : str
264 # Path to the calibration file.
265 #
266 #
267 # These 2 files should be downloaded and made available for running
268 # maxwell filtering.
269 #
270 # Example
271 # ~~~~~~~
272 # >>> cal_files_path = os.path.join(study_path, 'SSS')
273 # >>> mf_ctc_fname = os.path.join(cal_files_path, 'ct_sparse_mgh.fif')
274 # >>> mf_cal_fname = os.path.join(cal_files_path, 'sss_cal_mgh.dat')
275 #
276 # Warning
277 # ~~~~~~~
278 # These 2 files are site and machine specific files that provide information
279 # about the environmental noise. For practical purposes, place them in your
280 # study folder.
281 #
282 # At NeuroSpin: ct_sparse and sss_call are on the meg_tmp server
283
284 # cal_files_path = os.path.join(study_path, 'SSS')
285 # mf_ctc_fname = os.path.join(cal_files_path, 'ct_sparse_mgh.fif')
286 # mf_cal_fname = os.path.join(cal_files_path, 'sss_cal_mgh.dat')
287
288 mf_ctc_fname = ''
289 mf_cal_fname = ''
290
291 # Despite all possible care to avoid movements in the MEG, the participant
292 # will likely slowly drift down from the Dewar or slightly shift the head
293 # around in the course of the recording session. Hence, to take this into
294 # account, we are realigning all data to a single position. For this, you need
295 # to define a reference run (typically the one in the middle of
296 # the recording session).
297 #
298 # ``mf_reference_run`` : int
299 # Which run to take as the reference for adjusting the head position of all
300 # runs.
301 #
302 # Example
303 # ~~~~~~~
304 # >>> mf_reference_run = 0 # to use the first run
305
306 mf_reference_run = 0
307
308 ###############################################################################
309 # FREQUENCY FILTERING
310 # -------------------
311 # done in 02-frequency_filter.py
312
313 # Good Practice / Advice
314 # ~~~~~~~~~~~~~~~~~~~~~~
315 # It is typically better to set your filtering properties on the raw data so
316 # as to avoid what we call border (or edge) effects.
317 #
318 # If you use this pipeline for evoked responses, you could consider
319 # a low-pass filter cut-off of h_freq = 40 Hz
320 # and possibly a high-pass filter cut-off of l_freq = 1 Hz
321 # so you would preserve only the power in the 1Hz to 40 Hz band.
322 # Note that highpass filtering is not necessarily recommended as it can
323 # distort waveforms of evoked components, or simply wash out any low
324 # frequency that can may contain brain signal. It can also act as
325 # a replacement for baseline correction in Epochs. See below.
326 #
327 # If you use this pipeline for time-frequency analysis, a default filtering
328 # coult be a high-pass filter cut-off of l_freq = 1 Hz
329 # a low-pass filter cut-off of h_freq = 120 Hz
330 # so you would preserve only the power in the 1Hz to 120 Hz band.
331 #
332 # If you need more fancy analysis, you are already likely past this kind
333 # of tips! :)
334
335
336 # ``l_freq`` : float
337 # The low-frequency cut-off in the highpass filtering step.
338 # Keep it None if no highpass filtering should be applied.
339
340 l_freq = 1.
341
342 # ``h_freq`` : float
343 # The high-frequency cut-off in the lowpass filtering step.
344 # Keep it None if no lowpass filtering should be applied.
345
346 h_freq = 40.
347
348 ###############################################################################
349 # RESAMPLING
350 # ----------
351 #
352 # Good Practice / Advice
353 # ~~~~~~~~~~~~~~~~~~~~~~
354 # If you have acquired data with a very high sampling frequency (e.g. 2 kHz)
355 # you will likely want to downsample to lighten up the size of the files you
356 # are working with (pragmatics)
357 # If you are interested in typical analysis (up to 120 Hz) you can typically
358 # resample your data down to 500 Hz without preventing reliable time-frequency
359 # exploration of your data
360 #
361 # ``resample_sfreq`` : float
362 # Specifies at which sampling frequency the data should be resampled.
363 # If None then no resampling will be done.
364 #
365 # Example
366 # ~~~~~~~
367 # >>> resample_sfreq = None # no resampling
368 # or
369 # >>> resample_sfreq = 500 # resample to 500Hz
370
371 resample_sfreq = None
372
373 # ``decim`` : int
374 # Says how much to decimate data at the epochs level.
375 # It is typically an alternative to the `resample_sfreq` parameter that
376 # can be used for resampling raw data. 1 means no decimation.
377 #
378 # Good Practice / Advice
379 # ~~~~~~~~~~~~~~~~~~~~~~
380 # Decimation requires to lowpass filtered the data to avoid aliasing.
381 # Note that using decimation is much faster than resampling.
382 #
383 # Example
384 # ~~~~~~~
385 # >>> decim = 1 # no decimation
386 # or
387 # >>> decim = 4 # decimate by 4 ie devide sampling frequency by 4
388
389 decim = 1
390
391 ###############################################################################
392 # AUTOMATIC REJECTION OF ARTIFACTS
393 # --------------------------------
394 #
395 # Good Practice / Advice
396 # ~~~~~~~~~~~~~~~~~~~~~~
397 # Have a look at your raw data and train yourself to detect a blink, a heart
398 # beat and an eye movement.
399 # You can do a quick average of blink data and check what the amplitude looks
400 # like.
401 #
402 # ``reject`` : dict | None
403 # The rejection limits to make some epochs as bads.
404 # This allows to remove strong transient artifacts.
405 # If you want to reject and retrieve blinks later, e.g. with ICA,
406 # don't specify a value for the eog channel (see examples below).
407 # Make sure to include values for eeg if you have EEG data
408 #
409 # Note
410 # ~~~~
411 # These numbers tend to vary between subjects.. You might want to consider
412 # using the autoreject method by Jas et al. 2018.
413 # See https://autoreject.github.io
414 #
415 # Example
416 # ~~~~~~~
417 # >>> reject = {'grad': 4000e-13, 'mag': 4e-12, 'eog': 150e-6}
418 # >>> reject = {'grad': 4000e-13, 'mag': 4e-12, 'eeg': 200e-6}
419 # >>> reject = None
420
421 reject = {'grad': 4000e-13, 'mag': 4e-12, 'eeg': 150e-6}
422
423
424 ###############################################################################
425 # RENAME EXPERIMENTAL EVENTS
426 # --------------------------
427 #
428 # ``rename_events`` : dict
429 # A dictionary specifying which events in the BIDS dataset to rename upon
430 # loading, and before processing begins.
431 #
432 # Pass an empty dictionary to not perform any renaming.
433 #
434 # Example
435 # ~~~~~~~
436 # Rename ``audio_left`` in the BIDS dataset to ``audio/left`` in the pipeline:
437 # >>> rename_events = {'audio_left': 'audio/left'}
438
439 rename_events = dict()
440
441
442 ###############################################################################
443 # EPOCHING
444 # --------
445 #
446 # ``tmin``: float
447 # A float in seconds that gives the start time before event of an epoch.
448 #
449 # Example
450 # ~~~~~~~
451 # >>> tmin = -0.2 # take 200ms before event onset.
452
453 tmin = -0.2
454
455 # ``tmax``: float
456 # A float in seconds that gives the end time before event of an epoch.
457 #
458 # Example
459 # ~~~~~~~
460 # >>> tmax = 0.5 # take 500ms after event onset.
461
462 tmax = 0.5
463
464 # ``trigger_time_shift`` : float | None
465 # If float it specifies the offset for the trigger and the stimulus
466 # (in seconds). You need to measure this value for your specific
467 # experiment/setup.
468 #
469 # Example
470 # ~~~~~~~
471 # >>> trigger_time_shift = 0 # don't apply any offset
472
473 trigger_time_shift = 0.
474
475 # ``baseline`` : tuple
476 # It specifies how to baseline the epochs; if None, no baseline is applied.
477 #
478 # Example
479 # ~~~~~~~
480 # >>> baseline = (None, 0) # baseline between tmin and 0
481
482 baseline = (None, 0)
483
484 # `conditions`` : list
485 # The condition names to consider. This can either be the keys of
486 # ``event_id``, or – if event names were specified with ``/`` for
487 # grouping – the name of the *grouped* condition (i.e., the
488 # condition name before or after that ``/`` that is shared between the
489 # respective conditions you wish to group). See the "Subselecting epochs"
490 # tutorial for more information: https://mne.tools/stable/auto_tutorials/epochs/plot_10_epochs_overview.html#subselecting-epochs # noqa: 501
491 #
492 # Example
493 # ~~~~~~~
494 # >>> conditions = ['auditory/left', 'visual/left']
495 # or
496 # >>> conditions = ['auditory/left', 'auditory/right']
497 # or
498 # >>> conditions = ['auditory']
499 # or
500 # >>> conditions = ['auditory', 'visual']
501 # or
502 # >>> conditions = ['left', 'right']
503
504 conditions = ['left', 'right']
505
506 ###############################################################################
507 # ARTIFACT REMOVAL
508 # ----------------
509 #
510 # You can choose between ICA and SSP to remove eye and heart artifacts.
511 # SSP: https://mne-tools.github.io/stable/auto_tutorials/plot_artifacts_correction_ssp.html?highlight=ssp # noqa
512 # ICA: https://mne-tools.github.io/stable/auto_tutorials/plot_artifacts_correction_ica.html?highlight=ica # noqa
513 # if you choose ICA, run scripts 5a and 6a
514 # if you choose SSP, run scripts 5b and 6b
515 #
516 # Currently you cannot use both.
517
518 # SSP
519 # ~~~
520 #
521 # ``use_ssp`` : bool
522 # If True ICA should be used or not.
523
524 use_ssp = True
525
526 # ICA
527 # ~~~
528 # ``use_ica`` : bool
529 # If True ICA should be used or not.
530
531 use_ica = False
532
533 # ``ica_algorithm`` : 'picard' | 'fastica' | 'extended_infomax'
534 # The ICA algorithm to use.
535
536 ica_algorithm = 'picard'
537
538 # ``ica_max_iterations`` : int
539 # Maximum number of iterations to decompose the data into independent
540 # components. A low number means to finish earlier, but the consequence is
541 # that the algorithm may not have finished converging. To ensure
542 # convergence, pick a high number here (e.g. 3000); yet the algorithm will
543 # terminate as soon as it determines that is has successfully converged, and
544 # not necessarily exhaust the maximum number of iterations. Note that the
545 # default of 200 seems to be sufficient for Picard in many datasets, because
546 # it converges quicker than the other algorithms; but e.g. for FastICA, this
547 # limit may be too low to achieve convergence.
548
549 ica_max_iterations = 200
550
551 # ``ica_decim`` : None | None
552 # The decimation parameter to compute ICA. If 5 it means
553 # that 1 every 5 sample is used by ICA solver. The higher the faster
554 # it is to run but the less data you have to compute a good ICA. Set to
555 # ``1`` ``None`` to not perform an decimation.
556
557 ica_decim = None
558
559
560 # ``default_reject_comps_factory`` : callable
561 # A factory function that returns a default rejection component dictionary:
562 # A dictionary that specifies the indices of the ICA components to reject
563 # for each subject. For example you can use:
564 # rejcomps_man['subject01'] = dict(eeg=[12], meg=[7])
565
566 def default_reject_comps_factory():
567 """Return the default rejection component dictionary."""
568 return dict(meg=[], eeg=[])
569
570
571 rejcomps_man = defaultdict(default_reject_comps_factory)
572
573 # ``ica_ctps_ecg_threshold``: float
574 # The threshold parameter passed to `find_bads_ecg` method.
575
576 ica_ctps_ecg_threshold = 0.1
577
578 ###############################################################################
579 # DECODING
580 # --------
581 #
582 # ``decoding_conditions`` : list
583 # List of conditions to be classified.
584 #
585 # Example
586 # ~~~~~~~
587 # >>> decoding_conditions = [] # don't do decoding
588 # or
589 # >>> decoding_conditions = [('auditory', 'visual'), ('left', 'right')]
590
591 decoding_conditions = []
592 # decoding_conditions = [('left', 'right')]
593
594 ###############################################################################
595 # GROUP AVERAGE SENSORS
596 # ---------------------
597 #
598 # ``interpolate_bads_grand_average`` : bool
599 # Interpolate bad sensors in each dataset before calculating the grand
600 # average. This parameter is passed to the `mne.grand_average` function via
601 # the keyword argument `interpolate_bads`. It requires to have channel
602 # locations set.
603 #
604 # Example
605 # ~~~~~~~
606 # >>> interpolate_bads_grand_average = True
607
608 interpolate_bads_grand_average = True
609
610 # ``decoding_metric`` : str
611 # The metric to use for cross-validation. It can be 'roc_auc' or 'accuracy'
612 # or any metric supported by scikit-learn.
613
614 decoding_metric = 'roc_auc'
615
616 # ``decoding_n_splits`` : int
617 # The number of folds (a.k.a. splits) to use in the cross-validation.
618
619 decoding_n_splits = 5
620
621 ###############################################################################
622 # TIME-FREQUENCY
623 # --------------
624 #
625 # ``time_frequency_conditions`` : list
626 # The conditions to compute time-frequency decomposition on.
627
628 # time_frequency_conditions = ['left', 'right']
629 time_frequency_conditions = []
630
631 ###############################################################################
632 # SOURCE SPACE PARAMETERS
633 # -----------------------
634 #
635
636 # ``spacing`` : str
637 # The spacing to use. Can be ``'ico#'`` for a recursively subdivided
638 # icosahedron, ``'oct#'`` for a recursively subdivided octahedron,
639 # ``'all'`` for all points, or an integer to use appoximate
640 # distance-based spacing (in mm).
641
642 spacing = 'oct6'
643
644 # ``mindist`` : float
645 # Exclude points closer than this distance (mm) to the bounding surface.
646
647 mindist = 5
648
649 # ``loose`` : float in [0, 1] | 'auto'
650 # Value that weights the source variances of the dipole components
651 # that are parallel (tangential) to the cortical surface. If loose
652 # is 0 then the solution is computed with fixed orientation,
653 # and fixed must be True or "auto".
654 # If loose is 1, it corresponds to free orientations.
655 # The default value ('auto') is set to 0.2 for surface-oriented source
656 # space and set to 1.0 for volumetric, discrete, or mixed source spaces,
657 # unless ``fixed is True`` in which case the value 0. is used.
658
659 loose = 0.2
660
661 # ``depth`` : None | float | dict
662 # If float (default 0.8), it acts as the depth weighting exponent (``exp``)
663 # to use (must be between 0 and 1). None is equivalent to 0, meaning no
664 # depth weighting is performed. Can also be a `dict` containing additional
665 # keyword arguments to pass to :func:`mne.forward.compute_depth_prior`
666 # (see docstring for details and defaults).
667
668 depth = 0.8
669
670 # inverse_method : "MNE" | "dSPM" | "sLORETA" | "eLORETA"
671 # Use minimum norm, dSPM (default), sLORETA, or eLORETA.
672
673 inverse_method = 'dSPM'
674
675 # noise_cov : (None, 0) | ‘emptyroom’
676 # Specify how to estimate the noise covariance matrix, which is used in
677 # inverse modeling.
678 #
679 # If a tuple, it takes the form ``(tmin, tmax)`` with the time specified in
680 # seconds. If the first value of the tuple is ``None``, the considered
681 # period starts at the beginning of the epoch. If the second value of the
682 # tuple is ``None``, the considered period ends at the end of the epoch.
683 # The default, ``(None, 0)``, includes the entire period before the event,
684 # which is typically the pre-stimulus period.
685 #
686 # If ``emptyroom``, the noise covariance matrix will be estimated from an
687 # empty-room MEG recording. The empty-room recording will be automatically
688 # selected based on recording date and time.
689 #
690 # Please note that when processing data that contains EEG channels, the noise
691 # covariance can ONLY be estimated from the pre-stimulus period.
692 #
693 # Example
694 # ~~~~~~~
695 # Use the period from start of the epoch until 100 ms before the experimental
696 # event:
697 # >>> noise_cov = (None, -0.1)
698 #
699 # Use the time period from the experimental event until the end of the epoch:
700 # >>> noise_cov = (0, None)
701 #
702 # Use an empty-room recording:
703 # >>> noise_cov = 'emptyroom'
704
705 noise_cov = (None, 0)
706
707 # smooth : int | None
708 # Number of iterations for the smoothing of the surface data.
709 # If None, smooth is automatically defined to fill the surface
710 # with non-zero values. The default is spacing=None.
711
712 smooth = 10
713
714 fsaverage_vertices = [np.arange(10242), np.arange(10242)]
715
716 ###############################################################################
717 # ADVANCED
718 # --------
719 #
720 # ``l_trans_bandwidth`` : float | 'auto'
721 # A float that specifies the transition bandwidth of the
722 # highpass filter. By default it's `'auto'` and uses default mne
723 # parameters.
724
725 l_trans_bandwidth = 'auto'
726
727 # ``h_trans_bandwidth`` : float | 'auto'
728 # A float that specifies the transition bandwidth of the
729 # lowpass filter. By default it's `'auto'` and uses default mne
730 # parameters.
731
732 h_trans_bandwidth = 'auto'
733
734 # ``N_JOBS`` : int
735 # An integer that specifies how many subjects you want to run in parallel.
736
737 N_JOBS = 1
738
739 # ``random_state`` : None | int | np.random.RandomState
740 # To specify the random generator state. This allows to have
741 # the results more reproducible between machines and systems.
742 # Some methods like ICA need random values for initialisation.
743
744 random_state = 42
745
746 # ``shortest_event`` : int
747 # Minimum number of samples an event must last. If the
748 # duration is less than this an exception will be raised.
749
750 shortest_event = 1
751
752 # ``allow_maxshield`` : bool
753 # To import data that was recorded with Maxshield on before running
754 # maxfilter set this to True.
755
756 allow_maxshield = False
757
758 log_level = 'info'
759 mne_log_level = 'error'
760
761 # ``on_abort`` : 'continue' | 'abort'
762 # Whether to abort processing as soon as an error occurs, or whether to
763 # continue with all other processing steps for as long as possible.
764
765 on_error = 'abort'
766
767
768 ###############################################################################
769 # #
770 # CUSTOM CONFIGURATION ENDS HERE #
771 # #
772 ###############################################################################
773
774
775 ###############################################################################
776 # Logger
777 # ------
778
779 logger = logging.getLogger('mne-study-template')
780
781 log_fmt = '%(asctime)s %(message)s'
782 log_date_fmt = coloredlogs.DEFAULT_DATE_FORMAT = '%H:%M:%S'
783 coloredlogs.install(level=log_level, logger=logger, fmt=log_fmt,
784 date_fmt=log_date_fmt)
785
786 mne.set_log_level(verbose=mne_log_level.upper())
787
788 ###############################################################################
789 # Retrieve custom configuration options
790 # -------------------------------------
791 #
792 # For testing a specific dataset, create a Python file with a name of your
793 # liking (e.g., ``mydataset-template-config.py``), and set an environment
794 # variable ``MNE_BIDS_STUDY_CONFIG`` to that file.
795 #
796 # Example
797 # ~~~~~~~
798 # ``export MNE_BIDS_STUDY_CONFIG=/data/mystudy/mydataset-template-config.py``
799
800 if "MNE_BIDS_STUDY_CONFIG" in os.environ:
801 cfg_path = os.environ['MNE_BIDS_STUDY_CONFIG']
802
803 if os.path.exists(cfg_path):
804 msg = f'Using custom configuration: {cfg_path}'
805 logger.info(msg)
806 else:
807 msg = ('The custom configuration file specified in the '
808 'MNE_BIDS_STUDY_CONFIG environment variable could not be '
809 'found: {cfg_path}'.format(cfg_path=cfg_path))
810 raise ValueError(msg)
811
812 # Import configuration from an arbitrary path without having to fiddle
813 # with `sys.path`.
814 spec = importlib.util.spec_from_file_location(name='custom_config',
815 location=cfg_path)
816 custom_cfg = importlib.util.module_from_spec(spec)
817 spec.loader.exec_module(custom_cfg)
818 del spec, cfg_path
819
820 new = None
821 for val in dir(custom_cfg):
822 if not val.startswith('__'):
823 exec("new = custom_cfg.%s" % val)
824 logger.debug('Overwriting: %s -> %s' % (val, new))
825 exec("%s = custom_cfg.%s" % (val, val))
826
827
828 # BIDS_ROOT environment variable takes precedence over any configuration file
829 # values.
830 if os.getenv('BIDS_ROOT') is not None:
831 bids_root = os.getenv('BIDS_ROOT')
832
833 # If we don't have a bids_root until now, raise an exeception as we cannot
834 # proceed.
835 if not bids_root:
836 msg = ('You need to specify `bids_root` in your configuration, or '
837 'define an environment variable `BIDS_ROOT` pointing to the '
838 'root folder of your BIDS dataset')
839 raise ValueError(msg)
840
841
842 ###############################################################################
843 # Derivates root
844 # --------------
845 deriv_root = os.path.join(bids_root, 'derivatives', PIPELINE_NAME)
846
847
848 ###############################################################################
849 # CHECKS
850 # ------
851
852 if (use_maxwell_filter and
853 len(set(ch_types).intersection(('meg', 'grad', 'mag'))) == 0):
854 raise ValueError('Cannot use maxwell filter without MEG channels.')
855
856 if use_ssp and use_ica:
857 raise ValueError('Cannot use both SSP and ICA.')
858
859 if use_ica and ica_algorithm not in ('picard', 'fastica', 'extended_infomax'):
860 msg = (f"Invalid ICA algorithm requested. Valid values for ica_algorithm "
861 f"are: 'picard', 'fastica', and 'extended_infomax', but received "
862 f"{ica_algorithm}.")
863 raise ValueError(msg)
864
865 if not ch_types:
866 msg = 'Please specify ch_types in your configuration.'
867 raise ValueError(msg)
868
869 if ch_types == ['eeg']:
870 pass
871 elif 'eeg' in ch_types and len(ch_types) > 1: # EEG + some other channel types
872 msg = ('EEG data can only be analyzed separately from other channel '
873 'types. Please adjust `ch_types` in your configuration.')
874 raise ValueError(msg)
875 elif any([ch_type not in ('meg', 'mag', 'grad') for ch_type in ch_types]):
876 msg = ('Invalid channel type passed. Please adjust `ch_types` in your '
877 'configuration.')
878 raise ValueError(msg)
879
880 if 'eeg' in ch_types:
881 if use_ssp:
882 msg = ('You requested SSP for EEG data via use_ssp=True. However, '
883 'this is not presently supported. Please use ICA instead by '
884 'setting use_ssp=False and use_ica=True.')
885 raise ValueError(msg)
886 if not use_ica:
887 msg = ('You did not request ICA artifact correction for your data. '
888 'To turn it on, set use_ica=True.')
889 logger.info(msg)
890
891 if on_error not in ('continue', 'abort'):
892 msg = (f"on_error must be one of 'continue' or 'abort', but received "
893 f"{on_error}.")
894 logger.info(msg)
895
896 if isinstance(noise_cov, str) and noise_cov != 'emptyroom':
897 msg = (f"noise_cov must be a tuple or 'emptyroom', but received "
898 f"{noise_cov}")
899 raise ValueError(msg)
900
901 if noise_cov == 'emptyroom' and 'eeg' in ch_types:
902 msg = ('You requested to process data that contains EEG channels. In this '
903 'case, noise covariance can only be estimated from the '
904 'experimental data, e.g., the pre-stimulus period. Please set '
905 'noise_cov to (tmin, tmax)')
906 raise ValueError(msg)
907
908
909 ###############################################################################
910 # Helper functions
911 # ----------------
912
913 def get_sessions():
914 sessions_ = copy.deepcopy(sessions) # Avoid clash with global variable.
915
916 if sessions_ == 'all':
917 sessions_ = get_entity_vals(bids_root, entity_key='ses')
918
919 if not sessions_:
920 return [None]
921 else:
922 return sessions_
923
924
925 def get_runs():
926 runs_ = copy.deepcopy(runs) # Avoid clash with global variable.
927
928 if runs_ == 'all':
929 runs_ = get_entity_vals(bids_root, entity_key='run')
930
931 if not runs_:
932 return [None]
933 else:
934 return runs_
935
936
937 def get_subjects():
938 if subjects_list == 'all':
939 s = get_entity_vals(bids_root, entity_key='sub')
940 else:
941 s = subjects_list
942
943 subjects = set(s) - set(exclude_subjects)
944 # Drop empty-room subject.
945 subjects = subjects - set(['emptyroom'])
946
947 return list(subjects)
948
949
950 def get_task():
951 if not task:
952 tasks = get_entity_vals(bids_root, entity_key='task')
953 if not tasks:
954 return None
955 else:
956 return tasks[0]
957 else:
958 return task
959
960
961 def get_kind():
962 # Content of ch_types should be sanitized already, so we don't need any
963 # extra sanity checks here.
964 if ch_types == ['eeg']:
965 return 'eeg'
966 else:
967 return 'meg'
968
969
970 def get_reject():
971 reject_ = reject.copy() # Avoid clash with global variable.
972 kind = get_kind()
973
974 if kind == 'eeg':
975 ch_types_to_remove = ('mag', 'grad')
976 else:
977 ch_types_to_remove = ('eeg',)
978
979 for ch_type in ch_types_to_remove:
980 try:
981 del reject_[ch_type]
982 except KeyError:
983 pass
984 return reject_
985
986
987 def get_fs_subjects_dir():
988 if not subjects_dir:
989 return os.path.join(bids_root, 'derivatives', 'freesurfer', 'subjects')
990 else:
991 return subjects_dir
992
993
994 def get_subject_path(subject, session, kind):
995 subject_path = f'sub-{subject}'
996 if session is not None:
997 subject_path = os.path.join(subject_path, f'ses-{session}')
998 subject_path = os.path.join(subject_path, kind)
999 return subject_path
1000
1001
1002 def get_subject_deriv_path(subject, session, kind):
1003 subject_path = get_subject_path(subject=subject, session=session,
1004 kind=kind)
1005 deriv_path = os.path.join(deriv_root, subject_path)
1006 return deriv_path
1007
1008
1009 def gen_log_message(message, step=None, subject=None, session=None, run=None):
1010 if subject is not None:
1011 subject = f'sub-{subject}'
1012 if session is not None:
1013 session = f'ses-{session}'
1014 if run is not None:
1015 run = f'run-{run}'
1016
1017 prefix = ', '.join([item for item in [subject, session, run]
1018 if item is not None])
1019 if prefix:
1020 prefix = f'[{prefix}]'
1021
1022 if step is not None:
1023 prefix = f'[Step-{step:02}]{prefix}'
1024
1025 return prefix + ' ' + message
1026
1027
1028 def failsafe_run(on_error):
1029 def failsafe_run_decorator(func):
1030 @functools.wraps(func) # Preserve "identity" of original function
1031 def wrapper(*args, **kwargs):
1032 try:
1033 return func(*args, **kwargs)
1034 except Exception as e:
1035 message = 'A critical error occurred.'
1036 message = gen_log_message(message=message)
1037
1038 if on_error == 'abort':
1039 logger.critical(message)
1040 raise(e)
1041 else:
1042 message = f'{message} The error message was:\n{str(e)}'
1043 logger.critical(message)
1044 return wrapper
1045 return failsafe_run_decorator
1046
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/config.py b/config.py
--- a/config.py
+++ b/config.py
@@ -461,17 +461,6 @@
tmax = 0.5
-# ``trigger_time_shift`` : float | None
-# If float it specifies the offset for the trigger and the stimulus
-# (in seconds). You need to measure this value for your specific
-# experiment/setup.
-#
-# Example
-# ~~~~~~~
-# >>> trigger_time_shift = 0 # don't apply any offset
-
-trigger_time_shift = 0.
-
# ``baseline`` : tuple
# It specifies how to baseline the epochs; if None, no baseline is applied.
#
| {"golden_diff": "diff --git a/config.py b/config.py\n--- a/config.py\n+++ b/config.py\n@@ -461,17 +461,6 @@\n \n tmax = 0.5\n \n-# ``trigger_time_shift`` : float | None\n-# If float it specifies the offset for the trigger and the stimulus\n-# (in seconds). You need to measure this value for your specific\n-# experiment/setup.\n-#\n-# Example\n-# ~~~~~~~\n-# >>> trigger_time_shift = 0 # don't apply any offset\n-\n-trigger_time_shift = 0.\n-\n # ``baseline`` : tuple\n # It specifies how to baseline the epochs; if None, no baseline is applied.\n #\n", "issue": "config.trigger_time_shift is currently unused\nAnd also lacks clarification of what exactly it is supposed to do.\n", "before_files": [{"content": "\"\"\"Set the configuration parameters for the study.\n\nYou need to define an environment variable `BIDS_ROOT` to point to the root\nof your BIDS dataset to be analyzed.\n\n\"\"\"\nimport importlib\nimport functools\nimport os\nfrom collections import defaultdict\nimport copy\nimport coloredlogs\nimport logging\n\nimport numpy as np\nimport mne\nfrom mne_bids.utils import get_entity_vals\n\n# Name, version, and hosting location of the pipeline\nPIPELINE_NAME = 'mne-study-template'\nVERSION = '0.1.dev0'\nCODE_URL = 'https://github.com/mne-tools/mne-study-template'\n\n\n# ``study_name`` : str\n# Specify the name of your study. It will be used to populate filenames for\n# saving the analysis results.\n#\n# Example\n# ~~~~~~~\n# >>> study_name = 'my-study'\n\nstudy_name = ''\n\n# ``bids_root`` : str or None\n# Speficy the BIDS root directory. Pass an empty string or ```None`` to use\n# the value specified in the ``BIDS_ROOT`` environment variable instead.\n# Raises an exception if the BIDS root has not been specified.\n#\n# Example\n# ~~~~~~~\n# >>> bids_root = '/path/to/your/bids_root' # Use this to specify a path here.\n# or\n# >>> bids_root = None # Make use of the ``BIDS_ROOT`` environment variable.\n\nbids_root = None\n\n# ``subjects_dir`` : str or None\n# Path to the directory that contains the MRI data files and their\n# derivativesfor all subjects. Specifically, the ``subjects_dir`` is the\n# $SUBJECTS_DIR used by the Freesurfer software. If ``None``, will use\n# ``'bids_root/derivatives/freesurfer/subjects'``.\n\nsubjects_dir = None\n\n# ``daysback`` : int\n# If not None apply a time shift to dates to adjust for limitateions\n# of fif files\n\ndaysback = None\n\n# ``interactive`` : boolean\n# If True, the scripts will provide some interactive elements, such as\n# figures. If running the scripts from a notebook or Spyder,\n# run %matplotlib qt in the command line to open the figures in a separate\n# window.\n\ninteractive = False\n\n# ``crop`` : tuple or None\n# If tuple, (tmin, tmax) to crop the raw data\n# If None (default), do not crop.\ncrop = None\n\n# BIDS params\n# see: bids-specification.rtfd.io/en/latest/99-appendices/04-entity-table.html\n\n# ``sessions`` : iterable or 'all'\n# The sessions to process.\nsessions = 'all'\n\n# ``task`` : str\n# The task to process.\ntask = ''\n\n# ``runs`` : iterable or 'all'\n# The runs to process.\nruns = 'all'\n\nacq = None\n\nproc = None\n\nrec = None\n\nspace = None\n\n# ``subjects_list`` : 'all' | list of str\n# Subjects to analyze. If ``'all``, include all subjects. To only\n# include a subset of subjects, pass a list of their identifiers. Even\n# if you plan on analyzing only a single subject, pass their identifier\n# as a list.\n#\n# Please note that if you intend to EXCLUDE only a few subjects, you\n# should consider setting ``subjects_list = 'all'`` and adding the\n# identifiers of the excluded subjects to ``exclude_subjects`` (see next\n# section).\n#\n# Example\n# ~~~~~~~\n# >>> subjects_list = 'all' # Include all subjects.\n# >>> subjects_list = ['05'] # Only include subject 05.\n# >>> subjects_list = ['01', '02'] # Only include subjects 01 and 02.\n\nsubjects_list = 'all'\n\n# ``exclude_subjects`` : list of str\n# Specify subjects to exclude from analysis. The MEG empty-room mock-subject\n# is automatically excluded from regular analysis.\n#\n# Good Practice / Advice\n# ~~~~~~~~~~~~~~~~~~~~~~\n# Keep track of the criteria leading you to exclude\n# a participant (e.g. too many movements, missing blocks, aborted experiment,\n# did not understand the instructions, etc, ...)\n# The ``emptyroom`` subject will be excluded automatically.\n\nexclude_subjects = []\n\n# ``ch_types`` : list of st\n# The list of channel types to consider.\n#\n# Example\n# ~~~~~~~\n# >>> ch_types = ['meg', 'eeg'] # to use MEG and EEG channels\n# or\n# >>> ch_types = ['meg'] # to use only MEG\n# or\n# >>> ch_types = ['grad'] # to use only gradiometer MEG channels\n\n# Note: If `kind` is 'eeg', EEG ch_types will be used regardless of whether\n# specified here or not\nch_types = []\n\n###############################################################################\n# DEFINE ADDITIONAL CHANNELS\n# --------------------------\n# needed for 01-import_and_maxfilter.py\n\n# ``rename_channels`` : dict rename channels\n# Here you name or replace extra channels that were recorded, for instance\n# EOG, ECG.\n#\n# Example\n# ~~~~~~~\n# Here rename EEG061 to EOG061, EEG062 to EOG062, EEG063 to ECG063:\n# >>> rename_channels = {'EEG061': 'EOG061', 'EEG062': 'EOG062',\n# 'EEG063': 'ECG063'}\n\n# XXX should be done automatically from BIDS ?\nrename_channels = None\n\n# ``set_channel_types``: dict\n# Here you define types of channels to pick later.\n#\n# Example\n# ~~~~~~~\n# >>> set_channel_types = {'EEG061': 'eog', 'EEG062': 'eog',\n# 'EEG063': 'ecg', 'EEG064': 'misc'}\n\n# XXX should not be necessary\nset_channel_types = None\n\n###############################################################################\n# MAXWELL FILTER PARAMETERS\n# -------------------------\n# done in 01-import_and_maxfilter.py\n#\n# Note: For any of this to work, you must set ``mf_ctc_fname`` and\n# ``mf_cal_fname`` above.\n#\n# \"Bad\", i.e. flat and overly noisy channels, can be automatically detected\n# using a procedure inspired by the commercial MaxFilter by Elekta. First,\n# a copy of the data is low-pass filtered at 40 Hz. Then, channels with\n# unusually low variability are flagged as \"flat\", while channels with\n# excessively high variability are flagged as \"noisy\". Flat and noisy channels\n# are marked as \"bad\" and excluded from subsequent analysis. See\n# :func:`mne.preprocssessing.find_bad_channels_maxwell` for more information\n# on this procedure. The list of bad channels detected through this procedure\n# will be merged with the list of bad channels already present in the dataset,\n# if any.\n#\n# ``find_flat_channels_meg`` : bool\n# Auto-detect \"flat\" channels and mark them as bad.\n#\n# ``find_noisy_channels_meg`` : bool\n# Auto-detect \"noisy\" channels and mark them as bad.\n\nfind_flat_channels_meg = False\nfind_noisy_channels_meg = False\n\n# ``use_maxwell_filter`` : bool\n# Use or not maxwell filter to preprocess the data.\n#\n# Warning\n# ~~~~~~~\n# If the data were recorded with internal active compensation (MaxShield),\n# they need to be run through Maxwell filter to avoid distortions.\n# Bad channels need to be set through BIDS channels.tsv and / or via the\n# ``find_flat_channels_meg`` and ``find_noisy_channels_meg`` options above\n# before applying Maxwell filter.\n\nuse_maxwell_filter = False\n\n# There are two kinds of maxfiltering: SSS and tSSS\n# [SSS = signal space separation ; tSSS = temporal signal space separation]\n# (Taulu et al, 2004): http://cds.cern.ch/record/709081/files/0401166.pdf\n#\n# ``mf_st_duration`` : float | None\n# If not None, apply spatiotemporal SSS (tSSS) with specified buffer\n# duration (in seconds). MaxFilter\u2122's default is 10.0 seconds in v2.2.\n# Spatiotemporal SSS acts as implicitly as a high-pass filter where the\n# cut-off frequency is 1/st_dur Hz. For this (and other) reasons, longer\n# buffers are generally better as long as your system can handle the\n# higher memory usage. To ensure that each window is processed\n# identically, choose a buffer length that divides evenly into your data.\n# Any data at the trailing edge that doesn't fit evenly into a whole\n# buffer window will be lumped into the previous buffer.\n#\n# Good Practice / Advice\n# ~~~~~~~~~~~~~~~~~~~~~~\n# If you are interested in low frequency activity (<0.1Hz), avoid using tSSS\n# and set mf_st_duration to None\n#\n# If you are interested in low frequency above 0.1 Hz, you can use the\n# default mf_st_duration to 10 s meaning it acts like a 0.1 Hz highpass filter.\n#\n# Example\n# ~~~~~~~\n# >>> mf_st_duration = None\n# or\n# >>> mf_st_duration = 10. # to apply tSSS with 0.1Hz highpass filter.\n\nmf_st_duration = None\n\n# ``mf_head_origin`` : array-like, shape (3,) | 'auto'\n# Origin of internal and external multipolar moment space in meters.\n# If 'auto', it will be estimated from headshape points.\n# If automatic fitting fails (e.g., due to having too few digitization\n# points), consider separately calling the fitting function with different\n# options or specifying the origin manually.\n#\n# Example\n# ~~~~~~~\n# >>> mf_head_origin = 'auto'\n\nmf_head_origin = 'auto'\n\n# ``cross talk`` : str\n# Path to the cross talk file\n#\n#\n# ``calibration`` : str\n# Path to the calibration file.\n#\n#\n# These 2 files should be downloaded and made available for running\n# maxwell filtering.\n#\n# Example\n# ~~~~~~~\n# >>> cal_files_path = os.path.join(study_path, 'SSS')\n# >>> mf_ctc_fname = os.path.join(cal_files_path, 'ct_sparse_mgh.fif')\n# >>> mf_cal_fname = os.path.join(cal_files_path, 'sss_cal_mgh.dat')\n#\n# Warning\n# ~~~~~~~\n# These 2 files are site and machine specific files that provide information\n# about the environmental noise. For practical purposes, place them in your\n# study folder.\n#\n# At NeuroSpin: ct_sparse and sss_call are on the meg_tmp server\n\n# cal_files_path = os.path.join(study_path, 'SSS')\n# mf_ctc_fname = os.path.join(cal_files_path, 'ct_sparse_mgh.fif')\n# mf_cal_fname = os.path.join(cal_files_path, 'sss_cal_mgh.dat')\n\nmf_ctc_fname = ''\nmf_cal_fname = ''\n\n# Despite all possible care to avoid movements in the MEG, the participant\n# will likely slowly drift down from the Dewar or slightly shift the head\n# around in the course of the recording session. Hence, to take this into\n# account, we are realigning all data to a single position. For this, you need\n# to define a reference run (typically the one in the middle of\n# the recording session).\n#\n# ``mf_reference_run`` : int\n# Which run to take as the reference for adjusting the head position of all\n# runs.\n#\n# Example\n# ~~~~~~~\n# >>> mf_reference_run = 0 # to use the first run\n\nmf_reference_run = 0\n\n###############################################################################\n# FREQUENCY FILTERING\n# -------------------\n# done in 02-frequency_filter.py\n\n# Good Practice / Advice\n# ~~~~~~~~~~~~~~~~~~~~~~\n# It is typically better to set your filtering properties on the raw data so\n# as to avoid what we call border (or edge) effects.\n#\n# If you use this pipeline for evoked responses, you could consider\n# a low-pass filter cut-off of h_freq = 40 Hz\n# and possibly a high-pass filter cut-off of l_freq = 1 Hz\n# so you would preserve only the power in the 1Hz to 40 Hz band.\n# Note that highpass filtering is not necessarily recommended as it can\n# distort waveforms of evoked components, or simply wash out any low\n# frequency that can may contain brain signal. It can also act as\n# a replacement for baseline correction in Epochs. See below.\n#\n# If you use this pipeline for time-frequency analysis, a default filtering\n# coult be a high-pass filter cut-off of l_freq = 1 Hz\n# a low-pass filter cut-off of h_freq = 120 Hz\n# so you would preserve only the power in the 1Hz to 120 Hz band.\n#\n# If you need more fancy analysis, you are already likely past this kind\n# of tips! :)\n\n\n# ``l_freq`` : float\n# The low-frequency cut-off in the highpass filtering step.\n# Keep it None if no highpass filtering should be applied.\n\nl_freq = 1.\n\n# ``h_freq`` : float\n# The high-frequency cut-off in the lowpass filtering step.\n# Keep it None if no lowpass filtering should be applied.\n\nh_freq = 40.\n\n###############################################################################\n# RESAMPLING\n# ----------\n#\n# Good Practice / Advice\n# ~~~~~~~~~~~~~~~~~~~~~~\n# If you have acquired data with a very high sampling frequency (e.g. 2 kHz)\n# you will likely want to downsample to lighten up the size of the files you\n# are working with (pragmatics)\n# If you are interested in typical analysis (up to 120 Hz) you can typically\n# resample your data down to 500 Hz without preventing reliable time-frequency\n# exploration of your data\n#\n# ``resample_sfreq`` : float\n# Specifies at which sampling frequency the data should be resampled.\n# If None then no resampling will be done.\n#\n# Example\n# ~~~~~~~\n# >>> resample_sfreq = None # no resampling\n# or\n# >>> resample_sfreq = 500 # resample to 500Hz\n\nresample_sfreq = None\n\n# ``decim`` : int\n# Says how much to decimate data at the epochs level.\n# It is typically an alternative to the `resample_sfreq` parameter that\n# can be used for resampling raw data. 1 means no decimation.\n#\n# Good Practice / Advice\n# ~~~~~~~~~~~~~~~~~~~~~~\n# Decimation requires to lowpass filtered the data to avoid aliasing.\n# Note that using decimation is much faster than resampling.\n#\n# Example\n# ~~~~~~~\n# >>> decim = 1 # no decimation\n# or\n# >>> decim = 4 # decimate by 4 ie devide sampling frequency by 4\n\ndecim = 1\n\n###############################################################################\n# AUTOMATIC REJECTION OF ARTIFACTS\n# --------------------------------\n#\n# Good Practice / Advice\n# ~~~~~~~~~~~~~~~~~~~~~~\n# Have a look at your raw data and train yourself to detect a blink, a heart\n# beat and an eye movement.\n# You can do a quick average of blink data and check what the amplitude looks\n# like.\n#\n# ``reject`` : dict | None\n# The rejection limits to make some epochs as bads.\n# This allows to remove strong transient artifacts.\n# If you want to reject and retrieve blinks later, e.g. with ICA,\n# don't specify a value for the eog channel (see examples below).\n# Make sure to include values for eeg if you have EEG data\n#\n# Note\n# ~~~~\n# These numbers tend to vary between subjects.. You might want to consider\n# using the autoreject method by Jas et al. 2018.\n# See https://autoreject.github.io\n#\n# Example\n# ~~~~~~~\n# >>> reject = {'grad': 4000e-13, 'mag': 4e-12, 'eog': 150e-6}\n# >>> reject = {'grad': 4000e-13, 'mag': 4e-12, 'eeg': 200e-6}\n# >>> reject = None\n\nreject = {'grad': 4000e-13, 'mag': 4e-12, 'eeg': 150e-6}\n\n\n###############################################################################\n# RENAME EXPERIMENTAL EVENTS\n# --------------------------\n#\n# ``rename_events`` : dict\n# A dictionary specifying which events in the BIDS dataset to rename upon\n# loading, and before processing begins.\n#\n# Pass an empty dictionary to not perform any renaming.\n#\n# Example\n# ~~~~~~~\n# Rename ``audio_left`` in the BIDS dataset to ``audio/left`` in the pipeline:\n# >>> rename_events = {'audio_left': 'audio/left'}\n\nrename_events = dict()\n\n\n###############################################################################\n# EPOCHING\n# --------\n#\n# ``tmin``: float\n# A float in seconds that gives the start time before event of an epoch.\n#\n# Example\n# ~~~~~~~\n# >>> tmin = -0.2 # take 200ms before event onset.\n\ntmin = -0.2\n\n# ``tmax``: float\n# A float in seconds that gives the end time before event of an epoch.\n#\n# Example\n# ~~~~~~~\n# >>> tmax = 0.5 # take 500ms after event onset.\n\ntmax = 0.5\n\n# ``trigger_time_shift`` : float | None\n# If float it specifies the offset for the trigger and the stimulus\n# (in seconds). You need to measure this value for your specific\n# experiment/setup.\n#\n# Example\n# ~~~~~~~\n# >>> trigger_time_shift = 0 # don't apply any offset\n\ntrigger_time_shift = 0.\n\n# ``baseline`` : tuple\n# It specifies how to baseline the epochs; if None, no baseline is applied.\n#\n# Example\n# ~~~~~~~\n# >>> baseline = (None, 0) # baseline between tmin and 0\n\nbaseline = (None, 0)\n\n# `conditions`` : list\n# The condition names to consider. This can either be the keys of\n# ``event_id``, or \u2013 if event names were specified with ``/`` for\n# grouping \u2013 the name of the *grouped* condition (i.e., the\n# condition name before or after that ``/`` that is shared between the\n# respective conditions you wish to group). See the \"Subselecting epochs\"\n# tutorial for more information: https://mne.tools/stable/auto_tutorials/epochs/plot_10_epochs_overview.html#subselecting-epochs # noqa: 501\n#\n# Example\n# ~~~~~~~\n# >>> conditions = ['auditory/left', 'visual/left']\n# or\n# >>> conditions = ['auditory/left', 'auditory/right']\n# or\n# >>> conditions = ['auditory']\n# or\n# >>> conditions = ['auditory', 'visual']\n# or\n# >>> conditions = ['left', 'right']\n\nconditions = ['left', 'right']\n\n###############################################################################\n# ARTIFACT REMOVAL\n# ----------------\n#\n# You can choose between ICA and SSP to remove eye and heart artifacts.\n# SSP: https://mne-tools.github.io/stable/auto_tutorials/plot_artifacts_correction_ssp.html?highlight=ssp # noqa\n# ICA: https://mne-tools.github.io/stable/auto_tutorials/plot_artifacts_correction_ica.html?highlight=ica # noqa\n# if you choose ICA, run scripts 5a and 6a\n# if you choose SSP, run scripts 5b and 6b\n#\n# Currently you cannot use both.\n\n# SSP\n# ~~~\n#\n# ``use_ssp`` : bool\n# If True ICA should be used or not.\n\nuse_ssp = True\n\n# ICA\n# ~~~\n# ``use_ica`` : bool\n# If True ICA should be used or not.\n\nuse_ica = False\n\n# ``ica_algorithm`` : 'picard' | 'fastica' | 'extended_infomax'\n# The ICA algorithm to use.\n\nica_algorithm = 'picard'\n\n# ``ica_max_iterations`` : int\n# Maximum number of iterations to decompose the data into independent\n# components. A low number means to finish earlier, but the consequence is\n# that the algorithm may not have finished converging. To ensure\n# convergence, pick a high number here (e.g. 3000); yet the algorithm will\n# terminate as soon as it determines that is has successfully converged, and\n# not necessarily exhaust the maximum number of iterations. Note that the\n# default of 200 seems to be sufficient for Picard in many datasets, because\n# it converges quicker than the other algorithms; but e.g. for FastICA, this\n# limit may be too low to achieve convergence.\n\nica_max_iterations = 200\n\n# ``ica_decim`` : None | None\n# The decimation parameter to compute ICA. If 5 it means\n# that 1 every 5 sample is used by ICA solver. The higher the faster\n# it is to run but the less data you have to compute a good ICA. Set to\n# ``1`` ``None`` to not perform an decimation.\n\nica_decim = None\n\n\n# ``default_reject_comps_factory`` : callable\n# A factory function that returns a default rejection component dictionary:\n# A dictionary that specifies the indices of the ICA components to reject\n# for each subject. For example you can use:\n# rejcomps_man['subject01'] = dict(eeg=[12], meg=[7])\n\ndef default_reject_comps_factory():\n \"\"\"Return the default rejection component dictionary.\"\"\"\n return dict(meg=[], eeg=[])\n\n\nrejcomps_man = defaultdict(default_reject_comps_factory)\n\n# ``ica_ctps_ecg_threshold``: float\n# The threshold parameter passed to `find_bads_ecg` method.\n\nica_ctps_ecg_threshold = 0.1\n\n###############################################################################\n# DECODING\n# --------\n#\n# ``decoding_conditions`` : list\n# List of conditions to be classified.\n#\n# Example\n# ~~~~~~~\n# >>> decoding_conditions = [] # don't do decoding\n# or\n# >>> decoding_conditions = [('auditory', 'visual'), ('left', 'right')]\n\ndecoding_conditions = []\n# decoding_conditions = [('left', 'right')]\n\n###############################################################################\n# GROUP AVERAGE SENSORS\n# ---------------------\n#\n# ``interpolate_bads_grand_average`` : bool\n# Interpolate bad sensors in each dataset before calculating the grand\n# average. This parameter is passed to the `mne.grand_average` function via\n# the keyword argument `interpolate_bads`. It requires to have channel\n# locations set.\n#\n# Example\n# ~~~~~~~\n# >>> interpolate_bads_grand_average = True\n\ninterpolate_bads_grand_average = True\n\n# ``decoding_metric`` : str\n# The metric to use for cross-validation. It can be 'roc_auc' or 'accuracy'\n# or any metric supported by scikit-learn.\n\ndecoding_metric = 'roc_auc'\n\n# ``decoding_n_splits`` : int\n# The number of folds (a.k.a. splits) to use in the cross-validation.\n\ndecoding_n_splits = 5\n\n###############################################################################\n# TIME-FREQUENCY\n# --------------\n#\n# ``time_frequency_conditions`` : list\n# The conditions to compute time-frequency decomposition on.\n\n# time_frequency_conditions = ['left', 'right']\ntime_frequency_conditions = []\n\n###############################################################################\n# SOURCE SPACE PARAMETERS\n# -----------------------\n#\n\n# ``spacing`` : str\n# The spacing to use. Can be ``'ico#'`` for a recursively subdivided\n# icosahedron, ``'oct#'`` for a recursively subdivided octahedron,\n# ``'all'`` for all points, or an integer to use appoximate\n# distance-based spacing (in mm).\n\nspacing = 'oct6'\n\n# ``mindist`` : float\n# Exclude points closer than this distance (mm) to the bounding surface.\n\nmindist = 5\n\n# ``loose`` : float in [0, 1] | 'auto'\n# Value that weights the source variances of the dipole components\n# that are parallel (tangential) to the cortical surface. If loose\n# is 0 then the solution is computed with fixed orientation,\n# and fixed must be True or \"auto\".\n# If loose is 1, it corresponds to free orientations.\n# The default value ('auto') is set to 0.2 for surface-oriented source\n# space and set to 1.0 for volumetric, discrete, or mixed source spaces,\n# unless ``fixed is True`` in which case the value 0. is used.\n\nloose = 0.2\n\n# ``depth`` : None | float | dict\n# If float (default 0.8), it acts as the depth weighting exponent (``exp``)\n# to use (must be between 0 and 1). None is equivalent to 0, meaning no\n# depth weighting is performed. Can also be a `dict` containing additional\n# keyword arguments to pass to :func:`mne.forward.compute_depth_prior`\n# (see docstring for details and defaults).\n\ndepth = 0.8\n\n# inverse_method : \"MNE\" | \"dSPM\" | \"sLORETA\" | \"eLORETA\"\n# Use minimum norm, dSPM (default), sLORETA, or eLORETA.\n\ninverse_method = 'dSPM'\n\n# noise_cov : (None, 0) | \u2018emptyroom\u2019\n# Specify how to estimate the noise covariance matrix, which is used in\n# inverse modeling.\n#\n# If a tuple, it takes the form ``(tmin, tmax)`` with the time specified in\n# seconds. If the first value of the tuple is ``None``, the considered\n# period starts at the beginning of the epoch. If the second value of the\n# tuple is ``None``, the considered period ends at the end of the epoch.\n# The default, ``(None, 0)``, includes the entire period before the event,\n# which is typically the pre-stimulus period.\n#\n# If ``emptyroom``, the noise covariance matrix will be estimated from an\n# empty-room MEG recording. The empty-room recording will be automatically\n# selected based on recording date and time.\n#\n# Please note that when processing data that contains EEG channels, the noise\n# covariance can ONLY be estimated from the pre-stimulus period.\n#\n# Example\n# ~~~~~~~\n# Use the period from start of the epoch until 100 ms before the experimental\n# event:\n# >>> noise_cov = (None, -0.1)\n#\n# Use the time period from the experimental event until the end of the epoch:\n# >>> noise_cov = (0, None)\n#\n# Use an empty-room recording:\n# >>> noise_cov = 'emptyroom'\n\nnoise_cov = (None, 0)\n\n# smooth : int | None\n# Number of iterations for the smoothing of the surface data.\n# If None, smooth is automatically defined to fill the surface\n# with non-zero values. The default is spacing=None.\n\nsmooth = 10\n\nfsaverage_vertices = [np.arange(10242), np.arange(10242)]\n\n###############################################################################\n# ADVANCED\n# --------\n#\n# ``l_trans_bandwidth`` : float | 'auto'\n# A float that specifies the transition bandwidth of the\n# highpass filter. By default it's `'auto'` and uses default mne\n# parameters.\n\nl_trans_bandwidth = 'auto'\n\n# ``h_trans_bandwidth`` : float | 'auto'\n# A float that specifies the transition bandwidth of the\n# lowpass filter. By default it's `'auto'` and uses default mne\n# parameters.\n\nh_trans_bandwidth = 'auto'\n\n# ``N_JOBS`` : int\n# An integer that specifies how many subjects you want to run in parallel.\n\nN_JOBS = 1\n\n# ``random_state`` : None | int | np.random.RandomState\n# To specify the random generator state. This allows to have\n# the results more reproducible between machines and systems.\n# Some methods like ICA need random values for initialisation.\n\nrandom_state = 42\n\n# ``shortest_event`` : int\n# Minimum number of samples an event must last. If the\n# duration is less than this an exception will be raised.\n\nshortest_event = 1\n\n# ``allow_maxshield`` : bool\n# To import data that was recorded with Maxshield on before running\n# maxfilter set this to True.\n\nallow_maxshield = False\n\nlog_level = 'info'\nmne_log_level = 'error'\n\n# ``on_abort`` : 'continue' | 'abort'\n# Whether to abort processing as soon as an error occurs, or whether to\n# continue with all other processing steps for as long as possible.\n\non_error = 'abort'\n\n\n###############################################################################\n# #\n# CUSTOM CONFIGURATION ENDS HERE #\n# #\n###############################################################################\n\n\n###############################################################################\n# Logger\n# ------\n\nlogger = logging.getLogger('mne-study-template')\n\nlog_fmt = '%(asctime)s %(message)s'\nlog_date_fmt = coloredlogs.DEFAULT_DATE_FORMAT = '%H:%M:%S'\ncoloredlogs.install(level=log_level, logger=logger, fmt=log_fmt,\n date_fmt=log_date_fmt)\n\nmne.set_log_level(verbose=mne_log_level.upper())\n\n###############################################################################\n# Retrieve custom configuration options\n# -------------------------------------\n#\n# For testing a specific dataset, create a Python file with a name of your\n# liking (e.g., ``mydataset-template-config.py``), and set an environment\n# variable ``MNE_BIDS_STUDY_CONFIG`` to that file.\n#\n# Example\n# ~~~~~~~\n# ``export MNE_BIDS_STUDY_CONFIG=/data/mystudy/mydataset-template-config.py``\n\nif \"MNE_BIDS_STUDY_CONFIG\" in os.environ:\n cfg_path = os.environ['MNE_BIDS_STUDY_CONFIG']\n\n if os.path.exists(cfg_path):\n msg = f'Using custom configuration: {cfg_path}'\n logger.info(msg)\n else:\n msg = ('The custom configuration file specified in the '\n 'MNE_BIDS_STUDY_CONFIG environment variable could not be '\n 'found: {cfg_path}'.format(cfg_path=cfg_path))\n raise ValueError(msg)\n\n # Import configuration from an arbitrary path without having to fiddle\n # with `sys.path`.\n spec = importlib.util.spec_from_file_location(name='custom_config',\n location=cfg_path)\n custom_cfg = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(custom_cfg)\n del spec, cfg_path\n\n new = None\n for val in dir(custom_cfg):\n if not val.startswith('__'):\n exec(\"new = custom_cfg.%s\" % val)\n logger.debug('Overwriting: %s -> %s' % (val, new))\n exec(\"%s = custom_cfg.%s\" % (val, val))\n\n\n# BIDS_ROOT environment variable takes precedence over any configuration file\n# values.\nif os.getenv('BIDS_ROOT') is not None:\n bids_root = os.getenv('BIDS_ROOT')\n\n# If we don't have a bids_root until now, raise an exeception as we cannot\n# proceed.\nif not bids_root:\n msg = ('You need to specify `bids_root` in your configuration, or '\n 'define an environment variable `BIDS_ROOT` pointing to the '\n 'root folder of your BIDS dataset')\n raise ValueError(msg)\n\n\n###############################################################################\n# Derivates root\n# --------------\nderiv_root = os.path.join(bids_root, 'derivatives', PIPELINE_NAME)\n\n\n###############################################################################\n# CHECKS\n# ------\n\nif (use_maxwell_filter and\n len(set(ch_types).intersection(('meg', 'grad', 'mag'))) == 0):\n raise ValueError('Cannot use maxwell filter without MEG channels.')\n\nif use_ssp and use_ica:\n raise ValueError('Cannot use both SSP and ICA.')\n\nif use_ica and ica_algorithm not in ('picard', 'fastica', 'extended_infomax'):\n msg = (f\"Invalid ICA algorithm requested. Valid values for ica_algorithm \"\n f\"are: 'picard', 'fastica', and 'extended_infomax', but received \"\n f\"{ica_algorithm}.\")\n raise ValueError(msg)\n\nif not ch_types:\n msg = 'Please specify ch_types in your configuration.'\n raise ValueError(msg)\n\nif ch_types == ['eeg']:\n pass\nelif 'eeg' in ch_types and len(ch_types) > 1: # EEG + some other channel types\n msg = ('EEG data can only be analyzed separately from other channel '\n 'types. Please adjust `ch_types` in your configuration.')\n raise ValueError(msg)\nelif any([ch_type not in ('meg', 'mag', 'grad') for ch_type in ch_types]):\n msg = ('Invalid channel type passed. Please adjust `ch_types` in your '\n 'configuration.')\n raise ValueError(msg)\n\nif 'eeg' in ch_types:\n if use_ssp:\n msg = ('You requested SSP for EEG data via use_ssp=True. However, '\n 'this is not presently supported. Please use ICA instead by '\n 'setting use_ssp=False and use_ica=True.')\n raise ValueError(msg)\n if not use_ica:\n msg = ('You did not request ICA artifact correction for your data. '\n 'To turn it on, set use_ica=True.')\n logger.info(msg)\n\nif on_error not in ('continue', 'abort'):\n msg = (f\"on_error must be one of 'continue' or 'abort', but received \"\n f\"{on_error}.\")\n logger.info(msg)\n\nif isinstance(noise_cov, str) and noise_cov != 'emptyroom':\n msg = (f\"noise_cov must be a tuple or 'emptyroom', but received \"\n f\"{noise_cov}\")\n raise ValueError(msg)\n\nif noise_cov == 'emptyroom' and 'eeg' in ch_types:\n msg = ('You requested to process data that contains EEG channels. In this '\n 'case, noise covariance can only be estimated from the '\n 'experimental data, e.g., the pre-stimulus period. Please set '\n 'noise_cov to (tmin, tmax)')\n raise ValueError(msg)\n\n\n###############################################################################\n# Helper functions\n# ----------------\n\ndef get_sessions():\n sessions_ = copy.deepcopy(sessions) # Avoid clash with global variable.\n\n if sessions_ == 'all':\n sessions_ = get_entity_vals(bids_root, entity_key='ses')\n\n if not sessions_:\n return [None]\n else:\n return sessions_\n\n\ndef get_runs():\n runs_ = copy.deepcopy(runs) # Avoid clash with global variable.\n\n if runs_ == 'all':\n runs_ = get_entity_vals(bids_root, entity_key='run')\n\n if not runs_:\n return [None]\n else:\n return runs_\n\n\ndef get_subjects():\n if subjects_list == 'all':\n s = get_entity_vals(bids_root, entity_key='sub')\n else:\n s = subjects_list\n\n subjects = set(s) - set(exclude_subjects)\n # Drop empty-room subject.\n subjects = subjects - set(['emptyroom'])\n\n return list(subjects)\n\n\ndef get_task():\n if not task:\n tasks = get_entity_vals(bids_root, entity_key='task')\n if not tasks:\n return None\n else:\n return tasks[0]\n else:\n return task\n\n\ndef get_kind():\n # Content of ch_types should be sanitized already, so we don't need any\n # extra sanity checks here.\n if ch_types == ['eeg']:\n return 'eeg'\n else:\n return 'meg'\n\n\ndef get_reject():\n reject_ = reject.copy() # Avoid clash with global variable.\n kind = get_kind()\n\n if kind == 'eeg':\n ch_types_to_remove = ('mag', 'grad')\n else:\n ch_types_to_remove = ('eeg',)\n\n for ch_type in ch_types_to_remove:\n try:\n del reject_[ch_type]\n except KeyError:\n pass\n return reject_\n\n\ndef get_fs_subjects_dir():\n if not subjects_dir:\n return os.path.join(bids_root, 'derivatives', 'freesurfer', 'subjects')\n else:\n return subjects_dir\n\n\ndef get_subject_path(subject, session, kind):\n subject_path = f'sub-{subject}'\n if session is not None:\n subject_path = os.path.join(subject_path, f'ses-{session}')\n subject_path = os.path.join(subject_path, kind)\n return subject_path\n\n\ndef get_subject_deriv_path(subject, session, kind):\n subject_path = get_subject_path(subject=subject, session=session,\n kind=kind)\n deriv_path = os.path.join(deriv_root, subject_path)\n return deriv_path\n\n\ndef gen_log_message(message, step=None, subject=None, session=None, run=None):\n if subject is not None:\n subject = f'sub-{subject}'\n if session is not None:\n session = f'ses-{session}'\n if run is not None:\n run = f'run-{run}'\n\n prefix = ', '.join([item for item in [subject, session, run]\n if item is not None])\n if prefix:\n prefix = f'[{prefix}]'\n\n if step is not None:\n prefix = f'[Step-{step:02}]{prefix}'\n\n return prefix + ' ' + message\n\n\ndef failsafe_run(on_error):\n def failsafe_run_decorator(func):\n @functools.wraps(func) # Preserve \"identity\" of original function\n def wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except Exception as e:\n message = 'A critical error occurred.'\n message = gen_log_message(message=message)\n\n if on_error == 'abort':\n logger.critical(message)\n raise(e)\n else:\n message = f'{message} The error message was:\\n{str(e)}'\n logger.critical(message)\n return wrapper\n return failsafe_run_decorator\n", "path": "config.py"}], "after_files": [{"content": "\"\"\"Set the configuration parameters for the study.\n\nYou need to define an environment variable `BIDS_ROOT` to point to the root\nof your BIDS dataset to be analyzed.\n\n\"\"\"\nimport importlib\nimport functools\nimport os\nfrom collections import defaultdict\nimport copy\nimport coloredlogs\nimport logging\n\nimport numpy as np\nimport mne\nfrom mne_bids.utils import get_entity_vals\n\n# Name, version, and hosting location of the pipeline\nPIPELINE_NAME = 'mne-study-template'\nVERSION = '0.1.dev0'\nCODE_URL = 'https://github.com/mne-tools/mne-study-template'\n\n\n# ``study_name`` : str\n# Specify the name of your study. It will be used to populate filenames for\n# saving the analysis results.\n#\n# Example\n# ~~~~~~~\n# >>> study_name = 'my-study'\n\nstudy_name = ''\n\n# ``bids_root`` : str or None\n# Speficy the BIDS root directory. Pass an empty string or ```None`` to use\n# the value specified in the ``BIDS_ROOT`` environment variable instead.\n# Raises an exception if the BIDS root has not been specified.\n#\n# Example\n# ~~~~~~~\n# >>> bids_root = '/path/to/your/bids_root' # Use this to specify a path here.\n# or\n# >>> bids_root = None # Make use of the ``BIDS_ROOT`` environment variable.\n\nbids_root = None\n\n# ``subjects_dir`` : str or None\n# Path to the directory that contains the MRI data files and their\n# derivativesfor all subjects. Specifically, the ``subjects_dir`` is the\n# $SUBJECTS_DIR used by the Freesurfer software. If ``None``, will use\n# ``'bids_root/derivatives/freesurfer/subjects'``.\n\nsubjects_dir = None\n\n# ``daysback`` : int\n# If not None apply a time shift to dates to adjust for limitateions\n# of fif files\n\ndaysback = None\n\n# ``interactive`` : boolean\n# If True, the scripts will provide some interactive elements, such as\n# figures. If running the scripts from a notebook or Spyder,\n# run %matplotlib qt in the command line to open the figures in a separate\n# window.\n\ninteractive = False\n\n# ``crop`` : tuple or None\n# If tuple, (tmin, tmax) to crop the raw data\n# If None (default), do not crop.\ncrop = None\n\n# BIDS params\n# see: bids-specification.rtfd.io/en/latest/99-appendices/04-entity-table.html\n\n# ``sessions`` : iterable or 'all'\n# The sessions to process.\nsessions = 'all'\n\n# ``task`` : str\n# The task to process.\ntask = ''\n\n# ``runs`` : iterable or 'all'\n# The runs to process.\nruns = 'all'\n\nacq = None\n\nproc = None\n\nrec = None\n\nspace = None\n\n# ``subjects_list`` : 'all' | list of str\n# Subjects to analyze. If ``'all``, include all subjects. To only\n# include a subset of subjects, pass a list of their identifiers. Even\n# if you plan on analyzing only a single subject, pass their identifier\n# as a list.\n#\n# Please note that if you intend to EXCLUDE only a few subjects, you\n# should consider setting ``subjects_list = 'all'`` and adding the\n# identifiers of the excluded subjects to ``exclude_subjects`` (see next\n# section).\n#\n# Example\n# ~~~~~~~\n# >>> subjects_list = 'all' # Include all subjects.\n# >>> subjects_list = ['05'] # Only include subject 05.\n# >>> subjects_list = ['01', '02'] # Only include subjects 01 and 02.\n\nsubjects_list = 'all'\n\n# ``exclude_subjects`` : list of str\n# Specify subjects to exclude from analysis. The MEG empty-room mock-subject\n# is automatically excluded from regular analysis.\n#\n# Good Practice / Advice\n# ~~~~~~~~~~~~~~~~~~~~~~\n# Keep track of the criteria leading you to exclude\n# a participant (e.g. too many movements, missing blocks, aborted experiment,\n# did not understand the instructions, etc, ...)\n# The ``emptyroom`` subject will be excluded automatically.\n\nexclude_subjects = []\n\n# ``ch_types`` : list of st\n# The list of channel types to consider.\n#\n# Example\n# ~~~~~~~\n# >>> ch_types = ['meg', 'eeg'] # to use MEG and EEG channels\n# or\n# >>> ch_types = ['meg'] # to use only MEG\n# or\n# >>> ch_types = ['grad'] # to use only gradiometer MEG channels\n\n# Note: If `kind` is 'eeg', EEG ch_types will be used regardless of whether\n# specified here or not\nch_types = []\n\n###############################################################################\n# DEFINE ADDITIONAL CHANNELS\n# --------------------------\n# needed for 01-import_and_maxfilter.py\n\n# ``rename_channels`` : dict rename channels\n# Here you name or replace extra channels that were recorded, for instance\n# EOG, ECG.\n#\n# Example\n# ~~~~~~~\n# Here rename EEG061 to EOG061, EEG062 to EOG062, EEG063 to ECG063:\n# >>> rename_channels = {'EEG061': 'EOG061', 'EEG062': 'EOG062',\n# 'EEG063': 'ECG063'}\n\n# XXX should be done automatically from BIDS ?\nrename_channels = None\n\n# ``set_channel_types``: dict\n# Here you define types of channels to pick later.\n#\n# Example\n# ~~~~~~~\n# >>> set_channel_types = {'EEG061': 'eog', 'EEG062': 'eog',\n# 'EEG063': 'ecg', 'EEG064': 'misc'}\n\n# XXX should not be necessary\nset_channel_types = None\n\n###############################################################################\n# MAXWELL FILTER PARAMETERS\n# -------------------------\n# done in 01-import_and_maxfilter.py\n#\n# Note: For any of this to work, you must set ``mf_ctc_fname`` and\n# ``mf_cal_fname`` above.\n#\n# \"Bad\", i.e. flat and overly noisy channels, can be automatically detected\n# using a procedure inspired by the commercial MaxFilter by Elekta. First,\n# a copy of the data is low-pass filtered at 40 Hz. Then, channels with\n# unusually low variability are flagged as \"flat\", while channels with\n# excessively high variability are flagged as \"noisy\". Flat and noisy channels\n# are marked as \"bad\" and excluded from subsequent analysis. See\n# :func:`mne.preprocssessing.find_bad_channels_maxwell` for more information\n# on this procedure. The list of bad channels detected through this procedure\n# will be merged with the list of bad channels already present in the dataset,\n# if any.\n#\n# ``find_flat_channels_meg`` : bool\n# Auto-detect \"flat\" channels and mark them as bad.\n#\n# ``find_noisy_channels_meg`` : bool\n# Auto-detect \"noisy\" channels and mark them as bad.\n\nfind_flat_channels_meg = False\nfind_noisy_channels_meg = False\n\n# ``use_maxwell_filter`` : bool\n# Use or not maxwell filter to preprocess the data.\n#\n# Warning\n# ~~~~~~~\n# If the data were recorded with internal active compensation (MaxShield),\n# they need to be run through Maxwell filter to avoid distortions.\n# Bad channels need to be set through BIDS channels.tsv and / or via the\n# ``find_flat_channels_meg`` and ``find_noisy_channels_meg`` options above\n# before applying Maxwell filter.\n\nuse_maxwell_filter = False\n\n# There are two kinds of maxfiltering: SSS and tSSS\n# [SSS = signal space separation ; tSSS = temporal signal space separation]\n# (Taulu et al, 2004): http://cds.cern.ch/record/709081/files/0401166.pdf\n#\n# ``mf_st_duration`` : float | None\n# If not None, apply spatiotemporal SSS (tSSS) with specified buffer\n# duration (in seconds). MaxFilter\u2122's default is 10.0 seconds in v2.2.\n# Spatiotemporal SSS acts as implicitly as a high-pass filter where the\n# cut-off frequency is 1/st_dur Hz. For this (and other) reasons, longer\n# buffers are generally better as long as your system can handle the\n# higher memory usage. To ensure that each window is processed\n# identically, choose a buffer length that divides evenly into your data.\n# Any data at the trailing edge that doesn't fit evenly into a whole\n# buffer window will be lumped into the previous buffer.\n#\n# Good Practice / Advice\n# ~~~~~~~~~~~~~~~~~~~~~~\n# If you are interested in low frequency activity (<0.1Hz), avoid using tSSS\n# and set mf_st_duration to None\n#\n# If you are interested in low frequency above 0.1 Hz, you can use the\n# default mf_st_duration to 10 s meaning it acts like a 0.1 Hz highpass filter.\n#\n# Example\n# ~~~~~~~\n# >>> mf_st_duration = None\n# or\n# >>> mf_st_duration = 10. # to apply tSSS with 0.1Hz highpass filter.\n\nmf_st_duration = None\n\n# ``mf_head_origin`` : array-like, shape (3,) | 'auto'\n# Origin of internal and external multipolar moment space in meters.\n# If 'auto', it will be estimated from headshape points.\n# If automatic fitting fails (e.g., due to having too few digitization\n# points), consider separately calling the fitting function with different\n# options or specifying the origin manually.\n#\n# Example\n# ~~~~~~~\n# >>> mf_head_origin = 'auto'\n\nmf_head_origin = 'auto'\n\n# ``cross talk`` : str\n# Path to the cross talk file\n#\n#\n# ``calibration`` : str\n# Path to the calibration file.\n#\n#\n# These 2 files should be downloaded and made available for running\n# maxwell filtering.\n#\n# Example\n# ~~~~~~~\n# >>> cal_files_path = os.path.join(study_path, 'SSS')\n# >>> mf_ctc_fname = os.path.join(cal_files_path, 'ct_sparse_mgh.fif')\n# >>> mf_cal_fname = os.path.join(cal_files_path, 'sss_cal_mgh.dat')\n#\n# Warning\n# ~~~~~~~\n# These 2 files are site and machine specific files that provide information\n# about the environmental noise. For practical purposes, place them in your\n# study folder.\n#\n# At NeuroSpin: ct_sparse and sss_call are on the meg_tmp server\n\n# cal_files_path = os.path.join(study_path, 'SSS')\n# mf_ctc_fname = os.path.join(cal_files_path, 'ct_sparse_mgh.fif')\n# mf_cal_fname = os.path.join(cal_files_path, 'sss_cal_mgh.dat')\n\nmf_ctc_fname = ''\nmf_cal_fname = ''\n\n# Despite all possible care to avoid movements in the MEG, the participant\n# will likely slowly drift down from the Dewar or slightly shift the head\n# around in the course of the recording session. Hence, to take this into\n# account, we are realigning all data to a single position. For this, you need\n# to define a reference run (typically the one in the middle of\n# the recording session).\n#\n# ``mf_reference_run`` : int\n# Which run to take as the reference for adjusting the head position of all\n# runs.\n#\n# Example\n# ~~~~~~~\n# >>> mf_reference_run = 0 # to use the first run\n\nmf_reference_run = 0\n\n###############################################################################\n# FREQUENCY FILTERING\n# -------------------\n# done in 02-frequency_filter.py\n\n# Good Practice / Advice\n# ~~~~~~~~~~~~~~~~~~~~~~\n# It is typically better to set your filtering properties on the raw data so\n# as to avoid what we call border (or edge) effects.\n#\n# If you use this pipeline for evoked responses, you could consider\n# a low-pass filter cut-off of h_freq = 40 Hz\n# and possibly a high-pass filter cut-off of l_freq = 1 Hz\n# so you would preserve only the power in the 1Hz to 40 Hz band.\n# Note that highpass filtering is not necessarily recommended as it can\n# distort waveforms of evoked components, or simply wash out any low\n# frequency that can may contain brain signal. It can also act as\n# a replacement for baseline correction in Epochs. See below.\n#\n# If you use this pipeline for time-frequency analysis, a default filtering\n# coult be a high-pass filter cut-off of l_freq = 1 Hz\n# a low-pass filter cut-off of h_freq = 120 Hz\n# so you would preserve only the power in the 1Hz to 120 Hz band.\n#\n# If you need more fancy analysis, you are already likely past this kind\n# of tips! :)\n\n\n# ``l_freq`` : float\n# The low-frequency cut-off in the highpass filtering step.\n# Keep it None if no highpass filtering should be applied.\n\nl_freq = 1.\n\n# ``h_freq`` : float\n# The high-frequency cut-off in the lowpass filtering step.\n# Keep it None if no lowpass filtering should be applied.\n\nh_freq = 40.\n\n###############################################################################\n# RESAMPLING\n# ----------\n#\n# Good Practice / Advice\n# ~~~~~~~~~~~~~~~~~~~~~~\n# If you have acquired data with a very high sampling frequency (e.g. 2 kHz)\n# you will likely want to downsample to lighten up the size of the files you\n# are working with (pragmatics)\n# If you are interested in typical analysis (up to 120 Hz) you can typically\n# resample your data down to 500 Hz without preventing reliable time-frequency\n# exploration of your data\n#\n# ``resample_sfreq`` : float\n# Specifies at which sampling frequency the data should be resampled.\n# If None then no resampling will be done.\n#\n# Example\n# ~~~~~~~\n# >>> resample_sfreq = None # no resampling\n# or\n# >>> resample_sfreq = 500 # resample to 500Hz\n\nresample_sfreq = None\n\n# ``decim`` : int\n# Says how much to decimate data at the epochs level.\n# It is typically an alternative to the `resample_sfreq` parameter that\n# can be used for resampling raw data. 1 means no decimation.\n#\n# Good Practice / Advice\n# ~~~~~~~~~~~~~~~~~~~~~~\n# Decimation requires to lowpass filtered the data to avoid aliasing.\n# Note that using decimation is much faster than resampling.\n#\n# Example\n# ~~~~~~~\n# >>> decim = 1 # no decimation\n# or\n# >>> decim = 4 # decimate by 4 ie devide sampling frequency by 4\n\ndecim = 1\n\n###############################################################################\n# AUTOMATIC REJECTION OF ARTIFACTS\n# --------------------------------\n#\n# Good Practice / Advice\n# ~~~~~~~~~~~~~~~~~~~~~~\n# Have a look at your raw data and train yourself to detect a blink, a heart\n# beat and an eye movement.\n# You can do a quick average of blink data and check what the amplitude looks\n# like.\n#\n# ``reject`` : dict | None\n# The rejection limits to make some epochs as bads.\n# This allows to remove strong transient artifacts.\n# If you want to reject and retrieve blinks later, e.g. with ICA,\n# don't specify a value for the eog channel (see examples below).\n# Make sure to include values for eeg if you have EEG data\n#\n# Note\n# ~~~~\n# These numbers tend to vary between subjects.. You might want to consider\n# using the autoreject method by Jas et al. 2018.\n# See https://autoreject.github.io\n#\n# Example\n# ~~~~~~~\n# >>> reject = {'grad': 4000e-13, 'mag': 4e-12, 'eog': 150e-6}\n# >>> reject = {'grad': 4000e-13, 'mag': 4e-12, 'eeg': 200e-6}\n# >>> reject = None\n\nreject = {'grad': 4000e-13, 'mag': 4e-12, 'eeg': 150e-6}\n\n\n###############################################################################\n# RENAME EXPERIMENTAL EVENTS\n# --------------------------\n#\n# ``rename_events`` : dict\n# A dictionary specifying which events in the BIDS dataset to rename upon\n# loading, and before processing begins.\n#\n# Pass an empty dictionary to not perform any renaming.\n#\n# Example\n# ~~~~~~~\n# Rename ``audio_left`` in the BIDS dataset to ``audio/left`` in the pipeline:\n# >>> rename_events = {'audio_left': 'audio/left'}\n\nrename_events = dict()\n\n\n###############################################################################\n# EPOCHING\n# --------\n#\n# ``tmin``: float\n# A float in seconds that gives the start time before event of an epoch.\n#\n# Example\n# ~~~~~~~\n# >>> tmin = -0.2 # take 200ms before event onset.\n\ntmin = -0.2\n\n# ``tmax``: float\n# A float in seconds that gives the end time before event of an epoch.\n#\n# Example\n# ~~~~~~~\n# >>> tmax = 0.5 # take 500ms after event onset.\n\ntmax = 0.5\n\n# ``baseline`` : tuple\n# It specifies how to baseline the epochs; if None, no baseline is applied.\n#\n# Example\n# ~~~~~~~\n# >>> baseline = (None, 0) # baseline between tmin and 0\n\nbaseline = (None, 0)\n\n# `conditions`` : list\n# The condition names to consider. This can either be the keys of\n# ``event_id``, or \u2013 if event names were specified with ``/`` for\n# grouping \u2013 the name of the *grouped* condition (i.e., the\n# condition name before or after that ``/`` that is shared between the\n# respective conditions you wish to group). See the \"Subselecting epochs\"\n# tutorial for more information: https://mne.tools/stable/auto_tutorials/epochs/plot_10_epochs_overview.html#subselecting-epochs # noqa: 501\n#\n# Example\n# ~~~~~~~\n# >>> conditions = ['auditory/left', 'visual/left']\n# or\n# >>> conditions = ['auditory/left', 'auditory/right']\n# or\n# >>> conditions = ['auditory']\n# or\n# >>> conditions = ['auditory', 'visual']\n# or\n# >>> conditions = ['left', 'right']\n\nconditions = ['left', 'right']\n\n###############################################################################\n# ARTIFACT REMOVAL\n# ----------------\n#\n# You can choose between ICA and SSP to remove eye and heart artifacts.\n# SSP: https://mne-tools.github.io/stable/auto_tutorials/plot_artifacts_correction_ssp.html?highlight=ssp # noqa\n# ICA: https://mne-tools.github.io/stable/auto_tutorials/plot_artifacts_correction_ica.html?highlight=ica # noqa\n# if you choose ICA, run scripts 5a and 6a\n# if you choose SSP, run scripts 5b and 6b\n#\n# Currently you cannot use both.\n\n# SSP\n# ~~~\n#\n# ``use_ssp`` : bool\n# If True ICA should be used or not.\n\nuse_ssp = True\n\n# ICA\n# ~~~\n# ``use_ica`` : bool\n# If True ICA should be used or not.\n\nuse_ica = False\n\n# ``ica_algorithm`` : 'picard' | 'fastica' | 'extended_infomax'\n# The ICA algorithm to use.\n\nica_algorithm = 'picard'\n\n# ``ica_max_iterations`` : int\n# Maximum number of iterations to decompose the data into independent\n# components. A low number means to finish earlier, but the consequence is\n# that the algorithm may not have finished converging. To ensure\n# convergence, pick a high number here (e.g. 3000); yet the algorithm will\n# terminate as soon as it determines that is has successfully converged, and\n# not necessarily exhaust the maximum number of iterations. Note that the\n# default of 200 seems to be sufficient for Picard in many datasets, because\n# it converges quicker than the other algorithms; but e.g. for FastICA, this\n# limit may be too low to achieve convergence.\n\nica_max_iterations = 200\n\n# ``ica_decim`` : None | None\n# The decimation parameter to compute ICA. If 5 it means\n# that 1 every 5 sample is used by ICA solver. The higher the faster\n# it is to run but the less data you have to compute a good ICA. Set to\n# ``1`` ``None`` to not perform an decimation.\n\nica_decim = None\n\n\n# ``default_reject_comps_factory`` : callable\n# A factory function that returns a default rejection component dictionary:\n# A dictionary that specifies the indices of the ICA components to reject\n# for each subject. For example you can use:\n# rejcomps_man['subject01'] = dict(eeg=[12], meg=[7])\n\ndef default_reject_comps_factory():\n \"\"\"Return the default rejection component dictionary.\"\"\"\n return dict(meg=[], eeg=[])\n\n\nrejcomps_man = defaultdict(default_reject_comps_factory)\n\n# ``ica_ctps_ecg_threshold``: float\n# The threshold parameter passed to `find_bads_ecg` method.\n\nica_ctps_ecg_threshold = 0.1\n\n###############################################################################\n# DECODING\n# --------\n#\n# ``decoding_conditions`` : list\n# List of conditions to be classified.\n#\n# Example\n# ~~~~~~~\n# >>> decoding_conditions = [] # don't do decoding\n# or\n# >>> decoding_conditions = [('auditory', 'visual'), ('left', 'right')]\n\ndecoding_conditions = []\n# decoding_conditions = [('left', 'right')]\n\n###############################################################################\n# GROUP AVERAGE SENSORS\n# ---------------------\n#\n# ``interpolate_bads_grand_average`` : bool\n# Interpolate bad sensors in each dataset before calculating the grand\n# average. This parameter is passed to the `mne.grand_average` function via\n# the keyword argument `interpolate_bads`. It requires to have channel\n# locations set.\n#\n# Example\n# ~~~~~~~\n# >>> interpolate_bads_grand_average = True\n\ninterpolate_bads_grand_average = True\n\n# ``decoding_metric`` : str\n# The metric to use for cross-validation. It can be 'roc_auc' or 'accuracy'\n# or any metric supported by scikit-learn.\n\ndecoding_metric = 'roc_auc'\n\n# ``decoding_n_splits`` : int\n# The number of folds (a.k.a. splits) to use in the cross-validation.\n\ndecoding_n_splits = 5\n\n###############################################################################\n# TIME-FREQUENCY\n# --------------\n#\n# ``time_frequency_conditions`` : list\n# The conditions to compute time-frequency decomposition on.\n\n# time_frequency_conditions = ['left', 'right']\ntime_frequency_conditions = []\n\n###############################################################################\n# SOURCE SPACE PARAMETERS\n# -----------------------\n#\n\n# ``spacing`` : str\n# The spacing to use. Can be ``'ico#'`` for a recursively subdivided\n# icosahedron, ``'oct#'`` for a recursively subdivided octahedron,\n# ``'all'`` for all points, or an integer to use appoximate\n# distance-based spacing (in mm).\n\nspacing = 'oct6'\n\n# ``mindist`` : float\n# Exclude points closer than this distance (mm) to the bounding surface.\n\nmindist = 5\n\n# ``loose`` : float in [0, 1] | 'auto'\n# Value that weights the source variances of the dipole components\n# that are parallel (tangential) to the cortical surface. If loose\n# is 0 then the solution is computed with fixed orientation,\n# and fixed must be True or \"auto\".\n# If loose is 1, it corresponds to free orientations.\n# The default value ('auto') is set to 0.2 for surface-oriented source\n# space and set to 1.0 for volumetric, discrete, or mixed source spaces,\n# unless ``fixed is True`` in which case the value 0. is used.\n\nloose = 0.2\n\n# ``depth`` : None | float | dict\n# If float (default 0.8), it acts as the depth weighting exponent (``exp``)\n# to use (must be between 0 and 1). None is equivalent to 0, meaning no\n# depth weighting is performed. Can also be a `dict` containing additional\n# keyword arguments to pass to :func:`mne.forward.compute_depth_prior`\n# (see docstring for details and defaults).\n\ndepth = 0.8\n\n# inverse_method : \"MNE\" | \"dSPM\" | \"sLORETA\" | \"eLORETA\"\n# Use minimum norm, dSPM (default), sLORETA, or eLORETA.\n\ninverse_method = 'dSPM'\n\n# noise_cov : (None, 0) | \u2018emptyroom\u2019\n# Specify how to estimate the noise covariance matrix, which is used in\n# inverse modeling.\n#\n# If a tuple, it takes the form ``(tmin, tmax)`` with the time specified in\n# seconds. If the first value of the tuple is ``None``, the considered\n# period starts at the beginning of the epoch. If the second value of the\n# tuple is ``None``, the considered period ends at the end of the epoch.\n# The default, ``(None, 0)``, includes the entire period before the event,\n# which is typically the pre-stimulus period.\n#\n# If ``emptyroom``, the noise covariance matrix will be estimated from an\n# empty-room MEG recording. The empty-room recording will be automatically\n# selected based on recording date and time.\n#\n# Please note that when processing data that contains EEG channels, the noise\n# covariance can ONLY be estimated from the pre-stimulus period.\n#\n# Example\n# ~~~~~~~\n# Use the period from start of the epoch until 100 ms before the experimental\n# event:\n# >>> noise_cov = (None, -0.1)\n#\n# Use the time period from the experimental event until the end of the epoch:\n# >>> noise_cov = (0, None)\n#\n# Use an empty-room recording:\n# >>> noise_cov = 'emptyroom'\n\nnoise_cov = (None, 0)\n\n# smooth : int | None\n# Number of iterations for the smoothing of the surface data.\n# If None, smooth is automatically defined to fill the surface\n# with non-zero values. The default is spacing=None.\n\nsmooth = 10\n\nfsaverage_vertices = [np.arange(10242), np.arange(10242)]\n\n###############################################################################\n# ADVANCED\n# --------\n#\n# ``l_trans_bandwidth`` : float | 'auto'\n# A float that specifies the transition bandwidth of the\n# highpass filter. By default it's `'auto'` and uses default mne\n# parameters.\n\nl_trans_bandwidth = 'auto'\n\n# ``h_trans_bandwidth`` : float | 'auto'\n# A float that specifies the transition bandwidth of the\n# lowpass filter. By default it's `'auto'` and uses default mne\n# parameters.\n\nh_trans_bandwidth = 'auto'\n\n# ``N_JOBS`` : int\n# An integer that specifies how many subjects you want to run in parallel.\n\nN_JOBS = 1\n\n# ``random_state`` : None | int | np.random.RandomState\n# To specify the random generator state. This allows to have\n# the results more reproducible between machines and systems.\n# Some methods like ICA need random values for initialisation.\n\nrandom_state = 42\n\n# ``shortest_event`` : int\n# Minimum number of samples an event must last. If the\n# duration is less than this an exception will be raised.\n\nshortest_event = 1\n\n# ``allow_maxshield`` : bool\n# To import data that was recorded with Maxshield on before running\n# maxfilter set this to True.\n\nallow_maxshield = False\n\nlog_level = 'info'\nmne_log_level = 'error'\n\n# ``on_abort`` : 'continue' | 'abort'\n# Whether to abort processing as soon as an error occurs, or whether to\n# continue with all other processing steps for as long as possible.\n\non_error = 'abort'\n\n\n###############################################################################\n# #\n# CUSTOM CONFIGURATION ENDS HERE #\n# #\n###############################################################################\n\n\n###############################################################################\n# Logger\n# ------\n\nlogger = logging.getLogger('mne-study-template')\n\nlog_fmt = '%(asctime)s %(message)s'\nlog_date_fmt = coloredlogs.DEFAULT_DATE_FORMAT = '%H:%M:%S'\ncoloredlogs.install(level=log_level, logger=logger, fmt=log_fmt,\n date_fmt=log_date_fmt)\n\nmne.set_log_level(verbose=mne_log_level.upper())\n\n###############################################################################\n# Retrieve custom configuration options\n# -------------------------------------\n#\n# For testing a specific dataset, create a Python file with a name of your\n# liking (e.g., ``mydataset-template-config.py``), and set an environment\n# variable ``MNE_BIDS_STUDY_CONFIG`` to that file.\n#\n# Example\n# ~~~~~~~\n# ``export MNE_BIDS_STUDY_CONFIG=/data/mystudy/mydataset-template-config.py``\n\nif \"MNE_BIDS_STUDY_CONFIG\" in os.environ:\n cfg_path = os.environ['MNE_BIDS_STUDY_CONFIG']\n\n if os.path.exists(cfg_path):\n msg = f'Using custom configuration: {cfg_path}'\n logger.info(msg)\n else:\n msg = ('The custom configuration file specified in the '\n 'MNE_BIDS_STUDY_CONFIG environment variable could not be '\n 'found: {cfg_path}'.format(cfg_path=cfg_path))\n raise ValueError(msg)\n\n # Import configuration from an arbitrary path without having to fiddle\n # with `sys.path`.\n spec = importlib.util.spec_from_file_location(name='custom_config',\n location=cfg_path)\n custom_cfg = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(custom_cfg)\n del spec, cfg_path\n\n new = None\n for val in dir(custom_cfg):\n if not val.startswith('__'):\n exec(\"new = custom_cfg.%s\" % val)\n logger.debug('Overwriting: %s -> %s' % (val, new))\n exec(\"%s = custom_cfg.%s\" % (val, val))\n\n\n# BIDS_ROOT environment variable takes precedence over any configuration file\n# values.\nif os.getenv('BIDS_ROOT') is not None:\n bids_root = os.getenv('BIDS_ROOT')\n\n# If we don't have a bids_root until now, raise an exeception as we cannot\n# proceed.\nif not bids_root:\n msg = ('You need to specify `bids_root` in your configuration, or '\n 'define an environment variable `BIDS_ROOT` pointing to the '\n 'root folder of your BIDS dataset')\n raise ValueError(msg)\n\n\n###############################################################################\n# Derivates root\n# --------------\nderiv_root = os.path.join(bids_root, 'derivatives', PIPELINE_NAME)\n\n\n###############################################################################\n# CHECKS\n# ------\n\nif (use_maxwell_filter and\n len(set(ch_types).intersection(('meg', 'grad', 'mag'))) == 0):\n raise ValueError('Cannot use maxwell filter without MEG channels.')\n\nif use_ssp and use_ica:\n raise ValueError('Cannot use both SSP and ICA.')\n\nif use_ica and ica_algorithm not in ('picard', 'fastica', 'extended_infomax'):\n msg = (f\"Invalid ICA algorithm requested. Valid values for ica_algorithm \"\n f\"are: 'picard', 'fastica', and 'extended_infomax', but received \"\n f\"{ica_algorithm}.\")\n raise ValueError(msg)\n\nif not ch_types:\n msg = 'Please specify ch_types in your configuration.'\n raise ValueError(msg)\n\nif ch_types == ['eeg']:\n pass\nelif 'eeg' in ch_types and len(ch_types) > 1: # EEG + some other channel types\n msg = ('EEG data can only be analyzed separately from other channel '\n 'types. Please adjust `ch_types` in your configuration.')\n raise ValueError(msg)\nelif any([ch_type not in ('meg', 'mag', 'grad') for ch_type in ch_types]):\n msg = ('Invalid channel type passed. Please adjust `ch_types` in your '\n 'configuration.')\n raise ValueError(msg)\n\nif 'eeg' in ch_types:\n if use_ssp:\n msg = ('You requested SSP for EEG data via use_ssp=True. However, '\n 'this is not presently supported. Please use ICA instead by '\n 'setting use_ssp=False and use_ica=True.')\n raise ValueError(msg)\n if not use_ica:\n msg = ('You did not request ICA artifact correction for your data. '\n 'To turn it on, set use_ica=True.')\n logger.info(msg)\n\nif on_error not in ('continue', 'abort'):\n msg = (f\"on_error must be one of 'continue' or 'abort', but received \"\n f\"{on_error}.\")\n logger.info(msg)\n\nif isinstance(noise_cov, str) and noise_cov != 'emptyroom':\n msg = (f\"noise_cov must be a tuple or 'emptyroom', but received \"\n f\"{noise_cov}\")\n raise ValueError(msg)\n\nif noise_cov == 'emptyroom' and 'eeg' in ch_types:\n msg = ('You requested to process data that contains EEG channels. In this '\n 'case, noise covariance can only be estimated from the '\n 'experimental data, e.g., the pre-stimulus period. Please set '\n 'noise_cov to (tmin, tmax)')\n raise ValueError(msg)\n\n\n###############################################################################\n# Helper functions\n# ----------------\n\ndef get_sessions():\n sessions_ = copy.deepcopy(sessions) # Avoid clash with global variable.\n\n if sessions_ == 'all':\n sessions_ = get_entity_vals(bids_root, entity_key='ses')\n\n if not sessions_:\n return [None]\n else:\n return sessions_\n\n\ndef get_runs():\n runs_ = copy.deepcopy(runs) # Avoid clash with global variable.\n\n if runs_ == 'all':\n runs_ = get_entity_vals(bids_root, entity_key='run')\n\n if not runs_:\n return [None]\n else:\n return runs_\n\n\ndef get_subjects():\n if subjects_list == 'all':\n s = get_entity_vals(bids_root, entity_key='sub')\n else:\n s = subjects_list\n\n subjects = set(s) - set(exclude_subjects)\n # Drop empty-room subject.\n subjects = subjects - set(['emptyroom'])\n\n return list(subjects)\n\n\ndef get_task():\n if not task:\n tasks = get_entity_vals(bids_root, entity_key='task')\n if not tasks:\n return None\n else:\n return tasks[0]\n else:\n return task\n\n\ndef get_kind():\n # Content of ch_types should be sanitized already, so we don't need any\n # extra sanity checks here.\n if ch_types == ['eeg']:\n return 'eeg'\n else:\n return 'meg'\n\n\ndef get_reject():\n reject_ = reject.copy() # Avoid clash with global variable.\n kind = get_kind()\n\n if kind == 'eeg':\n ch_types_to_remove = ('mag', 'grad')\n else:\n ch_types_to_remove = ('eeg',)\n\n for ch_type in ch_types_to_remove:\n try:\n del reject_[ch_type]\n except KeyError:\n pass\n return reject_\n\n\ndef get_fs_subjects_dir():\n if not subjects_dir:\n return os.path.join(bids_root, 'derivatives', 'freesurfer', 'subjects')\n else:\n return subjects_dir\n\n\ndef get_subject_path(subject, session, kind):\n subject_path = f'sub-{subject}'\n if session is not None:\n subject_path = os.path.join(subject_path, f'ses-{session}')\n subject_path = os.path.join(subject_path, kind)\n return subject_path\n\n\ndef get_subject_deriv_path(subject, session, kind):\n subject_path = get_subject_path(subject=subject, session=session,\n kind=kind)\n deriv_path = os.path.join(deriv_root, subject_path)\n return deriv_path\n\n\ndef gen_log_message(message, step=None, subject=None, session=None, run=None):\n if subject is not None:\n subject = f'sub-{subject}'\n if session is not None:\n session = f'ses-{session}'\n if run is not None:\n run = f'run-{run}'\n\n prefix = ', '.join([item for item in [subject, session, run]\n if item is not None])\n if prefix:\n prefix = f'[{prefix}]'\n\n if step is not None:\n prefix = f'[Step-{step:02}]{prefix}'\n\n return prefix + ' ' + message\n\n\ndef failsafe_run(on_error):\n def failsafe_run_decorator(func):\n @functools.wraps(func) # Preserve \"identity\" of original function\n def wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except Exception as e:\n message = 'A critical error occurred.'\n message = gen_log_message(message=message)\n\n if on_error == 'abort':\n logger.critical(message)\n raise(e)\n else:\n message = f'{message} The error message was:\\n{str(e)}'\n logger.critical(message)\n return wrapper\n return failsafe_run_decorator\n", "path": "config.py"}]} |
gh_patches_debug_1580 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-3093 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
sorting in control panel
The items of the control panel are completely unsorted (should be sorted in alphabetical order (depending on the current language in Plone).

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Products/CMFPlone/PloneControlPanel.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from AccessControl import ClassSecurityInfo
3 from AccessControl.class_init import InitializeClass
4 from App.special_dtml import DTMLFile
5 from OFS.Folder import Folder
6 from OFS.PropertyManager import PropertyManager
7 from Products.CMFCore.ActionInformation import ActionInformation
8 from Products.CMFCore.ActionProviderBase import ActionProviderBase
9 from Products.CMFCore.Expression import Expression, createExprContext
10 from Products.CMFCore.permissions import ManagePortal, View
11 from Products.CMFCore.utils import _checkPermission
12 from Products.CMFCore.utils import getToolByName
13 from Products.CMFCore.utils import registerToolInterface
14 from Products.CMFCore.utils import UniqueObject
15 from Products.CMFPlone import PloneMessageFactory as _
16 from Products.CMFPlone.interfaces import IControlPanel
17 from Products.CMFPlone.PloneBaseTool import PloneBaseTool
18 from zope.component.hooks import getSite
19 from zope.i18n import translate
20 from zope.i18nmessageid import Message
21 from zope.interface import implementer
22
23 import six
24
25
26 class PloneConfiglet(ActionInformation):
27
28 def __init__(self, appId, **kwargs):
29 self.appId = appId
30 ActionInformation.__init__(self, **kwargs)
31
32 def getAppId(self):
33 return self.appId
34
35 def getDescription(self):
36 return self.description
37
38 def clone(self):
39 return self.__class__(**self.__dict__)
40
41 def getAction(self, ec):
42 res = ActionInformation.getAction(self, ec)
43 res['description'] = self.getDescription()
44 return res
45
46
47 @implementer(IControlPanel)
48 class PloneControlPanel(PloneBaseTool, UniqueObject,
49 Folder, ActionProviderBase, PropertyManager):
50 """Weave together the various sources of "actions" which
51 are apropos to the current user and context.
52 """
53
54 security = ClassSecurityInfo()
55
56 id = 'portal_controlpanel'
57 title = 'Control Panel'
58 toolicon = 'skins/plone_images/site_icon.png'
59 meta_type = 'Plone Control Panel Tool'
60 _actions_form = DTMLFile('www/editPloneConfiglets', globals())
61
62 manage_options = (ActionProviderBase.manage_options +
63 PropertyManager.manage_options)
64
65 group = dict(
66 member=[
67 ('Member', _(u'My Preferences')),
68 ],
69 site=[
70 ('plone-general', _(u'General')),
71 ('plone-content', _(u'Content')),
72 ('plone-users', _(u'Users')),
73 ('plone-security', _(u'Security')),
74 ('plone-advanced', _(u'Advanced')),
75 ('Plone', _(u'Plone Configuration')),
76 ('Products', _(u'Add-on Configuration')),
77 ]
78 )
79
80 def __init__(self, **kw):
81 if kw:
82 self.__dict__.update(**kw)
83
84 security.declareProtected(ManagePortal, 'registerConfiglets')
85
86 def registerConfiglets(self, configlets):
87 for conf in configlets:
88 self.registerConfiglet(**conf)
89
90 security.declareProtected(ManagePortal, 'getGroupIds')
91
92 def getGroupIds(self, category='site'):
93 groups = self.group.get(category, [])
94 return [g[0] for g in groups if g]
95
96 security.declareProtected(View, 'getGroups')
97
98 def getGroups(self, category='site'):
99 groups = self.group.get(category, [])
100 return [{'id': g[0], 'title': g[1]} for g in groups if g]
101
102 security.declarePrivate('listActions')
103
104 def listActions(self, info=None, object=None):
105 # This exists here to shut up a deprecation warning about old-style
106 # actions in CMFCore's ActionProviderBase. It was decided not to
107 # move configlets to be based on action tool categories for Plone 4
108 # (see PLIP #8804), but that (or an alternative) will have to happen
109 # before CMF 2.4 when support for old-style actions is removed.
110 return self._actions or ()
111
112 security.declarePublic('maySeeSomeConfiglets')
113
114 def maySeeSomeConfiglets(self):
115 groups = self.getGroups('site')
116
117 all = []
118 for group in groups:
119 all.extend(self.enumConfiglets(group=group['id']))
120 all = [item for item in all if item['visible']]
121 return len(all) != 0
122
123 security.declarePublic('enumConfiglets')
124
125 def enumConfiglets(self, group=None):
126 portal = getToolByName(self, 'portal_url').getPortalObject()
127 context = createExprContext(self, portal, self)
128 res = []
129 for a in self.listActions():
130 verified = 0
131 for permission in a.permissions:
132 if _checkPermission(permission, portal):
133 verified = 1
134 if verified and a.category == group and a.testCondition(context) \
135 and a.visible:
136 res.append(a.getAction(context))
137 # Translate the title for sorting
138 if getattr(self, 'REQUEST', None) is not None:
139 for a in res:
140 title = a['title']
141 if not isinstance(title, Message):
142 title = Message(title, domain='plone')
143 a['title'] = translate(title,
144 context=self.REQUEST)
145
146 def _id(v):
147 return v['id']
148 res.sort(key=_id)
149 return res
150
151 security.declareProtected(ManagePortal, 'unregisterConfiglet')
152
153 def unregisterConfiglet(self, id):
154 actids = [o.id for o in self.listActions()]
155 selection = [actids.index(a) for a in actids if a == id]
156 if not selection:
157 return
158 self.deleteActions(selection)
159
160 security.declareProtected(ManagePortal, 'unregisterApplication')
161
162 def unregisterApplication(self, appId):
163 acts = list(self.listActions())
164 selection = [acts.index(a) for a in acts if a.appId == appId]
165 if not selection:
166 return
167 self.deleteActions(selection)
168
169 def _extractAction(self, properties, index):
170 # Extract an ActionInformation from the funky form properties.
171 id = str(properties.get('id_%d' % index, ''))
172 name = str(properties.get('name_%d' % index, ''))
173 action = str(properties.get('action_%d' % index, ''))
174 condition = str(properties.get('condition_%d' % index, ''))
175 category = str(properties.get('category_%d' % index, ''))
176 visible = properties.get('visible_%d' % index, 0)
177 permissions = properties.get('permission_%d' % index, ())
178 appId = properties.get('appId_%d' % index, '')
179 description = properties.get('description_%d' % index, '')
180 icon_expr = properties.get('icon_expr_%d' % index, '')
181
182 if not name:
183 raise ValueError('A name is required.')
184
185 if action != '':
186 action = Expression(text=action)
187
188 if condition != '':
189 condition = Expression(text=condition)
190
191 if category == '':
192 category = 'object'
193
194 if not isinstance(visible, int):
195 try:
196 visible = int(visible)
197 except ValueError:
198 visible = 0
199
200 if isinstance(permissions, six.string_types):
201 permissions = (permissions, )
202
203 return PloneConfiglet(id=id,
204 title=name,
205 action=action,
206 condition=condition,
207 permissions=permissions,
208 category=category,
209 visible=visible,
210 appId=appId,
211 description=description,
212 icon_expr=icon_expr,
213 )
214
215 security.declareProtected(ManagePortal, 'addAction')
216
217 def addAction(self,
218 id,
219 name,
220 action,
221 condition='',
222 permission='',
223 category='Plone',
224 visible=1,
225 appId=None,
226 icon_expr='',
227 description='',
228 REQUEST=None,
229 ):
230 # Add an action to our list.
231 if not name:
232 raise ValueError('A name is required.')
233
234 a_expr = action and Expression(text=str(action)) or ''
235 c_expr = condition and Expression(text=str(condition)) or ''
236
237 if not isinstance(permission, tuple):
238 permission = permission and (str(permission), ) or ()
239
240 new_actions = self._cloneActions()
241
242 new_action = PloneConfiglet(id=str(id),
243 title=name,
244 action=a_expr,
245 condition=c_expr,
246 permissions=permission,
247 category=str(category),
248 visible=int(visible),
249 appId=appId,
250 description=description,
251 icon_expr=icon_expr,
252 )
253
254 new_actions.append(new_action)
255 self._actions = tuple(new_actions)
256
257 if REQUEST is not None:
258 return self.manage_editActionsForm(
259 REQUEST, manage_tabs_message='Added.')
260
261 security.declareProtected(ManagePortal, 'registerConfiglet')
262 registerConfiglet = addAction
263
264 security.declareProtected(ManagePortal, 'manage_editActionsForm')
265
266 def manage_editActionsForm(self, REQUEST, manage_tabs_message=None):
267 """ Show the 'Actions' management tab.
268 """
269 actions = []
270
271 for a in self.listActions():
272
273 a1 = {}
274 a1['id'] = a.getId()
275 a1['name'] = a.Title()
276 p = a.getPermissions()
277 if p:
278 a1['permission'] = p[0]
279 else:
280 a1['permission'] = ''
281 a1['category'] = a.getCategory() or 'object'
282 a1['visible'] = a.getVisibility()
283 a1['action'] = a.getActionExpression()
284 a1['condition'] = a.getCondition()
285 a1['appId'] = a.getAppId()
286 a1['description'] = a.getDescription()
287 a1['icon_expr'] = a.getIconExpression()
288 actions.append(a1)
289
290 # possible_permissions is in OFS.role.RoleManager.
291 pp = self.possible_permissions()
292 return self._actions_form(
293 self,
294 REQUEST,
295 actions=actions,
296 possible_permissions=pp,
297 management_view='Actions',
298 manage_tabs_message=manage_tabs_message,
299 )
300
301 @property
302 def site_url(self):
303 """Return the absolute URL to the current site, which is likely not
304 necessarily the portal root.
305 Used by ``portlet_prefs`` to construct the URL to
306 ``@@overview-controlpanel``.
307 """
308 return getSite().absolute_url()
309
310
311 InitializeClass(PloneControlPanel)
312 registerToolInterface('portal_controlpanel', IControlPanel)
313
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Products/CMFPlone/PloneControlPanel.py b/Products/CMFPlone/PloneControlPanel.py
--- a/Products/CMFPlone/PloneControlPanel.py
+++ b/Products/CMFPlone/PloneControlPanel.py
@@ -143,9 +143,10 @@
a['title'] = translate(title,
context=self.REQUEST)
- def _id(v):
- return v['id']
- res.sort(key=_id)
+ def _title(v):
+ return v['title']
+
+ res.sort(key=_title)
return res
security.declareProtected(ManagePortal, 'unregisterConfiglet')
| {"golden_diff": "diff --git a/Products/CMFPlone/PloneControlPanel.py b/Products/CMFPlone/PloneControlPanel.py\n--- a/Products/CMFPlone/PloneControlPanel.py\n+++ b/Products/CMFPlone/PloneControlPanel.py\n@@ -143,9 +143,10 @@\n a['title'] = translate(title,\n context=self.REQUEST)\n \n- def _id(v):\n- return v['id']\n- res.sort(key=_id)\n+ def _title(v):\n+ return v['title']\n+\n+ res.sort(key=_title)\n return res\n \n security.declareProtected(ManagePortal, 'unregisterConfiglet')\n", "issue": "sorting in control panel\nThe items of the control panel are completely unsorted (should be sorted in alphabetical order (depending on the current language in Plone).\n\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom AccessControl import ClassSecurityInfo\nfrom AccessControl.class_init import InitializeClass\nfrom App.special_dtml import DTMLFile\nfrom OFS.Folder import Folder\nfrom OFS.PropertyManager import PropertyManager\nfrom Products.CMFCore.ActionInformation import ActionInformation\nfrom Products.CMFCore.ActionProviderBase import ActionProviderBase\nfrom Products.CMFCore.Expression import Expression, createExprContext\nfrom Products.CMFCore.permissions import ManagePortal, View\nfrom Products.CMFCore.utils import _checkPermission\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFCore.utils import registerToolInterface\nfrom Products.CMFCore.utils import UniqueObject\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom Products.CMFPlone.interfaces import IControlPanel\nfrom Products.CMFPlone.PloneBaseTool import PloneBaseTool\nfrom zope.component.hooks import getSite\nfrom zope.i18n import translate\nfrom zope.i18nmessageid import Message\nfrom zope.interface import implementer\n\nimport six\n\n\nclass PloneConfiglet(ActionInformation):\n\n def __init__(self, appId, **kwargs):\n self.appId = appId\n ActionInformation.__init__(self, **kwargs)\n\n def getAppId(self):\n return self.appId\n\n def getDescription(self):\n return self.description\n\n def clone(self):\n return self.__class__(**self.__dict__)\n\n def getAction(self, ec):\n res = ActionInformation.getAction(self, ec)\n res['description'] = self.getDescription()\n return res\n\n\n@implementer(IControlPanel)\nclass PloneControlPanel(PloneBaseTool, UniqueObject,\n Folder, ActionProviderBase, PropertyManager):\n \"\"\"Weave together the various sources of \"actions\" which\n are apropos to the current user and context.\n \"\"\"\n\n security = ClassSecurityInfo()\n\n id = 'portal_controlpanel'\n title = 'Control Panel'\n toolicon = 'skins/plone_images/site_icon.png'\n meta_type = 'Plone Control Panel Tool'\n _actions_form = DTMLFile('www/editPloneConfiglets', globals())\n\n manage_options = (ActionProviderBase.manage_options +\n PropertyManager.manage_options)\n\n group = dict(\n member=[\n ('Member', _(u'My Preferences')),\n ],\n site=[\n ('plone-general', _(u'General')),\n ('plone-content', _(u'Content')),\n ('plone-users', _(u'Users')),\n ('plone-security', _(u'Security')),\n ('plone-advanced', _(u'Advanced')),\n ('Plone', _(u'Plone Configuration')),\n ('Products', _(u'Add-on Configuration')),\n ]\n )\n\n def __init__(self, **kw):\n if kw:\n self.__dict__.update(**kw)\n\n security.declareProtected(ManagePortal, 'registerConfiglets')\n\n def registerConfiglets(self, configlets):\n for conf in configlets:\n self.registerConfiglet(**conf)\n\n security.declareProtected(ManagePortal, 'getGroupIds')\n\n def getGroupIds(self, category='site'):\n groups = self.group.get(category, [])\n return [g[0] for g in groups if g]\n\n security.declareProtected(View, 'getGroups')\n\n def getGroups(self, category='site'):\n groups = self.group.get(category, [])\n return [{'id': g[0], 'title': g[1]} for g in groups if g]\n\n security.declarePrivate('listActions')\n\n def listActions(self, info=None, object=None):\n # This exists here to shut up a deprecation warning about old-style\n # actions in CMFCore's ActionProviderBase. It was decided not to\n # move configlets to be based on action tool categories for Plone 4\n # (see PLIP #8804), but that (or an alternative) will have to happen\n # before CMF 2.4 when support for old-style actions is removed.\n return self._actions or ()\n\n security.declarePublic('maySeeSomeConfiglets')\n\n def maySeeSomeConfiglets(self):\n groups = self.getGroups('site')\n\n all = []\n for group in groups:\n all.extend(self.enumConfiglets(group=group['id']))\n all = [item for item in all if item['visible']]\n return len(all) != 0\n\n security.declarePublic('enumConfiglets')\n\n def enumConfiglets(self, group=None):\n portal = getToolByName(self, 'portal_url').getPortalObject()\n context = createExprContext(self, portal, self)\n res = []\n for a in self.listActions():\n verified = 0\n for permission in a.permissions:\n if _checkPermission(permission, portal):\n verified = 1\n if verified and a.category == group and a.testCondition(context) \\\n and a.visible:\n res.append(a.getAction(context))\n # Translate the title for sorting\n if getattr(self, 'REQUEST', None) is not None:\n for a in res:\n title = a['title']\n if not isinstance(title, Message):\n title = Message(title, domain='plone')\n a['title'] = translate(title,\n context=self.REQUEST)\n\n def _id(v):\n return v['id']\n res.sort(key=_id)\n return res\n\n security.declareProtected(ManagePortal, 'unregisterConfiglet')\n\n def unregisterConfiglet(self, id):\n actids = [o.id for o in self.listActions()]\n selection = [actids.index(a) for a in actids if a == id]\n if not selection:\n return\n self.deleteActions(selection)\n\n security.declareProtected(ManagePortal, 'unregisterApplication')\n\n def unregisterApplication(self, appId):\n acts = list(self.listActions())\n selection = [acts.index(a) for a in acts if a.appId == appId]\n if not selection:\n return\n self.deleteActions(selection)\n\n def _extractAction(self, properties, index):\n # Extract an ActionInformation from the funky form properties.\n id = str(properties.get('id_%d' % index, ''))\n name = str(properties.get('name_%d' % index, ''))\n action = str(properties.get('action_%d' % index, ''))\n condition = str(properties.get('condition_%d' % index, ''))\n category = str(properties.get('category_%d' % index, ''))\n visible = properties.get('visible_%d' % index, 0)\n permissions = properties.get('permission_%d' % index, ())\n appId = properties.get('appId_%d' % index, '')\n description = properties.get('description_%d' % index, '')\n icon_expr = properties.get('icon_expr_%d' % index, '')\n\n if not name:\n raise ValueError('A name is required.')\n\n if action != '':\n action = Expression(text=action)\n\n if condition != '':\n condition = Expression(text=condition)\n\n if category == '':\n category = 'object'\n\n if not isinstance(visible, int):\n try:\n visible = int(visible)\n except ValueError:\n visible = 0\n\n if isinstance(permissions, six.string_types):\n permissions = (permissions, )\n\n return PloneConfiglet(id=id,\n title=name,\n action=action,\n condition=condition,\n permissions=permissions,\n category=category,\n visible=visible,\n appId=appId,\n description=description,\n icon_expr=icon_expr,\n )\n\n security.declareProtected(ManagePortal, 'addAction')\n\n def addAction(self,\n id,\n name,\n action,\n condition='',\n permission='',\n category='Plone',\n visible=1,\n appId=None,\n icon_expr='',\n description='',\n REQUEST=None,\n ):\n # Add an action to our list.\n if not name:\n raise ValueError('A name is required.')\n\n a_expr = action and Expression(text=str(action)) or ''\n c_expr = condition and Expression(text=str(condition)) or ''\n\n if not isinstance(permission, tuple):\n permission = permission and (str(permission), ) or ()\n\n new_actions = self._cloneActions()\n\n new_action = PloneConfiglet(id=str(id),\n title=name,\n action=a_expr,\n condition=c_expr,\n permissions=permission,\n category=str(category),\n visible=int(visible),\n appId=appId,\n description=description,\n icon_expr=icon_expr,\n )\n\n new_actions.append(new_action)\n self._actions = tuple(new_actions)\n\n if REQUEST is not None:\n return self.manage_editActionsForm(\n REQUEST, manage_tabs_message='Added.')\n\n security.declareProtected(ManagePortal, 'registerConfiglet')\n registerConfiglet = addAction\n\n security.declareProtected(ManagePortal, 'manage_editActionsForm')\n\n def manage_editActionsForm(self, REQUEST, manage_tabs_message=None):\n \"\"\" Show the 'Actions' management tab.\n \"\"\"\n actions = []\n\n for a in self.listActions():\n\n a1 = {}\n a1['id'] = a.getId()\n a1['name'] = a.Title()\n p = a.getPermissions()\n if p:\n a1['permission'] = p[0]\n else:\n a1['permission'] = ''\n a1['category'] = a.getCategory() or 'object'\n a1['visible'] = a.getVisibility()\n a1['action'] = a.getActionExpression()\n a1['condition'] = a.getCondition()\n a1['appId'] = a.getAppId()\n a1['description'] = a.getDescription()\n a1['icon_expr'] = a.getIconExpression()\n actions.append(a1)\n\n # possible_permissions is in OFS.role.RoleManager.\n pp = self.possible_permissions()\n return self._actions_form(\n self,\n REQUEST,\n actions=actions,\n possible_permissions=pp,\n management_view='Actions',\n manage_tabs_message=manage_tabs_message,\n )\n\n @property\n def site_url(self):\n \"\"\"Return the absolute URL to the current site, which is likely not\n necessarily the portal root.\n Used by ``portlet_prefs`` to construct the URL to\n ``@@overview-controlpanel``.\n \"\"\"\n return getSite().absolute_url()\n\n\nInitializeClass(PloneControlPanel)\nregisterToolInterface('portal_controlpanel', IControlPanel)\n", "path": "Products/CMFPlone/PloneControlPanel.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom AccessControl import ClassSecurityInfo\nfrom AccessControl.class_init import InitializeClass\nfrom App.special_dtml import DTMLFile\nfrom OFS.Folder import Folder\nfrom OFS.PropertyManager import PropertyManager\nfrom Products.CMFCore.ActionInformation import ActionInformation\nfrom Products.CMFCore.ActionProviderBase import ActionProviderBase\nfrom Products.CMFCore.Expression import Expression, createExprContext\nfrom Products.CMFCore.permissions import ManagePortal, View\nfrom Products.CMFCore.utils import _checkPermission\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFCore.utils import registerToolInterface\nfrom Products.CMFCore.utils import UniqueObject\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom Products.CMFPlone.interfaces import IControlPanel\nfrom Products.CMFPlone.PloneBaseTool import PloneBaseTool\nfrom zope.component.hooks import getSite\nfrom zope.i18n import translate\nfrom zope.i18nmessageid import Message\nfrom zope.interface import implementer\n\nimport six\n\n\nclass PloneConfiglet(ActionInformation):\n\n def __init__(self, appId, **kwargs):\n self.appId = appId\n ActionInformation.__init__(self, **kwargs)\n\n def getAppId(self):\n return self.appId\n\n def getDescription(self):\n return self.description\n\n def clone(self):\n return self.__class__(**self.__dict__)\n\n def getAction(self, ec):\n res = ActionInformation.getAction(self, ec)\n res['description'] = self.getDescription()\n return res\n\n\n@implementer(IControlPanel)\nclass PloneControlPanel(PloneBaseTool, UniqueObject,\n Folder, ActionProviderBase, PropertyManager):\n \"\"\"Weave together the various sources of \"actions\" which\n are apropos to the current user and context.\n \"\"\"\n\n security = ClassSecurityInfo()\n\n id = 'portal_controlpanel'\n title = 'Control Panel'\n toolicon = 'skins/plone_images/site_icon.png'\n meta_type = 'Plone Control Panel Tool'\n _actions_form = DTMLFile('www/editPloneConfiglets', globals())\n\n manage_options = (ActionProviderBase.manage_options +\n PropertyManager.manage_options)\n\n group = dict(\n member=[\n ('Member', _(u'My Preferences')),\n ],\n site=[\n ('plone-general', _(u'General')),\n ('plone-content', _(u'Content')),\n ('plone-users', _(u'Users')),\n ('plone-security', _(u'Security')),\n ('plone-advanced', _(u'Advanced')),\n ('Plone', _(u'Plone Configuration')),\n ('Products', _(u'Add-on Configuration')),\n ]\n )\n\n def __init__(self, **kw):\n if kw:\n self.__dict__.update(**kw)\n\n security.declareProtected(ManagePortal, 'registerConfiglets')\n\n def registerConfiglets(self, configlets):\n for conf in configlets:\n self.registerConfiglet(**conf)\n\n security.declareProtected(ManagePortal, 'getGroupIds')\n\n def getGroupIds(self, category='site'):\n groups = self.group.get(category, [])\n return [g[0] for g in groups if g]\n\n security.declareProtected(View, 'getGroups')\n\n def getGroups(self, category='site'):\n groups = self.group.get(category, [])\n return [{'id': g[0], 'title': g[1]} for g in groups if g]\n\n security.declarePrivate('listActions')\n\n def listActions(self, info=None, object=None):\n # This exists here to shut up a deprecation warning about old-style\n # actions in CMFCore's ActionProviderBase. It was decided not to\n # move configlets to be based on action tool categories for Plone 4\n # (see PLIP #8804), but that (or an alternative) will have to happen\n # before CMF 2.4 when support for old-style actions is removed.\n return self._actions or ()\n\n security.declarePublic('maySeeSomeConfiglets')\n\n def maySeeSomeConfiglets(self):\n groups = self.getGroups('site')\n\n all = []\n for group in groups:\n all.extend(self.enumConfiglets(group=group['id']))\n all = [item for item in all if item['visible']]\n return len(all) != 0\n\n security.declarePublic('enumConfiglets')\n\n def enumConfiglets(self, group=None):\n portal = getToolByName(self, 'portal_url').getPortalObject()\n context = createExprContext(self, portal, self)\n res = []\n for a in self.listActions():\n verified = 0\n for permission in a.permissions:\n if _checkPermission(permission, portal):\n verified = 1\n if verified and a.category == group and a.testCondition(context) \\\n and a.visible:\n res.append(a.getAction(context))\n # Translate the title for sorting\n if getattr(self, 'REQUEST', None) is not None:\n for a in res:\n title = a['title']\n if not isinstance(title, Message):\n title = Message(title, domain='plone')\n a['title'] = translate(title,\n context=self.REQUEST)\n\n def _title(v):\n return v['title']\n\n res.sort(key=_title)\n return res\n\n security.declareProtected(ManagePortal, 'unregisterConfiglet')\n\n def unregisterConfiglet(self, id):\n actids = [o.id for o in self.listActions()]\n selection = [actids.index(a) for a in actids if a == id]\n if not selection:\n return\n self.deleteActions(selection)\n\n security.declareProtected(ManagePortal, 'unregisterApplication')\n\n def unregisterApplication(self, appId):\n acts = list(self.listActions())\n selection = [acts.index(a) for a in acts if a.appId == appId]\n if not selection:\n return\n self.deleteActions(selection)\n\n def _extractAction(self, properties, index):\n # Extract an ActionInformation from the funky form properties.\n id = str(properties.get('id_%d' % index, ''))\n name = str(properties.get('name_%d' % index, ''))\n action = str(properties.get('action_%d' % index, ''))\n condition = str(properties.get('condition_%d' % index, ''))\n category = str(properties.get('category_%d' % index, ''))\n visible = properties.get('visible_%d' % index, 0)\n permissions = properties.get('permission_%d' % index, ())\n appId = properties.get('appId_%d' % index, '')\n description = properties.get('description_%d' % index, '')\n icon_expr = properties.get('icon_expr_%d' % index, '')\n\n if not name:\n raise ValueError('A name is required.')\n\n if action != '':\n action = Expression(text=action)\n\n if condition != '':\n condition = Expression(text=condition)\n\n if category == '':\n category = 'object'\n\n if not isinstance(visible, int):\n try:\n visible = int(visible)\n except ValueError:\n visible = 0\n\n if isinstance(permissions, six.string_types):\n permissions = (permissions, )\n\n return PloneConfiglet(id=id,\n title=name,\n action=action,\n condition=condition,\n permissions=permissions,\n category=category,\n visible=visible,\n appId=appId,\n description=description,\n icon_expr=icon_expr,\n )\n\n security.declareProtected(ManagePortal, 'addAction')\n\n def addAction(self,\n id,\n name,\n action,\n condition='',\n permission='',\n category='Plone',\n visible=1,\n appId=None,\n icon_expr='',\n description='',\n REQUEST=None,\n ):\n # Add an action to our list.\n if not name:\n raise ValueError('A name is required.')\n\n a_expr = action and Expression(text=str(action)) or ''\n c_expr = condition and Expression(text=str(condition)) or ''\n\n if not isinstance(permission, tuple):\n permission = permission and (str(permission), ) or ()\n\n new_actions = self._cloneActions()\n\n new_action = PloneConfiglet(id=str(id),\n title=name,\n action=a_expr,\n condition=c_expr,\n permissions=permission,\n category=str(category),\n visible=int(visible),\n appId=appId,\n description=description,\n icon_expr=icon_expr,\n )\n\n new_actions.append(new_action)\n self._actions = tuple(new_actions)\n\n if REQUEST is not None:\n return self.manage_editActionsForm(\n REQUEST, manage_tabs_message='Added.')\n\n security.declareProtected(ManagePortal, 'registerConfiglet')\n registerConfiglet = addAction\n\n security.declareProtected(ManagePortal, 'manage_editActionsForm')\n\n def manage_editActionsForm(self, REQUEST, manage_tabs_message=None):\n \"\"\" Show the 'Actions' management tab.\n \"\"\"\n actions = []\n\n for a in self.listActions():\n\n a1 = {}\n a1['id'] = a.getId()\n a1['name'] = a.Title()\n p = a.getPermissions()\n if p:\n a1['permission'] = p[0]\n else:\n a1['permission'] = ''\n a1['category'] = a.getCategory() or 'object'\n a1['visible'] = a.getVisibility()\n a1['action'] = a.getActionExpression()\n a1['condition'] = a.getCondition()\n a1['appId'] = a.getAppId()\n a1['description'] = a.getDescription()\n a1['icon_expr'] = a.getIconExpression()\n actions.append(a1)\n\n # possible_permissions is in OFS.role.RoleManager.\n pp = self.possible_permissions()\n return self._actions_form(\n self,\n REQUEST,\n actions=actions,\n possible_permissions=pp,\n management_view='Actions',\n manage_tabs_message=manage_tabs_message,\n )\n\n @property\n def site_url(self):\n \"\"\"Return the absolute URL to the current site, which is likely not\n necessarily the portal root.\n Used by ``portlet_prefs`` to construct the URL to\n ``@@overview-controlpanel``.\n \"\"\"\n return getSite().absolute_url()\n\n\nInitializeClass(PloneControlPanel)\nregisterToolInterface('portal_controlpanel', IControlPanel)\n", "path": "Products/CMFPlone/PloneControlPanel.py"}]} |
gh_patches_debug_1581 | rasdani/github-patches | git_diff | bridgecrewio__checkov-5936 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_GCP_79 SQL Server latest version is 2022 instead of 2019
**Describe the issue**
The `CKV_GCP_79` about SQL server is pinned at 2019 but 2022 is the latest version :
https://learn.microsoft.com/en-us/troubleshoot/sql/releases/download-and-install-latest-updates
**Examples**
Related to this files :
https://github.com/bridgecrewio/checkov/blob/master/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py
https://github.com/bridgecrewio/checkov/blob/d07fdc994015772a9fa0dc1a12d1391b5765916c/tests/terraform/checks/resource/gcp/example_CloudSqlMajorVersion/main.tf#L213
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py`
Content:
```
1 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
2 from checkov.common.models.enums import CheckCategories
3
4
5 class CloudSqlMajorVersion(BaseResourceValueCheck):
6 def __init__(self):
7 name = "Ensure SQL database is using latest Major version"
8 id = "CKV_GCP_79"
9 supported_resources = ['google_sql_database_instance']
10 categories = [CheckCategories.GENERAL_SECURITY]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def get_inspected_key(self):
14 return 'database_version'
15
16 def get_expected_values(self):
17 return ["POSTGRES_15", "MYSQL_8_0", "SQLSERVER_2019_STANDARD", "SQLSERVER_2019_WEB",
18 "SQLSERVER_2019_ENTERPRISE", "SQLSERVER_2019_EXPRESS"]
19
20
21 check = CloudSqlMajorVersion()
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py b/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py
--- a/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py
+++ b/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py
@@ -14,8 +14,8 @@
return 'database_version'
def get_expected_values(self):
- return ["POSTGRES_15", "MYSQL_8_0", "SQLSERVER_2019_STANDARD", "SQLSERVER_2019_WEB",
- "SQLSERVER_2019_ENTERPRISE", "SQLSERVER_2019_EXPRESS"]
+ return ["POSTGRES_15", "MYSQL_8_0", "SQLSERVER_2022_STANDARD", "SQLSERVER_2022_WEB",
+ "SQLSERVER_2022_ENTERPRISE", "SQLSERVER_2022_EXPRESS"]
check = CloudSqlMajorVersion()
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py b/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py\n--- a/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py\n+++ b/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py\n@@ -14,8 +14,8 @@\n return 'database_version'\n \n def get_expected_values(self):\n- return [\"POSTGRES_15\", \"MYSQL_8_0\", \"SQLSERVER_2019_STANDARD\", \"SQLSERVER_2019_WEB\",\n- \"SQLSERVER_2019_ENTERPRISE\", \"SQLSERVER_2019_EXPRESS\"]\n+ return [\"POSTGRES_15\", \"MYSQL_8_0\", \"SQLSERVER_2022_STANDARD\", \"SQLSERVER_2022_WEB\",\n+ \"SQLSERVER_2022_ENTERPRISE\", \"SQLSERVER_2022_EXPRESS\"]\n \n \n check = CloudSqlMajorVersion()\n", "issue": "CKV_GCP_79 SQL Server latest version is 2022 instead of 2019\n**Describe the issue**\r\nThe `CKV_GCP_79` about SQL server is pinned at 2019 but 2022 is the latest version : \r\nhttps://learn.microsoft.com/en-us/troubleshoot/sql/releases/download-and-install-latest-updates\r\n\r\n**Examples**\r\nRelated to this files : \r\n\r\nhttps://github.com/bridgecrewio/checkov/blob/master/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py\r\n\r\nhttps://github.com/bridgecrewio/checkov/blob/d07fdc994015772a9fa0dc1a12d1391b5765916c/tests/terraform/checks/resource/gcp/example_CloudSqlMajorVersion/main.tf#L213\n", "before_files": [{"content": "from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom checkov.common.models.enums import CheckCategories\n\n\nclass CloudSqlMajorVersion(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure SQL database is using latest Major version\"\n id = \"CKV_GCP_79\"\n supported_resources = ['google_sql_database_instance']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return 'database_version'\n\n def get_expected_values(self):\n return [\"POSTGRES_15\", \"MYSQL_8_0\", \"SQLSERVER_2019_STANDARD\", \"SQLSERVER_2019_WEB\",\n \"SQLSERVER_2019_ENTERPRISE\", \"SQLSERVER_2019_EXPRESS\"]\n\n\ncheck = CloudSqlMajorVersion()\n", "path": "checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py"}], "after_files": [{"content": "from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom checkov.common.models.enums import CheckCategories\n\n\nclass CloudSqlMajorVersion(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure SQL database is using latest Major version\"\n id = \"CKV_GCP_79\"\n supported_resources = ['google_sql_database_instance']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return 'database_version'\n\n def get_expected_values(self):\n return [\"POSTGRES_15\", \"MYSQL_8_0\", \"SQLSERVER_2022_STANDARD\", \"SQLSERVER_2022_WEB\",\n \"SQLSERVER_2022_ENTERPRISE\", \"SQLSERVER_2022_EXPRESS\"]\n\n\ncheck = CloudSqlMajorVersion()\n", "path": "checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py"}]} |
gh_patches_debug_1582 | rasdani/github-patches | git_diff | freedomofpress__securedrop-3429 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[QA] Tails GUI updater reporting new versions
# Bug
## Description
The 0.7.0 GUI updater reports new versions even when it's running the latest. Just ran a pre-flight check with the 0.7.0 tag, checked out inside a Tails VM. The install portion completed fine. So did `./securedrop-admin tailsconfig`. However, after finishing, it popped up the GUI declaring there were new updates. Which there definitely should not be, given that 0.7.0 is the latest release.
After a reboot of the Tails VM, the GUI updater displayed again, prompting to install updates. I accepted. Here's the detailed output: https://gist.github.com/conorsch/2e2da8fb909df067b693949474ef945c
## Steps to Reproduce
See above.
## Expected Behavior
0.7.0 is determined to be latest release; no further prompting.
## Actual Behavior
Prompts for updates even though 0.7.0 is latest release.
## Comments
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `admin/securedrop_admin/__init__.py`
Content:
```
1 # -*- mode: python; coding: utf-8 -*-
2 #
3 # Copyright (C) 2013-2018 Freedom of the Press Foundation & al
4 # Copyright (C) 2018 Loic Dachary <[email protected]>
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation, either version 3 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
18 #
19 """
20 SecureDrop Admin Toolkit.
21
22 For use by administrators to install, maintain, and manage their SD
23 instances.
24 """
25
26 import argparse
27 import logging
28 import os
29 import io
30 import re
31 import string
32 import subprocess
33 import sys
34 import types
35 import prompt_toolkit
36 from prompt_toolkit.validation import Validator, ValidationError
37 import yaml
38
39 sdlog = logging.getLogger(__name__)
40 RELEASE_KEY = '22245C81E3BAEB4138B36061310F561200F4AD77'
41
42
43 class FingerprintException(Exception):
44 pass
45
46
47 class JournalistAlertEmailException(Exception):
48 pass
49
50
51 class SiteConfig(object):
52
53 class ValidateNotEmpty(Validator):
54 def validate(self, document):
55 if document.text != '':
56 return True
57 raise ValidationError(
58 message="Must not be an empty string")
59
60 class ValidateTime(Validator):
61 def validate(self, document):
62 if document.text.isdigit() and int(document.text) in range(0, 24):
63 return True
64 raise ValidationError(
65 message="Must be an integer between 0 and 23")
66
67 class ValidateUser(Validator):
68 def validate(self, document):
69 text = document.text
70 if text != '' and text != 'root' and text != 'amnesia':
71 return True
72 raise ValidationError(
73 message="Must not be root, amnesia or an empty string")
74
75 class ValidateIP(Validator):
76 def validate(self, document):
77 if re.match('((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$)){4}$',
78 document.text):
79 return True
80 raise ValidationError(
81 message="An IP address must be something like 10.240.20.83")
82
83 class ValidateDNS(Validator):
84 def validate(self):
85 raise Exception() # pragma: no cover
86
87 def is_tails(self):
88 try:
89 id = subprocess.check_output('lsb_release --id --short',
90 shell=True).strip()
91 except subprocess.CalledProcessError:
92 id = None
93 return id == 'Tails'
94
95 def lookup_fqdn(self, fqdn, dns=None):
96 cmd = 'host -W=10 -T -4 ' + fqdn
97 if self.is_tails():
98 cmd = 'torify ' + cmd
99 cmd += ' ' + (dns and dns or '8.8.8.8')
100 try:
101 result = subprocess.check_output(cmd.split(' '),
102 stderr=subprocess.STDOUT)
103 except subprocess.CalledProcessError as e:
104 result = e.output
105 sdlog.debug(cmd + ' => ' + result)
106 return 'has address' in result
107
108 class ValidateDNSServer(ValidateDNS):
109 def validate(self, document):
110 if self.lookup_fqdn('gnu.org', document.text):
111 return True
112 raise ValidationError(
113 message='Unable to resolve gnu.org using this DNS')
114
115 class ValidateFQDN(ValidateDNS):
116 def validate(self, document):
117 if self.lookup_fqdn(document.text):
118 return True
119 raise ValidationError(
120 message='Unable to resolve ' + document.text)
121
122 class ValidatePath(Validator):
123 def __init__(self, basedir):
124 self.basedir = basedir
125 super(SiteConfig.ValidatePath, self).__init__()
126
127 def validate(self, document):
128 if document.text == '':
129 raise ValidationError(
130 message='an existing file name is required')
131 path = os.path.join(self.basedir, document.text)
132 if os.path.exists(path):
133 return True
134 raise ValidationError(
135 message=path + ' file does not exist')
136
137 class ValidateOptionalPath(ValidatePath):
138 def validate(self, document):
139 if document.text == '':
140 return True
141 return super(SiteConfig.ValidateOptionalPath, self).validate(
142 document)
143
144 class ValidateYesNo(Validator):
145 def validate(self, document):
146 text = document.text.lower()
147 if text == 'yes' or text == 'no':
148 return True
149 raise ValidationError(message="Must be either yes or no")
150
151 class ValidateFingerprint(Validator):
152 def validate(self, document):
153 text = document.text.replace(' ', '')
154 if text == '65A1B5FF195B56353CC63DFFCC40EF1228271441':
155 raise ValidationError(
156 message='This is the TEST journalist fingerprint')
157 if text == '600BC6D5142C68F35DDBCEA87B597104EDDDC102':
158 raise ValidationError(
159 message='This is the TEST admin fingerprint')
160 if not re.match('[a-fA-F0-9]{40}$', text):
161 raise ValidationError(
162 message='fingerprints must be 40 hexadecimal characters')
163 return True
164
165 class ValidateOptionalFingerprint(ValidateFingerprint):
166 def validate(self, document):
167 if document.text == '':
168 return True
169 return super(SiteConfig.ValidateOptionalFingerprint,
170 self).validate(document)
171
172 class ValidateInt(Validator):
173 def validate(self, document):
174 if re.match('\d+$', document.text):
175 return True
176 raise ValidationError(message="Must be an integer")
177
178 class Locales(object):
179 def __init__(self, appdir):
180 self.translation_dir = os.path.realpath(
181 os.path.join(appdir, 'translations'))
182
183 def get_translations(self):
184 translations = set(['en_US'])
185 for dirname in os.listdir(self.translation_dir):
186 if dirname != 'messages.pot':
187 translations.add(dirname)
188 return translations
189
190 class ValidateLocales(Validator):
191 def __init__(self, basedir):
192 self.basedir = basedir
193 super(SiteConfig.ValidateLocales, self).__init__()
194
195 def validate(self, document):
196 desired = document.text.split()
197 existing = SiteConfig.Locales(self.basedir).get_translations()
198 missing = set(desired) - set(existing)
199 if not missing:
200 return True
201 raise ValidationError(
202 message="The following locales do not exist " + " ".join(
203 missing))
204
205 class ValidateOSSECUsername(Validator):
206 def validate(self, document):
207 text = document.text
208 if text and '@' not in text and 'test' != text:
209 return True
210 raise ValidationError(
211 message="The SASL username should not include the domain name")
212
213 class ValidateOSSECPassword(Validator):
214 def validate(self, document):
215 text = document.text
216 if len(text) >= 8 and 'password123' != text:
217 return True
218 raise ValidationError(
219 message="Password for OSSEC email account must be strong")
220
221 class ValidateEmail(Validator):
222 def validate(self, document):
223 text = document.text
224 if text == '':
225 raise ValidationError(
226 message=("Must not be empty"))
227 if '@' not in text:
228 raise ValidationError(
229 message=("Must contain a @"))
230 return True
231
232 class ValidateOSSECEmail(ValidateEmail):
233 def validate(self, document):
234 super(SiteConfig.ValidateOSSECEmail, self).validate(document)
235 text = document.text
236 if '[email protected]' != text:
237 return True
238 raise ValidationError(
239 message=("Must be set to something other than "
240 "[email protected]"))
241
242 class ValidateOptionalEmail(ValidateEmail):
243 def validate(self, document):
244 if document.text == '':
245 return True
246 return super(SiteConfig.ValidateOptionalEmail, self).validate(
247 document)
248
249 def __init__(self, args):
250 self.args = args
251 self.config = {}
252 translations = SiteConfig.Locales(
253 self.args.app_path).get_translations()
254 translations = " ".join(translations)
255 self.desc = [
256 ['ssh_users', 'sd', str,
257 u'Username for SSH access to the servers',
258 SiteConfig.ValidateUser(),
259 None],
260 ['daily_reboot_time', 4, int,
261 u'Daily reboot time of the server (24-hour clock)',
262 SiteConfig.ValidateTime(),
263 int],
264 ['app_ip', '10.20.2.2', str,
265 u'Local IPv4 address for the Application Server',
266 SiteConfig.ValidateIP(),
267 None],
268 ['monitor_ip', '10.20.3.2', str,
269 u'Local IPv4 address for the Monitor Server',
270 SiteConfig.ValidateIP(),
271 None],
272 ['app_hostname', 'app', str,
273 u'Hostname for Application Server',
274 SiteConfig.ValidateNotEmpty(),
275 None],
276 ['monitor_hostname', 'mon', str,
277 u'Hostname for Monitor Server',
278 SiteConfig.ValidateNotEmpty(),
279 None],
280 ['dns_server', '8.8.8.8', str,
281 u'DNS server specified during installation',
282 SiteConfig.ValidateNotEmpty(),
283 None],
284 ['securedrop_app_gpg_public_key', 'SecureDrop.asc', str,
285 u'Local filepath to public key for '
286 'SecureDrop Application GPG public key',
287 SiteConfig.ValidatePath(self.args.ansible_path),
288 None],
289 ['securedrop_app_https_on_source_interface', False, bool,
290 u'Whether HTTPS should be enabled on '
291 'Source Interface (requires EV cert)',
292 SiteConfig.ValidateYesNo(),
293 lambda x: x.lower() == 'yes'],
294 ['securedrop_app_https_certificate_cert_src', '', str,
295 u'Local filepath to HTTPS certificate '
296 '(optional, only if using HTTPS on source interface)',
297 SiteConfig.ValidateOptionalPath(self.args.ansible_path),
298 None],
299 ['securedrop_app_https_certificate_key_src', '', str,
300 u'Local filepath to HTTPS certificate key '
301 '(optional, only if using HTTPS on source interface)',
302 SiteConfig.ValidateOptionalPath(self.args.ansible_path),
303 None],
304 ['securedrop_app_https_certificate_chain_src', '', str,
305 u'Local filepath to HTTPS certificate chain file '
306 '(optional, only if using HTTPS on source interface)',
307 SiteConfig.ValidateOptionalPath(self.args.ansible_path),
308 None],
309 ['securedrop_app_gpg_fingerprint', '', str,
310 u'Full fingerprint for the SecureDrop Application GPG Key',
311 SiteConfig.ValidateFingerprint(),
312 self.sanitize_fingerprint],
313 ['ossec_alert_gpg_public_key', 'ossec.pub', str,
314 u'Local filepath to OSSEC alerts GPG public key',
315 SiteConfig.ValidatePath(self.args.ansible_path),
316 None],
317 ['ossec_gpg_fpr', '', str,
318 u'Full fingerprint for the OSSEC alerts GPG public key',
319 SiteConfig.ValidateFingerprint(),
320 self.sanitize_fingerprint],
321 ['ossec_alert_email', '', str,
322 u'Admin email address for receiving OSSEC alerts',
323 SiteConfig.ValidateOSSECEmail(),
324 None],
325 ['journalist_alert_gpg_public_key', '', str,
326 u'Local filepath to journalist alerts GPG public key (optional)',
327 SiteConfig.ValidateOptionalPath(self.args.ansible_path),
328 None],
329 ['journalist_gpg_fpr', '', str,
330 u'Full fingerprint for the journalist alerts '
331 u'GPG public key (optional)',
332 SiteConfig.ValidateOptionalFingerprint(),
333 self.sanitize_fingerprint],
334 ['journalist_alert_email', '', str,
335 u'Email address for receiving journalist alerts (optional)',
336 SiteConfig.ValidateOptionalEmail(),
337 None],
338 ['smtp_relay', "smtp.gmail.com", str,
339 u'SMTP relay for sending OSSEC alerts',
340 SiteConfig.ValidateNotEmpty(),
341 None],
342 ['smtp_relay_port', 587, int,
343 u'SMTP port for sending OSSEC alerts',
344 SiteConfig.ValidateInt(),
345 int],
346 ['sasl_domain', "gmail.com", str,
347 u'SASL domain for sending OSSEC alerts',
348 None,
349 None],
350 ['sasl_username', '', str,
351 u'SASL username for sending OSSEC alerts',
352 SiteConfig.ValidateOSSECUsername(),
353 None],
354 ['sasl_password', '', str,
355 u'SASL password for sending OSSEC alerts',
356 SiteConfig.ValidateOSSECPassword(),
357 None],
358 ['enable_ssh_over_tor', True, bool,
359 u'Enable SSH over Tor (recommended, disables SSH over LAN). '
360 u'If you respond no, SSH will be available over LAN only',
361 SiteConfig.ValidateYesNo(),
362 lambda x: x.lower() == 'yes'],
363 ['securedrop_supported_locales', [], types.ListType,
364 u'Space separated list of additional locales to support '
365 '(' + translations + ')',
366 SiteConfig.ValidateLocales(self.args.app_path),
367 string.split],
368 ]
369
370 def load_and_update_config(self):
371 if self.exists():
372 self.config = self.load()
373
374 return self.update_config()
375
376 def update_config(self):
377 self.config.update(self.user_prompt_config())
378 self.save()
379 self.validate_gpg_keys()
380 self.validate_journalist_alert_email()
381 return True
382
383 def user_prompt_config(self):
384 config = {}
385 for desc in self.desc:
386 (var, default, type, prompt, validator, transform) = desc
387 if var == 'journalist_gpg_fpr':
388 if not config.get('journalist_alert_gpg_public_key',
389 None):
390 config[var] = ''
391 continue
392 if var == 'journalist_alert_email':
393 if not config.get('journalist_alert_gpg_public_key',
394 None):
395 config[var] = ''
396 continue
397 config[var] = self.user_prompt_config_one(desc,
398 self.config.get(var))
399 return config
400
401 def user_prompt_config_one(self, desc, from_config):
402 (var, default, type, prompt, validator, transform) = desc
403 if from_config is not None:
404 default = from_config
405 prompt += ': '
406 return self.validated_input(prompt, default, validator, transform)
407
408 def validated_input(self, prompt, default, validator, transform):
409 if type(default) is bool:
410 default = default and 'yes' or 'no'
411 if type(default) is int:
412 default = str(default)
413 if isinstance(default, types.ListType):
414 default = " ".join(default)
415 if type(default) is not str:
416 default = str(default)
417 kwargs = {}
418 if validator:
419 kwargs['validator'] = validator
420 value = prompt_toolkit.prompt(prompt,
421 default=unicode(default, 'utf-8'),
422 **kwargs)
423 if transform:
424 return transform(value)
425 else:
426 return value
427
428 def sanitize_fingerprint(self, value):
429 return value.upper().replace(' ', '')
430
431 def validate_gpg_keys(self):
432 keys = (('securedrop_app_gpg_public_key',
433 'securedrop_app_gpg_fingerprint'),
434
435 ('ossec_alert_gpg_public_key',
436 'ossec_gpg_fpr'),
437
438 ('journalist_alert_gpg_public_key',
439 'journalist_gpg_fpr'))
440 validate = os.path.join(
441 os.path.dirname(__file__), '..', 'bin',
442 'validate-gpg-key.sh')
443 for (public_key, fingerprint) in keys:
444 if (self.config[public_key] == '' and
445 self.config[fingerprint] == ''):
446 continue
447 public_key = os.path.join(self.args.ansible_path,
448 self.config[public_key])
449 fingerprint = self.config[fingerprint]
450 try:
451 sdlog.debug(subprocess.check_output(
452 [validate, public_key, fingerprint],
453 stderr=subprocess.STDOUT))
454 except subprocess.CalledProcessError as e:
455 sdlog.debug(e.output)
456 raise FingerprintException(
457 "fingerprint {} ".format(fingerprint) +
458 "does not match " +
459 "the public key {}".format(public_key))
460 return True
461
462 def validate_journalist_alert_email(self):
463 if (self.config['journalist_alert_gpg_public_key'] == '' and
464 self.config['journalist_gpg_fpr'] == ''):
465 return True
466
467 class Document(object):
468 def __init__(self, text):
469 self.text = text
470
471 try:
472 SiteConfig.ValidateEmail().validate(Document(
473 self.config['journalist_alert_email']))
474 except ValidationError as e:
475 raise JournalistAlertEmailException(
476 "journalist alerts email: " + e.message)
477 return True
478
479 def exists(self):
480 return os.path.exists(self.args.site_config)
481
482 def save(self):
483 with io.open(self.args.site_config, 'w') as site_config_file:
484 yaml.safe_dump(self.config,
485 site_config_file,
486 default_flow_style=False)
487
488 def load(self):
489 try:
490 with io.open(self.args.site_config) as site_config_file:
491 return yaml.safe_load(site_config_file)
492 except IOError:
493 sdlog.error("Config file missing, re-run with sdconfig")
494 raise
495 except yaml.YAMLError:
496 sdlog.error("There was an issue processing {}".format(
497 self.args.site_config))
498 raise
499
500
501 def setup_logger(verbose=False):
502 """ Configure logging handler """
503 # Set default level on parent
504 sdlog.setLevel(logging.DEBUG)
505 level = logging.DEBUG if verbose else logging.INFO
506
507 stdout = logging.StreamHandler(sys.stdout)
508 stdout.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
509 stdout.setLevel(level)
510 sdlog.addHandler(stdout)
511
512
513 def sdconfig(args):
514 """Configure SD site settings"""
515 SiteConfig(args).load_and_update_config()
516
517
518 def install_securedrop(args):
519 """Install/Update SecureDrop"""
520 SiteConfig(args).load()
521
522 sdlog.info("Now installing SecureDrop on remote servers.")
523 sdlog.info("You will be prompted for the sudo password on the "
524 "servers.")
525 sdlog.info("The sudo password is only necessary during initial "
526 "installation.")
527 return subprocess.check_call([os.path.join(args.ansible_path,
528 'securedrop-prod.yml'), '--ask-become-pass'],
529 cwd=args.ansible_path)
530
531
532 def backup_securedrop(args):
533 """Perform backup of the SecureDrop Application Server.
534 Creates a tarball of submissions and server config, and fetches
535 back to the Admin Workstation. Future `restore` actions can be performed
536 with the backup tarball."""
537 sdlog.info("Backing up the SecureDrop Application Server")
538 ansible_cmd = [
539 'ansible-playbook',
540 os.path.join(args.ansible_path, 'securedrop-backup.yml'),
541 ]
542 return subprocess.check_call(ansible_cmd, cwd=args.ansible_path)
543
544
545 def restore_securedrop(args):
546 """Perform restore of the SecureDrop Application Server.
547 Requires a tarball of submissions and server config, created via
548 the `backup` action."""
549 sdlog.info("Restoring the SecureDrop Application Server from backup")
550 # Canonicalize filepath to backup tarball, so Ansible sees only the
551 # basename. The files must live in args.ansible_path,
552 # but the securedrop-admin
553 # script will be invoked from the repo root, so preceding dirs are likely.
554 restore_file_basename = os.path.basename(args.restore_file)
555 ansible_cmd = [
556 'ansible-playbook',
557 os.path.join(args.ansible_path, 'securedrop-restore.yml'),
558 '-e',
559 "restore_file='{}'".format(restore_file_basename),
560 ]
561 return subprocess.check_call(ansible_cmd, cwd=args.ansible_path)
562
563
564 def run_tails_config(args):
565 """Configure Tails environment post SD install"""
566 sdlog.info("Configuring Tails workstation environment")
567 sdlog.info(("You'll be prompted for the temporary Tails admin password,"
568 " which was set on Tails login screen"))
569 ansible_cmd = [
570 os.path.join(args.ansible_path, 'securedrop-tails.yml'),
571 "--ask-become-pass",
572 # Passing an empty inventory file to override the automatic dynamic
573 # inventory script, which fails if no site vars are configured.
574 '-i', '/dev/null',
575 ]
576 return subprocess.check_call(ansible_cmd,
577 cwd=args.ansible_path)
578
579
580 def check_for_updates_wrapper(args):
581 res, tag = check_for_updates(args)
582 # Because the command worked properly exit with 0.
583 return 0
584
585
586 def check_for_updates(args):
587 """Check for SecureDrop updates"""
588 sdlog.info("Checking for SecureDrop updates...")
589
590 # Determine what branch we are on
591 current_tag = subprocess.check_output(['git', 'describe'], cwd=args.root)
592
593 # Fetch all branches
594 git_fetch_cmd = ['git', 'fetch', '--all']
595 subprocess.check_call(git_fetch_cmd, cwd=args.root)
596
597 # Get latest tag
598 git_all_tags = ["git", "tag"]
599 all_tags = subprocess.check_output(git_all_tags,
600 cwd=args.root).rstrip('\n').split('\n')
601
602 # Do not check out any release candidate tags
603 all_prod_tags = [x for x in all_tags if 'rc' not in x]
604
605 latest_tag = all_prod_tags[-1]
606
607 if current_tag != latest_tag:
608 sdlog.info("Update needed")
609 return True, latest_tag
610 sdlog.info("All updates applied")
611 return False, latest_tag
612
613
614 def get_release_key_from_keyserver(args, keyserver=None, timeout=45):
615 gpg_recv = ['timeout', str(timeout), 'gpg', '--recv-key']
616 release_key = [RELEASE_KEY]
617
618 # We construct the gpg --recv-key command based on optional keyserver arg.
619 if keyserver:
620 get_key_cmd = gpg_recv + ['--keyserver', keyserver] + release_key
621 else:
622 get_key_cmd = gpg_recv + release_key
623
624 subprocess.check_call(get_key_cmd, cwd=args.root)
625
626
627 def update(args):
628 """Verify, and apply latest SecureDrop workstation update"""
629 sdlog.info("Applying SecureDrop updates...")
630
631 update_status, latest_tag = check_for_updates(args)
632
633 if not update_status:
634 # Exit if we're up to date
635 return 0
636
637 sdlog.info("Verifying signature on latest update...")
638
639 try:
640 # First try to get the release key using Tails default keyserver
641 get_release_key_from_keyserver(args)
642 except subprocess.CalledProcessError:
643 # Now try to get the key from a secondary keyserver.
644 secondary_keyserver = 'hkps://hkps.pool.sks-keyservers.net'
645 get_release_key_from_keyserver(args,
646 keyserver=secondary_keyserver)
647
648 git_verify_tag_cmd = ['git', 'tag', '-v', latest_tag]
649 try:
650 sig_result = subprocess.check_output(git_verify_tag_cmd,
651 stderr=subprocess.STDOUT,
652 cwd=args.root)
653
654 good_sig_text = 'Good signature from "SecureDrop Release Signing Key"'
655 bad_sig_text = 'BAD signature'
656 # To ensure that an adversary cannot name a malicious key good_sig_text
657 # we check that bad_sig_text does not appear and that the release key
658 # appears on the second line of the output.
659 gpg_lines = sig_result.split('\n')
660 if RELEASE_KEY in gpg_lines[1] and \
661 sig_result.count(good_sig_text) == 1 and \
662 bad_sig_text not in sig_result:
663 sdlog.info("Signature verification successful.")
664 else: # If anything else happens, fail and exit 1
665 sdlog.info("Signature verification failed.")
666 return 1
667
668 except subprocess.CalledProcessError:
669 # If there is no signature, or if the signature does not verify,
670 # then git tag -v exits subprocess.check_output will exit 1
671 # and subprocess.check_output will throw a CalledProcessError
672 sdlog.info("Signature verification failed.")
673 return 1
674
675 # Only if the proper signature verifies do we check out the latest
676 git_checkout_cmd = ['git', 'checkout', latest_tag]
677 subprocess.check_call(git_checkout_cmd, cwd=args.root)
678
679 sdlog.info("Updated to SecureDrop {}.".format(latest_tag))
680 return 0
681
682
683 def get_logs(args):
684 """Get logs for forensics and debugging purposes"""
685 sdlog.info("Gathering logs for forensics and debugging")
686 ansible_cmd = [
687 'ansible-playbook',
688 os.path.join(args.ansible_path, 'securedrop-logs.yml'),
689 ]
690 subprocess.check_call(ansible_cmd, cwd=args.ansible_path)
691 sdlog.info("Encrypt logs and send to [email protected] or upload "
692 "to the SecureDrop support portal.")
693 return 0
694
695
696 def set_default_paths(args):
697 if not args.ansible_path:
698 args.ansible_path = args.root + "/install_files/ansible-base"
699 args.ansible_path = os.path.realpath(args.ansible_path)
700 if not args.site_config:
701 args.site_config = args.ansible_path + "/group_vars/all/site-specific"
702 args.site_config = os.path.realpath(args.site_config)
703 if not args.app_path:
704 args.app_path = args.root + "/securedrop"
705 args.app_path = os.path.realpath(args.app_path)
706 return args
707
708
709 def parse_argv(argv):
710 class ArgParseFormatterCombo(argparse.ArgumentDefaultsHelpFormatter,
711 argparse.RawTextHelpFormatter):
712 """Needed to combine formatting classes for help output"""
713 pass
714
715 parser = argparse.ArgumentParser(description=__doc__,
716 formatter_class=ArgParseFormatterCombo)
717 parser.add_argument('-v', action='store_true', default=False,
718 help="Increase verbosity on output")
719 parser.add_argument('-d', action='store_true', default=False,
720 help="Developer mode. Not to be used in production.")
721 parser.add_argument('--root', required=True,
722 help="path to the root of the SecureDrop repository")
723 parser.add_argument('--site-config',
724 help="path to the YAML site configuration file")
725 parser.add_argument('--ansible-path',
726 help="path to the Ansible root")
727 parser.add_argument('--app-path',
728 help="path to the SecureDrop application root")
729 subparsers = parser.add_subparsers()
730
731 parse_sdconfig = subparsers.add_parser('sdconfig', help=sdconfig.__doc__)
732 parse_sdconfig.set_defaults(func=sdconfig)
733
734 parse_install = subparsers.add_parser('install',
735 help=install_securedrop.__doc__)
736 parse_install.set_defaults(func=install_securedrop)
737
738 parse_tailsconfig = subparsers.add_parser('tailsconfig',
739 help=run_tails_config.__doc__)
740 parse_tailsconfig.set_defaults(func=run_tails_config)
741
742 parse_backup = subparsers.add_parser('backup',
743 help=backup_securedrop.__doc__)
744 parse_backup.set_defaults(func=backup_securedrop)
745
746 parse_restore = subparsers.add_parser('restore',
747 help=restore_securedrop.__doc__)
748 parse_restore.set_defaults(func=restore_securedrop)
749 parse_restore.add_argument("restore_file")
750
751 parse_update = subparsers.add_parser('update', help=update.__doc__)
752 parse_update.set_defaults(func=update)
753
754 parse_check_updates = subparsers.add_parser('check_for_updates',
755 help=check_for_updates.__doc__)
756 parse_check_updates.set_defaults(func=check_for_updates_wrapper)
757
758 parse_logs = subparsers.add_parser('logs',
759 help=get_logs.__doc__)
760 parse_logs.set_defaults(func=get_logs)
761
762 return set_default_paths(parser.parse_args(argv))
763
764
765 def main(argv):
766 args = parse_argv(argv)
767 setup_logger(args.v)
768 if args.v:
769 return_code = args.func(args)
770 sys.exit(return_code)
771 else:
772 try:
773 return_code = args.func(args)
774 except KeyboardInterrupt:
775 sys.exit(-1)
776 except Exception as e:
777 raise SystemExit(
778 'ERROR (run with -v for more): {msg}'.format(msg=e))
779 else:
780 sys.exit(return_code)
781
782
783 if __name__ == "__main__":
784 main(sys.argv[1:])
785
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/admin/securedrop_admin/__init__.py b/admin/securedrop_admin/__init__.py
--- a/admin/securedrop_admin/__init__.py
+++ b/admin/securedrop_admin/__init__.py
@@ -588,7 +588,8 @@
sdlog.info("Checking for SecureDrop updates...")
# Determine what branch we are on
- current_tag = subprocess.check_output(['git', 'describe'], cwd=args.root)
+ current_tag = subprocess.check_output(['git', 'describe'],
+ cwd=args.root).rstrip('\n')
# Fetch all branches
git_fetch_cmd = ['git', 'fetch', '--all']
| {"golden_diff": "diff --git a/admin/securedrop_admin/__init__.py b/admin/securedrop_admin/__init__.py\n--- a/admin/securedrop_admin/__init__.py\n+++ b/admin/securedrop_admin/__init__.py\n@@ -588,7 +588,8 @@\n sdlog.info(\"Checking for SecureDrop updates...\")\n \n # Determine what branch we are on\n- current_tag = subprocess.check_output(['git', 'describe'], cwd=args.root)\n+ current_tag = subprocess.check_output(['git', 'describe'],\n+ cwd=args.root).rstrip('\\n')\n \n # Fetch all branches\n git_fetch_cmd = ['git', 'fetch', '--all']\n", "issue": "[QA] Tails GUI updater reporting new versions\n# Bug\r\n\r\n## Description\r\n\r\nThe 0.7.0 GUI updater reports new versions even when it's running the latest. Just ran a pre-flight check with the 0.7.0 tag, checked out inside a Tails VM. The install portion completed fine. So did `./securedrop-admin tailsconfig`. However, after finishing, it popped up the GUI declaring there were new updates. Which there definitely should not be, given that 0.7.0 is the latest release.\r\n\r\nAfter a reboot of the Tails VM, the GUI updater displayed again, prompting to install updates. I accepted. Here's the detailed output: https://gist.github.com/conorsch/2e2da8fb909df067b693949474ef945c\r\n\r\n\r\n\r\n## Steps to Reproduce\r\n\r\nSee above.\r\n\r\n## Expected Behavior\r\n\r\n0.7.0 is determined to be latest release; no further prompting. \r\n\r\n## Actual Behavior\r\n\r\nPrompts for updates even though 0.7.0 is latest release.\r\n\r\n## Comments\r\n\n", "before_files": [{"content": "# -*- mode: python; coding: utf-8 -*-\n#\n# Copyright (C) 2013-2018 Freedom of the Press Foundation & al\n# Copyright (C) 2018 Loic Dachary <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\"\"\"\nSecureDrop Admin Toolkit.\n\nFor use by administrators to install, maintain, and manage their SD\ninstances.\n\"\"\"\n\nimport argparse\nimport logging\nimport os\nimport io\nimport re\nimport string\nimport subprocess\nimport sys\nimport types\nimport prompt_toolkit\nfrom prompt_toolkit.validation import Validator, ValidationError\nimport yaml\n\nsdlog = logging.getLogger(__name__)\nRELEASE_KEY = '22245C81E3BAEB4138B36061310F561200F4AD77'\n\n\nclass FingerprintException(Exception):\n pass\n\n\nclass JournalistAlertEmailException(Exception):\n pass\n\n\nclass SiteConfig(object):\n\n class ValidateNotEmpty(Validator):\n def validate(self, document):\n if document.text != '':\n return True\n raise ValidationError(\n message=\"Must not be an empty string\")\n\n class ValidateTime(Validator):\n def validate(self, document):\n if document.text.isdigit() and int(document.text) in range(0, 24):\n return True\n raise ValidationError(\n message=\"Must be an integer between 0 and 23\")\n\n class ValidateUser(Validator):\n def validate(self, document):\n text = document.text\n if text != '' and text != 'root' and text != 'amnesia':\n return True\n raise ValidationError(\n message=\"Must not be root, amnesia or an empty string\")\n\n class ValidateIP(Validator):\n def validate(self, document):\n if re.match('((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\\.|$)){4}$',\n document.text):\n return True\n raise ValidationError(\n message=\"An IP address must be something like 10.240.20.83\")\n\n class ValidateDNS(Validator):\n def validate(self):\n raise Exception() # pragma: no cover\n\n def is_tails(self):\n try:\n id = subprocess.check_output('lsb_release --id --short',\n shell=True).strip()\n except subprocess.CalledProcessError:\n id = None\n return id == 'Tails'\n\n def lookup_fqdn(self, fqdn, dns=None):\n cmd = 'host -W=10 -T -4 ' + fqdn\n if self.is_tails():\n cmd = 'torify ' + cmd\n cmd += ' ' + (dns and dns or '8.8.8.8')\n try:\n result = subprocess.check_output(cmd.split(' '),\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n result = e.output\n sdlog.debug(cmd + ' => ' + result)\n return 'has address' in result\n\n class ValidateDNSServer(ValidateDNS):\n def validate(self, document):\n if self.lookup_fqdn('gnu.org', document.text):\n return True\n raise ValidationError(\n message='Unable to resolve gnu.org using this DNS')\n\n class ValidateFQDN(ValidateDNS):\n def validate(self, document):\n if self.lookup_fqdn(document.text):\n return True\n raise ValidationError(\n message='Unable to resolve ' + document.text)\n\n class ValidatePath(Validator):\n def __init__(self, basedir):\n self.basedir = basedir\n super(SiteConfig.ValidatePath, self).__init__()\n\n def validate(self, document):\n if document.text == '':\n raise ValidationError(\n message='an existing file name is required')\n path = os.path.join(self.basedir, document.text)\n if os.path.exists(path):\n return True\n raise ValidationError(\n message=path + ' file does not exist')\n\n class ValidateOptionalPath(ValidatePath):\n def validate(self, document):\n if document.text == '':\n return True\n return super(SiteConfig.ValidateOptionalPath, self).validate(\n document)\n\n class ValidateYesNo(Validator):\n def validate(self, document):\n text = document.text.lower()\n if text == 'yes' or text == 'no':\n return True\n raise ValidationError(message=\"Must be either yes or no\")\n\n class ValidateFingerprint(Validator):\n def validate(self, document):\n text = document.text.replace(' ', '')\n if text == '65A1B5FF195B56353CC63DFFCC40EF1228271441':\n raise ValidationError(\n message='This is the TEST journalist fingerprint')\n if text == '600BC6D5142C68F35DDBCEA87B597104EDDDC102':\n raise ValidationError(\n message='This is the TEST admin fingerprint')\n if not re.match('[a-fA-F0-9]{40}$', text):\n raise ValidationError(\n message='fingerprints must be 40 hexadecimal characters')\n return True\n\n class ValidateOptionalFingerprint(ValidateFingerprint):\n def validate(self, document):\n if document.text == '':\n return True\n return super(SiteConfig.ValidateOptionalFingerprint,\n self).validate(document)\n\n class ValidateInt(Validator):\n def validate(self, document):\n if re.match('\\d+$', document.text):\n return True\n raise ValidationError(message=\"Must be an integer\")\n\n class Locales(object):\n def __init__(self, appdir):\n self.translation_dir = os.path.realpath(\n os.path.join(appdir, 'translations'))\n\n def get_translations(self):\n translations = set(['en_US'])\n for dirname in os.listdir(self.translation_dir):\n if dirname != 'messages.pot':\n translations.add(dirname)\n return translations\n\n class ValidateLocales(Validator):\n def __init__(self, basedir):\n self.basedir = basedir\n super(SiteConfig.ValidateLocales, self).__init__()\n\n def validate(self, document):\n desired = document.text.split()\n existing = SiteConfig.Locales(self.basedir).get_translations()\n missing = set(desired) - set(existing)\n if not missing:\n return True\n raise ValidationError(\n message=\"The following locales do not exist \" + \" \".join(\n missing))\n\n class ValidateOSSECUsername(Validator):\n def validate(self, document):\n text = document.text\n if text and '@' not in text and 'test' != text:\n return True\n raise ValidationError(\n message=\"The SASL username should not include the domain name\")\n\n class ValidateOSSECPassword(Validator):\n def validate(self, document):\n text = document.text\n if len(text) >= 8 and 'password123' != text:\n return True\n raise ValidationError(\n message=\"Password for OSSEC email account must be strong\")\n\n class ValidateEmail(Validator):\n def validate(self, document):\n text = document.text\n if text == '':\n raise ValidationError(\n message=(\"Must not be empty\"))\n if '@' not in text:\n raise ValidationError(\n message=(\"Must contain a @\"))\n return True\n\n class ValidateOSSECEmail(ValidateEmail):\n def validate(self, document):\n super(SiteConfig.ValidateOSSECEmail, self).validate(document)\n text = document.text\n if '[email protected]' != text:\n return True\n raise ValidationError(\n message=(\"Must be set to something other than \"\n \"[email protected]\"))\n\n class ValidateOptionalEmail(ValidateEmail):\n def validate(self, document):\n if document.text == '':\n return True\n return super(SiteConfig.ValidateOptionalEmail, self).validate(\n document)\n\n def __init__(self, args):\n self.args = args\n self.config = {}\n translations = SiteConfig.Locales(\n self.args.app_path).get_translations()\n translations = \" \".join(translations)\n self.desc = [\n ['ssh_users', 'sd', str,\n u'Username for SSH access to the servers',\n SiteConfig.ValidateUser(),\n None],\n ['daily_reboot_time', 4, int,\n u'Daily reboot time of the server (24-hour clock)',\n SiteConfig.ValidateTime(),\n int],\n ['app_ip', '10.20.2.2', str,\n u'Local IPv4 address for the Application Server',\n SiteConfig.ValidateIP(),\n None],\n ['monitor_ip', '10.20.3.2', str,\n u'Local IPv4 address for the Monitor Server',\n SiteConfig.ValidateIP(),\n None],\n ['app_hostname', 'app', str,\n u'Hostname for Application Server',\n SiteConfig.ValidateNotEmpty(),\n None],\n ['monitor_hostname', 'mon', str,\n u'Hostname for Monitor Server',\n SiteConfig.ValidateNotEmpty(),\n None],\n ['dns_server', '8.8.8.8', str,\n u'DNS server specified during installation',\n SiteConfig.ValidateNotEmpty(),\n None],\n ['securedrop_app_gpg_public_key', 'SecureDrop.asc', str,\n u'Local filepath to public key for '\n 'SecureDrop Application GPG public key',\n SiteConfig.ValidatePath(self.args.ansible_path),\n None],\n ['securedrop_app_https_on_source_interface', False, bool,\n u'Whether HTTPS should be enabled on '\n 'Source Interface (requires EV cert)',\n SiteConfig.ValidateYesNo(),\n lambda x: x.lower() == 'yes'],\n ['securedrop_app_https_certificate_cert_src', '', str,\n u'Local filepath to HTTPS certificate '\n '(optional, only if using HTTPS on source interface)',\n SiteConfig.ValidateOptionalPath(self.args.ansible_path),\n None],\n ['securedrop_app_https_certificate_key_src', '', str,\n u'Local filepath to HTTPS certificate key '\n '(optional, only if using HTTPS on source interface)',\n SiteConfig.ValidateOptionalPath(self.args.ansible_path),\n None],\n ['securedrop_app_https_certificate_chain_src', '', str,\n u'Local filepath to HTTPS certificate chain file '\n '(optional, only if using HTTPS on source interface)',\n SiteConfig.ValidateOptionalPath(self.args.ansible_path),\n None],\n ['securedrop_app_gpg_fingerprint', '', str,\n u'Full fingerprint for the SecureDrop Application GPG Key',\n SiteConfig.ValidateFingerprint(),\n self.sanitize_fingerprint],\n ['ossec_alert_gpg_public_key', 'ossec.pub', str,\n u'Local filepath to OSSEC alerts GPG public key',\n SiteConfig.ValidatePath(self.args.ansible_path),\n None],\n ['ossec_gpg_fpr', '', str,\n u'Full fingerprint for the OSSEC alerts GPG public key',\n SiteConfig.ValidateFingerprint(),\n self.sanitize_fingerprint],\n ['ossec_alert_email', '', str,\n u'Admin email address for receiving OSSEC alerts',\n SiteConfig.ValidateOSSECEmail(),\n None],\n ['journalist_alert_gpg_public_key', '', str,\n u'Local filepath to journalist alerts GPG public key (optional)',\n SiteConfig.ValidateOptionalPath(self.args.ansible_path),\n None],\n ['journalist_gpg_fpr', '', str,\n u'Full fingerprint for the journalist alerts '\n u'GPG public key (optional)',\n SiteConfig.ValidateOptionalFingerprint(),\n self.sanitize_fingerprint],\n ['journalist_alert_email', '', str,\n u'Email address for receiving journalist alerts (optional)',\n SiteConfig.ValidateOptionalEmail(),\n None],\n ['smtp_relay', \"smtp.gmail.com\", str,\n u'SMTP relay for sending OSSEC alerts',\n SiteConfig.ValidateNotEmpty(),\n None],\n ['smtp_relay_port', 587, int,\n u'SMTP port for sending OSSEC alerts',\n SiteConfig.ValidateInt(),\n int],\n ['sasl_domain', \"gmail.com\", str,\n u'SASL domain for sending OSSEC alerts',\n None,\n None],\n ['sasl_username', '', str,\n u'SASL username for sending OSSEC alerts',\n SiteConfig.ValidateOSSECUsername(),\n None],\n ['sasl_password', '', str,\n u'SASL password for sending OSSEC alerts',\n SiteConfig.ValidateOSSECPassword(),\n None],\n ['enable_ssh_over_tor', True, bool,\n u'Enable SSH over Tor (recommended, disables SSH over LAN). '\n u'If you respond no, SSH will be available over LAN only',\n SiteConfig.ValidateYesNo(),\n lambda x: x.lower() == 'yes'],\n ['securedrop_supported_locales', [], types.ListType,\n u'Space separated list of additional locales to support '\n '(' + translations + ')',\n SiteConfig.ValidateLocales(self.args.app_path),\n string.split],\n ]\n\n def load_and_update_config(self):\n if self.exists():\n self.config = self.load()\n\n return self.update_config()\n\n def update_config(self):\n self.config.update(self.user_prompt_config())\n self.save()\n self.validate_gpg_keys()\n self.validate_journalist_alert_email()\n return True\n\n def user_prompt_config(self):\n config = {}\n for desc in self.desc:\n (var, default, type, prompt, validator, transform) = desc\n if var == 'journalist_gpg_fpr':\n if not config.get('journalist_alert_gpg_public_key',\n None):\n config[var] = ''\n continue\n if var == 'journalist_alert_email':\n if not config.get('journalist_alert_gpg_public_key',\n None):\n config[var] = ''\n continue\n config[var] = self.user_prompt_config_one(desc,\n self.config.get(var))\n return config\n\n def user_prompt_config_one(self, desc, from_config):\n (var, default, type, prompt, validator, transform) = desc\n if from_config is not None:\n default = from_config\n prompt += ': '\n return self.validated_input(prompt, default, validator, transform)\n\n def validated_input(self, prompt, default, validator, transform):\n if type(default) is bool:\n default = default and 'yes' or 'no'\n if type(default) is int:\n default = str(default)\n if isinstance(default, types.ListType):\n default = \" \".join(default)\n if type(default) is not str:\n default = str(default)\n kwargs = {}\n if validator:\n kwargs['validator'] = validator\n value = prompt_toolkit.prompt(prompt,\n default=unicode(default, 'utf-8'),\n **kwargs)\n if transform:\n return transform(value)\n else:\n return value\n\n def sanitize_fingerprint(self, value):\n return value.upper().replace(' ', '')\n\n def validate_gpg_keys(self):\n keys = (('securedrop_app_gpg_public_key',\n 'securedrop_app_gpg_fingerprint'),\n\n ('ossec_alert_gpg_public_key',\n 'ossec_gpg_fpr'),\n\n ('journalist_alert_gpg_public_key',\n 'journalist_gpg_fpr'))\n validate = os.path.join(\n os.path.dirname(__file__), '..', 'bin',\n 'validate-gpg-key.sh')\n for (public_key, fingerprint) in keys:\n if (self.config[public_key] == '' and\n self.config[fingerprint] == ''):\n continue\n public_key = os.path.join(self.args.ansible_path,\n self.config[public_key])\n fingerprint = self.config[fingerprint]\n try:\n sdlog.debug(subprocess.check_output(\n [validate, public_key, fingerprint],\n stderr=subprocess.STDOUT))\n except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n raise FingerprintException(\n \"fingerprint {} \".format(fingerprint) +\n \"does not match \" +\n \"the public key {}\".format(public_key))\n return True\n\n def validate_journalist_alert_email(self):\n if (self.config['journalist_alert_gpg_public_key'] == '' and\n self.config['journalist_gpg_fpr'] == ''):\n return True\n\n class Document(object):\n def __init__(self, text):\n self.text = text\n\n try:\n SiteConfig.ValidateEmail().validate(Document(\n self.config['journalist_alert_email']))\n except ValidationError as e:\n raise JournalistAlertEmailException(\n \"journalist alerts email: \" + e.message)\n return True\n\n def exists(self):\n return os.path.exists(self.args.site_config)\n\n def save(self):\n with io.open(self.args.site_config, 'w') as site_config_file:\n yaml.safe_dump(self.config,\n site_config_file,\n default_flow_style=False)\n\n def load(self):\n try:\n with io.open(self.args.site_config) as site_config_file:\n return yaml.safe_load(site_config_file)\n except IOError:\n sdlog.error(\"Config file missing, re-run with sdconfig\")\n raise\n except yaml.YAMLError:\n sdlog.error(\"There was an issue processing {}\".format(\n self.args.site_config))\n raise\n\n\ndef setup_logger(verbose=False):\n \"\"\" Configure logging handler \"\"\"\n # Set default level on parent\n sdlog.setLevel(logging.DEBUG)\n level = logging.DEBUG if verbose else logging.INFO\n\n stdout = logging.StreamHandler(sys.stdout)\n stdout.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))\n stdout.setLevel(level)\n sdlog.addHandler(stdout)\n\n\ndef sdconfig(args):\n \"\"\"Configure SD site settings\"\"\"\n SiteConfig(args).load_and_update_config()\n\n\ndef install_securedrop(args):\n \"\"\"Install/Update SecureDrop\"\"\"\n SiteConfig(args).load()\n\n sdlog.info(\"Now installing SecureDrop on remote servers.\")\n sdlog.info(\"You will be prompted for the sudo password on the \"\n \"servers.\")\n sdlog.info(\"The sudo password is only necessary during initial \"\n \"installation.\")\n return subprocess.check_call([os.path.join(args.ansible_path,\n 'securedrop-prod.yml'), '--ask-become-pass'],\n cwd=args.ansible_path)\n\n\ndef backup_securedrop(args):\n \"\"\"Perform backup of the SecureDrop Application Server.\n Creates a tarball of submissions and server config, and fetches\n back to the Admin Workstation. Future `restore` actions can be performed\n with the backup tarball.\"\"\"\n sdlog.info(\"Backing up the SecureDrop Application Server\")\n ansible_cmd = [\n 'ansible-playbook',\n os.path.join(args.ansible_path, 'securedrop-backup.yml'),\n ]\n return subprocess.check_call(ansible_cmd, cwd=args.ansible_path)\n\n\ndef restore_securedrop(args):\n \"\"\"Perform restore of the SecureDrop Application Server.\n Requires a tarball of submissions and server config, created via\n the `backup` action.\"\"\"\n sdlog.info(\"Restoring the SecureDrop Application Server from backup\")\n # Canonicalize filepath to backup tarball, so Ansible sees only the\n # basename. The files must live in args.ansible_path,\n # but the securedrop-admin\n # script will be invoked from the repo root, so preceding dirs are likely.\n restore_file_basename = os.path.basename(args.restore_file)\n ansible_cmd = [\n 'ansible-playbook',\n os.path.join(args.ansible_path, 'securedrop-restore.yml'),\n '-e',\n \"restore_file='{}'\".format(restore_file_basename),\n ]\n return subprocess.check_call(ansible_cmd, cwd=args.ansible_path)\n\n\ndef run_tails_config(args):\n \"\"\"Configure Tails environment post SD install\"\"\"\n sdlog.info(\"Configuring Tails workstation environment\")\n sdlog.info((\"You'll be prompted for the temporary Tails admin password,\"\n \" which was set on Tails login screen\"))\n ansible_cmd = [\n os.path.join(args.ansible_path, 'securedrop-tails.yml'),\n \"--ask-become-pass\",\n # Passing an empty inventory file to override the automatic dynamic\n # inventory script, which fails if no site vars are configured.\n '-i', '/dev/null',\n ]\n return subprocess.check_call(ansible_cmd,\n cwd=args.ansible_path)\n\n\ndef check_for_updates_wrapper(args):\n res, tag = check_for_updates(args)\n # Because the command worked properly exit with 0.\n return 0\n\n\ndef check_for_updates(args):\n \"\"\"Check for SecureDrop updates\"\"\"\n sdlog.info(\"Checking for SecureDrop updates...\")\n\n # Determine what branch we are on\n current_tag = subprocess.check_output(['git', 'describe'], cwd=args.root)\n\n # Fetch all branches\n git_fetch_cmd = ['git', 'fetch', '--all']\n subprocess.check_call(git_fetch_cmd, cwd=args.root)\n\n # Get latest tag\n git_all_tags = [\"git\", \"tag\"]\n all_tags = subprocess.check_output(git_all_tags,\n cwd=args.root).rstrip('\\n').split('\\n')\n\n # Do not check out any release candidate tags\n all_prod_tags = [x for x in all_tags if 'rc' not in x]\n\n latest_tag = all_prod_tags[-1]\n\n if current_tag != latest_tag:\n sdlog.info(\"Update needed\")\n return True, latest_tag\n sdlog.info(\"All updates applied\")\n return False, latest_tag\n\n\ndef get_release_key_from_keyserver(args, keyserver=None, timeout=45):\n gpg_recv = ['timeout', str(timeout), 'gpg', '--recv-key']\n release_key = [RELEASE_KEY]\n\n # We construct the gpg --recv-key command based on optional keyserver arg.\n if keyserver:\n get_key_cmd = gpg_recv + ['--keyserver', keyserver] + release_key\n else:\n get_key_cmd = gpg_recv + release_key\n\n subprocess.check_call(get_key_cmd, cwd=args.root)\n\n\ndef update(args):\n \"\"\"Verify, and apply latest SecureDrop workstation update\"\"\"\n sdlog.info(\"Applying SecureDrop updates...\")\n\n update_status, latest_tag = check_for_updates(args)\n\n if not update_status:\n # Exit if we're up to date\n return 0\n\n sdlog.info(\"Verifying signature on latest update...\")\n\n try:\n # First try to get the release key using Tails default keyserver\n get_release_key_from_keyserver(args)\n except subprocess.CalledProcessError:\n # Now try to get the key from a secondary keyserver.\n secondary_keyserver = 'hkps://hkps.pool.sks-keyservers.net'\n get_release_key_from_keyserver(args,\n keyserver=secondary_keyserver)\n\n git_verify_tag_cmd = ['git', 'tag', '-v', latest_tag]\n try:\n sig_result = subprocess.check_output(git_verify_tag_cmd,\n stderr=subprocess.STDOUT,\n cwd=args.root)\n\n good_sig_text = 'Good signature from \"SecureDrop Release Signing Key\"'\n bad_sig_text = 'BAD signature'\n # To ensure that an adversary cannot name a malicious key good_sig_text\n # we check that bad_sig_text does not appear and that the release key\n # appears on the second line of the output.\n gpg_lines = sig_result.split('\\n')\n if RELEASE_KEY in gpg_lines[1] and \\\n sig_result.count(good_sig_text) == 1 and \\\n bad_sig_text not in sig_result:\n sdlog.info(\"Signature verification successful.\")\n else: # If anything else happens, fail and exit 1\n sdlog.info(\"Signature verification failed.\")\n return 1\n\n except subprocess.CalledProcessError:\n # If there is no signature, or if the signature does not verify,\n # then git tag -v exits subprocess.check_output will exit 1\n # and subprocess.check_output will throw a CalledProcessError\n sdlog.info(\"Signature verification failed.\")\n return 1\n\n # Only if the proper signature verifies do we check out the latest\n git_checkout_cmd = ['git', 'checkout', latest_tag]\n subprocess.check_call(git_checkout_cmd, cwd=args.root)\n\n sdlog.info(\"Updated to SecureDrop {}.\".format(latest_tag))\n return 0\n\n\ndef get_logs(args):\n \"\"\"Get logs for forensics and debugging purposes\"\"\"\n sdlog.info(\"Gathering logs for forensics and debugging\")\n ansible_cmd = [\n 'ansible-playbook',\n os.path.join(args.ansible_path, 'securedrop-logs.yml'),\n ]\n subprocess.check_call(ansible_cmd, cwd=args.ansible_path)\n sdlog.info(\"Encrypt logs and send to [email protected] or upload \"\n \"to the SecureDrop support portal.\")\n return 0\n\n\ndef set_default_paths(args):\n if not args.ansible_path:\n args.ansible_path = args.root + \"/install_files/ansible-base\"\n args.ansible_path = os.path.realpath(args.ansible_path)\n if not args.site_config:\n args.site_config = args.ansible_path + \"/group_vars/all/site-specific\"\n args.site_config = os.path.realpath(args.site_config)\n if not args.app_path:\n args.app_path = args.root + \"/securedrop\"\n args.app_path = os.path.realpath(args.app_path)\n return args\n\n\ndef parse_argv(argv):\n class ArgParseFormatterCombo(argparse.ArgumentDefaultsHelpFormatter,\n argparse.RawTextHelpFormatter):\n \"\"\"Needed to combine formatting classes for help output\"\"\"\n pass\n\n parser = argparse.ArgumentParser(description=__doc__,\n formatter_class=ArgParseFormatterCombo)\n parser.add_argument('-v', action='store_true', default=False,\n help=\"Increase verbosity on output\")\n parser.add_argument('-d', action='store_true', default=False,\n help=\"Developer mode. Not to be used in production.\")\n parser.add_argument('--root', required=True,\n help=\"path to the root of the SecureDrop repository\")\n parser.add_argument('--site-config',\n help=\"path to the YAML site configuration file\")\n parser.add_argument('--ansible-path',\n help=\"path to the Ansible root\")\n parser.add_argument('--app-path',\n help=\"path to the SecureDrop application root\")\n subparsers = parser.add_subparsers()\n\n parse_sdconfig = subparsers.add_parser('sdconfig', help=sdconfig.__doc__)\n parse_sdconfig.set_defaults(func=sdconfig)\n\n parse_install = subparsers.add_parser('install',\n help=install_securedrop.__doc__)\n parse_install.set_defaults(func=install_securedrop)\n\n parse_tailsconfig = subparsers.add_parser('tailsconfig',\n help=run_tails_config.__doc__)\n parse_tailsconfig.set_defaults(func=run_tails_config)\n\n parse_backup = subparsers.add_parser('backup',\n help=backup_securedrop.__doc__)\n parse_backup.set_defaults(func=backup_securedrop)\n\n parse_restore = subparsers.add_parser('restore',\n help=restore_securedrop.__doc__)\n parse_restore.set_defaults(func=restore_securedrop)\n parse_restore.add_argument(\"restore_file\")\n\n parse_update = subparsers.add_parser('update', help=update.__doc__)\n parse_update.set_defaults(func=update)\n\n parse_check_updates = subparsers.add_parser('check_for_updates',\n help=check_for_updates.__doc__)\n parse_check_updates.set_defaults(func=check_for_updates_wrapper)\n\n parse_logs = subparsers.add_parser('logs',\n help=get_logs.__doc__)\n parse_logs.set_defaults(func=get_logs)\n\n return set_default_paths(parser.parse_args(argv))\n\n\ndef main(argv):\n args = parse_argv(argv)\n setup_logger(args.v)\n if args.v:\n return_code = args.func(args)\n sys.exit(return_code)\n else:\n try:\n return_code = args.func(args)\n except KeyboardInterrupt:\n sys.exit(-1)\n except Exception as e:\n raise SystemExit(\n 'ERROR (run with -v for more): {msg}'.format(msg=e))\n else:\n sys.exit(return_code)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n", "path": "admin/securedrop_admin/__init__.py"}], "after_files": [{"content": "# -*- mode: python; coding: utf-8 -*-\n#\n# Copyright (C) 2013-2018 Freedom of the Press Foundation & al\n# Copyright (C) 2018 Loic Dachary <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\"\"\"\nSecureDrop Admin Toolkit.\n\nFor use by administrators to install, maintain, and manage their SD\ninstances.\n\"\"\"\n\nimport argparse\nimport logging\nimport os\nimport io\nimport re\nimport string\nimport subprocess\nimport sys\nimport types\nimport prompt_toolkit\nfrom prompt_toolkit.validation import Validator, ValidationError\nimport yaml\n\nsdlog = logging.getLogger(__name__)\nRELEASE_KEY = '22245C81E3BAEB4138B36061310F561200F4AD77'\n\n\nclass FingerprintException(Exception):\n pass\n\n\nclass JournalistAlertEmailException(Exception):\n pass\n\n\nclass SiteConfig(object):\n\n class ValidateNotEmpty(Validator):\n def validate(self, document):\n if document.text != '':\n return True\n raise ValidationError(\n message=\"Must not be an empty string\")\n\n class ValidateTime(Validator):\n def validate(self, document):\n if document.text.isdigit() and int(document.text) in range(0, 24):\n return True\n raise ValidationError(\n message=\"Must be an integer between 0 and 23\")\n\n class ValidateUser(Validator):\n def validate(self, document):\n text = document.text\n if text != '' and text != 'root' and text != 'amnesia':\n return True\n raise ValidationError(\n message=\"Must not be root, amnesia or an empty string\")\n\n class ValidateIP(Validator):\n def validate(self, document):\n if re.match('((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\\.|$)){4}$',\n document.text):\n return True\n raise ValidationError(\n message=\"An IP address must be something like 10.240.20.83\")\n\n class ValidateDNS(Validator):\n def validate(self):\n raise Exception() # pragma: no cover\n\n def is_tails(self):\n try:\n id = subprocess.check_output('lsb_release --id --short',\n shell=True).strip()\n except subprocess.CalledProcessError:\n id = None\n return id == 'Tails'\n\n def lookup_fqdn(self, fqdn, dns=None):\n cmd = 'host -W=10 -T -4 ' + fqdn\n if self.is_tails():\n cmd = 'torify ' + cmd\n cmd += ' ' + (dns and dns or '8.8.8.8')\n try:\n result = subprocess.check_output(cmd.split(' '),\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n result = e.output\n sdlog.debug(cmd + ' => ' + result)\n return 'has address' in result\n\n class ValidateDNSServer(ValidateDNS):\n def validate(self, document):\n if self.lookup_fqdn('gnu.org', document.text):\n return True\n raise ValidationError(\n message='Unable to resolve gnu.org using this DNS')\n\n class ValidateFQDN(ValidateDNS):\n def validate(self, document):\n if self.lookup_fqdn(document.text):\n return True\n raise ValidationError(\n message='Unable to resolve ' + document.text)\n\n class ValidatePath(Validator):\n def __init__(self, basedir):\n self.basedir = basedir\n super(SiteConfig.ValidatePath, self).__init__()\n\n def validate(self, document):\n if document.text == '':\n raise ValidationError(\n message='an existing file name is required')\n path = os.path.join(self.basedir, document.text)\n if os.path.exists(path):\n return True\n raise ValidationError(\n message=path + ' file does not exist')\n\n class ValidateOptionalPath(ValidatePath):\n def validate(self, document):\n if document.text == '':\n return True\n return super(SiteConfig.ValidateOptionalPath, self).validate(\n document)\n\n class ValidateYesNo(Validator):\n def validate(self, document):\n text = document.text.lower()\n if text == 'yes' or text == 'no':\n return True\n raise ValidationError(message=\"Must be either yes or no\")\n\n class ValidateFingerprint(Validator):\n def validate(self, document):\n text = document.text.replace(' ', '')\n if text == '65A1B5FF195B56353CC63DFFCC40EF1228271441':\n raise ValidationError(\n message='This is the TEST journalist fingerprint')\n if text == '600BC6D5142C68F35DDBCEA87B597104EDDDC102':\n raise ValidationError(\n message='This is the TEST admin fingerprint')\n if not re.match('[a-fA-F0-9]{40}$', text):\n raise ValidationError(\n message='fingerprints must be 40 hexadecimal characters')\n return True\n\n class ValidateOptionalFingerprint(ValidateFingerprint):\n def validate(self, document):\n if document.text == '':\n return True\n return super(SiteConfig.ValidateOptionalFingerprint,\n self).validate(document)\n\n class ValidateInt(Validator):\n def validate(self, document):\n if re.match('\\d+$', document.text):\n return True\n raise ValidationError(message=\"Must be an integer\")\n\n class Locales(object):\n def __init__(self, appdir):\n self.translation_dir = os.path.realpath(\n os.path.join(appdir, 'translations'))\n\n def get_translations(self):\n translations = set(['en_US'])\n for dirname in os.listdir(self.translation_dir):\n if dirname != 'messages.pot':\n translations.add(dirname)\n return translations\n\n class ValidateLocales(Validator):\n def __init__(self, basedir):\n self.basedir = basedir\n super(SiteConfig.ValidateLocales, self).__init__()\n\n def validate(self, document):\n desired = document.text.split()\n existing = SiteConfig.Locales(self.basedir).get_translations()\n missing = set(desired) - set(existing)\n if not missing:\n return True\n raise ValidationError(\n message=\"The following locales do not exist \" + \" \".join(\n missing))\n\n class ValidateOSSECUsername(Validator):\n def validate(self, document):\n text = document.text\n if text and '@' not in text and 'test' != text:\n return True\n raise ValidationError(\n message=\"The SASL username should not include the domain name\")\n\n class ValidateOSSECPassword(Validator):\n def validate(self, document):\n text = document.text\n if len(text) >= 8 and 'password123' != text:\n return True\n raise ValidationError(\n message=\"Password for OSSEC email account must be strong\")\n\n class ValidateEmail(Validator):\n def validate(self, document):\n text = document.text\n if text == '':\n raise ValidationError(\n message=(\"Must not be empty\"))\n if '@' not in text:\n raise ValidationError(\n message=(\"Must contain a @\"))\n return True\n\n class ValidateOSSECEmail(ValidateEmail):\n def validate(self, document):\n super(SiteConfig.ValidateOSSECEmail, self).validate(document)\n text = document.text\n if '[email protected]' != text:\n return True\n raise ValidationError(\n message=(\"Must be set to something other than \"\n \"[email protected]\"))\n\n class ValidateOptionalEmail(ValidateEmail):\n def validate(self, document):\n if document.text == '':\n return True\n return super(SiteConfig.ValidateOptionalEmail, self).validate(\n document)\n\n def __init__(self, args):\n self.args = args\n self.config = {}\n translations = SiteConfig.Locales(\n self.args.app_path).get_translations()\n translations = \" \".join(translations)\n self.desc = [\n ['ssh_users', 'sd', str,\n u'Username for SSH access to the servers',\n SiteConfig.ValidateUser(),\n None],\n ['daily_reboot_time', 4, int,\n u'Daily reboot time of the server (24-hour clock)',\n SiteConfig.ValidateTime(),\n int],\n ['app_ip', '10.20.2.2', str,\n u'Local IPv4 address for the Application Server',\n SiteConfig.ValidateIP(),\n None],\n ['monitor_ip', '10.20.3.2', str,\n u'Local IPv4 address for the Monitor Server',\n SiteConfig.ValidateIP(),\n None],\n ['app_hostname', 'app', str,\n u'Hostname for Application Server',\n SiteConfig.ValidateNotEmpty(),\n None],\n ['monitor_hostname', 'mon', str,\n u'Hostname for Monitor Server',\n SiteConfig.ValidateNotEmpty(),\n None],\n ['dns_server', '8.8.8.8', str,\n u'DNS server specified during installation',\n SiteConfig.ValidateNotEmpty(),\n None],\n ['securedrop_app_gpg_public_key', 'SecureDrop.asc', str,\n u'Local filepath to public key for '\n 'SecureDrop Application GPG public key',\n SiteConfig.ValidatePath(self.args.ansible_path),\n None],\n ['securedrop_app_https_on_source_interface', False, bool,\n u'Whether HTTPS should be enabled on '\n 'Source Interface (requires EV cert)',\n SiteConfig.ValidateYesNo(),\n lambda x: x.lower() == 'yes'],\n ['securedrop_app_https_certificate_cert_src', '', str,\n u'Local filepath to HTTPS certificate '\n '(optional, only if using HTTPS on source interface)',\n SiteConfig.ValidateOptionalPath(self.args.ansible_path),\n None],\n ['securedrop_app_https_certificate_key_src', '', str,\n u'Local filepath to HTTPS certificate key '\n '(optional, only if using HTTPS on source interface)',\n SiteConfig.ValidateOptionalPath(self.args.ansible_path),\n None],\n ['securedrop_app_https_certificate_chain_src', '', str,\n u'Local filepath to HTTPS certificate chain file '\n '(optional, only if using HTTPS on source interface)',\n SiteConfig.ValidateOptionalPath(self.args.ansible_path),\n None],\n ['securedrop_app_gpg_fingerprint', '', str,\n u'Full fingerprint for the SecureDrop Application GPG Key',\n SiteConfig.ValidateFingerprint(),\n self.sanitize_fingerprint],\n ['ossec_alert_gpg_public_key', 'ossec.pub', str,\n u'Local filepath to OSSEC alerts GPG public key',\n SiteConfig.ValidatePath(self.args.ansible_path),\n None],\n ['ossec_gpg_fpr', '', str,\n u'Full fingerprint for the OSSEC alerts GPG public key',\n SiteConfig.ValidateFingerprint(),\n self.sanitize_fingerprint],\n ['ossec_alert_email', '', str,\n u'Admin email address for receiving OSSEC alerts',\n SiteConfig.ValidateOSSECEmail(),\n None],\n ['journalist_alert_gpg_public_key', '', str,\n u'Local filepath to journalist alerts GPG public key (optional)',\n SiteConfig.ValidateOptionalPath(self.args.ansible_path),\n None],\n ['journalist_gpg_fpr', '', str,\n u'Full fingerprint for the journalist alerts '\n u'GPG public key (optional)',\n SiteConfig.ValidateOptionalFingerprint(),\n self.sanitize_fingerprint],\n ['journalist_alert_email', '', str,\n u'Email address for receiving journalist alerts (optional)',\n SiteConfig.ValidateOptionalEmail(),\n None],\n ['smtp_relay', \"smtp.gmail.com\", str,\n u'SMTP relay for sending OSSEC alerts',\n SiteConfig.ValidateNotEmpty(),\n None],\n ['smtp_relay_port', 587, int,\n u'SMTP port for sending OSSEC alerts',\n SiteConfig.ValidateInt(),\n int],\n ['sasl_domain', \"gmail.com\", str,\n u'SASL domain for sending OSSEC alerts',\n None,\n None],\n ['sasl_username', '', str,\n u'SASL username for sending OSSEC alerts',\n SiteConfig.ValidateOSSECUsername(),\n None],\n ['sasl_password', '', str,\n u'SASL password for sending OSSEC alerts',\n SiteConfig.ValidateOSSECPassword(),\n None],\n ['enable_ssh_over_tor', True, bool,\n u'Enable SSH over Tor (recommended, disables SSH over LAN). '\n u'If you respond no, SSH will be available over LAN only',\n SiteConfig.ValidateYesNo(),\n lambda x: x.lower() == 'yes'],\n ['securedrop_supported_locales', [], types.ListType,\n u'Space separated list of additional locales to support '\n '(' + translations + ')',\n SiteConfig.ValidateLocales(self.args.app_path),\n string.split],\n ]\n\n def load_and_update_config(self):\n if self.exists():\n self.config = self.load()\n\n return self.update_config()\n\n def update_config(self):\n self.config.update(self.user_prompt_config())\n self.save()\n self.validate_gpg_keys()\n self.validate_journalist_alert_email()\n return True\n\n def user_prompt_config(self):\n config = {}\n for desc in self.desc:\n (var, default, type, prompt, validator, transform) = desc\n if var == 'journalist_gpg_fpr':\n if not config.get('journalist_alert_gpg_public_key',\n None):\n config[var] = ''\n continue\n if var == 'journalist_alert_email':\n if not config.get('journalist_alert_gpg_public_key',\n None):\n config[var] = ''\n continue\n config[var] = self.user_prompt_config_one(desc,\n self.config.get(var))\n return config\n\n def user_prompt_config_one(self, desc, from_config):\n (var, default, type, prompt, validator, transform) = desc\n if from_config is not None:\n default = from_config\n prompt += ': '\n return self.validated_input(prompt, default, validator, transform)\n\n def validated_input(self, prompt, default, validator, transform):\n if type(default) is bool:\n default = default and 'yes' or 'no'\n if type(default) is int:\n default = str(default)\n if isinstance(default, types.ListType):\n default = \" \".join(default)\n if type(default) is not str:\n default = str(default)\n kwargs = {}\n if validator:\n kwargs['validator'] = validator\n value = prompt_toolkit.prompt(prompt,\n default=unicode(default, 'utf-8'),\n **kwargs)\n if transform:\n return transform(value)\n else:\n return value\n\n def sanitize_fingerprint(self, value):\n return value.upper().replace(' ', '')\n\n def validate_gpg_keys(self):\n keys = (('securedrop_app_gpg_public_key',\n 'securedrop_app_gpg_fingerprint'),\n\n ('ossec_alert_gpg_public_key',\n 'ossec_gpg_fpr'),\n\n ('journalist_alert_gpg_public_key',\n 'journalist_gpg_fpr'))\n validate = os.path.join(\n os.path.dirname(__file__), '..', 'bin',\n 'validate-gpg-key.sh')\n for (public_key, fingerprint) in keys:\n if (self.config[public_key] == '' and\n self.config[fingerprint] == ''):\n continue\n public_key = os.path.join(self.args.ansible_path,\n self.config[public_key])\n fingerprint = self.config[fingerprint]\n try:\n sdlog.debug(subprocess.check_output(\n [validate, public_key, fingerprint],\n stderr=subprocess.STDOUT))\n except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n raise FingerprintException(\n \"fingerprint {} \".format(fingerprint) +\n \"does not match \" +\n \"the public key {}\".format(public_key))\n return True\n\n def validate_journalist_alert_email(self):\n if (self.config['journalist_alert_gpg_public_key'] == '' and\n self.config['journalist_gpg_fpr'] == ''):\n return True\n\n class Document(object):\n def __init__(self, text):\n self.text = text\n\n try:\n SiteConfig.ValidateEmail().validate(Document(\n self.config['journalist_alert_email']))\n except ValidationError as e:\n raise JournalistAlertEmailException(\n \"journalist alerts email: \" + e.message)\n return True\n\n def exists(self):\n return os.path.exists(self.args.site_config)\n\n def save(self):\n with io.open(self.args.site_config, 'w') as site_config_file:\n yaml.safe_dump(self.config,\n site_config_file,\n default_flow_style=False)\n\n def load(self):\n try:\n with io.open(self.args.site_config) as site_config_file:\n return yaml.safe_load(site_config_file)\n except IOError:\n sdlog.error(\"Config file missing, re-run with sdconfig\")\n raise\n except yaml.YAMLError:\n sdlog.error(\"There was an issue processing {}\".format(\n self.args.site_config))\n raise\n\n\ndef setup_logger(verbose=False):\n \"\"\" Configure logging handler \"\"\"\n # Set default level on parent\n sdlog.setLevel(logging.DEBUG)\n level = logging.DEBUG if verbose else logging.INFO\n\n stdout = logging.StreamHandler(sys.stdout)\n stdout.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))\n stdout.setLevel(level)\n sdlog.addHandler(stdout)\n\n\ndef sdconfig(args):\n \"\"\"Configure SD site settings\"\"\"\n SiteConfig(args).load_and_update_config()\n\n\ndef install_securedrop(args):\n \"\"\"Install/Update SecureDrop\"\"\"\n SiteConfig(args).load()\n\n sdlog.info(\"Now installing SecureDrop on remote servers.\")\n sdlog.info(\"You will be prompted for the sudo password on the \"\n \"servers.\")\n sdlog.info(\"The sudo password is only necessary during initial \"\n \"installation.\")\n return subprocess.check_call([os.path.join(args.ansible_path,\n 'securedrop-prod.yml'), '--ask-become-pass'],\n cwd=args.ansible_path)\n\n\ndef backup_securedrop(args):\n \"\"\"Perform backup of the SecureDrop Application Server.\n Creates a tarball of submissions and server config, and fetches\n back to the Admin Workstation. Future `restore` actions can be performed\n with the backup tarball.\"\"\"\n sdlog.info(\"Backing up the SecureDrop Application Server\")\n ansible_cmd = [\n 'ansible-playbook',\n os.path.join(args.ansible_path, 'securedrop-backup.yml'),\n ]\n return subprocess.check_call(ansible_cmd, cwd=args.ansible_path)\n\n\ndef restore_securedrop(args):\n \"\"\"Perform restore of the SecureDrop Application Server.\n Requires a tarball of submissions and server config, created via\n the `backup` action.\"\"\"\n sdlog.info(\"Restoring the SecureDrop Application Server from backup\")\n # Canonicalize filepath to backup tarball, so Ansible sees only the\n # basename. The files must live in args.ansible_path,\n # but the securedrop-admin\n # script will be invoked from the repo root, so preceding dirs are likely.\n restore_file_basename = os.path.basename(args.restore_file)\n ansible_cmd = [\n 'ansible-playbook',\n os.path.join(args.ansible_path, 'securedrop-restore.yml'),\n '-e',\n \"restore_file='{}'\".format(restore_file_basename),\n ]\n return subprocess.check_call(ansible_cmd, cwd=args.ansible_path)\n\n\ndef run_tails_config(args):\n \"\"\"Configure Tails environment post SD install\"\"\"\n sdlog.info(\"Configuring Tails workstation environment\")\n sdlog.info((\"You'll be prompted for the temporary Tails admin password,\"\n \" which was set on Tails login screen\"))\n ansible_cmd = [\n os.path.join(args.ansible_path, 'securedrop-tails.yml'),\n \"--ask-become-pass\",\n # Passing an empty inventory file to override the automatic dynamic\n # inventory script, which fails if no site vars are configured.\n '-i', '/dev/null',\n ]\n return subprocess.check_call(ansible_cmd,\n cwd=args.ansible_path)\n\n\ndef check_for_updates_wrapper(args):\n res, tag = check_for_updates(args)\n # Because the command worked properly exit with 0.\n return 0\n\n\ndef check_for_updates(args):\n \"\"\"Check for SecureDrop updates\"\"\"\n sdlog.info(\"Checking for SecureDrop updates...\")\n\n # Determine what branch we are on\n current_tag = subprocess.check_output(['git', 'describe'],\n cwd=args.root).rstrip('\\n')\n\n # Fetch all branches\n git_fetch_cmd = ['git', 'fetch', '--all']\n subprocess.check_call(git_fetch_cmd, cwd=args.root)\n\n # Get latest tag\n git_all_tags = [\"git\", \"tag\"]\n all_tags = subprocess.check_output(git_all_tags,\n cwd=args.root).rstrip('\\n').split('\\n')\n\n # Do not check out any release candidate tags\n all_prod_tags = [x for x in all_tags if 'rc' not in x]\n\n latest_tag = all_prod_tags[-1]\n\n if current_tag != latest_tag:\n sdlog.info(\"Update needed\")\n return True, latest_tag\n sdlog.info(\"All updates applied\")\n return False, latest_tag\n\n\ndef get_release_key_from_keyserver(args, keyserver=None, timeout=45):\n gpg_recv = ['timeout', str(timeout), 'gpg', '--recv-key']\n release_key = [RELEASE_KEY]\n\n # We construct the gpg --recv-key command based on optional keyserver arg.\n if keyserver:\n get_key_cmd = gpg_recv + ['--keyserver', keyserver] + release_key\n else:\n get_key_cmd = gpg_recv + release_key\n\n subprocess.check_call(get_key_cmd, cwd=args.root)\n\n\ndef update(args):\n \"\"\"Verify, and apply latest SecureDrop workstation update\"\"\"\n sdlog.info(\"Applying SecureDrop updates...\")\n\n update_status, latest_tag = check_for_updates(args)\n\n if not update_status:\n # Exit if we're up to date\n return 0\n\n sdlog.info(\"Verifying signature on latest update...\")\n\n try:\n # First try to get the release key using Tails default keyserver\n get_release_key_from_keyserver(args)\n except subprocess.CalledProcessError:\n # Now try to get the key from a secondary keyserver.\n secondary_keyserver = 'hkps://hkps.pool.sks-keyservers.net'\n get_release_key_from_keyserver(args,\n keyserver=secondary_keyserver)\n\n git_verify_tag_cmd = ['git', 'tag', '-v', latest_tag]\n try:\n sig_result = subprocess.check_output(git_verify_tag_cmd,\n stderr=subprocess.STDOUT,\n cwd=args.root)\n\n good_sig_text = 'Good signature from \"SecureDrop Release Signing Key\"'\n bad_sig_text = 'BAD signature'\n # To ensure that an adversary cannot name a malicious key good_sig_text\n # we check that bad_sig_text does not appear and that the release key\n # appears on the second line of the output.\n gpg_lines = sig_result.split('\\n')\n if RELEASE_KEY in gpg_lines[1] and \\\n sig_result.count(good_sig_text) == 1 and \\\n bad_sig_text not in sig_result:\n sdlog.info(\"Signature verification successful.\")\n else: # If anything else happens, fail and exit 1\n sdlog.info(\"Signature verification failed.\")\n return 1\n\n except subprocess.CalledProcessError:\n # If there is no signature, or if the signature does not verify,\n # then git tag -v exits subprocess.check_output will exit 1\n # and subprocess.check_output will throw a CalledProcessError\n sdlog.info(\"Signature verification failed.\")\n return 1\n\n # Only if the proper signature verifies do we check out the latest\n git_checkout_cmd = ['git', 'checkout', latest_tag]\n subprocess.check_call(git_checkout_cmd, cwd=args.root)\n\n sdlog.info(\"Updated to SecureDrop {}.\".format(latest_tag))\n return 0\n\n\ndef get_logs(args):\n \"\"\"Get logs for forensics and debugging purposes\"\"\"\n sdlog.info(\"Gathering logs for forensics and debugging\")\n ansible_cmd = [\n 'ansible-playbook',\n os.path.join(args.ansible_path, 'securedrop-logs.yml'),\n ]\n subprocess.check_call(ansible_cmd, cwd=args.ansible_path)\n sdlog.info(\"Encrypt logs and send to [email protected] or upload \"\n \"to the SecureDrop support portal.\")\n return 0\n\n\ndef set_default_paths(args):\n if not args.ansible_path:\n args.ansible_path = args.root + \"/install_files/ansible-base\"\n args.ansible_path = os.path.realpath(args.ansible_path)\n if not args.site_config:\n args.site_config = args.ansible_path + \"/group_vars/all/site-specific\"\n args.site_config = os.path.realpath(args.site_config)\n if not args.app_path:\n args.app_path = args.root + \"/securedrop\"\n args.app_path = os.path.realpath(args.app_path)\n return args\n\n\ndef parse_argv(argv):\n class ArgParseFormatterCombo(argparse.ArgumentDefaultsHelpFormatter,\n argparse.RawTextHelpFormatter):\n \"\"\"Needed to combine formatting classes for help output\"\"\"\n pass\n\n parser = argparse.ArgumentParser(description=__doc__,\n formatter_class=ArgParseFormatterCombo)\n parser.add_argument('-v', action='store_true', default=False,\n help=\"Increase verbosity on output\")\n parser.add_argument('-d', action='store_true', default=False,\n help=\"Developer mode. Not to be used in production.\")\n parser.add_argument('--root', required=True,\n help=\"path to the root of the SecureDrop repository\")\n parser.add_argument('--site-config',\n help=\"path to the YAML site configuration file\")\n parser.add_argument('--ansible-path',\n help=\"path to the Ansible root\")\n parser.add_argument('--app-path',\n help=\"path to the SecureDrop application root\")\n subparsers = parser.add_subparsers()\n\n parse_sdconfig = subparsers.add_parser('sdconfig', help=sdconfig.__doc__)\n parse_sdconfig.set_defaults(func=sdconfig)\n\n parse_install = subparsers.add_parser('install',\n help=install_securedrop.__doc__)\n parse_install.set_defaults(func=install_securedrop)\n\n parse_tailsconfig = subparsers.add_parser('tailsconfig',\n help=run_tails_config.__doc__)\n parse_tailsconfig.set_defaults(func=run_tails_config)\n\n parse_backup = subparsers.add_parser('backup',\n help=backup_securedrop.__doc__)\n parse_backup.set_defaults(func=backup_securedrop)\n\n parse_restore = subparsers.add_parser('restore',\n help=restore_securedrop.__doc__)\n parse_restore.set_defaults(func=restore_securedrop)\n parse_restore.add_argument(\"restore_file\")\n\n parse_update = subparsers.add_parser('update', help=update.__doc__)\n parse_update.set_defaults(func=update)\n\n parse_check_updates = subparsers.add_parser('check_for_updates',\n help=check_for_updates.__doc__)\n parse_check_updates.set_defaults(func=check_for_updates_wrapper)\n\n parse_logs = subparsers.add_parser('logs',\n help=get_logs.__doc__)\n parse_logs.set_defaults(func=get_logs)\n\n return set_default_paths(parser.parse_args(argv))\n\n\ndef main(argv):\n args = parse_argv(argv)\n setup_logger(args.v)\n if args.v:\n return_code = args.func(args)\n sys.exit(return_code)\n else:\n try:\n return_code = args.func(args)\n except KeyboardInterrupt:\n sys.exit(-1)\n except Exception as e:\n raise SystemExit(\n 'ERROR (run with -v for more): {msg}'.format(msg=e))\n else:\n sys.exit(return_code)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n", "path": "admin/securedrop_admin/__init__.py"}]} |
gh_patches_debug_1583 | rasdani/github-patches | git_diff | vyperlang__vyper-2513 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
test fail with web3.py 5.21.0
### Version Information
* vyper Version (output of `vyper --version`): latest master (cff69d63)
* OS: macos
* Python Version (output of `python --version`): 3.9.6
### What's your issue about?
tests fail
tests/cli/vyper_json/test_parse_args_vyperjson.py::test_pretty_json - json.decoder.J...
FAILED tests/cli/vyper_json/test_parse_args_vyperjson.py::test_to_stdout - json.decoder.JSO...
FAILED tests/parser/features/test_assert.py::test_assert_reason[False] - AssertionError: as...
FAILED tests/parser/features/test_assert.py::test_assest_reason_revert
misses the string "execution reverted"
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import os
4 import subprocess
5
6 from setuptools import find_packages, setup
7
8 __version__ = "0.3.0"
9
10 extras_require = {
11 "test": [
12 "pytest>=5.4,<6.0",
13 "pytest-cov>=2.10,<3.0",
14 "pytest-instafail>=0.4,<1.0",
15 "pytest-xdist>=1.32,<2.0",
16 "eth-tester[py-evm]>=0.5.0b1,<0.6",
17 "py-evm==0.4.0a4", # NOTE: temporarily pinned until we have support for py-evm 0.5.0a0+
18 "web3==5.12.3",
19 "tox>=3.15,<4.0",
20 "lark-parser==0.10.0",
21 "hypothesis[lark]>=5.37.1,<6.0",
22 ],
23 "lint": [
24 "black==21.9b0",
25 "flake8==3.9.2",
26 "flake8-bugbear==20.1.4",
27 "flake8-use-fstring==1.1",
28 "isort==5.9.3",
29 "mypy==0.910",
30 ],
31 "docs": ["recommonmark", "sphinx>=3.0,<4.0", "sphinx_rtd_theme>=0.5,<0.6"],
32 "dev": ["ipython", "pre-commit", "pyinstaller", "twine"],
33 }
34
35 extras_require["dev"] = (
36 extras_require["test"] + extras_require["lint"] + extras_require["docs"] + extras_require["dev"]
37 )
38
39 hash_file_rel_path = os.path.join("vyper", "vyper_git_version.txt")
40 hashfile = os.path.relpath(hash_file_rel_path)
41
42 try:
43 commithash = subprocess.check_output("git rev-parse HEAD".split())
44 commithash_str = commithash.decode("utf-8").strip()
45 with open(hashfile, "w") as fh:
46 fh.write(f"{__version__}\n{commithash_str}")
47 except subprocess.CalledProcessError:
48 pass
49
50 with open("README.md", "r") as f:
51 long_description = f.read()
52
53 setup(
54 name="vyper",
55 version=__version__,
56 description="Vyper: the Pythonic Programming Language for the EVM",
57 long_description=long_description,
58 long_description_content_type="text/markdown",
59 author="Vyper Team",
60 author_email="",
61 url="https://github.com/vyperlang/vyper",
62 license="Apache License 2.0",
63 keywords="ethereum evm smart contract language",
64 include_package_data=True,
65 packages=find_packages(exclude=("tests", "docs")),
66 python_requires=">=3.7,<3.10",
67 py_modules=["vyper"],
68 install_requires=[
69 "asttokens==2.0.4",
70 "pycryptodome>=3.5.1,<4",
71 "semantic-version==2.8.5",
72 "cached-property==1.5.2 ; python_version<'3.8'",
73 ],
74 setup_requires=["pytest-runner"],
75 tests_require=extras_require["test"],
76 extras_require=extras_require,
77 entry_points={
78 "console_scripts": [
79 "vyper=vyper.cli.vyper_compile:_parse_cli_args",
80 "vyper-serve=vyper.cli.vyper_serve:_parse_cli_args",
81 "vyper-lll=vyper.cli.vyper_lll:_parse_cli_args",
82 "vyper-json=vyper.cli.vyper_json:_parse_cli_args",
83 ]
84 },
85 classifiers=[
86 "Intended Audience :: Developers",
87 "License :: OSI Approved :: Apache Software License",
88 "Programming Language :: Python :: 3.7",
89 "Programming Language :: Python :: 3.8",
90 "Programming Language :: Python :: 3.9",
91 ],
92 data_files=[("", [hash_file_rel_path])],
93 )
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -15,7 +15,7 @@
"pytest-xdist>=1.32,<2.0",
"eth-tester[py-evm]>=0.5.0b1,<0.6",
"py-evm==0.4.0a4", # NOTE: temporarily pinned until we have support for py-evm 0.5.0a0+
- "web3==5.12.3",
+ "web3==5.21.0",
"tox>=3.15,<4.0",
"lark-parser==0.10.0",
"hypothesis[lark]>=5.37.1,<6.0",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -15,7 +15,7 @@\n \"pytest-xdist>=1.32,<2.0\",\n \"eth-tester[py-evm]>=0.5.0b1,<0.6\",\n \"py-evm==0.4.0a4\", # NOTE: temporarily pinned until we have support for py-evm 0.5.0a0+\n- \"web3==5.12.3\",\n+ \"web3==5.21.0\",\n \"tox>=3.15,<4.0\",\n \"lark-parser==0.10.0\",\n \"hypothesis[lark]>=5.37.1,<6.0\",\n", "issue": "test fail with web3.py 5.21.0\n### Version Information\r\n\r\n* vyper Version (output of `vyper --version`): latest master (cff69d63)\r\n* OS: macos\r\n* Python Version (output of `python --version`): 3.9.6\r\n\r\n### What's your issue about?\r\n\r\ntests fail\r\n\r\ntests/cli/vyper_json/test_parse_args_vyperjson.py::test_pretty_json - json.decoder.J...\r\nFAILED tests/cli/vyper_json/test_parse_args_vyperjson.py::test_to_stdout - json.decoder.JSO...\r\nFAILED tests/parser/features/test_assert.py::test_assert_reason[False] - AssertionError: as...\r\nFAILED tests/parser/features/test_assert.py::test_assest_reason_revert\r\n\r\nmisses the string \"execution reverted\"\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport os\nimport subprocess\n\nfrom setuptools import find_packages, setup\n\n__version__ = \"0.3.0\"\n\nextras_require = {\n \"test\": [\n \"pytest>=5.4,<6.0\",\n \"pytest-cov>=2.10,<3.0\",\n \"pytest-instafail>=0.4,<1.0\",\n \"pytest-xdist>=1.32,<2.0\",\n \"eth-tester[py-evm]>=0.5.0b1,<0.6\",\n \"py-evm==0.4.0a4\", # NOTE: temporarily pinned until we have support for py-evm 0.5.0a0+\n \"web3==5.12.3\",\n \"tox>=3.15,<4.0\",\n \"lark-parser==0.10.0\",\n \"hypothesis[lark]>=5.37.1,<6.0\",\n ],\n \"lint\": [\n \"black==21.9b0\",\n \"flake8==3.9.2\",\n \"flake8-bugbear==20.1.4\",\n \"flake8-use-fstring==1.1\",\n \"isort==5.9.3\",\n \"mypy==0.910\",\n ],\n \"docs\": [\"recommonmark\", \"sphinx>=3.0,<4.0\", \"sphinx_rtd_theme>=0.5,<0.6\"],\n \"dev\": [\"ipython\", \"pre-commit\", \"pyinstaller\", \"twine\"],\n}\n\nextras_require[\"dev\"] = (\n extras_require[\"test\"] + extras_require[\"lint\"] + extras_require[\"docs\"] + extras_require[\"dev\"]\n)\n\nhash_file_rel_path = os.path.join(\"vyper\", \"vyper_git_version.txt\")\nhashfile = os.path.relpath(hash_file_rel_path)\n\ntry:\n commithash = subprocess.check_output(\"git rev-parse HEAD\".split())\n commithash_str = commithash.decode(\"utf-8\").strip()\n with open(hashfile, \"w\") as fh:\n fh.write(f\"{__version__}\\n{commithash_str}\")\nexcept subprocess.CalledProcessError:\n pass\n\nwith open(\"README.md\", \"r\") as f:\n long_description = f.read()\n\nsetup(\n name=\"vyper\",\n version=__version__,\n description=\"Vyper: the Pythonic Programming Language for the EVM\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Vyper Team\",\n author_email=\"\",\n url=\"https://github.com/vyperlang/vyper\",\n license=\"Apache License 2.0\",\n keywords=\"ethereum evm smart contract language\",\n include_package_data=True,\n packages=find_packages(exclude=(\"tests\", \"docs\")),\n python_requires=\">=3.7,<3.10\",\n py_modules=[\"vyper\"],\n install_requires=[\n \"asttokens==2.0.4\",\n \"pycryptodome>=3.5.1,<4\",\n \"semantic-version==2.8.5\",\n \"cached-property==1.5.2 ; python_version<'3.8'\",\n ],\n setup_requires=[\"pytest-runner\"],\n tests_require=extras_require[\"test\"],\n extras_require=extras_require,\n entry_points={\n \"console_scripts\": [\n \"vyper=vyper.cli.vyper_compile:_parse_cli_args\",\n \"vyper-serve=vyper.cli.vyper_serve:_parse_cli_args\",\n \"vyper-lll=vyper.cli.vyper_lll:_parse_cli_args\",\n \"vyper-json=vyper.cli.vyper_json:_parse_cli_args\",\n ]\n },\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n data_files=[(\"\", [hash_file_rel_path])],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport os\nimport subprocess\n\nfrom setuptools import find_packages, setup\n\n__version__ = \"0.3.0\"\n\nextras_require = {\n \"test\": [\n \"pytest>=5.4,<6.0\",\n \"pytest-cov>=2.10,<3.0\",\n \"pytest-instafail>=0.4,<1.0\",\n \"pytest-xdist>=1.32,<2.0\",\n \"eth-tester[py-evm]>=0.5.0b1,<0.6\",\n \"py-evm==0.4.0a4\", # NOTE: temporarily pinned until we have support for py-evm 0.5.0a0+\n \"web3==5.21.0\",\n \"tox>=3.15,<4.0\",\n \"lark-parser==0.10.0\",\n \"hypothesis[lark]>=5.37.1,<6.0\",\n ],\n \"lint\": [\n \"black==21.9b0\",\n \"flake8==3.9.2\",\n \"flake8-bugbear==20.1.4\",\n \"flake8-use-fstring==1.1\",\n \"isort==5.9.3\",\n \"mypy==0.910\",\n ],\n \"docs\": [\"recommonmark\", \"sphinx>=3.0,<4.0\", \"sphinx_rtd_theme>=0.5,<0.6\"],\n \"dev\": [\"ipython\", \"pre-commit\", \"pyinstaller\", \"twine\"],\n}\n\nextras_require[\"dev\"] = (\n extras_require[\"test\"] + extras_require[\"lint\"] + extras_require[\"docs\"] + extras_require[\"dev\"]\n)\n\nhash_file_rel_path = os.path.join(\"vyper\", \"vyper_git_version.txt\")\nhashfile = os.path.relpath(hash_file_rel_path)\n\ntry:\n commithash = subprocess.check_output(\"git rev-parse HEAD\".split())\n commithash_str = commithash.decode(\"utf-8\").strip()\n with open(hashfile, \"w\") as fh:\n fh.write(f\"{__version__}\\n{commithash_str}\")\nexcept subprocess.CalledProcessError:\n pass\n\nwith open(\"README.md\", \"r\") as f:\n long_description = f.read()\n\nsetup(\n name=\"vyper\",\n version=__version__,\n description=\"Vyper: the Pythonic Programming Language for the EVM\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Vyper Team\",\n author_email=\"\",\n url=\"https://github.com/vyperlang/vyper\",\n license=\"Apache License 2.0\",\n keywords=\"ethereum evm smart contract language\",\n include_package_data=True,\n packages=find_packages(exclude=(\"tests\", \"docs\")),\n python_requires=\">=3.7,<3.10\",\n py_modules=[\"vyper\"],\n install_requires=[\n \"asttokens==2.0.4\",\n \"pycryptodome>=3.5.1,<4\",\n \"semantic-version==2.8.5\",\n \"cached-property==1.5.2 ; python_version<'3.8'\",\n ],\n setup_requires=[\"pytest-runner\"],\n tests_require=extras_require[\"test\"],\n extras_require=extras_require,\n entry_points={\n \"console_scripts\": [\n \"vyper=vyper.cli.vyper_compile:_parse_cli_args\",\n \"vyper-serve=vyper.cli.vyper_serve:_parse_cli_args\",\n \"vyper-lll=vyper.cli.vyper_lll:_parse_cli_args\",\n \"vyper-json=vyper.cli.vyper_json:_parse_cli_args\",\n ]\n },\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n data_files=[(\"\", [hash_file_rel_path])],\n)\n", "path": "setup.py"}]} |
gh_patches_debug_1584 | rasdani/github-patches | git_diff | fossasia__open-event-server-5151 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
For the edit action button in admin/users the super admin should be allowed to make the users as an admin.
**Describe the bug**
<!-- A clear and concise description of what the bug is. -->
The patch for the users is working but is_admin is a readonly property even for the super user. Used the patch request for users and changed the value for is_admin from true to false. But there is no change after the request to the value of is_admin.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to v1/users/user_id
2. Send a patch request
3. Change the value of is_admin for some user from false to true.
4. See error

After the request

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/users.py`
Content:
```
1 import base64
2
3 from flask import Blueprint, request, jsonify, abort, make_response
4 from flask_jwt import current_identity as current_user
5 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
6
7 from app import get_settings
8 from app.api.bootstrap import api
9 from app.api.helpers.db import safe_query, get_count
10 from app.api.helpers.exceptions import ConflictException
11 from app.api.helpers.exceptions import ForbiddenException
12 from app.api.helpers.files import create_save_image_sizes, make_frontend_url
13 from app.api.helpers.mail import send_email_confirmation, send_email_change_user_email, send_email_with_action
14 from app.api.helpers.permission_manager import has_access
15 from app.api.helpers.permissions import is_user_itself
16 from app.api.helpers.utilities import get_serializer, str_generator
17 from app.api.schema.users import UserSchema, UserSchemaPublic
18 from app.models import db
19 from app.models.access_code import AccessCode
20 from app.models.discount_code import DiscountCode
21 from app.models.email_notification import EmailNotification
22 from app.models.event_invoice import EventInvoice
23 from app.models.feedback import Feedback
24 from app.models.mail import USER_REGISTER_WITH_PASSWORD
25 from app.models.notification import Notification
26 from app.models.session import Session
27 from app.models.speaker import Speaker
28 from app.models.ticket_holder import TicketHolder
29 from app.models.user import User
30 from app.models.users_events_role import UsersEventsRoles
31
32 user_misc_routes = Blueprint('user_misc', __name__, url_prefix='/v1')
33
34
35 class UserList(ResourceList):
36 """
37 List and create Users
38 """
39 def before_create_object(self, data, view_kwargs):
40 """
41 method to check if there is an existing user with same email which is received in data to create a new user
42 :param data:
43 :param view_kwargs:
44 :return:
45 """
46 if db.session.query(User.id).filter_by(email=data['email'], deleted_at=None).scalar() is not None:
47 raise ConflictException({'pointer': '/data/attributes/email'}, "Email already exists")
48
49 def after_create_object(self, user, data, view_kwargs):
50 """
51 method to send-
52 email notification
53 mail link for register verification
54 add image urls
55 :param user:
56 :param data:
57 :param view_kwargs:
58 :return:
59 """
60 s = get_serializer()
61 hash = str(base64.b64encode(str(s.dumps([user.email, str_generator()])).encode()), 'utf-8')
62 link = make_frontend_url('/email/verify'.format(id=user.id), {'token': hash})
63 send_email_with_action(user, USER_REGISTER_WITH_PASSWORD, app_name=get_settings()['app_name'],
64 email=user.email)
65 send_email_confirmation(user.email, link)
66
67 if data.get('original_image_url'):
68 uploaded_images = create_save_image_sizes(data['original_image_url'], 'user', user.id)
69 uploaded_images['small_image_url'] = uploaded_images['thumbnail_image_url']
70 del uploaded_images['large_image_url']
71 self.session.query(User).filter_by(id=user.id).update(uploaded_images)
72
73 decorators = (api.has_permission('is_admin', methods="GET"),)
74 schema = UserSchema
75 data_layer = {'session': db.session,
76 'model': User,
77 'methods': {
78 'before_create_object': before_create_object,
79 'after_create_object': after_create_object
80 }}
81
82
83 class UserDetail(ResourceDetail):
84 """
85 User detail by id
86 """
87 def before_get(self, args, kwargs):
88
89 if current_user.is_admin or current_user.is_super_admin or current_user:
90 self.schema = UserSchema
91 else:
92 self.schema = UserSchemaPublic
93
94 def before_get_object(self, view_kwargs):
95 """
96 before get method for user object
97 :param view_kwargs:
98 :return:
99 """
100 if view_kwargs.get('notification_id') is not None:
101 notification = safe_query(self, Notification, 'id', view_kwargs['notification_id'], 'notification_id')
102 if notification.user_id is not None:
103 view_kwargs['id'] = notification.user_id
104 else:
105 view_kwargs['id'] = None
106
107 if view_kwargs.get('feedback_id') is not None:
108 print(view_kwargs['feedback_id'])
109 feedback = safe_query(self, Feedback, 'id', view_kwargs['feedback_id'], 'feedback_id')
110 if feedback.user_id is not None:
111 view_kwargs['id'] = feedback.user_id
112 else:
113 view_kwargs['id'] = None
114
115 if view_kwargs.get('attendee_id') is not None:
116 attendee = safe_query(self, TicketHolder, 'id', view_kwargs['attendee_id'], 'attendee_id')
117 if attendee.user is not None:
118 if (not has_access('is_user_itself',
119 user_id=attendee.user.id) or not has_access('is_coorganizer',
120 event_id=attendee.event_id)):
121 raise ForbiddenException({'source': ''}, 'Access Forbidden')
122 view_kwargs['id'] = attendee.user.id
123 else:
124 view_kwargs['id'] = None
125
126 if view_kwargs.get('event_invoice_id') is not None:
127 event_invoice = safe_query(self, EventInvoice, 'id', view_kwargs['event_invoice_id'], 'event_invoice_id')
128 if event_invoice.user_id is not None:
129 view_kwargs['id'] = event_invoice.user_id
130 else:
131 view_kwargs['id'] = None
132
133 if view_kwargs.get('users_events_role_id') is not None:
134 users_events_role = safe_query(self, UsersEventsRoles, 'id', view_kwargs['users_events_role_id'],
135 'users_events_role_id')
136 if users_events_role.user_id is not None:
137 view_kwargs['id'] = users_events_role.user_id
138
139 if view_kwargs.get('speaker_id') is not None:
140 speaker = safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id')
141 if speaker.user_id is not None:
142 view_kwargs['id'] = speaker.user_id
143 else:
144 view_kwargs['id'] = None
145
146 if view_kwargs.get('session_id') is not None:
147 session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')
148 if session.creator_id is not None:
149 view_kwargs['id'] = session.creator_id
150 else:
151 view_kwargs['id'] = None
152
153 if view_kwargs.get('access_code_id') is not None:
154 access_code = safe_query(self, AccessCode, 'id', view_kwargs['access_code_id'], 'access_code_id')
155 if access_code.marketer_id is not None:
156 view_kwargs['id'] = access_code.marketer_id
157 else:
158 view_kwargs['id'] = None
159
160 if view_kwargs.get('discount_code_id') is not None:
161 discount_code = safe_query(self, DiscountCode, 'id', view_kwargs['discount_code_id'], 'discount_code_id')
162 if discount_code.marketer_id is not None:
163 view_kwargs['id'] = discount_code.marketer_id
164 else:
165 view_kwargs['id'] = None
166
167 if view_kwargs.get('email_notification_id') is not None:
168 email_notification = safe_query(self, EmailNotification, 'id', view_kwargs['email_notification_id'],
169 'email_notification_id')
170 if email_notification.user_id is not None:
171 view_kwargs['id'] = email_notification.user_id
172 else:
173 view_kwargs['id'] = None
174
175 def before_update_object(self, user, data, view_kwargs):
176 if data.get('original_image_url') and data['original_image_url'] != user.original_image_url:
177 uploaded_images = create_save_image_sizes(data['original_image_url'], 'user', user.id)
178 data['original_image_url'] = uploaded_images['original_image_url']
179 data['small_image_url'] = uploaded_images['thumbnail_image_url']
180 data['thumbnail_image_url'] = uploaded_images['thumbnail_image_url']
181 data['icon_image_url'] = uploaded_images['icon_image_url']
182
183 if data.get('email') and data['email'] != user.email:
184 view_kwargs['email_changed'] = user.email
185
186 if data.get('is_admin') != user.is_admin:
187 user.is_admin = not user.is_admin
188
189 def after_update_object(self, user, data, view_kwargs):
190 """
191 method to mail user about email change
192 :param user:
193 :param data:
194 :param view_kwargs:
195 :return:
196 """
197 if view_kwargs.get('email_changed'):
198 send_email_change_user_email(user, view_kwargs.get('email_changed'))
199
200 decorators = (api.has_permission('is_user_itself', fetch="user_id,id", fetch_as="user_id",
201 model=[Notification, Feedback, UsersEventsRoles, Session, EventInvoice, AccessCode,
202 DiscountCode, EmailNotification, Speaker, User],
203 fetch_key_url="notification_id, feedback_id, users_events_role_id, session_id, \
204 event_invoice_id, access_code_id, discount_code_id, email_notification_id, speaker_id, id",
205 leave_if=lambda a: a.get('attendee_id')), )
206 schema = UserSchema
207 data_layer = {'session': db.session,
208 'model': User,
209 'methods': {
210 'before_get_object': before_get_object,
211 'before_update_object': before_update_object,
212 'after_update_object': after_update_object
213 }}
214
215
216 class UserRelationship(ResourceRelationship):
217 """
218 User Relationship
219 """
220 decorators = (is_user_itself, )
221 schema = UserSchema
222 data_layer = {'session': db.session,
223 'model': User}
224
225
226 @user_misc_routes.route('/users/checkEmail', methods=['POST'])
227 def is_email_available():
228 email = request.json.get('email', None)
229 if email:
230 if get_count(db.session.query(User).filter_by(email=email)):
231 return jsonify(
232 result="False"
233 )
234 else:
235 return jsonify(
236 result="True"
237 )
238 else:
239 abort(
240 make_response(jsonify(error="Email field missing"), 422)
241 )
242
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/api/users.py b/app/api/users.py
--- a/app/api/users.py
+++ b/app/api/users.py
@@ -183,7 +183,7 @@
if data.get('email') and data['email'] != user.email:
view_kwargs['email_changed'] = user.email
- if data.get('is_admin') != user.is_admin:
+ if has_access('is_super_admin') and data.get('is_admin') != user.is_admin:
user.is_admin = not user.is_admin
def after_update_object(self, user, data, view_kwargs):
| {"golden_diff": "diff --git a/app/api/users.py b/app/api/users.py\n--- a/app/api/users.py\n+++ b/app/api/users.py\n@@ -183,7 +183,7 @@\n if data.get('email') and data['email'] != user.email:\n view_kwargs['email_changed'] = user.email\n \n- if data.get('is_admin') != user.is_admin:\n+ if has_access('is_super_admin') and data.get('is_admin') != user.is_admin:\n user.is_admin = not user.is_admin\n \n def after_update_object(self, user, data, view_kwargs):\n", "issue": "For the edit action button in admin/users the super admin should be allowed to make the users as an admin.\n**Describe the bug**\r\n<!-- A clear and concise description of what the bug is. -->\r\nThe patch for the users is working but is_admin is a readonly property even for the super user. Used the patch request for users and changed the value for is_admin from true to false. But there is no change after the request to the value of is_admin. \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to v1/users/user_id\r\n2. Send a patch request\r\n3. Change the value of is_admin for some user from false to true.\r\n4. See error\r\n\r\n\r\n\r\nAfter the request\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import base64\n\nfrom flask import Blueprint, request, jsonify, abort, make_response\nfrom flask_jwt import current_identity as current_user\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\n\nfrom app import get_settings\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query, get_count\nfrom app.api.helpers.exceptions import ConflictException\nfrom app.api.helpers.exceptions import ForbiddenException\nfrom app.api.helpers.files import create_save_image_sizes, make_frontend_url\nfrom app.api.helpers.mail import send_email_confirmation, send_email_change_user_email, send_email_with_action\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import is_user_itself\nfrom app.api.helpers.utilities import get_serializer, str_generator\nfrom app.api.schema.users import UserSchema, UserSchemaPublic\nfrom app.models import db\nfrom app.models.access_code import AccessCode\nfrom app.models.discount_code import DiscountCode\nfrom app.models.email_notification import EmailNotification\nfrom app.models.event_invoice import EventInvoice\nfrom app.models.feedback import Feedback\nfrom app.models.mail import USER_REGISTER_WITH_PASSWORD\nfrom app.models.notification import Notification\nfrom app.models.session import Session\nfrom app.models.speaker import Speaker\nfrom app.models.ticket_holder import TicketHolder\nfrom app.models.user import User\nfrom app.models.users_events_role import UsersEventsRoles\n\nuser_misc_routes = Blueprint('user_misc', __name__, url_prefix='/v1')\n\n\nclass UserList(ResourceList):\n \"\"\"\n List and create Users\n \"\"\"\n def before_create_object(self, data, view_kwargs):\n \"\"\"\n method to check if there is an existing user with same email which is received in data to create a new user\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if db.session.query(User.id).filter_by(email=data['email'], deleted_at=None).scalar() is not None:\n raise ConflictException({'pointer': '/data/attributes/email'}, \"Email already exists\")\n\n def after_create_object(self, user, data, view_kwargs):\n \"\"\"\n method to send-\n email notification\n mail link for register verification\n add image urls\n :param user:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n s = get_serializer()\n hash = str(base64.b64encode(str(s.dumps([user.email, str_generator()])).encode()), 'utf-8')\n link = make_frontend_url('/email/verify'.format(id=user.id), {'token': hash})\n send_email_with_action(user, USER_REGISTER_WITH_PASSWORD, app_name=get_settings()['app_name'],\n email=user.email)\n send_email_confirmation(user.email, link)\n\n if data.get('original_image_url'):\n uploaded_images = create_save_image_sizes(data['original_image_url'], 'user', user.id)\n uploaded_images['small_image_url'] = uploaded_images['thumbnail_image_url']\n del uploaded_images['large_image_url']\n self.session.query(User).filter_by(id=user.id).update(uploaded_images)\n\n decorators = (api.has_permission('is_admin', methods=\"GET\"),)\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User,\n 'methods': {\n 'before_create_object': before_create_object,\n 'after_create_object': after_create_object\n }}\n\n\nclass UserDetail(ResourceDetail):\n \"\"\"\n User detail by id\n \"\"\"\n def before_get(self, args, kwargs):\n\n if current_user.is_admin or current_user.is_super_admin or current_user:\n self.schema = UserSchema\n else:\n self.schema = UserSchemaPublic\n\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get method for user object\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('notification_id') is not None:\n notification = safe_query(self, Notification, 'id', view_kwargs['notification_id'], 'notification_id')\n if notification.user_id is not None:\n view_kwargs['id'] = notification.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('feedback_id') is not None:\n print(view_kwargs['feedback_id'])\n feedback = safe_query(self, Feedback, 'id', view_kwargs['feedback_id'], 'feedback_id')\n if feedback.user_id is not None:\n view_kwargs['id'] = feedback.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('attendee_id') is not None:\n attendee = safe_query(self, TicketHolder, 'id', view_kwargs['attendee_id'], 'attendee_id')\n if attendee.user is not None:\n if (not has_access('is_user_itself',\n user_id=attendee.user.id) or not has_access('is_coorganizer',\n event_id=attendee.event_id)):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n view_kwargs['id'] = attendee.user.id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('event_invoice_id') is not None:\n event_invoice = safe_query(self, EventInvoice, 'id', view_kwargs['event_invoice_id'], 'event_invoice_id')\n if event_invoice.user_id is not None:\n view_kwargs['id'] = event_invoice.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('users_events_role_id') is not None:\n users_events_role = safe_query(self, UsersEventsRoles, 'id', view_kwargs['users_events_role_id'],\n 'users_events_role_id')\n if users_events_role.user_id is not None:\n view_kwargs['id'] = users_events_role.user_id\n\n if view_kwargs.get('speaker_id') is not None:\n speaker = safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id')\n if speaker.user_id is not None:\n view_kwargs['id'] = speaker.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('session_id') is not None:\n session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')\n if session.creator_id is not None:\n view_kwargs['id'] = session.creator_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('access_code_id') is not None:\n access_code = safe_query(self, AccessCode, 'id', view_kwargs['access_code_id'], 'access_code_id')\n if access_code.marketer_id is not None:\n view_kwargs['id'] = access_code.marketer_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('discount_code_id') is not None:\n discount_code = safe_query(self, DiscountCode, 'id', view_kwargs['discount_code_id'], 'discount_code_id')\n if discount_code.marketer_id is not None:\n view_kwargs['id'] = discount_code.marketer_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('email_notification_id') is not None:\n email_notification = safe_query(self, EmailNotification, 'id', view_kwargs['email_notification_id'],\n 'email_notification_id')\n if email_notification.user_id is not None:\n view_kwargs['id'] = email_notification.user_id\n else:\n view_kwargs['id'] = None\n\n def before_update_object(self, user, data, view_kwargs):\n if data.get('original_image_url') and data['original_image_url'] != user.original_image_url:\n uploaded_images = create_save_image_sizes(data['original_image_url'], 'user', user.id)\n data['original_image_url'] = uploaded_images['original_image_url']\n data['small_image_url'] = uploaded_images['thumbnail_image_url']\n data['thumbnail_image_url'] = uploaded_images['thumbnail_image_url']\n data['icon_image_url'] = uploaded_images['icon_image_url']\n\n if data.get('email') and data['email'] != user.email:\n view_kwargs['email_changed'] = user.email\n\n if data.get('is_admin') != user.is_admin:\n user.is_admin = not user.is_admin\n\n def after_update_object(self, user, data, view_kwargs):\n \"\"\"\n method to mail user about email change\n :param user:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('email_changed'):\n send_email_change_user_email(user, view_kwargs.get('email_changed'))\n\n decorators = (api.has_permission('is_user_itself', fetch=\"user_id,id\", fetch_as=\"user_id\",\n model=[Notification, Feedback, UsersEventsRoles, Session, EventInvoice, AccessCode,\n DiscountCode, EmailNotification, Speaker, User],\n fetch_key_url=\"notification_id, feedback_id, users_events_role_id, session_id, \\\n event_invoice_id, access_code_id, discount_code_id, email_notification_id, speaker_id, id\",\n leave_if=lambda a: a.get('attendee_id')), )\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User,\n 'methods': {\n 'before_get_object': before_get_object,\n 'before_update_object': before_update_object,\n 'after_update_object': after_update_object\n }}\n\n\nclass UserRelationship(ResourceRelationship):\n \"\"\"\n User Relationship\n \"\"\"\n decorators = (is_user_itself, )\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User}\n\n\n@user_misc_routes.route('/users/checkEmail', methods=['POST'])\ndef is_email_available():\n email = request.json.get('email', None)\n if email:\n if get_count(db.session.query(User).filter_by(email=email)):\n return jsonify(\n result=\"False\"\n )\n else:\n return jsonify(\n result=\"True\"\n )\n else:\n abort(\n make_response(jsonify(error=\"Email field missing\"), 422)\n )\n", "path": "app/api/users.py"}], "after_files": [{"content": "import base64\n\nfrom flask import Blueprint, request, jsonify, abort, make_response\nfrom flask_jwt import current_identity as current_user\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\n\nfrom app import get_settings\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query, get_count\nfrom app.api.helpers.exceptions import ConflictException\nfrom app.api.helpers.exceptions import ForbiddenException\nfrom app.api.helpers.files import create_save_image_sizes, make_frontend_url\nfrom app.api.helpers.mail import send_email_confirmation, send_email_change_user_email, send_email_with_action\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import is_user_itself\nfrom app.api.helpers.utilities import get_serializer, str_generator\nfrom app.api.schema.users import UserSchema, UserSchemaPublic\nfrom app.models import db\nfrom app.models.access_code import AccessCode\nfrom app.models.discount_code import DiscountCode\nfrom app.models.email_notification import EmailNotification\nfrom app.models.event_invoice import EventInvoice\nfrom app.models.feedback import Feedback\nfrom app.models.mail import USER_REGISTER_WITH_PASSWORD\nfrom app.models.notification import Notification\nfrom app.models.session import Session\nfrom app.models.speaker import Speaker\nfrom app.models.ticket_holder import TicketHolder\nfrom app.models.user import User\nfrom app.models.users_events_role import UsersEventsRoles\n\nuser_misc_routes = Blueprint('user_misc', __name__, url_prefix='/v1')\n\n\nclass UserList(ResourceList):\n \"\"\"\n List and create Users\n \"\"\"\n def before_create_object(self, data, view_kwargs):\n \"\"\"\n method to check if there is an existing user with same email which is received in data to create a new user\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if db.session.query(User.id).filter_by(email=data['email'], deleted_at=None).scalar() is not None:\n raise ConflictException({'pointer': '/data/attributes/email'}, \"Email already exists\")\n\n def after_create_object(self, user, data, view_kwargs):\n \"\"\"\n method to send-\n email notification\n mail link for register verification\n add image urls\n :param user:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n s = get_serializer()\n hash = str(base64.b64encode(str(s.dumps([user.email, str_generator()])).encode()), 'utf-8')\n link = make_frontend_url('/email/verify'.format(id=user.id), {'token': hash})\n send_email_with_action(user, USER_REGISTER_WITH_PASSWORD, app_name=get_settings()['app_name'],\n email=user.email)\n send_email_confirmation(user.email, link)\n\n if data.get('original_image_url'):\n uploaded_images = create_save_image_sizes(data['original_image_url'], 'user', user.id)\n uploaded_images['small_image_url'] = uploaded_images['thumbnail_image_url']\n del uploaded_images['large_image_url']\n self.session.query(User).filter_by(id=user.id).update(uploaded_images)\n\n decorators = (api.has_permission('is_admin', methods=\"GET\"),)\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User,\n 'methods': {\n 'before_create_object': before_create_object,\n 'after_create_object': after_create_object\n }}\n\n\nclass UserDetail(ResourceDetail):\n \"\"\"\n User detail by id\n \"\"\"\n def before_get(self, args, kwargs):\n\n if current_user.is_admin or current_user.is_super_admin or current_user:\n self.schema = UserSchema\n else:\n self.schema = UserSchemaPublic\n\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get method for user object\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('notification_id') is not None:\n notification = safe_query(self, Notification, 'id', view_kwargs['notification_id'], 'notification_id')\n if notification.user_id is not None:\n view_kwargs['id'] = notification.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('feedback_id') is not None:\n print(view_kwargs['feedback_id'])\n feedback = safe_query(self, Feedback, 'id', view_kwargs['feedback_id'], 'feedback_id')\n if feedback.user_id is not None:\n view_kwargs['id'] = feedback.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('attendee_id') is not None:\n attendee = safe_query(self, TicketHolder, 'id', view_kwargs['attendee_id'], 'attendee_id')\n if attendee.user is not None:\n if (not has_access('is_user_itself',\n user_id=attendee.user.id) or not has_access('is_coorganizer',\n event_id=attendee.event_id)):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n view_kwargs['id'] = attendee.user.id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('event_invoice_id') is not None:\n event_invoice = safe_query(self, EventInvoice, 'id', view_kwargs['event_invoice_id'], 'event_invoice_id')\n if event_invoice.user_id is not None:\n view_kwargs['id'] = event_invoice.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('users_events_role_id') is not None:\n users_events_role = safe_query(self, UsersEventsRoles, 'id', view_kwargs['users_events_role_id'],\n 'users_events_role_id')\n if users_events_role.user_id is not None:\n view_kwargs['id'] = users_events_role.user_id\n\n if view_kwargs.get('speaker_id') is not None:\n speaker = safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id')\n if speaker.user_id is not None:\n view_kwargs['id'] = speaker.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('session_id') is not None:\n session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')\n if session.creator_id is not None:\n view_kwargs['id'] = session.creator_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('access_code_id') is not None:\n access_code = safe_query(self, AccessCode, 'id', view_kwargs['access_code_id'], 'access_code_id')\n if access_code.marketer_id is not None:\n view_kwargs['id'] = access_code.marketer_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('discount_code_id') is not None:\n discount_code = safe_query(self, DiscountCode, 'id', view_kwargs['discount_code_id'], 'discount_code_id')\n if discount_code.marketer_id is not None:\n view_kwargs['id'] = discount_code.marketer_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('email_notification_id') is not None:\n email_notification = safe_query(self, EmailNotification, 'id', view_kwargs['email_notification_id'],\n 'email_notification_id')\n if email_notification.user_id is not None:\n view_kwargs['id'] = email_notification.user_id\n else:\n view_kwargs['id'] = None\n\n def before_update_object(self, user, data, view_kwargs):\n if data.get('original_image_url') and data['original_image_url'] != user.original_image_url:\n uploaded_images = create_save_image_sizes(data['original_image_url'], 'user', user.id)\n data['original_image_url'] = uploaded_images['original_image_url']\n data['small_image_url'] = uploaded_images['thumbnail_image_url']\n data['thumbnail_image_url'] = uploaded_images['thumbnail_image_url']\n data['icon_image_url'] = uploaded_images['icon_image_url']\n\n if data.get('email') and data['email'] != user.email:\n view_kwargs['email_changed'] = user.email\n\n if has_access('is_super_admin') and data.get('is_admin') != user.is_admin:\n user.is_admin = not user.is_admin\n\n def after_update_object(self, user, data, view_kwargs):\n \"\"\"\n method to mail user about email change\n :param user:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('email_changed'):\n send_email_change_user_email(user, view_kwargs.get('email_changed'))\n\n decorators = (api.has_permission('is_user_itself', fetch=\"user_id,id\", fetch_as=\"user_id\",\n model=[Notification, Feedback, UsersEventsRoles, Session, EventInvoice, AccessCode,\n DiscountCode, EmailNotification, Speaker, User],\n fetch_key_url=\"notification_id, feedback_id, users_events_role_id, session_id, \\\n event_invoice_id, access_code_id, discount_code_id, email_notification_id, speaker_id, id\",\n leave_if=lambda a: a.get('attendee_id')), )\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User,\n 'methods': {\n 'before_get_object': before_get_object,\n 'before_update_object': before_update_object,\n 'after_update_object': after_update_object\n }}\n\n\nclass UserRelationship(ResourceRelationship):\n \"\"\"\n User Relationship\n \"\"\"\n decorators = (is_user_itself, )\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User}\n\n\n@user_misc_routes.route('/users/checkEmail', methods=['POST'])\ndef is_email_available():\n email = request.json.get('email', None)\n if email:\n if get_count(db.session.query(User).filter_by(email=email)):\n return jsonify(\n result=\"False\"\n )\n else:\n return jsonify(\n result=\"True\"\n )\n else:\n abort(\n make_response(jsonify(error=\"Email field missing\"), 422)\n )\n", "path": "app/api/users.py"}]} |
gh_patches_debug_1585 | rasdani/github-patches | git_diff | feast-dev__feast-3588 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
to_remote_storage() resulting in undeleted temporary tables in Snowflake
## Expected Behavior
When calling get_historical_features.to_remote_storage(), any temporary tables created in Snowflake are deleted after the Snowflake session ends.
## Current Behavior
When calling get_historical_features.to_remote_storage(), the temporary tables created during the join process are not deleted after the Snowflake session ends. These tables are set to a retention time of 1 day, but they are not deleted and still exist after 24 hours.
I tested this with `to_df()` and the above described issue does not occur. I also tried explicitly ending the session to make sure that wasn't the issue, but even after confirming the session was ended, the issue still persists.
## Steps to reproduce
1. For the FeatureStore object, set the RepoConfig offline store config to specify the following:
```json
{
"blob_export_location": <s3_staging_url>,
"storage_integration_name": <storage_integration>,
"role": <stage_role>,
"schema_": <stage_schema>,
}
```
2. Call `get_historical_features(entity_df=entity, features=features, full_feature_names=True).to_remote_storage()`
3. Check snowflake stage tables and look for tables created at the time of running that start with `temporary_`
### Specifications
- Version: 0.30.0
- Platform: Ubuntu
- Subsystem:
## Possible Solution
No possible solution known at the time of reporting
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/feast/infra/offline_stores/snowflake.py`
Content:
```
1 import contextlib
2 import os
3 import uuid
4 import warnings
5 from datetime import datetime
6 from functools import reduce
7 from pathlib import Path
8 from typing import (
9 TYPE_CHECKING,
10 Any,
11 Callable,
12 ContextManager,
13 Dict,
14 Iterator,
15 List,
16 Optional,
17 Tuple,
18 Union,
19 cast,
20 )
21
22 import numpy as np
23 import pandas as pd
24 import pyarrow
25 from pydantic import Field, StrictStr
26 from pydantic.typing import Literal
27 from pytz import utc
28
29 from feast import OnDemandFeatureView
30 from feast.data_source import DataSource
31 from feast.errors import (
32 EntitySQLEmptyResults,
33 InvalidEntityType,
34 InvalidSparkSessionException,
35 )
36 from feast.feature_logging import LoggingConfig, LoggingSource
37 from feast.feature_view import DUMMY_ENTITY_ID, DUMMY_ENTITY_VAL, FeatureView
38 from feast.infra.offline_stores import offline_utils
39 from feast.infra.offline_stores.offline_store import (
40 OfflineStore,
41 RetrievalJob,
42 RetrievalMetadata,
43 )
44 from feast.infra.offline_stores.snowflake_source import (
45 SavedDatasetSnowflakeStorage,
46 SnowflakeLoggingDestination,
47 SnowflakeSource,
48 )
49 from feast.infra.registry.base_registry import BaseRegistry
50 from feast.infra.utils.snowflake.snowflake_utils import (
51 GetSnowflakeConnection,
52 execute_snowflake_statement,
53 write_pandas,
54 write_parquet,
55 )
56 from feast.repo_config import FeastConfigBaseModel, RepoConfig
57 from feast.saved_dataset import SavedDatasetStorage
58 from feast.usage import log_exceptions_and_usage
59
60 try:
61 from snowflake.connector import SnowflakeConnection
62 except ImportError as e:
63 from feast.errors import FeastExtrasDependencyImportError
64
65 raise FeastExtrasDependencyImportError("snowflake", str(e))
66
67 if TYPE_CHECKING:
68 from pyspark.sql import DataFrame, SparkSession
69
70 warnings.filterwarnings("ignore", category=DeprecationWarning)
71
72
73 class SnowflakeOfflineStoreConfig(FeastConfigBaseModel):
74 """Offline store config for Snowflake"""
75
76 type: Literal["snowflake.offline"] = "snowflake.offline"
77 """ Offline store type selector """
78
79 config_path: Optional[str] = os.path.expanduser("~/.snowsql/config")
80 """ Snowflake config path -- absolute path required (Cant use ~)"""
81
82 account: Optional[str] = None
83 """ Snowflake deployment identifier -- drop .snowflakecomputing.com """
84
85 user: Optional[str] = None
86 """ Snowflake user name """
87
88 password: Optional[str] = None
89 """ Snowflake password """
90
91 role: Optional[str] = None
92 """ Snowflake role name """
93
94 warehouse: Optional[str] = None
95 """ Snowflake warehouse name """
96
97 authenticator: Optional[str] = None
98 """ Snowflake authenticator name """
99
100 database: StrictStr
101 """ Snowflake database name """
102
103 schema_: Optional[str] = Field("PUBLIC", alias="schema")
104 """ Snowflake schema name """
105
106 storage_integration_name: Optional[str] = None
107 """ Storage integration name in snowflake """
108
109 blob_export_location: Optional[str] = None
110 """ Location (in S3, Google storage or Azure storage) where data is offloaded """
111
112 convert_timestamp_columns: Optional[bool] = None
113 """ Convert timestamp columns on export to a Parquet-supported format """
114
115 class Config:
116 allow_population_by_field_name = True
117
118
119 class SnowflakeOfflineStore(OfflineStore):
120 @staticmethod
121 @log_exceptions_and_usage(offline_store="snowflake")
122 def pull_latest_from_table_or_query(
123 config: RepoConfig,
124 data_source: DataSource,
125 join_key_columns: List[str],
126 feature_name_columns: List[str],
127 timestamp_field: str,
128 created_timestamp_column: Optional[str],
129 start_date: datetime,
130 end_date: datetime,
131 ) -> RetrievalJob:
132 assert isinstance(config.offline_store, SnowflakeOfflineStoreConfig)
133 assert isinstance(data_source, SnowflakeSource)
134
135 from_expression = data_source.get_table_query_string()
136 if not data_source.database and data_source.table:
137 from_expression = f'"{config.offline_store.database}"."{config.offline_store.schema_}".{from_expression}'
138
139 if join_key_columns:
140 partition_by_join_key_string = '"' + '", "'.join(join_key_columns) + '"'
141 partition_by_join_key_string = (
142 "PARTITION BY " + partition_by_join_key_string
143 )
144 else:
145 partition_by_join_key_string = ""
146
147 timestamp_columns = [timestamp_field]
148 if created_timestamp_column:
149 timestamp_columns.append(created_timestamp_column)
150
151 timestamp_desc_string = '"' + '" DESC, "'.join(timestamp_columns) + '" DESC'
152 field_string = (
153 '"'
154 + '", "'.join(join_key_columns + feature_name_columns + timestamp_columns)
155 + '"'
156 )
157
158 if config.offline_store.convert_timestamp_columns:
159 select_fields = list(
160 map(
161 lambda field_name: f'"{field_name}"',
162 join_key_columns + feature_name_columns,
163 )
164 )
165 select_timestamps = list(
166 map(
167 lambda field_name: f"to_varchar({field_name}, 'YYYY-MM-DD\"T\"HH24:MI:SS.FFTZH:TZM') as {field_name}",
168 timestamp_columns,
169 )
170 )
171 inner_field_string = ", ".join(select_fields + select_timestamps)
172 else:
173 select_fields = list(
174 map(
175 lambda field_name: f'"{field_name}"',
176 join_key_columns + feature_name_columns + timestamp_columns,
177 )
178 )
179 inner_field_string = ", ".join(select_fields)
180
181 if data_source.snowflake_options.warehouse:
182 config.offline_store.warehouse = data_source.snowflake_options.warehouse
183
184 with GetSnowflakeConnection(config.offline_store) as conn:
185 snowflake_conn = conn
186
187 start_date = start_date.astimezone(tz=utc)
188 end_date = end_date.astimezone(tz=utc)
189
190 query = f"""
191 SELECT
192 {field_string}
193 {f''', TRIM({repr(DUMMY_ENTITY_VAL)}::VARIANT,'"') AS "{DUMMY_ENTITY_ID}"''' if not join_key_columns else ""}
194 FROM (
195 SELECT {inner_field_string},
196 ROW_NUMBER() OVER({partition_by_join_key_string} ORDER BY {timestamp_desc_string}) AS "_feast_row"
197 FROM {from_expression}
198 WHERE "{timestamp_field}" BETWEEN TIMESTAMP '{start_date}' AND TIMESTAMP '{end_date}'
199 )
200 WHERE "_feast_row" = 1
201 """
202
203 return SnowflakeRetrievalJob(
204 query=query,
205 snowflake_conn=snowflake_conn,
206 config=config,
207 full_feature_names=False,
208 on_demand_feature_views=None,
209 )
210
211 @staticmethod
212 @log_exceptions_and_usage(offline_store="snowflake")
213 def pull_all_from_table_or_query(
214 config: RepoConfig,
215 data_source: DataSource,
216 join_key_columns: List[str],
217 feature_name_columns: List[str],
218 timestamp_field: str,
219 start_date: datetime,
220 end_date: datetime,
221 ) -> RetrievalJob:
222 assert isinstance(config.offline_store, SnowflakeOfflineStoreConfig)
223 assert isinstance(data_source, SnowflakeSource)
224
225 from_expression = data_source.get_table_query_string()
226 if not data_source.database and data_source.table:
227 from_expression = f'"{config.offline_store.database}"."{config.offline_store.schema_}".{from_expression}'
228
229 field_string = (
230 '"'
231 + '", "'.join(join_key_columns + feature_name_columns + [timestamp_field])
232 + '"'
233 )
234
235 if data_source.snowflake_options.warehouse:
236 config.offline_store.warehouse = data_source.snowflake_options.warehouse
237
238 with GetSnowflakeConnection(config.offline_store) as conn:
239 snowflake_conn = conn
240
241 start_date = start_date.astimezone(tz=utc)
242 end_date = end_date.astimezone(tz=utc)
243
244 query = f"""
245 SELECT {field_string}
246 FROM {from_expression}
247 WHERE "{timestamp_field}" BETWEEN TIMESTAMP '{start_date}' AND TIMESTAMP '{end_date}'
248 """
249
250 return SnowflakeRetrievalJob(
251 query=query,
252 snowflake_conn=snowflake_conn,
253 config=config,
254 full_feature_names=False,
255 )
256
257 @staticmethod
258 @log_exceptions_and_usage(offline_store="snowflake")
259 def get_historical_features(
260 config: RepoConfig,
261 feature_views: List[FeatureView],
262 feature_refs: List[str],
263 entity_df: Union[pd.DataFrame, str],
264 registry: BaseRegistry,
265 project: str,
266 full_feature_names: bool = False,
267 ) -> RetrievalJob:
268 assert isinstance(config.offline_store, SnowflakeOfflineStoreConfig)
269 for fv in feature_views:
270 assert isinstance(fv.batch_source, SnowflakeSource)
271
272 with GetSnowflakeConnection(config.offline_store) as conn:
273 snowflake_conn = conn
274
275 entity_schema = _get_entity_schema(entity_df, snowflake_conn, config)
276
277 entity_df_event_timestamp_col = (
278 offline_utils.infer_event_timestamp_from_entity_df(entity_schema)
279 )
280
281 entity_df_event_timestamp_range = _get_entity_df_event_timestamp_range(
282 entity_df,
283 entity_df_event_timestamp_col,
284 snowflake_conn,
285 )
286
287 @contextlib.contextmanager
288 def query_generator() -> Iterator[str]:
289
290 table_name = offline_utils.get_temp_entity_table_name()
291
292 _upload_entity_df(entity_df, snowflake_conn, config, table_name)
293
294 expected_join_keys = offline_utils.get_expected_join_keys(
295 project, feature_views, registry
296 )
297
298 offline_utils.assert_expected_columns_in_entity_df(
299 entity_schema, expected_join_keys, entity_df_event_timestamp_col
300 )
301
302 # Build a query context containing all information required to template the Snowflake SQL query
303 query_context = offline_utils.get_feature_view_query_context(
304 feature_refs,
305 feature_views,
306 registry,
307 project,
308 entity_df_event_timestamp_range,
309 )
310
311 query_context = _fix_entity_selections_identifiers(query_context)
312
313 # Generate the Snowflake SQL query from the query context
314 query = offline_utils.build_point_in_time_query(
315 query_context,
316 left_table_query_string=table_name,
317 entity_df_event_timestamp_col=entity_df_event_timestamp_col,
318 entity_df_columns=entity_schema.keys(),
319 query_template=MULTIPLE_FEATURE_VIEW_POINT_IN_TIME_JOIN,
320 full_feature_names=full_feature_names,
321 )
322
323 yield query
324
325 return SnowflakeRetrievalJob(
326 query=query_generator,
327 snowflake_conn=snowflake_conn,
328 config=config,
329 full_feature_names=full_feature_names,
330 on_demand_feature_views=OnDemandFeatureView.get_requested_odfvs(
331 feature_refs, project, registry
332 ),
333 metadata=RetrievalMetadata(
334 features=feature_refs,
335 keys=list(entity_schema.keys() - {entity_df_event_timestamp_col}),
336 min_event_timestamp=entity_df_event_timestamp_range[0],
337 max_event_timestamp=entity_df_event_timestamp_range[1],
338 ),
339 )
340
341 @staticmethod
342 def write_logged_features(
343 config: RepoConfig,
344 data: Union[pyarrow.Table, Path],
345 source: LoggingSource,
346 logging_config: LoggingConfig,
347 registry: BaseRegistry,
348 ):
349 assert isinstance(logging_config.destination, SnowflakeLoggingDestination)
350
351 with GetSnowflakeConnection(config.offline_store) as conn:
352 snowflake_conn = conn
353
354 if isinstance(data, Path):
355 write_parquet(
356 snowflake_conn,
357 data,
358 source.get_schema(registry),
359 table_name=logging_config.destination.table_name,
360 auto_create_table=True,
361 )
362 else:
363 write_pandas(
364 snowflake_conn,
365 data.to_pandas(),
366 table_name=logging_config.destination.table_name,
367 auto_create_table=True,
368 )
369
370 @staticmethod
371 def offline_write_batch(
372 config: RepoConfig,
373 feature_view: FeatureView,
374 table: pyarrow.Table,
375 progress: Optional[Callable[[int], Any]],
376 ):
377 assert isinstance(config.offline_store, SnowflakeOfflineStoreConfig)
378 assert isinstance(feature_view.batch_source, SnowflakeSource)
379
380 pa_schema, column_names = offline_utils.get_pyarrow_schema_from_batch_source(
381 config, feature_view.batch_source
382 )
383 if column_names != table.column_names:
384 raise ValueError(
385 f"The input pyarrow table has schema {table.schema} with the incorrect columns {table.column_names}. "
386 f"The schema is expected to be {pa_schema} with the columns (in this exact order) to be {column_names}."
387 )
388
389 if table.schema != pa_schema:
390 table = table.cast(pa_schema)
391
392 with GetSnowflakeConnection(config.offline_store) as conn:
393 snowflake_conn = conn
394
395 write_pandas(
396 snowflake_conn,
397 table.to_pandas(),
398 table_name=feature_view.batch_source.table,
399 auto_create_table=True,
400 )
401
402
403 class SnowflakeRetrievalJob(RetrievalJob):
404 def __init__(
405 self,
406 query: Union[str, Callable[[], ContextManager[str]]],
407 snowflake_conn: SnowflakeConnection,
408 config: RepoConfig,
409 full_feature_names: bool,
410 on_demand_feature_views: Optional[List[OnDemandFeatureView]] = None,
411 metadata: Optional[RetrievalMetadata] = None,
412 ):
413
414 if not isinstance(query, str):
415 self._query_generator = query
416 else:
417
418 @contextlib.contextmanager
419 def query_generator() -> Iterator[str]:
420 assert isinstance(query, str)
421 yield query
422
423 self._query_generator = query_generator
424
425 self.snowflake_conn = snowflake_conn
426 self.config = config
427 self._full_feature_names = full_feature_names
428 self._on_demand_feature_views = on_demand_feature_views or []
429 self._metadata = metadata
430 self.export_path: Optional[str]
431 if self.config.offline_store.blob_export_location:
432 self.export_path = f"{self.config.offline_store.blob_export_location}/{self.config.project}/{uuid.uuid4()}"
433 else:
434 self.export_path = None
435
436 @property
437 def full_feature_names(self) -> bool:
438 return self._full_feature_names
439
440 @property
441 def on_demand_feature_views(self) -> List[OnDemandFeatureView]:
442 return self._on_demand_feature_views
443
444 def _to_df_internal(self, timeout: Optional[int] = None) -> pd.DataFrame:
445 with self._query_generator() as query:
446
447 df = execute_snowflake_statement(
448 self.snowflake_conn, query
449 ).fetch_pandas_all()
450
451 return df
452
453 def _to_arrow_internal(self, timeout: Optional[int] = None) -> pyarrow.Table:
454 with self._query_generator() as query:
455
456 pa_table = execute_snowflake_statement(
457 self.snowflake_conn, query
458 ).fetch_arrow_all()
459
460 if pa_table:
461 return pa_table
462 else:
463 empty_result = execute_snowflake_statement(self.snowflake_conn, query)
464
465 return pyarrow.Table.from_pandas(
466 pd.DataFrame(columns=[md.name for md in empty_result.description])
467 )
468
469 def to_snowflake(self, table_name: str, temporary=False) -> None:
470 """Save dataset as a new Snowflake table"""
471 if self.on_demand_feature_views:
472 transformed_df = self.to_df()
473
474 write_pandas(
475 self.snowflake_conn, transformed_df, table_name, auto_create_table=True
476 )
477
478 return None
479
480 with self._query_generator() as query:
481 query = f'CREATE {"TEMPORARY" if temporary else ""} TABLE IF NOT EXISTS "{table_name}" AS ({query});\n'
482
483 execute_snowflake_statement(self.snowflake_conn, query)
484
485 def to_sql(self) -> str:
486 """
487 Returns the SQL query that will be executed in Snowflake to build the historical feature table.
488 """
489 with self._query_generator() as query:
490 return query
491
492 def to_spark_df(self, spark_session: "SparkSession") -> "DataFrame":
493 """
494 Method to convert snowflake query results to pyspark data frame.
495
496 Args:
497 spark_session: spark Session variable of current environment.
498
499 Returns:
500 spark_df: A pyspark dataframe.
501 """
502
503 try:
504 from pyspark.sql import DataFrame, SparkSession
505 except ImportError as e:
506 from feast.errors import FeastExtrasDependencyImportError
507
508 raise FeastExtrasDependencyImportError("spark", str(e))
509
510 if isinstance(spark_session, SparkSession):
511 with self._query_generator() as query:
512
513 arrow_batches = execute_snowflake_statement(
514 self.snowflake_conn, query
515 ).fetch_arrow_batches()
516
517 if arrow_batches:
518 spark_df = reduce(
519 DataFrame.unionAll,
520 [
521 spark_session.createDataFrame(batch.to_pandas())
522 for batch in arrow_batches
523 ],
524 )
525
526 return spark_df
527
528 else:
529 raise EntitySQLEmptyResults(query)
530
531 else:
532 raise InvalidSparkSessionException(spark_session)
533
534 def persist(self, storage: SavedDatasetStorage, allow_overwrite: bool = False):
535 assert isinstance(storage, SavedDatasetSnowflakeStorage)
536 self.to_snowflake(table_name=storage.snowflake_options.table)
537
538 @property
539 def metadata(self) -> Optional[RetrievalMetadata]:
540 return self._metadata
541
542 def supports_remote_storage_export(self) -> bool:
543 return (
544 self.config.offline_store.storage_integration_name
545 and self.config.offline_store.blob_export_location
546 )
547
548 def to_remote_storage(self) -> List[str]:
549 if not self.export_path:
550 raise ValueError(
551 "to_remote_storage() requires `blob_export_location` to be specified in config"
552 )
553 if not self.config.offline_store.storage_integration_name:
554 raise ValueError(
555 "to_remote_storage() requires `storage_integration_name` to be specified in config"
556 )
557
558 table = f"temporary_{uuid.uuid4().hex}"
559 self.to_snowflake(table)
560
561 query = f"""
562 COPY INTO '{self.export_path}/{table}' FROM "{self.config.offline_store.database}"."{self.config.offline_store.schema_}"."{table}"\n
563 STORAGE_INTEGRATION = {self.config.offline_store.storage_integration_name}\n
564 FILE_FORMAT = (TYPE = PARQUET)
565 DETAILED_OUTPUT = TRUE
566 HEADER = TRUE
567 """
568 cursor = execute_snowflake_statement(self.snowflake_conn, query)
569
570 file_name_column_index = [
571 idx for idx, rm in enumerate(cursor.description) if rm.name == "FILE_NAME"
572 ][0]
573 return [
574 f"{self.export_path}/{row[file_name_column_index]}"
575 for row in cursor.fetchall()
576 ]
577
578
579 def _get_entity_schema(
580 entity_df: Union[pd.DataFrame, str],
581 snowflake_conn: SnowflakeConnection,
582 config: RepoConfig,
583 ) -> Dict[str, np.dtype]:
584
585 if isinstance(entity_df, pd.DataFrame):
586
587 return dict(zip(entity_df.columns, entity_df.dtypes))
588
589 else:
590
591 query = f"SELECT * FROM ({entity_df}) LIMIT 1"
592 limited_entity_df = execute_snowflake_statement(
593 snowflake_conn, query
594 ).fetch_pandas_all()
595
596 return dict(zip(limited_entity_df.columns, limited_entity_df.dtypes))
597
598
599 def _upload_entity_df(
600 entity_df: Union[pd.DataFrame, str],
601 snowflake_conn: SnowflakeConnection,
602 config: RepoConfig,
603 table_name: str,
604 ) -> None:
605
606 if isinstance(entity_df, pd.DataFrame):
607 # Write the data from the DataFrame to the table
608 # Known issues with following entity data types: BINARY
609 write_pandas(
610 snowflake_conn,
611 entity_df,
612 table_name,
613 auto_create_table=True,
614 create_temp_table=True,
615 )
616
617 return None
618 elif isinstance(entity_df, str):
619 # If the entity_df is a string (SQL query), create a Snowflake table out of it,
620 query = f'CREATE TEMPORARY TABLE "{table_name}" AS ({entity_df})'
621 execute_snowflake_statement(snowflake_conn, query)
622
623 return None
624 else:
625 raise InvalidEntityType(type(entity_df))
626
627
628 def _fix_entity_selections_identifiers(query_context) -> list:
629
630 for i, qc in enumerate(query_context):
631 for j, es in enumerate(qc.entity_selections):
632 query_context[i].entity_selections[j] = f'"{es}"'.replace(" AS ", '" AS "')
633
634 return query_context
635
636
637 def _get_entity_df_event_timestamp_range(
638 entity_df: Union[pd.DataFrame, str],
639 entity_df_event_timestamp_col: str,
640 snowflake_conn: SnowflakeConnection,
641 ) -> Tuple[datetime, datetime]:
642 if isinstance(entity_df, pd.DataFrame):
643 entity_df_event_timestamp = entity_df.loc[
644 :, entity_df_event_timestamp_col
645 ].infer_objects()
646 if pd.api.types.is_string_dtype(entity_df_event_timestamp):
647 entity_df_event_timestamp = pd.to_datetime(
648 entity_df_event_timestamp, utc=True
649 )
650 entity_df_event_timestamp_range = (
651 entity_df_event_timestamp.min().to_pydatetime(),
652 entity_df_event_timestamp.max().to_pydatetime(),
653 )
654 elif isinstance(entity_df, str):
655 # If the entity_df is a string (SQL query), determine range
656 # from table
657 query = f'SELECT MIN("{entity_df_event_timestamp_col}") AS "min_value", MAX("{entity_df_event_timestamp_col}") AS "max_value" FROM ({entity_df})'
658 results = execute_snowflake_statement(snowflake_conn, query).fetchall()
659
660 entity_df_event_timestamp_range = cast(Tuple[datetime, datetime], results[0])
661 if (
662 entity_df_event_timestamp_range[0] is None
663 or entity_df_event_timestamp_range[1] is None
664 ):
665 raise EntitySQLEmptyResults(entity_df)
666 else:
667 raise InvalidEntityType(type(entity_df))
668
669 return entity_df_event_timestamp_range
670
671
672 MULTIPLE_FEATURE_VIEW_POINT_IN_TIME_JOIN = """
673 /*
674 Compute a deterministic hash for the `left_table_query_string` that will be used throughout
675 all the logic as the field to GROUP BY the data
676 */
677 WITH "entity_dataframe" AS (
678 SELECT *,
679 "{{entity_df_event_timestamp_col}}" AS "entity_timestamp"
680 {% for featureview in featureviews %}
681 {% if featureview.entities %}
682 ,(
683 {% for entity in featureview.entities %}
684 CAST("{{entity}}" AS VARCHAR) ||
685 {% endfor %}
686 CAST("{{entity_df_event_timestamp_col}}" AS VARCHAR)
687 ) AS "{{featureview.name}}__entity_row_unique_id"
688 {% else %}
689 ,CAST("{{entity_df_event_timestamp_col}}" AS VARCHAR) AS "{{featureview.name}}__entity_row_unique_id"
690 {% endif %}
691 {% endfor %}
692 FROM "{{ left_table_query_string }}"
693 ),
694
695 {% for featureview in featureviews %}
696
697 "{{ featureview.name }}__entity_dataframe" AS (
698 SELECT
699 {{ featureview.entities | map('tojson') | join(', ')}}{% if featureview.entities %},{% else %}{% endif %}
700 "entity_timestamp",
701 "{{featureview.name}}__entity_row_unique_id"
702 FROM "entity_dataframe"
703 GROUP BY
704 {{ featureview.entities | map('tojson') | join(', ')}}{% if featureview.entities %},{% else %}{% endif %}
705 "entity_timestamp",
706 "{{featureview.name}}__entity_row_unique_id"
707 ),
708
709 /*
710 This query template performs the point-in-time correctness join for a single feature set table
711 to the provided entity table.
712
713 1. We first join the current feature_view to the entity dataframe that has been passed.
714 This JOIN has the following logic:
715 - For each row of the entity dataframe, only keep the rows where the `timestamp_field`
716 is less than the one provided in the entity dataframe
717 - If there a TTL for the current feature_view, also keep the rows where the `timestamp_field`
718 is higher the the one provided minus the TTL
719 - For each row, Join on the entity key and retrieve the `entity_row_unique_id` that has been
720 computed previously
721
722 The output of this CTE will contain all the necessary information and already filtered out most
723 of the data that is not relevant.
724 */
725
726 "{{ featureview.name }}__subquery" AS (
727 SELECT
728 "{{ featureview.timestamp_field }}" as "event_timestamp",
729 {{'"' ~ featureview.created_timestamp_column ~ '" as "created_timestamp",' if featureview.created_timestamp_column else '' }}
730 {{featureview.entity_selections | join(', ')}}{% if featureview.entity_selections %},{% else %}{% endif %}
731 {% for feature in featureview.features %}
732 "{{ feature }}" as {% if full_feature_names %}"{{ featureview.name }}__{{featureview.field_mapping.get(feature, feature)}}"{% else %}"{{ featureview.field_mapping.get(feature, feature) }}"{% endif %}{% if loop.last %}{% else %}, {% endif %}
733 {% endfor %}
734 FROM {{ featureview.table_subquery }}
735 WHERE "{{ featureview.timestamp_field }}" <= '{{ featureview.max_event_timestamp }}'
736 {% if featureview.ttl == 0 %}{% else %}
737 AND "{{ featureview.timestamp_field }}" >= '{{ featureview.min_event_timestamp }}'
738 {% endif %}
739 ),
740
741 "{{ featureview.name }}__base" AS (
742 SELECT
743 "subquery".*,
744 "entity_dataframe"."entity_timestamp",
745 "entity_dataframe"."{{featureview.name}}__entity_row_unique_id"
746 FROM "{{ featureview.name }}__subquery" AS "subquery"
747 INNER JOIN "{{ featureview.name }}__entity_dataframe" AS "entity_dataframe"
748 ON TRUE
749 AND "subquery"."event_timestamp" <= "entity_dataframe"."entity_timestamp"
750
751 {% if featureview.ttl == 0 %}{% else %}
752 AND "subquery"."event_timestamp" >= TIMESTAMPADD(second,-{{ featureview.ttl }},"entity_dataframe"."entity_timestamp")
753 {% endif %}
754
755 {% for entity in featureview.entities %}
756 AND "subquery"."{{ entity }}" = "entity_dataframe"."{{ entity }}"
757 {% endfor %}
758 ),
759
760 /*
761 2. If the `created_timestamp_column` has been set, we need to
762 deduplicate the data first. This is done by calculating the
763 `MAX(created_at_timestamp)` for each event_timestamp.
764 We then join the data on the next CTE
765 */
766 {% if featureview.created_timestamp_column %}
767 "{{ featureview.name }}__dedup" AS (
768 SELECT
769 "{{featureview.name}}__entity_row_unique_id",
770 "event_timestamp",
771 MAX("created_timestamp") AS "created_timestamp"
772 FROM "{{ featureview.name }}__base"
773 GROUP BY "{{featureview.name}}__entity_row_unique_id", "event_timestamp"
774 ),
775 {% endif %}
776
777 /*
778 3. The data has been filtered during the first CTE "*__base"
779 Thus we only need to compute the latest timestamp of each feature.
780 */
781 "{{ featureview.name }}__latest" AS (
782 SELECT
783 "event_timestamp",
784 {% if featureview.created_timestamp_column %}"created_timestamp",{% endif %}
785 "{{featureview.name}}__entity_row_unique_id"
786 FROM
787 (
788 SELECT *,
789 ROW_NUMBER() OVER(
790 PARTITION BY "{{featureview.name}}__entity_row_unique_id"
791 ORDER BY "event_timestamp" DESC{% if featureview.created_timestamp_column %},"created_timestamp" DESC{% endif %}
792 ) AS "row_number"
793 FROM "{{ featureview.name }}__base"
794 {% if featureview.created_timestamp_column %}
795 INNER JOIN "{{ featureview.name }}__dedup"
796 USING ("{{featureview.name}}__entity_row_unique_id", "event_timestamp", "created_timestamp")
797 {% endif %}
798 )
799 WHERE "row_number" = 1
800 ),
801
802 /*
803 4. Once we know the latest value of each feature for a given timestamp,
804 we can join again the data back to the original "base" dataset
805 */
806 "{{ featureview.name }}__cleaned" AS (
807 SELECT "base".*
808 FROM "{{ featureview.name }}__base" AS "base"
809 INNER JOIN "{{ featureview.name }}__latest"
810 USING(
811 "{{featureview.name}}__entity_row_unique_id",
812 "event_timestamp"
813 {% if featureview.created_timestamp_column %}
814 ,"created_timestamp"
815 {% endif %}
816 )
817 ){% if loop.last %}{% else %}, {% endif %}
818
819
820 {% endfor %}
821 /*
822 Joins the outputs of multiple time travel joins to a single table.
823 The entity_dataframe dataset being our source of truth here.
824 */
825
826 SELECT "{{ final_output_feature_names | join('", "')}}"
827 FROM "entity_dataframe"
828 {% for featureview in featureviews %}
829 LEFT JOIN (
830 SELECT
831 "{{featureview.name}}__entity_row_unique_id"
832 {% for feature in featureview.features %}
833 ,{% if full_feature_names %}"{{ featureview.name }}__{{featureview.field_mapping.get(feature, feature)}}"{% else %}"{{ featureview.field_mapping.get(feature, feature) }}"{% endif %}
834 {% endfor %}
835 FROM "{{ featureview.name }}__cleaned"
836 ) "{{ featureview.name }}__cleaned" USING ("{{featureview.name}}__entity_row_unique_id")
837 {% endfor %}
838 """
839
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sdk/python/feast/infra/offline_stores/snowflake.py b/sdk/python/feast/infra/offline_stores/snowflake.py
--- a/sdk/python/feast/infra/offline_stores/snowflake.py
+++ b/sdk/python/feast/infra/offline_stores/snowflake.py
@@ -556,7 +556,7 @@
)
table = f"temporary_{uuid.uuid4().hex}"
- self.to_snowflake(table)
+ self.to_snowflake(table, temporary=True)
query = f"""
COPY INTO '{self.export_path}/{table}' FROM "{self.config.offline_store.database}"."{self.config.offline_store.schema_}"."{table}"\n
| {"golden_diff": "diff --git a/sdk/python/feast/infra/offline_stores/snowflake.py b/sdk/python/feast/infra/offline_stores/snowflake.py\n--- a/sdk/python/feast/infra/offline_stores/snowflake.py\n+++ b/sdk/python/feast/infra/offline_stores/snowflake.py\n@@ -556,7 +556,7 @@\n )\n \n table = f\"temporary_{uuid.uuid4().hex}\"\n- self.to_snowflake(table)\n+ self.to_snowflake(table, temporary=True)\n \n query = f\"\"\"\n COPY INTO '{self.export_path}/{table}' FROM \"{self.config.offline_store.database}\".\"{self.config.offline_store.schema_}\".\"{table}\"\\n\n", "issue": "to_remote_storage() resulting in undeleted temporary tables in Snowflake\n## Expected Behavior \r\nWhen calling get_historical_features.to_remote_storage(), any temporary tables created in Snowflake are deleted after the Snowflake session ends.\r\n## Current Behavior\r\nWhen calling get_historical_features.to_remote_storage(), the temporary tables created during the join process are not deleted after the Snowflake session ends. These tables are set to a retention time of 1 day, but they are not deleted and still exist after 24 hours.\r\n\r\nI tested this with `to_df()` and the above described issue does not occur. I also tried explicitly ending the session to make sure that wasn't the issue, but even after confirming the session was ended, the issue still persists.\r\n\r\n## Steps to reproduce\r\n1. For the FeatureStore object, set the RepoConfig offline store config to specify the following:\r\n```json\r\n{\r\n \"blob_export_location\": <s3_staging_url>,\r\n \"storage_integration_name\": <storage_integration>,\r\n \"role\": <stage_role>,\r\n \"schema_\": <stage_schema>,\r\n}\r\n```\r\n2. Call `get_historical_features(entity_df=entity, features=features, full_feature_names=True).to_remote_storage()`\r\n3. Check snowflake stage tables and look for tables created at the time of running that start with `temporary_`\r\n\r\n### Specifications\r\n\r\n- Version: 0.30.0\r\n- Platform: Ubuntu\r\n- Subsystem:\r\n\r\n## Possible Solution\r\nNo possible solution known at the time of reporting\n", "before_files": [{"content": "import contextlib\nimport os\nimport uuid\nimport warnings\nfrom datetime import datetime\nfrom functools import reduce\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n ContextManager,\n Dict,\n Iterator,\n List,\n Optional,\n Tuple,\n Union,\n cast,\n)\n\nimport numpy as np\nimport pandas as pd\nimport pyarrow\nfrom pydantic import Field, StrictStr\nfrom pydantic.typing import Literal\nfrom pytz import utc\n\nfrom feast import OnDemandFeatureView\nfrom feast.data_source import DataSource\nfrom feast.errors import (\n EntitySQLEmptyResults,\n InvalidEntityType,\n InvalidSparkSessionException,\n)\nfrom feast.feature_logging import LoggingConfig, LoggingSource\nfrom feast.feature_view import DUMMY_ENTITY_ID, DUMMY_ENTITY_VAL, FeatureView\nfrom feast.infra.offline_stores import offline_utils\nfrom feast.infra.offline_stores.offline_store import (\n OfflineStore,\n RetrievalJob,\n RetrievalMetadata,\n)\nfrom feast.infra.offline_stores.snowflake_source import (\n SavedDatasetSnowflakeStorage,\n SnowflakeLoggingDestination,\n SnowflakeSource,\n)\nfrom feast.infra.registry.base_registry import BaseRegistry\nfrom feast.infra.utils.snowflake.snowflake_utils import (\n GetSnowflakeConnection,\n execute_snowflake_statement,\n write_pandas,\n write_parquet,\n)\nfrom feast.repo_config import FeastConfigBaseModel, RepoConfig\nfrom feast.saved_dataset import SavedDatasetStorage\nfrom feast.usage import log_exceptions_and_usage\n\ntry:\n from snowflake.connector import SnowflakeConnection\nexcept ImportError as e:\n from feast.errors import FeastExtrasDependencyImportError\n\n raise FeastExtrasDependencyImportError(\"snowflake\", str(e))\n\nif TYPE_CHECKING:\n from pyspark.sql import DataFrame, SparkSession\n\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n\nclass SnowflakeOfflineStoreConfig(FeastConfigBaseModel):\n \"\"\"Offline store config for Snowflake\"\"\"\n\n type: Literal[\"snowflake.offline\"] = \"snowflake.offline\"\n \"\"\" Offline store type selector \"\"\"\n\n config_path: Optional[str] = os.path.expanduser(\"~/.snowsql/config\")\n \"\"\" Snowflake config path -- absolute path required (Cant use ~)\"\"\"\n\n account: Optional[str] = None\n \"\"\" Snowflake deployment identifier -- drop .snowflakecomputing.com \"\"\"\n\n user: Optional[str] = None\n \"\"\" Snowflake user name \"\"\"\n\n password: Optional[str] = None\n \"\"\" Snowflake password \"\"\"\n\n role: Optional[str] = None\n \"\"\" Snowflake role name \"\"\"\n\n warehouse: Optional[str] = None\n \"\"\" Snowflake warehouse name \"\"\"\n\n authenticator: Optional[str] = None\n \"\"\" Snowflake authenticator name \"\"\"\n\n database: StrictStr\n \"\"\" Snowflake database name \"\"\"\n\n schema_: Optional[str] = Field(\"PUBLIC\", alias=\"schema\")\n \"\"\" Snowflake schema name \"\"\"\n\n storage_integration_name: Optional[str] = None\n \"\"\" Storage integration name in snowflake \"\"\"\n\n blob_export_location: Optional[str] = None\n \"\"\" Location (in S3, Google storage or Azure storage) where data is offloaded \"\"\"\n\n convert_timestamp_columns: Optional[bool] = None\n \"\"\" Convert timestamp columns on export to a Parquet-supported format \"\"\"\n\n class Config:\n allow_population_by_field_name = True\n\n\nclass SnowflakeOfflineStore(OfflineStore):\n @staticmethod\n @log_exceptions_and_usage(offline_store=\"snowflake\")\n def pull_latest_from_table_or_query(\n config: RepoConfig,\n data_source: DataSource,\n join_key_columns: List[str],\n feature_name_columns: List[str],\n timestamp_field: str,\n created_timestamp_column: Optional[str],\n start_date: datetime,\n end_date: datetime,\n ) -> RetrievalJob:\n assert isinstance(config.offline_store, SnowflakeOfflineStoreConfig)\n assert isinstance(data_source, SnowflakeSource)\n\n from_expression = data_source.get_table_query_string()\n if not data_source.database and data_source.table:\n from_expression = f'\"{config.offline_store.database}\".\"{config.offline_store.schema_}\".{from_expression}'\n\n if join_key_columns:\n partition_by_join_key_string = '\"' + '\", \"'.join(join_key_columns) + '\"'\n partition_by_join_key_string = (\n \"PARTITION BY \" + partition_by_join_key_string\n )\n else:\n partition_by_join_key_string = \"\"\n\n timestamp_columns = [timestamp_field]\n if created_timestamp_column:\n timestamp_columns.append(created_timestamp_column)\n\n timestamp_desc_string = '\"' + '\" DESC, \"'.join(timestamp_columns) + '\" DESC'\n field_string = (\n '\"'\n + '\", \"'.join(join_key_columns + feature_name_columns + timestamp_columns)\n + '\"'\n )\n\n if config.offline_store.convert_timestamp_columns:\n select_fields = list(\n map(\n lambda field_name: f'\"{field_name}\"',\n join_key_columns + feature_name_columns,\n )\n )\n select_timestamps = list(\n map(\n lambda field_name: f\"to_varchar({field_name}, 'YYYY-MM-DD\\\"T\\\"HH24:MI:SS.FFTZH:TZM') as {field_name}\",\n timestamp_columns,\n )\n )\n inner_field_string = \", \".join(select_fields + select_timestamps)\n else:\n select_fields = list(\n map(\n lambda field_name: f'\"{field_name}\"',\n join_key_columns + feature_name_columns + timestamp_columns,\n )\n )\n inner_field_string = \", \".join(select_fields)\n\n if data_source.snowflake_options.warehouse:\n config.offline_store.warehouse = data_source.snowflake_options.warehouse\n\n with GetSnowflakeConnection(config.offline_store) as conn:\n snowflake_conn = conn\n\n start_date = start_date.astimezone(tz=utc)\n end_date = end_date.astimezone(tz=utc)\n\n query = f\"\"\"\n SELECT\n {field_string}\n {f''', TRIM({repr(DUMMY_ENTITY_VAL)}::VARIANT,'\"') AS \"{DUMMY_ENTITY_ID}\"''' if not join_key_columns else \"\"}\n FROM (\n SELECT {inner_field_string},\n ROW_NUMBER() OVER({partition_by_join_key_string} ORDER BY {timestamp_desc_string}) AS \"_feast_row\"\n FROM {from_expression}\n WHERE \"{timestamp_field}\" BETWEEN TIMESTAMP '{start_date}' AND TIMESTAMP '{end_date}'\n )\n WHERE \"_feast_row\" = 1\n \"\"\"\n\n return SnowflakeRetrievalJob(\n query=query,\n snowflake_conn=snowflake_conn,\n config=config,\n full_feature_names=False,\n on_demand_feature_views=None,\n )\n\n @staticmethod\n @log_exceptions_and_usage(offline_store=\"snowflake\")\n def pull_all_from_table_or_query(\n config: RepoConfig,\n data_source: DataSource,\n join_key_columns: List[str],\n feature_name_columns: List[str],\n timestamp_field: str,\n start_date: datetime,\n end_date: datetime,\n ) -> RetrievalJob:\n assert isinstance(config.offline_store, SnowflakeOfflineStoreConfig)\n assert isinstance(data_source, SnowflakeSource)\n\n from_expression = data_source.get_table_query_string()\n if not data_source.database and data_source.table:\n from_expression = f'\"{config.offline_store.database}\".\"{config.offline_store.schema_}\".{from_expression}'\n\n field_string = (\n '\"'\n + '\", \"'.join(join_key_columns + feature_name_columns + [timestamp_field])\n + '\"'\n )\n\n if data_source.snowflake_options.warehouse:\n config.offline_store.warehouse = data_source.snowflake_options.warehouse\n\n with GetSnowflakeConnection(config.offline_store) as conn:\n snowflake_conn = conn\n\n start_date = start_date.astimezone(tz=utc)\n end_date = end_date.astimezone(tz=utc)\n\n query = f\"\"\"\n SELECT {field_string}\n FROM {from_expression}\n WHERE \"{timestamp_field}\" BETWEEN TIMESTAMP '{start_date}' AND TIMESTAMP '{end_date}'\n \"\"\"\n\n return SnowflakeRetrievalJob(\n query=query,\n snowflake_conn=snowflake_conn,\n config=config,\n full_feature_names=False,\n )\n\n @staticmethod\n @log_exceptions_and_usage(offline_store=\"snowflake\")\n def get_historical_features(\n config: RepoConfig,\n feature_views: List[FeatureView],\n feature_refs: List[str],\n entity_df: Union[pd.DataFrame, str],\n registry: BaseRegistry,\n project: str,\n full_feature_names: bool = False,\n ) -> RetrievalJob:\n assert isinstance(config.offline_store, SnowflakeOfflineStoreConfig)\n for fv in feature_views:\n assert isinstance(fv.batch_source, SnowflakeSource)\n\n with GetSnowflakeConnection(config.offline_store) as conn:\n snowflake_conn = conn\n\n entity_schema = _get_entity_schema(entity_df, snowflake_conn, config)\n\n entity_df_event_timestamp_col = (\n offline_utils.infer_event_timestamp_from_entity_df(entity_schema)\n )\n\n entity_df_event_timestamp_range = _get_entity_df_event_timestamp_range(\n entity_df,\n entity_df_event_timestamp_col,\n snowflake_conn,\n )\n\n @contextlib.contextmanager\n def query_generator() -> Iterator[str]:\n\n table_name = offline_utils.get_temp_entity_table_name()\n\n _upload_entity_df(entity_df, snowflake_conn, config, table_name)\n\n expected_join_keys = offline_utils.get_expected_join_keys(\n project, feature_views, registry\n )\n\n offline_utils.assert_expected_columns_in_entity_df(\n entity_schema, expected_join_keys, entity_df_event_timestamp_col\n )\n\n # Build a query context containing all information required to template the Snowflake SQL query\n query_context = offline_utils.get_feature_view_query_context(\n feature_refs,\n feature_views,\n registry,\n project,\n entity_df_event_timestamp_range,\n )\n\n query_context = _fix_entity_selections_identifiers(query_context)\n\n # Generate the Snowflake SQL query from the query context\n query = offline_utils.build_point_in_time_query(\n query_context,\n left_table_query_string=table_name,\n entity_df_event_timestamp_col=entity_df_event_timestamp_col,\n entity_df_columns=entity_schema.keys(),\n query_template=MULTIPLE_FEATURE_VIEW_POINT_IN_TIME_JOIN,\n full_feature_names=full_feature_names,\n )\n\n yield query\n\n return SnowflakeRetrievalJob(\n query=query_generator,\n snowflake_conn=snowflake_conn,\n config=config,\n full_feature_names=full_feature_names,\n on_demand_feature_views=OnDemandFeatureView.get_requested_odfvs(\n feature_refs, project, registry\n ),\n metadata=RetrievalMetadata(\n features=feature_refs,\n keys=list(entity_schema.keys() - {entity_df_event_timestamp_col}),\n min_event_timestamp=entity_df_event_timestamp_range[0],\n max_event_timestamp=entity_df_event_timestamp_range[1],\n ),\n )\n\n @staticmethod\n def write_logged_features(\n config: RepoConfig,\n data: Union[pyarrow.Table, Path],\n source: LoggingSource,\n logging_config: LoggingConfig,\n registry: BaseRegistry,\n ):\n assert isinstance(logging_config.destination, SnowflakeLoggingDestination)\n\n with GetSnowflakeConnection(config.offline_store) as conn:\n snowflake_conn = conn\n\n if isinstance(data, Path):\n write_parquet(\n snowflake_conn,\n data,\n source.get_schema(registry),\n table_name=logging_config.destination.table_name,\n auto_create_table=True,\n )\n else:\n write_pandas(\n snowflake_conn,\n data.to_pandas(),\n table_name=logging_config.destination.table_name,\n auto_create_table=True,\n )\n\n @staticmethod\n def offline_write_batch(\n config: RepoConfig,\n feature_view: FeatureView,\n table: pyarrow.Table,\n progress: Optional[Callable[[int], Any]],\n ):\n assert isinstance(config.offline_store, SnowflakeOfflineStoreConfig)\n assert isinstance(feature_view.batch_source, SnowflakeSource)\n\n pa_schema, column_names = offline_utils.get_pyarrow_schema_from_batch_source(\n config, feature_view.batch_source\n )\n if column_names != table.column_names:\n raise ValueError(\n f\"The input pyarrow table has schema {table.schema} with the incorrect columns {table.column_names}. \"\n f\"The schema is expected to be {pa_schema} with the columns (in this exact order) to be {column_names}.\"\n )\n\n if table.schema != pa_schema:\n table = table.cast(pa_schema)\n\n with GetSnowflakeConnection(config.offline_store) as conn:\n snowflake_conn = conn\n\n write_pandas(\n snowflake_conn,\n table.to_pandas(),\n table_name=feature_view.batch_source.table,\n auto_create_table=True,\n )\n\n\nclass SnowflakeRetrievalJob(RetrievalJob):\n def __init__(\n self,\n query: Union[str, Callable[[], ContextManager[str]]],\n snowflake_conn: SnowflakeConnection,\n config: RepoConfig,\n full_feature_names: bool,\n on_demand_feature_views: Optional[List[OnDemandFeatureView]] = None,\n metadata: Optional[RetrievalMetadata] = None,\n ):\n\n if not isinstance(query, str):\n self._query_generator = query\n else:\n\n @contextlib.contextmanager\n def query_generator() -> Iterator[str]:\n assert isinstance(query, str)\n yield query\n\n self._query_generator = query_generator\n\n self.snowflake_conn = snowflake_conn\n self.config = config\n self._full_feature_names = full_feature_names\n self._on_demand_feature_views = on_demand_feature_views or []\n self._metadata = metadata\n self.export_path: Optional[str]\n if self.config.offline_store.blob_export_location:\n self.export_path = f\"{self.config.offline_store.blob_export_location}/{self.config.project}/{uuid.uuid4()}\"\n else:\n self.export_path = None\n\n @property\n def full_feature_names(self) -> bool:\n return self._full_feature_names\n\n @property\n def on_demand_feature_views(self) -> List[OnDemandFeatureView]:\n return self._on_demand_feature_views\n\n def _to_df_internal(self, timeout: Optional[int] = None) -> pd.DataFrame:\n with self._query_generator() as query:\n\n df = execute_snowflake_statement(\n self.snowflake_conn, query\n ).fetch_pandas_all()\n\n return df\n\n def _to_arrow_internal(self, timeout: Optional[int] = None) -> pyarrow.Table:\n with self._query_generator() as query:\n\n pa_table = execute_snowflake_statement(\n self.snowflake_conn, query\n ).fetch_arrow_all()\n\n if pa_table:\n return pa_table\n else:\n empty_result = execute_snowflake_statement(self.snowflake_conn, query)\n\n return pyarrow.Table.from_pandas(\n pd.DataFrame(columns=[md.name for md in empty_result.description])\n )\n\n def to_snowflake(self, table_name: str, temporary=False) -> None:\n \"\"\"Save dataset as a new Snowflake table\"\"\"\n if self.on_demand_feature_views:\n transformed_df = self.to_df()\n\n write_pandas(\n self.snowflake_conn, transformed_df, table_name, auto_create_table=True\n )\n\n return None\n\n with self._query_generator() as query:\n query = f'CREATE {\"TEMPORARY\" if temporary else \"\"} TABLE IF NOT EXISTS \"{table_name}\" AS ({query});\\n'\n\n execute_snowflake_statement(self.snowflake_conn, query)\n\n def to_sql(self) -> str:\n \"\"\"\n Returns the SQL query that will be executed in Snowflake to build the historical feature table.\n \"\"\"\n with self._query_generator() as query:\n return query\n\n def to_spark_df(self, spark_session: \"SparkSession\") -> \"DataFrame\":\n \"\"\"\n Method to convert snowflake query results to pyspark data frame.\n\n Args:\n spark_session: spark Session variable of current environment.\n\n Returns:\n spark_df: A pyspark dataframe.\n \"\"\"\n\n try:\n from pyspark.sql import DataFrame, SparkSession\n except ImportError as e:\n from feast.errors import FeastExtrasDependencyImportError\n\n raise FeastExtrasDependencyImportError(\"spark\", str(e))\n\n if isinstance(spark_session, SparkSession):\n with self._query_generator() as query:\n\n arrow_batches = execute_snowflake_statement(\n self.snowflake_conn, query\n ).fetch_arrow_batches()\n\n if arrow_batches:\n spark_df = reduce(\n DataFrame.unionAll,\n [\n spark_session.createDataFrame(batch.to_pandas())\n for batch in arrow_batches\n ],\n )\n\n return spark_df\n\n else:\n raise EntitySQLEmptyResults(query)\n\n else:\n raise InvalidSparkSessionException(spark_session)\n\n def persist(self, storage: SavedDatasetStorage, allow_overwrite: bool = False):\n assert isinstance(storage, SavedDatasetSnowflakeStorage)\n self.to_snowflake(table_name=storage.snowflake_options.table)\n\n @property\n def metadata(self) -> Optional[RetrievalMetadata]:\n return self._metadata\n\n def supports_remote_storage_export(self) -> bool:\n return (\n self.config.offline_store.storage_integration_name\n and self.config.offline_store.blob_export_location\n )\n\n def to_remote_storage(self) -> List[str]:\n if not self.export_path:\n raise ValueError(\n \"to_remote_storage() requires `blob_export_location` to be specified in config\"\n )\n if not self.config.offline_store.storage_integration_name:\n raise ValueError(\n \"to_remote_storage() requires `storage_integration_name` to be specified in config\"\n )\n\n table = f\"temporary_{uuid.uuid4().hex}\"\n self.to_snowflake(table)\n\n query = f\"\"\"\n COPY INTO '{self.export_path}/{table}' FROM \"{self.config.offline_store.database}\".\"{self.config.offline_store.schema_}\".\"{table}\"\\n\n STORAGE_INTEGRATION = {self.config.offline_store.storage_integration_name}\\n\n FILE_FORMAT = (TYPE = PARQUET)\n DETAILED_OUTPUT = TRUE\n HEADER = TRUE\n \"\"\"\n cursor = execute_snowflake_statement(self.snowflake_conn, query)\n\n file_name_column_index = [\n idx for idx, rm in enumerate(cursor.description) if rm.name == \"FILE_NAME\"\n ][0]\n return [\n f\"{self.export_path}/{row[file_name_column_index]}\"\n for row in cursor.fetchall()\n ]\n\n\ndef _get_entity_schema(\n entity_df: Union[pd.DataFrame, str],\n snowflake_conn: SnowflakeConnection,\n config: RepoConfig,\n) -> Dict[str, np.dtype]:\n\n if isinstance(entity_df, pd.DataFrame):\n\n return dict(zip(entity_df.columns, entity_df.dtypes))\n\n else:\n\n query = f\"SELECT * FROM ({entity_df}) LIMIT 1\"\n limited_entity_df = execute_snowflake_statement(\n snowflake_conn, query\n ).fetch_pandas_all()\n\n return dict(zip(limited_entity_df.columns, limited_entity_df.dtypes))\n\n\ndef _upload_entity_df(\n entity_df: Union[pd.DataFrame, str],\n snowflake_conn: SnowflakeConnection,\n config: RepoConfig,\n table_name: str,\n) -> None:\n\n if isinstance(entity_df, pd.DataFrame):\n # Write the data from the DataFrame to the table\n # Known issues with following entity data types: BINARY\n write_pandas(\n snowflake_conn,\n entity_df,\n table_name,\n auto_create_table=True,\n create_temp_table=True,\n )\n\n return None\n elif isinstance(entity_df, str):\n # If the entity_df is a string (SQL query), create a Snowflake table out of it,\n query = f'CREATE TEMPORARY TABLE \"{table_name}\" AS ({entity_df})'\n execute_snowflake_statement(snowflake_conn, query)\n\n return None\n else:\n raise InvalidEntityType(type(entity_df))\n\n\ndef _fix_entity_selections_identifiers(query_context) -> list:\n\n for i, qc in enumerate(query_context):\n for j, es in enumerate(qc.entity_selections):\n query_context[i].entity_selections[j] = f'\"{es}\"'.replace(\" AS \", '\" AS \"')\n\n return query_context\n\n\ndef _get_entity_df_event_timestamp_range(\n entity_df: Union[pd.DataFrame, str],\n entity_df_event_timestamp_col: str,\n snowflake_conn: SnowflakeConnection,\n) -> Tuple[datetime, datetime]:\n if isinstance(entity_df, pd.DataFrame):\n entity_df_event_timestamp = entity_df.loc[\n :, entity_df_event_timestamp_col\n ].infer_objects()\n if pd.api.types.is_string_dtype(entity_df_event_timestamp):\n entity_df_event_timestamp = pd.to_datetime(\n entity_df_event_timestamp, utc=True\n )\n entity_df_event_timestamp_range = (\n entity_df_event_timestamp.min().to_pydatetime(),\n entity_df_event_timestamp.max().to_pydatetime(),\n )\n elif isinstance(entity_df, str):\n # If the entity_df is a string (SQL query), determine range\n # from table\n query = f'SELECT MIN(\"{entity_df_event_timestamp_col}\") AS \"min_value\", MAX(\"{entity_df_event_timestamp_col}\") AS \"max_value\" FROM ({entity_df})'\n results = execute_snowflake_statement(snowflake_conn, query).fetchall()\n\n entity_df_event_timestamp_range = cast(Tuple[datetime, datetime], results[0])\n if (\n entity_df_event_timestamp_range[0] is None\n or entity_df_event_timestamp_range[1] is None\n ):\n raise EntitySQLEmptyResults(entity_df)\n else:\n raise InvalidEntityType(type(entity_df))\n\n return entity_df_event_timestamp_range\n\n\nMULTIPLE_FEATURE_VIEW_POINT_IN_TIME_JOIN = \"\"\"\n/*\n Compute a deterministic hash for the `left_table_query_string` that will be used throughout\n all the logic as the field to GROUP BY the data\n*/\nWITH \"entity_dataframe\" AS (\n SELECT *,\n \"{{entity_df_event_timestamp_col}}\" AS \"entity_timestamp\"\n {% for featureview in featureviews %}\n {% if featureview.entities %}\n ,(\n {% for entity in featureview.entities %}\n CAST(\"{{entity}}\" AS VARCHAR) ||\n {% endfor %}\n CAST(\"{{entity_df_event_timestamp_col}}\" AS VARCHAR)\n ) AS \"{{featureview.name}}__entity_row_unique_id\"\n {% else %}\n ,CAST(\"{{entity_df_event_timestamp_col}}\" AS VARCHAR) AS \"{{featureview.name}}__entity_row_unique_id\"\n {% endif %}\n {% endfor %}\n FROM \"{{ left_table_query_string }}\"\n),\n\n{% for featureview in featureviews %}\n\n\"{{ featureview.name }}__entity_dataframe\" AS (\n SELECT\n {{ featureview.entities | map('tojson') | join(', ')}}{% if featureview.entities %},{% else %}{% endif %}\n \"entity_timestamp\",\n \"{{featureview.name}}__entity_row_unique_id\"\n FROM \"entity_dataframe\"\n GROUP BY\n {{ featureview.entities | map('tojson') | join(', ')}}{% if featureview.entities %},{% else %}{% endif %}\n \"entity_timestamp\",\n \"{{featureview.name}}__entity_row_unique_id\"\n),\n\n/*\n This query template performs the point-in-time correctness join for a single feature set table\n to the provided entity table.\n\n 1. We first join the current feature_view to the entity dataframe that has been passed.\n This JOIN has the following logic:\n - For each row of the entity dataframe, only keep the rows where the `timestamp_field`\n is less than the one provided in the entity dataframe\n - If there a TTL for the current feature_view, also keep the rows where the `timestamp_field`\n is higher the the one provided minus the TTL\n - For each row, Join on the entity key and retrieve the `entity_row_unique_id` that has been\n computed previously\n\n The output of this CTE will contain all the necessary information and already filtered out most\n of the data that is not relevant.\n*/\n\n\"{{ featureview.name }}__subquery\" AS (\n SELECT\n \"{{ featureview.timestamp_field }}\" as \"event_timestamp\",\n {{'\"' ~ featureview.created_timestamp_column ~ '\" as \"created_timestamp\",' if featureview.created_timestamp_column else '' }}\n {{featureview.entity_selections | join(', ')}}{% if featureview.entity_selections %},{% else %}{% endif %}\n {% for feature in featureview.features %}\n \"{{ feature }}\" as {% if full_feature_names %}\"{{ featureview.name }}__{{featureview.field_mapping.get(feature, feature)}}\"{% else %}\"{{ featureview.field_mapping.get(feature, feature) }}\"{% endif %}{% if loop.last %}{% else %}, {% endif %}\n {% endfor %}\n FROM {{ featureview.table_subquery }}\n WHERE \"{{ featureview.timestamp_field }}\" <= '{{ featureview.max_event_timestamp }}'\n {% if featureview.ttl == 0 %}{% else %}\n AND \"{{ featureview.timestamp_field }}\" >= '{{ featureview.min_event_timestamp }}'\n {% endif %}\n),\n\n\"{{ featureview.name }}__base\" AS (\n SELECT\n \"subquery\".*,\n \"entity_dataframe\".\"entity_timestamp\",\n \"entity_dataframe\".\"{{featureview.name}}__entity_row_unique_id\"\n FROM \"{{ featureview.name }}__subquery\" AS \"subquery\"\n INNER JOIN \"{{ featureview.name }}__entity_dataframe\" AS \"entity_dataframe\"\n ON TRUE\n AND \"subquery\".\"event_timestamp\" <= \"entity_dataframe\".\"entity_timestamp\"\n\n {% if featureview.ttl == 0 %}{% else %}\n AND \"subquery\".\"event_timestamp\" >= TIMESTAMPADD(second,-{{ featureview.ttl }},\"entity_dataframe\".\"entity_timestamp\")\n {% endif %}\n\n {% for entity in featureview.entities %}\n AND \"subquery\".\"{{ entity }}\" = \"entity_dataframe\".\"{{ entity }}\"\n {% endfor %}\n),\n\n/*\n 2. If the `created_timestamp_column` has been set, we need to\n deduplicate the data first. This is done by calculating the\n `MAX(created_at_timestamp)` for each event_timestamp.\n We then join the data on the next CTE\n*/\n{% if featureview.created_timestamp_column %}\n\"{{ featureview.name }}__dedup\" AS (\n SELECT\n \"{{featureview.name}}__entity_row_unique_id\",\n \"event_timestamp\",\n MAX(\"created_timestamp\") AS \"created_timestamp\"\n FROM \"{{ featureview.name }}__base\"\n GROUP BY \"{{featureview.name}}__entity_row_unique_id\", \"event_timestamp\"\n),\n{% endif %}\n\n/*\n 3. The data has been filtered during the first CTE \"*__base\"\n Thus we only need to compute the latest timestamp of each feature.\n*/\n\"{{ featureview.name }}__latest\" AS (\n SELECT\n \"event_timestamp\",\n {% if featureview.created_timestamp_column %}\"created_timestamp\",{% endif %}\n \"{{featureview.name}}__entity_row_unique_id\"\n FROM\n (\n SELECT *,\n ROW_NUMBER() OVER(\n PARTITION BY \"{{featureview.name}}__entity_row_unique_id\"\n ORDER BY \"event_timestamp\" DESC{% if featureview.created_timestamp_column %},\"created_timestamp\" DESC{% endif %}\n ) AS \"row_number\"\n FROM \"{{ featureview.name }}__base\"\n {% if featureview.created_timestamp_column %}\n INNER JOIN \"{{ featureview.name }}__dedup\"\n USING (\"{{featureview.name}}__entity_row_unique_id\", \"event_timestamp\", \"created_timestamp\")\n {% endif %}\n )\n WHERE \"row_number\" = 1\n),\n\n/*\n 4. Once we know the latest value of each feature for a given timestamp,\n we can join again the data back to the original \"base\" dataset\n*/\n\"{{ featureview.name }}__cleaned\" AS (\n SELECT \"base\".*\n FROM \"{{ featureview.name }}__base\" AS \"base\"\n INNER JOIN \"{{ featureview.name }}__latest\"\n USING(\n \"{{featureview.name}}__entity_row_unique_id\",\n \"event_timestamp\"\n {% if featureview.created_timestamp_column %}\n ,\"created_timestamp\"\n {% endif %}\n )\n){% if loop.last %}{% else %}, {% endif %}\n\n\n{% endfor %}\n/*\n Joins the outputs of multiple time travel joins to a single table.\n The entity_dataframe dataset being our source of truth here.\n */\n\nSELECT \"{{ final_output_feature_names | join('\", \"')}}\"\nFROM \"entity_dataframe\"\n{% for featureview in featureviews %}\nLEFT JOIN (\n SELECT\n \"{{featureview.name}}__entity_row_unique_id\"\n {% for feature in featureview.features %}\n ,{% if full_feature_names %}\"{{ featureview.name }}__{{featureview.field_mapping.get(feature, feature)}}\"{% else %}\"{{ featureview.field_mapping.get(feature, feature) }}\"{% endif %}\n {% endfor %}\n FROM \"{{ featureview.name }}__cleaned\"\n) \"{{ featureview.name }}__cleaned\" USING (\"{{featureview.name}}__entity_row_unique_id\")\n{% endfor %}\n\"\"\"\n", "path": "sdk/python/feast/infra/offline_stores/snowflake.py"}], "after_files": [{"content": "import contextlib\nimport os\nimport uuid\nimport warnings\nfrom datetime import datetime\nfrom functools import reduce\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n ContextManager,\n Dict,\n Iterator,\n List,\n Optional,\n Tuple,\n Union,\n cast,\n)\n\nimport numpy as np\nimport pandas as pd\nimport pyarrow\nfrom pydantic import Field, StrictStr\nfrom pydantic.typing import Literal\nfrom pytz import utc\n\nfrom feast import OnDemandFeatureView\nfrom feast.data_source import DataSource\nfrom feast.errors import (\n EntitySQLEmptyResults,\n InvalidEntityType,\n InvalidSparkSessionException,\n)\nfrom feast.feature_logging import LoggingConfig, LoggingSource\nfrom feast.feature_view import DUMMY_ENTITY_ID, DUMMY_ENTITY_VAL, FeatureView\nfrom feast.infra.offline_stores import offline_utils\nfrom feast.infra.offline_stores.offline_store import (\n OfflineStore,\n RetrievalJob,\n RetrievalMetadata,\n)\nfrom feast.infra.offline_stores.snowflake_source import (\n SavedDatasetSnowflakeStorage,\n SnowflakeLoggingDestination,\n SnowflakeSource,\n)\nfrom feast.infra.registry.base_registry import BaseRegistry\nfrom feast.infra.utils.snowflake.snowflake_utils import (\n GetSnowflakeConnection,\n execute_snowflake_statement,\n write_pandas,\n write_parquet,\n)\nfrom feast.repo_config import FeastConfigBaseModel, RepoConfig\nfrom feast.saved_dataset import SavedDatasetStorage\nfrom feast.usage import log_exceptions_and_usage\n\ntry:\n from snowflake.connector import SnowflakeConnection\nexcept ImportError as e:\n from feast.errors import FeastExtrasDependencyImportError\n\n raise FeastExtrasDependencyImportError(\"snowflake\", str(e))\n\nif TYPE_CHECKING:\n from pyspark.sql import DataFrame, SparkSession\n\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n\nclass SnowflakeOfflineStoreConfig(FeastConfigBaseModel):\n \"\"\"Offline store config for Snowflake\"\"\"\n\n type: Literal[\"snowflake.offline\"] = \"snowflake.offline\"\n \"\"\" Offline store type selector \"\"\"\n\n config_path: Optional[str] = os.path.expanduser(\"~/.snowsql/config\")\n \"\"\" Snowflake config path -- absolute path required (Cant use ~)\"\"\"\n\n account: Optional[str] = None\n \"\"\" Snowflake deployment identifier -- drop .snowflakecomputing.com \"\"\"\n\n user: Optional[str] = None\n \"\"\" Snowflake user name \"\"\"\n\n password: Optional[str] = None\n \"\"\" Snowflake password \"\"\"\n\n role: Optional[str] = None\n \"\"\" Snowflake role name \"\"\"\n\n warehouse: Optional[str] = None\n \"\"\" Snowflake warehouse name \"\"\"\n\n authenticator: Optional[str] = None\n \"\"\" Snowflake authenticator name \"\"\"\n\n database: StrictStr\n \"\"\" Snowflake database name \"\"\"\n\n schema_: Optional[str] = Field(\"PUBLIC\", alias=\"schema\")\n \"\"\" Snowflake schema name \"\"\"\n\n storage_integration_name: Optional[str] = None\n \"\"\" Storage integration name in snowflake \"\"\"\n\n blob_export_location: Optional[str] = None\n \"\"\" Location (in S3, Google storage or Azure storage) where data is offloaded \"\"\"\n\n convert_timestamp_columns: Optional[bool] = None\n \"\"\" Convert timestamp columns on export to a Parquet-supported format \"\"\"\n\n class Config:\n allow_population_by_field_name = True\n\n\nclass SnowflakeOfflineStore(OfflineStore):\n @staticmethod\n @log_exceptions_and_usage(offline_store=\"snowflake\")\n def pull_latest_from_table_or_query(\n config: RepoConfig,\n data_source: DataSource,\n join_key_columns: List[str],\n feature_name_columns: List[str],\n timestamp_field: str,\n created_timestamp_column: Optional[str],\n start_date: datetime,\n end_date: datetime,\n ) -> RetrievalJob:\n assert isinstance(config.offline_store, SnowflakeOfflineStoreConfig)\n assert isinstance(data_source, SnowflakeSource)\n\n from_expression = data_source.get_table_query_string()\n if not data_source.database and data_source.table:\n from_expression = f'\"{config.offline_store.database}\".\"{config.offline_store.schema_}\".{from_expression}'\n\n if join_key_columns:\n partition_by_join_key_string = '\"' + '\", \"'.join(join_key_columns) + '\"'\n partition_by_join_key_string = (\n \"PARTITION BY \" + partition_by_join_key_string\n )\n else:\n partition_by_join_key_string = \"\"\n\n timestamp_columns = [timestamp_field]\n if created_timestamp_column:\n timestamp_columns.append(created_timestamp_column)\n\n timestamp_desc_string = '\"' + '\" DESC, \"'.join(timestamp_columns) + '\" DESC'\n field_string = (\n '\"'\n + '\", \"'.join(join_key_columns + feature_name_columns + timestamp_columns)\n + '\"'\n )\n\n if config.offline_store.convert_timestamp_columns:\n select_fields = list(\n map(\n lambda field_name: f'\"{field_name}\"',\n join_key_columns + feature_name_columns,\n )\n )\n select_timestamps = list(\n map(\n lambda field_name: f\"to_varchar({field_name}, 'YYYY-MM-DD\\\"T\\\"HH24:MI:SS.FFTZH:TZM') as {field_name}\",\n timestamp_columns,\n )\n )\n inner_field_string = \", \".join(select_fields + select_timestamps)\n else:\n select_fields = list(\n map(\n lambda field_name: f'\"{field_name}\"',\n join_key_columns + feature_name_columns + timestamp_columns,\n )\n )\n inner_field_string = \", \".join(select_fields)\n\n if data_source.snowflake_options.warehouse:\n config.offline_store.warehouse = data_source.snowflake_options.warehouse\n\n with GetSnowflakeConnection(config.offline_store) as conn:\n snowflake_conn = conn\n\n start_date = start_date.astimezone(tz=utc)\n end_date = end_date.astimezone(tz=utc)\n\n query = f\"\"\"\n SELECT\n {field_string}\n {f''', TRIM({repr(DUMMY_ENTITY_VAL)}::VARIANT,'\"') AS \"{DUMMY_ENTITY_ID}\"''' if not join_key_columns else \"\"}\n FROM (\n SELECT {inner_field_string},\n ROW_NUMBER() OVER({partition_by_join_key_string} ORDER BY {timestamp_desc_string}) AS \"_feast_row\"\n FROM {from_expression}\n WHERE \"{timestamp_field}\" BETWEEN TIMESTAMP '{start_date}' AND TIMESTAMP '{end_date}'\n )\n WHERE \"_feast_row\" = 1\n \"\"\"\n\n return SnowflakeRetrievalJob(\n query=query,\n snowflake_conn=snowflake_conn,\n config=config,\n full_feature_names=False,\n on_demand_feature_views=None,\n )\n\n @staticmethod\n @log_exceptions_and_usage(offline_store=\"snowflake\")\n def pull_all_from_table_or_query(\n config: RepoConfig,\n data_source: DataSource,\n join_key_columns: List[str],\n feature_name_columns: List[str],\n timestamp_field: str,\n start_date: datetime,\n end_date: datetime,\n ) -> RetrievalJob:\n assert isinstance(config.offline_store, SnowflakeOfflineStoreConfig)\n assert isinstance(data_source, SnowflakeSource)\n\n from_expression = data_source.get_table_query_string()\n if not data_source.database and data_source.table:\n from_expression = f'\"{config.offline_store.database}\".\"{config.offline_store.schema_}\".{from_expression}'\n\n field_string = (\n '\"'\n + '\", \"'.join(join_key_columns + feature_name_columns + [timestamp_field])\n + '\"'\n )\n\n if data_source.snowflake_options.warehouse:\n config.offline_store.warehouse = data_source.snowflake_options.warehouse\n\n with GetSnowflakeConnection(config.offline_store) as conn:\n snowflake_conn = conn\n\n start_date = start_date.astimezone(tz=utc)\n end_date = end_date.astimezone(tz=utc)\n\n query = f\"\"\"\n SELECT {field_string}\n FROM {from_expression}\n WHERE \"{timestamp_field}\" BETWEEN TIMESTAMP '{start_date}' AND TIMESTAMP '{end_date}'\n \"\"\"\n\n return SnowflakeRetrievalJob(\n query=query,\n snowflake_conn=snowflake_conn,\n config=config,\n full_feature_names=False,\n )\n\n @staticmethod\n @log_exceptions_and_usage(offline_store=\"snowflake\")\n def get_historical_features(\n config: RepoConfig,\n feature_views: List[FeatureView],\n feature_refs: List[str],\n entity_df: Union[pd.DataFrame, str],\n registry: BaseRegistry,\n project: str,\n full_feature_names: bool = False,\n ) -> RetrievalJob:\n assert isinstance(config.offline_store, SnowflakeOfflineStoreConfig)\n for fv in feature_views:\n assert isinstance(fv.batch_source, SnowflakeSource)\n\n with GetSnowflakeConnection(config.offline_store) as conn:\n snowflake_conn = conn\n\n entity_schema = _get_entity_schema(entity_df, snowflake_conn, config)\n\n entity_df_event_timestamp_col = (\n offline_utils.infer_event_timestamp_from_entity_df(entity_schema)\n )\n\n entity_df_event_timestamp_range = _get_entity_df_event_timestamp_range(\n entity_df,\n entity_df_event_timestamp_col,\n snowflake_conn,\n )\n\n @contextlib.contextmanager\n def query_generator() -> Iterator[str]:\n\n table_name = offline_utils.get_temp_entity_table_name()\n\n _upload_entity_df(entity_df, snowflake_conn, config, table_name)\n\n expected_join_keys = offline_utils.get_expected_join_keys(\n project, feature_views, registry\n )\n\n offline_utils.assert_expected_columns_in_entity_df(\n entity_schema, expected_join_keys, entity_df_event_timestamp_col\n )\n\n # Build a query context containing all information required to template the Snowflake SQL query\n query_context = offline_utils.get_feature_view_query_context(\n feature_refs,\n feature_views,\n registry,\n project,\n entity_df_event_timestamp_range,\n )\n\n query_context = _fix_entity_selections_identifiers(query_context)\n\n # Generate the Snowflake SQL query from the query context\n query = offline_utils.build_point_in_time_query(\n query_context,\n left_table_query_string=table_name,\n entity_df_event_timestamp_col=entity_df_event_timestamp_col,\n entity_df_columns=entity_schema.keys(),\n query_template=MULTIPLE_FEATURE_VIEW_POINT_IN_TIME_JOIN,\n full_feature_names=full_feature_names,\n )\n\n yield query\n\n return SnowflakeRetrievalJob(\n query=query_generator,\n snowflake_conn=snowflake_conn,\n config=config,\n full_feature_names=full_feature_names,\n on_demand_feature_views=OnDemandFeatureView.get_requested_odfvs(\n feature_refs, project, registry\n ),\n metadata=RetrievalMetadata(\n features=feature_refs,\n keys=list(entity_schema.keys() - {entity_df_event_timestamp_col}),\n min_event_timestamp=entity_df_event_timestamp_range[0],\n max_event_timestamp=entity_df_event_timestamp_range[1],\n ),\n )\n\n @staticmethod\n def write_logged_features(\n config: RepoConfig,\n data: Union[pyarrow.Table, Path],\n source: LoggingSource,\n logging_config: LoggingConfig,\n registry: BaseRegistry,\n ):\n assert isinstance(logging_config.destination, SnowflakeLoggingDestination)\n\n with GetSnowflakeConnection(config.offline_store) as conn:\n snowflake_conn = conn\n\n if isinstance(data, Path):\n write_parquet(\n snowflake_conn,\n data,\n source.get_schema(registry),\n table_name=logging_config.destination.table_name,\n auto_create_table=True,\n )\n else:\n write_pandas(\n snowflake_conn,\n data.to_pandas(),\n table_name=logging_config.destination.table_name,\n auto_create_table=True,\n )\n\n @staticmethod\n def offline_write_batch(\n config: RepoConfig,\n feature_view: FeatureView,\n table: pyarrow.Table,\n progress: Optional[Callable[[int], Any]],\n ):\n assert isinstance(config.offline_store, SnowflakeOfflineStoreConfig)\n assert isinstance(feature_view.batch_source, SnowflakeSource)\n\n pa_schema, column_names = offline_utils.get_pyarrow_schema_from_batch_source(\n config, feature_view.batch_source\n )\n if column_names != table.column_names:\n raise ValueError(\n f\"The input pyarrow table has schema {table.schema} with the incorrect columns {table.column_names}. \"\n f\"The schema is expected to be {pa_schema} with the columns (in this exact order) to be {column_names}.\"\n )\n\n if table.schema != pa_schema:\n table = table.cast(pa_schema)\n\n with GetSnowflakeConnection(config.offline_store) as conn:\n snowflake_conn = conn\n\n write_pandas(\n snowflake_conn,\n table.to_pandas(),\n table_name=feature_view.batch_source.table,\n auto_create_table=True,\n )\n\n\nclass SnowflakeRetrievalJob(RetrievalJob):\n def __init__(\n self,\n query: Union[str, Callable[[], ContextManager[str]]],\n snowflake_conn: SnowflakeConnection,\n config: RepoConfig,\n full_feature_names: bool,\n on_demand_feature_views: Optional[List[OnDemandFeatureView]] = None,\n metadata: Optional[RetrievalMetadata] = None,\n ):\n\n if not isinstance(query, str):\n self._query_generator = query\n else:\n\n @contextlib.contextmanager\n def query_generator() -> Iterator[str]:\n assert isinstance(query, str)\n yield query\n\n self._query_generator = query_generator\n\n self.snowflake_conn = snowflake_conn\n self.config = config\n self._full_feature_names = full_feature_names\n self._on_demand_feature_views = on_demand_feature_views or []\n self._metadata = metadata\n self.export_path: Optional[str]\n if self.config.offline_store.blob_export_location:\n self.export_path = f\"{self.config.offline_store.blob_export_location}/{self.config.project}/{uuid.uuid4()}\"\n else:\n self.export_path = None\n\n @property\n def full_feature_names(self) -> bool:\n return self._full_feature_names\n\n @property\n def on_demand_feature_views(self) -> List[OnDemandFeatureView]:\n return self._on_demand_feature_views\n\n def _to_df_internal(self, timeout: Optional[int] = None) -> pd.DataFrame:\n with self._query_generator() as query:\n\n df = execute_snowflake_statement(\n self.snowflake_conn, query\n ).fetch_pandas_all()\n\n return df\n\n def _to_arrow_internal(self, timeout: Optional[int] = None) -> pyarrow.Table:\n with self._query_generator() as query:\n\n pa_table = execute_snowflake_statement(\n self.snowflake_conn, query\n ).fetch_arrow_all()\n\n if pa_table:\n return pa_table\n else:\n empty_result = execute_snowflake_statement(self.snowflake_conn, query)\n\n return pyarrow.Table.from_pandas(\n pd.DataFrame(columns=[md.name for md in empty_result.description])\n )\n\n def to_snowflake(self, table_name: str, temporary=False) -> None:\n \"\"\"Save dataset as a new Snowflake table\"\"\"\n if self.on_demand_feature_views:\n transformed_df = self.to_df()\n\n write_pandas(\n self.snowflake_conn, transformed_df, table_name, auto_create_table=True\n )\n\n return None\n\n with self._query_generator() as query:\n query = f'CREATE {\"TEMPORARY\" if temporary else \"\"} TABLE IF NOT EXISTS \"{table_name}\" AS ({query});\\n'\n\n execute_snowflake_statement(self.snowflake_conn, query)\n\n def to_sql(self) -> str:\n \"\"\"\n Returns the SQL query that will be executed in Snowflake to build the historical feature table.\n \"\"\"\n with self._query_generator() as query:\n return query\n\n def to_spark_df(self, spark_session: \"SparkSession\") -> \"DataFrame\":\n \"\"\"\n Method to convert snowflake query results to pyspark data frame.\n\n Args:\n spark_session: spark Session variable of current environment.\n\n Returns:\n spark_df: A pyspark dataframe.\n \"\"\"\n\n try:\n from pyspark.sql import DataFrame, SparkSession\n except ImportError as e:\n from feast.errors import FeastExtrasDependencyImportError\n\n raise FeastExtrasDependencyImportError(\"spark\", str(e))\n\n if isinstance(spark_session, SparkSession):\n with self._query_generator() as query:\n\n arrow_batches = execute_snowflake_statement(\n self.snowflake_conn, query\n ).fetch_arrow_batches()\n\n if arrow_batches:\n spark_df = reduce(\n DataFrame.unionAll,\n [\n spark_session.createDataFrame(batch.to_pandas())\n for batch in arrow_batches\n ],\n )\n\n return spark_df\n\n else:\n raise EntitySQLEmptyResults(query)\n\n else:\n raise InvalidSparkSessionException(spark_session)\n\n def persist(self, storage: SavedDatasetStorage, allow_overwrite: bool = False):\n assert isinstance(storage, SavedDatasetSnowflakeStorage)\n self.to_snowflake(table_name=storage.snowflake_options.table)\n\n @property\n def metadata(self) -> Optional[RetrievalMetadata]:\n return self._metadata\n\n def supports_remote_storage_export(self) -> bool:\n return (\n self.config.offline_store.storage_integration_name\n and self.config.offline_store.blob_export_location\n )\n\n def to_remote_storage(self) -> List[str]:\n if not self.export_path:\n raise ValueError(\n \"to_remote_storage() requires `blob_export_location` to be specified in config\"\n )\n if not self.config.offline_store.storage_integration_name:\n raise ValueError(\n \"to_remote_storage() requires `storage_integration_name` to be specified in config\"\n )\n\n table = f\"temporary_{uuid.uuid4().hex}\"\n self.to_snowflake(table, temporary=True)\n\n query = f\"\"\"\n COPY INTO '{self.export_path}/{table}' FROM \"{self.config.offline_store.database}\".\"{self.config.offline_store.schema_}\".\"{table}\"\\n\n STORAGE_INTEGRATION = {self.config.offline_store.storage_integration_name}\\n\n FILE_FORMAT = (TYPE = PARQUET)\n DETAILED_OUTPUT = TRUE\n HEADER = TRUE\n \"\"\"\n cursor = execute_snowflake_statement(self.snowflake_conn, query)\n\n file_name_column_index = [\n idx for idx, rm in enumerate(cursor.description) if rm.name == \"FILE_NAME\"\n ][0]\n return [\n f\"{self.export_path}/{row[file_name_column_index]}\"\n for row in cursor.fetchall()\n ]\n\n\ndef _get_entity_schema(\n entity_df: Union[pd.DataFrame, str],\n snowflake_conn: SnowflakeConnection,\n config: RepoConfig,\n) -> Dict[str, np.dtype]:\n\n if isinstance(entity_df, pd.DataFrame):\n\n return dict(zip(entity_df.columns, entity_df.dtypes))\n\n else:\n\n query = f\"SELECT * FROM ({entity_df}) LIMIT 1\"\n limited_entity_df = execute_snowflake_statement(\n snowflake_conn, query\n ).fetch_pandas_all()\n\n return dict(zip(limited_entity_df.columns, limited_entity_df.dtypes))\n\n\ndef _upload_entity_df(\n entity_df: Union[pd.DataFrame, str],\n snowflake_conn: SnowflakeConnection,\n config: RepoConfig,\n table_name: str,\n) -> None:\n\n if isinstance(entity_df, pd.DataFrame):\n # Write the data from the DataFrame to the table\n # Known issues with following entity data types: BINARY\n write_pandas(\n snowflake_conn,\n entity_df,\n table_name,\n auto_create_table=True,\n create_temp_table=True,\n )\n\n return None\n elif isinstance(entity_df, str):\n # If the entity_df is a string (SQL query), create a Snowflake table out of it,\n query = f'CREATE TEMPORARY TABLE \"{table_name}\" AS ({entity_df})'\n execute_snowflake_statement(snowflake_conn, query)\n\n return None\n else:\n raise InvalidEntityType(type(entity_df))\n\n\ndef _fix_entity_selections_identifiers(query_context) -> list:\n\n for i, qc in enumerate(query_context):\n for j, es in enumerate(qc.entity_selections):\n query_context[i].entity_selections[j] = f'\"{es}\"'.replace(\" AS \", '\" AS \"')\n\n return query_context\n\n\ndef _get_entity_df_event_timestamp_range(\n entity_df: Union[pd.DataFrame, str],\n entity_df_event_timestamp_col: str,\n snowflake_conn: SnowflakeConnection,\n) -> Tuple[datetime, datetime]:\n if isinstance(entity_df, pd.DataFrame):\n entity_df_event_timestamp = entity_df.loc[\n :, entity_df_event_timestamp_col\n ].infer_objects()\n if pd.api.types.is_string_dtype(entity_df_event_timestamp):\n entity_df_event_timestamp = pd.to_datetime(\n entity_df_event_timestamp, utc=True\n )\n entity_df_event_timestamp_range = (\n entity_df_event_timestamp.min().to_pydatetime(),\n entity_df_event_timestamp.max().to_pydatetime(),\n )\n elif isinstance(entity_df, str):\n # If the entity_df is a string (SQL query), determine range\n # from table\n query = f'SELECT MIN(\"{entity_df_event_timestamp_col}\") AS \"min_value\", MAX(\"{entity_df_event_timestamp_col}\") AS \"max_value\" FROM ({entity_df})'\n results = execute_snowflake_statement(snowflake_conn, query).fetchall()\n\n entity_df_event_timestamp_range = cast(Tuple[datetime, datetime], results[0])\n if (\n entity_df_event_timestamp_range[0] is None\n or entity_df_event_timestamp_range[1] is None\n ):\n raise EntitySQLEmptyResults(entity_df)\n else:\n raise InvalidEntityType(type(entity_df))\n\n return entity_df_event_timestamp_range\n\n\nMULTIPLE_FEATURE_VIEW_POINT_IN_TIME_JOIN = \"\"\"\n/*\n Compute a deterministic hash for the `left_table_query_string` that will be used throughout\n all the logic as the field to GROUP BY the data\n*/\nWITH \"entity_dataframe\" AS (\n SELECT *,\n \"{{entity_df_event_timestamp_col}}\" AS \"entity_timestamp\"\n {% for featureview in featureviews %}\n {% if featureview.entities %}\n ,(\n {% for entity in featureview.entities %}\n CAST(\"{{entity}}\" AS VARCHAR) ||\n {% endfor %}\n CAST(\"{{entity_df_event_timestamp_col}}\" AS VARCHAR)\n ) AS \"{{featureview.name}}__entity_row_unique_id\"\n {% else %}\n ,CAST(\"{{entity_df_event_timestamp_col}}\" AS VARCHAR) AS \"{{featureview.name}}__entity_row_unique_id\"\n {% endif %}\n {% endfor %}\n FROM \"{{ left_table_query_string }}\"\n),\n\n{% for featureview in featureviews %}\n\n\"{{ featureview.name }}__entity_dataframe\" AS (\n SELECT\n {{ featureview.entities | map('tojson') | join(', ')}}{% if featureview.entities %},{% else %}{% endif %}\n \"entity_timestamp\",\n \"{{featureview.name}}__entity_row_unique_id\"\n FROM \"entity_dataframe\"\n GROUP BY\n {{ featureview.entities | map('tojson') | join(', ')}}{% if featureview.entities %},{% else %}{% endif %}\n \"entity_timestamp\",\n \"{{featureview.name}}__entity_row_unique_id\"\n),\n\n/*\n This query template performs the point-in-time correctness join for a single feature set table\n to the provided entity table.\n\n 1. We first join the current feature_view to the entity dataframe that has been passed.\n This JOIN has the following logic:\n - For each row of the entity dataframe, only keep the rows where the `timestamp_field`\n is less than the one provided in the entity dataframe\n - If there a TTL for the current feature_view, also keep the rows where the `timestamp_field`\n is higher the the one provided minus the TTL\n - For each row, Join on the entity key and retrieve the `entity_row_unique_id` that has been\n computed previously\n\n The output of this CTE will contain all the necessary information and already filtered out most\n of the data that is not relevant.\n*/\n\n\"{{ featureview.name }}__subquery\" AS (\n SELECT\n \"{{ featureview.timestamp_field }}\" as \"event_timestamp\",\n {{'\"' ~ featureview.created_timestamp_column ~ '\" as \"created_timestamp\",' if featureview.created_timestamp_column else '' }}\n {{featureview.entity_selections | join(', ')}}{% if featureview.entity_selections %},{% else %}{% endif %}\n {% for feature in featureview.features %}\n \"{{ feature }}\" as {% if full_feature_names %}\"{{ featureview.name }}__{{featureview.field_mapping.get(feature, feature)}}\"{% else %}\"{{ featureview.field_mapping.get(feature, feature) }}\"{% endif %}{% if loop.last %}{% else %}, {% endif %}\n {% endfor %}\n FROM {{ featureview.table_subquery }}\n WHERE \"{{ featureview.timestamp_field }}\" <= '{{ featureview.max_event_timestamp }}'\n {% if featureview.ttl == 0 %}{% else %}\n AND \"{{ featureview.timestamp_field }}\" >= '{{ featureview.min_event_timestamp }}'\n {% endif %}\n),\n\n\"{{ featureview.name }}__base\" AS (\n SELECT\n \"subquery\".*,\n \"entity_dataframe\".\"entity_timestamp\",\n \"entity_dataframe\".\"{{featureview.name}}__entity_row_unique_id\"\n FROM \"{{ featureview.name }}__subquery\" AS \"subquery\"\n INNER JOIN \"{{ featureview.name }}__entity_dataframe\" AS \"entity_dataframe\"\n ON TRUE\n AND \"subquery\".\"event_timestamp\" <= \"entity_dataframe\".\"entity_timestamp\"\n\n {% if featureview.ttl == 0 %}{% else %}\n AND \"subquery\".\"event_timestamp\" >= TIMESTAMPADD(second,-{{ featureview.ttl }},\"entity_dataframe\".\"entity_timestamp\")\n {% endif %}\n\n {% for entity in featureview.entities %}\n AND \"subquery\".\"{{ entity }}\" = \"entity_dataframe\".\"{{ entity }}\"\n {% endfor %}\n),\n\n/*\n 2. If the `created_timestamp_column` has been set, we need to\n deduplicate the data first. This is done by calculating the\n `MAX(created_at_timestamp)` for each event_timestamp.\n We then join the data on the next CTE\n*/\n{% if featureview.created_timestamp_column %}\n\"{{ featureview.name }}__dedup\" AS (\n SELECT\n \"{{featureview.name}}__entity_row_unique_id\",\n \"event_timestamp\",\n MAX(\"created_timestamp\") AS \"created_timestamp\"\n FROM \"{{ featureview.name }}__base\"\n GROUP BY \"{{featureview.name}}__entity_row_unique_id\", \"event_timestamp\"\n),\n{% endif %}\n\n/*\n 3. The data has been filtered during the first CTE \"*__base\"\n Thus we only need to compute the latest timestamp of each feature.\n*/\n\"{{ featureview.name }}__latest\" AS (\n SELECT\n \"event_timestamp\",\n {% if featureview.created_timestamp_column %}\"created_timestamp\",{% endif %}\n \"{{featureview.name}}__entity_row_unique_id\"\n FROM\n (\n SELECT *,\n ROW_NUMBER() OVER(\n PARTITION BY \"{{featureview.name}}__entity_row_unique_id\"\n ORDER BY \"event_timestamp\" DESC{% if featureview.created_timestamp_column %},\"created_timestamp\" DESC{% endif %}\n ) AS \"row_number\"\n FROM \"{{ featureview.name }}__base\"\n {% if featureview.created_timestamp_column %}\n INNER JOIN \"{{ featureview.name }}__dedup\"\n USING (\"{{featureview.name}}__entity_row_unique_id\", \"event_timestamp\", \"created_timestamp\")\n {% endif %}\n )\n WHERE \"row_number\" = 1\n),\n\n/*\n 4. Once we know the latest value of each feature for a given timestamp,\n we can join again the data back to the original \"base\" dataset\n*/\n\"{{ featureview.name }}__cleaned\" AS (\n SELECT \"base\".*\n FROM \"{{ featureview.name }}__base\" AS \"base\"\n INNER JOIN \"{{ featureview.name }}__latest\"\n USING(\n \"{{featureview.name}}__entity_row_unique_id\",\n \"event_timestamp\"\n {% if featureview.created_timestamp_column %}\n ,\"created_timestamp\"\n {% endif %}\n )\n){% if loop.last %}{% else %}, {% endif %}\n\n\n{% endfor %}\n/*\n Joins the outputs of multiple time travel joins to a single table.\n The entity_dataframe dataset being our source of truth here.\n */\n\nSELECT \"{{ final_output_feature_names | join('\", \"')}}\"\nFROM \"entity_dataframe\"\n{% for featureview in featureviews %}\nLEFT JOIN (\n SELECT\n \"{{featureview.name}}__entity_row_unique_id\"\n {% for feature in featureview.features %}\n ,{% if full_feature_names %}\"{{ featureview.name }}__{{featureview.field_mapping.get(feature, feature)}}\"{% else %}\"{{ featureview.field_mapping.get(feature, feature) }}\"{% endif %}\n {% endfor %}\n FROM \"{{ featureview.name }}__cleaned\"\n) \"{{ featureview.name }}__cleaned\" USING (\"{{featureview.name}}__entity_row_unique_id\")\n{% endfor %}\n\"\"\"\n", "path": "sdk/python/feast/infra/offline_stores/snowflake.py"}]} |
gh_patches_debug_1586 | rasdani/github-patches | git_diff | kubeflow__pipelines-4187 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
allow output artifact store configuration (vs hard coded)
it seems like the output artifacts are always stored in a specific minio service, port, namespace, bucket, secrets, etc (`minio-service.kubeflow:9000`).
see: https://github.com/kubeflow/pipelines/blob/f40a22a3f4a8e06d20cf3e3f425b5058d5c87e0b/sdk/python/kfp/compiler/_op_to_template.py#L148
it would be great to make it flexible, e.g. allow using S3, or change namespace or bucket names.
i suggest making it configurable, i can do such PR if we agree its needed.
flexible pipeline service (host) path in client SDK
when creating an SDK `Client()` the path to `ml-pipeline` API service is loaded from a hard coded value (`ml-pipeline.kubeflow.svc.cluster.local:8888`) which indicate a specific k8s namespace. it can be valuable to load that default value from an env variable, i.e. changing the line in `_client.py` from:
`config.host = host if host else Client.IN_CLUSTER_DNS_NAME`
to:
`config.host = host or os.environ.get('ML_PIPELINE_DNS_NAME',Client.IN_CLUSTER_DNS_NAME)`
also note that when a user provide the `host` parameter, the ipython output points to the API server and not to the UI service (see the logic in `_get_url_prefix()`), it seems like a potential bug
if its acceptable i can submit a PR for the line change above
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `samples/core/loop_parallelism/loop_parallelism.py`
Content:
```
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import kfp.dsl as dsl
16 import kfp
17
18
19 @kfp.components.create_component_from_func
20 def print_op(s: str):
21 print(s)
22
23 @dsl.pipeline(name='my-pipeline')
24 def pipeline2(my_pipe_param=10):
25 loop_args = [{'A_a': 1, 'B_b': 2}, {'A_a': 10, 'B_b': 20}]
26 with dsl.ParallelFor(loop_args, parallelism=1) as item:
27 print_op(item)
28 print_op(item.A_a)
29 print_op(item.B_b)
30
31
32 if __name__ == '__main__':
33 kfp.compiler.Compiler().compile(pipeline, __file__ + '.yaml')
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/samples/core/loop_parallelism/loop_parallelism.py b/samples/core/loop_parallelism/loop_parallelism.py
--- a/samples/core/loop_parallelism/loop_parallelism.py
+++ b/samples/core/loop_parallelism/loop_parallelism.py
@@ -21,9 +21,9 @@
print(s)
@dsl.pipeline(name='my-pipeline')
-def pipeline2(my_pipe_param=10):
+def pipeline():
loop_args = [{'A_a': 1, 'B_b': 2}, {'A_a': 10, 'B_b': 20}]
- with dsl.ParallelFor(loop_args, parallelism=1) as item:
+ with dsl.ParallelFor(loop_args, parallelism=10) as item:
print_op(item)
print_op(item.A_a)
print_op(item.B_b)
| {"golden_diff": "diff --git a/samples/core/loop_parallelism/loop_parallelism.py b/samples/core/loop_parallelism/loop_parallelism.py\n--- a/samples/core/loop_parallelism/loop_parallelism.py\n+++ b/samples/core/loop_parallelism/loop_parallelism.py\n@@ -21,9 +21,9 @@\n print(s)\n \n @dsl.pipeline(name='my-pipeline')\n-def pipeline2(my_pipe_param=10):\n+def pipeline():\n loop_args = [{'A_a': 1, 'B_b': 2}, {'A_a': 10, 'B_b': 20}]\n- with dsl.ParallelFor(loop_args, parallelism=1) as item:\n+ with dsl.ParallelFor(loop_args, parallelism=10) as item:\n print_op(item)\n print_op(item.A_a)\n print_op(item.B_b)\n", "issue": "allow output artifact store configuration (vs hard coded)\nit seems like the output artifacts are always stored in a specific minio service, port, namespace, bucket, secrets, etc (`minio-service.kubeflow:9000`). \r\n\r\nsee: https://github.com/kubeflow/pipelines/blob/f40a22a3f4a8e06d20cf3e3f425b5058d5c87e0b/sdk/python/kfp/compiler/_op_to_template.py#L148\r\n\r\nit would be great to make it flexible, e.g. allow using S3, or change namespace or bucket names.\r\ni suggest making it configurable, i can do such PR if we agree its needed. \nflexible pipeline service (host) path in client SDK \nwhen creating an SDK `Client()` the path to `ml-pipeline` API service is loaded from a hard coded value (`ml-pipeline.kubeflow.svc.cluster.local:8888`) which indicate a specific k8s namespace. it can be valuable to load that default value from an env variable, i.e. changing the line in `_client.py` from:\r\n\r\n`config.host = host if host else Client.IN_CLUSTER_DNS_NAME`\r\n\r\nto:\r\n\r\n`config.host = host or os.environ.get('ML_PIPELINE_DNS_NAME',Client.IN_CLUSTER_DNS_NAME)`\r\n\r\nalso note that when a user provide the `host` parameter, the ipython output points to the API server and not to the UI service (see the logic in `_get_url_prefix()`), it seems like a potential bug\r\n\r\nif its acceptable i can submit a PR for the line change above\r\n \n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport kfp.dsl as dsl\nimport kfp\n\n\[email protected]_component_from_func\ndef print_op(s: str):\n print(s)\n\[email protected](name='my-pipeline')\ndef pipeline2(my_pipe_param=10):\n loop_args = [{'A_a': 1, 'B_b': 2}, {'A_a': 10, 'B_b': 20}]\n with dsl.ParallelFor(loop_args, parallelism=1) as item:\n print_op(item)\n print_op(item.A_a)\n print_op(item.B_b)\n\n\nif __name__ == '__main__':\n kfp.compiler.Compiler().compile(pipeline, __file__ + '.yaml')\n", "path": "samples/core/loop_parallelism/loop_parallelism.py"}], "after_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport kfp.dsl as dsl\nimport kfp\n\n\[email protected]_component_from_func\ndef print_op(s: str):\n print(s)\n\[email protected](name='my-pipeline')\ndef pipeline():\n loop_args = [{'A_a': 1, 'B_b': 2}, {'A_a': 10, 'B_b': 20}]\n with dsl.ParallelFor(loop_args, parallelism=10) as item:\n print_op(item)\n print_op(item.A_a)\n print_op(item.B_b)\n\n\nif __name__ == '__main__':\n kfp.compiler.Compiler().compile(pipeline, __file__ + '.yaml')\n", "path": "samples/core/loop_parallelism/loop_parallelism.py"}]} |
gh_patches_debug_1587 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-6866 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
httpauth is not attached to replay request
#### Problem Description
I set mitmproxy to run in reverse mode as a proxy to real server, and then protect mitmproxy with a pair of user:pass in the proxyauth option. A regular request would go through, but a reply of that same request would return 401 Unauthorized
#### Steps to reproduce the behavior:
1. turn on reverse mode in mitmweb
2. set basic auth in proxyauth in 'username:pass' format
3. initiate a success request
4. replay the request
#### System Information
Mitmproxy: 10.1.5
Python: 3.11.6
OpenSSL: OpenSSL 3.1.4 24 Oct 2023
Platform: Linux-4.14.276-211.499.amzn2.x86_64-x86_64-with-glibc2.31
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/addons/proxyauth.py`
Content:
```
1 from __future__ import annotations
2
3 import binascii
4 import weakref
5 from abc import ABC
6 from abc import abstractmethod
7 from collections.abc import MutableMapping
8 from typing import Optional
9
10 import ldap3
11 import passlib.apache
12
13 from mitmproxy import connection
14 from mitmproxy import ctx
15 from mitmproxy import exceptions
16 from mitmproxy import http
17 from mitmproxy.net.http import status_codes
18 from mitmproxy.proxy import mode_specs
19 from mitmproxy.proxy.layers import modes
20
21 REALM = "mitmproxy"
22
23
24 class ProxyAuth:
25 validator: Validator | None = None
26
27 def __init__(self) -> None:
28 self.authenticated: MutableMapping[connection.Client, tuple[str, str]] = (
29 weakref.WeakKeyDictionary()
30 )
31 """Contains all connections that are permanently authenticated after an HTTP CONNECT"""
32
33 def load(self, loader):
34 loader.add_option(
35 "proxyauth",
36 Optional[str],
37 None,
38 """
39 Require proxy authentication. Format:
40 "username:pass",
41 "any" to accept any user/pass combination,
42 "@path" to use an Apache htpasswd file,
43 or "ldap[s]:url_server_ldap[:port]:dn_auth:password:dn_subtree[?search_filter_key=...]" for LDAP authentication.
44 """,
45 )
46
47 def configure(self, updated):
48 if "proxyauth" in updated:
49 auth = ctx.options.proxyauth
50 if auth:
51 if auth == "any":
52 self.validator = AcceptAll()
53 elif auth.startswith("@"):
54 self.validator = Htpasswd(auth)
55 elif ctx.options.proxyauth.startswith("ldap"):
56 self.validator = Ldap(auth)
57 elif ":" in ctx.options.proxyauth:
58 self.validator = SingleUser(auth)
59 else:
60 raise exceptions.OptionsError("Invalid proxyauth specification.")
61 else:
62 self.validator = None
63
64 def socks5_auth(self, data: modes.Socks5AuthData) -> None:
65 if self.validator and self.validator(data.username, data.password):
66 data.valid = True
67 self.authenticated[data.client_conn] = data.username, data.password
68
69 def http_connect(self, f: http.HTTPFlow) -> None:
70 if self.validator and self.authenticate_http(f):
71 # Make a note that all further requests over this connection are ok.
72 self.authenticated[f.client_conn] = f.metadata["proxyauth"]
73
74 def requestheaders(self, f: http.HTTPFlow) -> None:
75 if self.validator:
76 # Is this connection authenticated by a previous HTTP CONNECT?
77 if f.client_conn in self.authenticated:
78 f.metadata["proxyauth"] = self.authenticated[f.client_conn]
79 else:
80 self.authenticate_http(f)
81
82 def authenticate_http(self, f: http.HTTPFlow) -> bool:
83 """
84 Authenticate an HTTP request, returns if authentication was successful.
85
86 If valid credentials are found, the matching authentication header is removed.
87 In no or invalid credentials are found, flow.response is set to an error page.
88 """
89 assert self.validator
90 username = None
91 password = None
92 is_valid = False
93
94 is_proxy = is_http_proxy(f)
95 auth_header = http_auth_header(is_proxy)
96 try:
97 auth_value = f.request.headers.get(auth_header, "")
98 scheme, username, password = parse_http_basic_auth(auth_value)
99 is_valid = self.validator(username, password)
100 except Exception:
101 pass
102
103 if is_valid:
104 f.metadata["proxyauth"] = (username, password)
105 del f.request.headers[auth_header]
106 return True
107 else:
108 f.response = make_auth_required_response(is_proxy)
109 return False
110
111
112 def make_auth_required_response(is_proxy: bool) -> http.Response:
113 if is_proxy:
114 status_code = status_codes.PROXY_AUTH_REQUIRED
115 headers = {"Proxy-Authenticate": f'Basic realm="{REALM}"'}
116 else:
117 status_code = status_codes.UNAUTHORIZED
118 headers = {"WWW-Authenticate": f'Basic realm="{REALM}"'}
119
120 reason = http.status_codes.RESPONSES[status_code]
121 return http.Response.make(
122 status_code,
123 (
124 f"<html>"
125 f"<head><title>{status_code} {reason}</title></head>"
126 f"<body><h1>{status_code} {reason}</h1></body>"
127 f"</html>"
128 ),
129 headers,
130 )
131
132
133 def http_auth_header(is_proxy: bool) -> str:
134 if is_proxy:
135 return "Proxy-Authorization"
136 else:
137 return "Authorization"
138
139
140 def is_http_proxy(f: http.HTTPFlow) -> bool:
141 """
142 Returns:
143 - True, if authentication is done as if mitmproxy is a proxy
144 - False, if authentication is done as if mitmproxy is an HTTP server
145 """
146 return isinstance(
147 f.client_conn.proxy_mode, (mode_specs.RegularMode, mode_specs.UpstreamMode)
148 )
149
150
151 def mkauth(username: str, password: str, scheme: str = "basic") -> str:
152 """
153 Craft a basic auth string
154 """
155 v = binascii.b2a_base64((username + ":" + password).encode("utf8")).decode("ascii")
156 return scheme + " " + v
157
158
159 def parse_http_basic_auth(s: str) -> tuple[str, str, str]:
160 """
161 Parse a basic auth header.
162 Raises a ValueError if the input is invalid.
163 """
164 scheme, authinfo = s.split()
165 if scheme.lower() != "basic":
166 raise ValueError("Unknown scheme")
167 try:
168 user, password = (
169 binascii.a2b_base64(authinfo.encode()).decode("utf8", "replace").split(":")
170 )
171 except binascii.Error as e:
172 raise ValueError(str(e))
173 return scheme, user, password
174
175
176 class Validator(ABC):
177 """Base class for all username/password validators."""
178
179 @abstractmethod
180 def __call__(self, username: str, password: str) -> bool:
181 raise NotImplementedError
182
183
184 class AcceptAll(Validator):
185 def __call__(self, username: str, password: str) -> bool:
186 return True
187
188
189 class SingleUser(Validator):
190 def __init__(self, proxyauth: str):
191 try:
192 self.username, self.password = proxyauth.split(":")
193 except ValueError:
194 raise exceptions.OptionsError("Invalid single-user auth specification.")
195
196 def __call__(self, username: str, password: str) -> bool:
197 return self.username == username and self.password == password
198
199
200 class Htpasswd(Validator):
201 def __init__(self, proxyauth: str):
202 path = proxyauth[1:]
203 try:
204 self.htpasswd = passlib.apache.HtpasswdFile(path)
205 except (ValueError, OSError):
206 raise exceptions.OptionsError(f"Could not open htpasswd file: {path}")
207
208 def __call__(self, username: str, password: str) -> bool:
209 return self.htpasswd.check_password(username, password)
210
211
212 class Ldap(Validator):
213 conn: ldap3.Connection
214 server: ldap3.Server
215 dn_subtree: str
216 filter_key: str
217
218 def __init__(self, proxyauth: str):
219 (
220 use_ssl,
221 url,
222 port,
223 ldap_user,
224 ldap_pass,
225 self.dn_subtree,
226 self.filter_key,
227 ) = self.parse_spec(proxyauth)
228 server = ldap3.Server(url, port=port, use_ssl=use_ssl)
229 conn = ldap3.Connection(server, ldap_user, ldap_pass, auto_bind=True)
230 self.conn = conn
231 self.server = server
232
233 @staticmethod
234 def parse_spec(spec: str) -> tuple[bool, str, int | None, str, str, str, str]:
235 try:
236 if spec.count(":") > 4:
237 (
238 security,
239 url,
240 port_str,
241 ldap_user,
242 ldap_pass,
243 dn_subtree,
244 ) = spec.split(":")
245 port = int(port_str)
246 else:
247 security, url, ldap_user, ldap_pass, dn_subtree = spec.split(":")
248 port = None
249
250 if "?" in dn_subtree:
251 dn_subtree, search_str = dn_subtree.split("?")
252 key, value = search_str.split("=")
253 if key == "search_filter_key":
254 search_filter_key = value
255 else:
256 raise ValueError
257 else:
258 search_filter_key = "cn"
259
260 if security == "ldaps":
261 use_ssl = True
262 elif security == "ldap":
263 use_ssl = False
264 else:
265 raise ValueError
266
267 return (
268 use_ssl,
269 url,
270 port,
271 ldap_user,
272 ldap_pass,
273 dn_subtree,
274 search_filter_key,
275 )
276 except ValueError:
277 raise exceptions.OptionsError(f"Invalid LDAP specification: {spec}")
278
279 def __call__(self, username: str, password: str) -> bool:
280 if not username or not password:
281 return False
282 self.conn.search(self.dn_subtree, f"({self.filter_key}={username})")
283 if self.conn.response:
284 c = ldap3.Connection(
285 self.server, self.conn.response[0]["dn"], password, auto_bind=True
286 )
287 if c:
288 return True
289 return False
290
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mitmproxy/addons/proxyauth.py b/mitmproxy/addons/proxyauth.py
--- a/mitmproxy/addons/proxyauth.py
+++ b/mitmproxy/addons/proxyauth.py
@@ -76,6 +76,8 @@
# Is this connection authenticated by a previous HTTP CONNECT?
if f.client_conn in self.authenticated:
f.metadata["proxyauth"] = self.authenticated[f.client_conn]
+ elif f.is_replay:
+ pass
else:
self.authenticate_http(f)
| {"golden_diff": "diff --git a/mitmproxy/addons/proxyauth.py b/mitmproxy/addons/proxyauth.py\n--- a/mitmproxy/addons/proxyauth.py\n+++ b/mitmproxy/addons/proxyauth.py\n@@ -76,6 +76,8 @@\n # Is this connection authenticated by a previous HTTP CONNECT?\n if f.client_conn in self.authenticated:\n f.metadata[\"proxyauth\"] = self.authenticated[f.client_conn]\n+ elif f.is_replay:\n+ pass\n else:\n self.authenticate_http(f)\n", "issue": "httpauth is not attached to replay request \n#### Problem Description\r\nI set mitmproxy to run in reverse mode as a proxy to real server, and then protect mitmproxy with a pair of user:pass in the proxyauth option. A regular request would go through, but a reply of that same request would return 401 Unauthorized\r\n\r\n#### Steps to reproduce the behavior:\r\n1. turn on reverse mode in mitmweb\r\n2. set basic auth in proxyauth in 'username:pass' format\r\n3. initiate a success request\r\n4. replay the request\r\n\r\n#### System Information\r\nMitmproxy: 10.1.5\r\nPython: 3.11.6\r\nOpenSSL: OpenSSL 3.1.4 24 Oct 2023\r\nPlatform: Linux-4.14.276-211.499.amzn2.x86_64-x86_64-with-glibc2.31\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport binascii\nimport weakref\nfrom abc import ABC\nfrom abc import abstractmethod\nfrom collections.abc import MutableMapping\nfrom typing import Optional\n\nimport ldap3\nimport passlib.apache\n\nfrom mitmproxy import connection\nfrom mitmproxy import ctx\nfrom mitmproxy import exceptions\nfrom mitmproxy import http\nfrom mitmproxy.net.http import status_codes\nfrom mitmproxy.proxy import mode_specs\nfrom mitmproxy.proxy.layers import modes\n\nREALM = \"mitmproxy\"\n\n\nclass ProxyAuth:\n validator: Validator | None = None\n\n def __init__(self) -> None:\n self.authenticated: MutableMapping[connection.Client, tuple[str, str]] = (\n weakref.WeakKeyDictionary()\n )\n \"\"\"Contains all connections that are permanently authenticated after an HTTP CONNECT\"\"\"\n\n def load(self, loader):\n loader.add_option(\n \"proxyauth\",\n Optional[str],\n None,\n \"\"\"\n Require proxy authentication. Format:\n \"username:pass\",\n \"any\" to accept any user/pass combination,\n \"@path\" to use an Apache htpasswd file,\n or \"ldap[s]:url_server_ldap[:port]:dn_auth:password:dn_subtree[?search_filter_key=...]\" for LDAP authentication.\n \"\"\",\n )\n\n def configure(self, updated):\n if \"proxyauth\" in updated:\n auth = ctx.options.proxyauth\n if auth:\n if auth == \"any\":\n self.validator = AcceptAll()\n elif auth.startswith(\"@\"):\n self.validator = Htpasswd(auth)\n elif ctx.options.proxyauth.startswith(\"ldap\"):\n self.validator = Ldap(auth)\n elif \":\" in ctx.options.proxyauth:\n self.validator = SingleUser(auth)\n else:\n raise exceptions.OptionsError(\"Invalid proxyauth specification.\")\n else:\n self.validator = None\n\n def socks5_auth(self, data: modes.Socks5AuthData) -> None:\n if self.validator and self.validator(data.username, data.password):\n data.valid = True\n self.authenticated[data.client_conn] = data.username, data.password\n\n def http_connect(self, f: http.HTTPFlow) -> None:\n if self.validator and self.authenticate_http(f):\n # Make a note that all further requests over this connection are ok.\n self.authenticated[f.client_conn] = f.metadata[\"proxyauth\"]\n\n def requestheaders(self, f: http.HTTPFlow) -> None:\n if self.validator:\n # Is this connection authenticated by a previous HTTP CONNECT?\n if f.client_conn in self.authenticated:\n f.metadata[\"proxyauth\"] = self.authenticated[f.client_conn]\n else:\n self.authenticate_http(f)\n\n def authenticate_http(self, f: http.HTTPFlow) -> bool:\n \"\"\"\n Authenticate an HTTP request, returns if authentication was successful.\n\n If valid credentials are found, the matching authentication header is removed.\n In no or invalid credentials are found, flow.response is set to an error page.\n \"\"\"\n assert self.validator\n username = None\n password = None\n is_valid = False\n\n is_proxy = is_http_proxy(f)\n auth_header = http_auth_header(is_proxy)\n try:\n auth_value = f.request.headers.get(auth_header, \"\")\n scheme, username, password = parse_http_basic_auth(auth_value)\n is_valid = self.validator(username, password)\n except Exception:\n pass\n\n if is_valid:\n f.metadata[\"proxyauth\"] = (username, password)\n del f.request.headers[auth_header]\n return True\n else:\n f.response = make_auth_required_response(is_proxy)\n return False\n\n\ndef make_auth_required_response(is_proxy: bool) -> http.Response:\n if is_proxy:\n status_code = status_codes.PROXY_AUTH_REQUIRED\n headers = {\"Proxy-Authenticate\": f'Basic realm=\"{REALM}\"'}\n else:\n status_code = status_codes.UNAUTHORIZED\n headers = {\"WWW-Authenticate\": f'Basic realm=\"{REALM}\"'}\n\n reason = http.status_codes.RESPONSES[status_code]\n return http.Response.make(\n status_code,\n (\n f\"<html>\"\n f\"<head><title>{status_code} {reason}</title></head>\"\n f\"<body><h1>{status_code} {reason}</h1></body>\"\n f\"</html>\"\n ),\n headers,\n )\n\n\ndef http_auth_header(is_proxy: bool) -> str:\n if is_proxy:\n return \"Proxy-Authorization\"\n else:\n return \"Authorization\"\n\n\ndef is_http_proxy(f: http.HTTPFlow) -> bool:\n \"\"\"\n Returns:\n - True, if authentication is done as if mitmproxy is a proxy\n - False, if authentication is done as if mitmproxy is an HTTP server\n \"\"\"\n return isinstance(\n f.client_conn.proxy_mode, (mode_specs.RegularMode, mode_specs.UpstreamMode)\n )\n\n\ndef mkauth(username: str, password: str, scheme: str = \"basic\") -> str:\n \"\"\"\n Craft a basic auth string\n \"\"\"\n v = binascii.b2a_base64((username + \":\" + password).encode(\"utf8\")).decode(\"ascii\")\n return scheme + \" \" + v\n\n\ndef parse_http_basic_auth(s: str) -> tuple[str, str, str]:\n \"\"\"\n Parse a basic auth header.\n Raises a ValueError if the input is invalid.\n \"\"\"\n scheme, authinfo = s.split()\n if scheme.lower() != \"basic\":\n raise ValueError(\"Unknown scheme\")\n try:\n user, password = (\n binascii.a2b_base64(authinfo.encode()).decode(\"utf8\", \"replace\").split(\":\")\n )\n except binascii.Error as e:\n raise ValueError(str(e))\n return scheme, user, password\n\n\nclass Validator(ABC):\n \"\"\"Base class for all username/password validators.\"\"\"\n\n @abstractmethod\n def __call__(self, username: str, password: str) -> bool:\n raise NotImplementedError\n\n\nclass AcceptAll(Validator):\n def __call__(self, username: str, password: str) -> bool:\n return True\n\n\nclass SingleUser(Validator):\n def __init__(self, proxyauth: str):\n try:\n self.username, self.password = proxyauth.split(\":\")\n except ValueError:\n raise exceptions.OptionsError(\"Invalid single-user auth specification.\")\n\n def __call__(self, username: str, password: str) -> bool:\n return self.username == username and self.password == password\n\n\nclass Htpasswd(Validator):\n def __init__(self, proxyauth: str):\n path = proxyauth[1:]\n try:\n self.htpasswd = passlib.apache.HtpasswdFile(path)\n except (ValueError, OSError):\n raise exceptions.OptionsError(f\"Could not open htpasswd file: {path}\")\n\n def __call__(self, username: str, password: str) -> bool:\n return self.htpasswd.check_password(username, password)\n\n\nclass Ldap(Validator):\n conn: ldap3.Connection\n server: ldap3.Server\n dn_subtree: str\n filter_key: str\n\n def __init__(self, proxyauth: str):\n (\n use_ssl,\n url,\n port,\n ldap_user,\n ldap_pass,\n self.dn_subtree,\n self.filter_key,\n ) = self.parse_spec(proxyauth)\n server = ldap3.Server(url, port=port, use_ssl=use_ssl)\n conn = ldap3.Connection(server, ldap_user, ldap_pass, auto_bind=True)\n self.conn = conn\n self.server = server\n\n @staticmethod\n def parse_spec(spec: str) -> tuple[bool, str, int | None, str, str, str, str]:\n try:\n if spec.count(\":\") > 4:\n (\n security,\n url,\n port_str,\n ldap_user,\n ldap_pass,\n dn_subtree,\n ) = spec.split(\":\")\n port = int(port_str)\n else:\n security, url, ldap_user, ldap_pass, dn_subtree = spec.split(\":\")\n port = None\n\n if \"?\" in dn_subtree:\n dn_subtree, search_str = dn_subtree.split(\"?\")\n key, value = search_str.split(\"=\")\n if key == \"search_filter_key\":\n search_filter_key = value\n else:\n raise ValueError\n else:\n search_filter_key = \"cn\"\n\n if security == \"ldaps\":\n use_ssl = True\n elif security == \"ldap\":\n use_ssl = False\n else:\n raise ValueError\n\n return (\n use_ssl,\n url,\n port,\n ldap_user,\n ldap_pass,\n dn_subtree,\n search_filter_key,\n )\n except ValueError:\n raise exceptions.OptionsError(f\"Invalid LDAP specification: {spec}\")\n\n def __call__(self, username: str, password: str) -> bool:\n if not username or not password:\n return False\n self.conn.search(self.dn_subtree, f\"({self.filter_key}={username})\")\n if self.conn.response:\n c = ldap3.Connection(\n self.server, self.conn.response[0][\"dn\"], password, auto_bind=True\n )\n if c:\n return True\n return False\n", "path": "mitmproxy/addons/proxyauth.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport binascii\nimport weakref\nfrom abc import ABC\nfrom abc import abstractmethod\nfrom collections.abc import MutableMapping\nfrom typing import Optional\n\nimport ldap3\nimport passlib.apache\n\nfrom mitmproxy import connection\nfrom mitmproxy import ctx\nfrom mitmproxy import exceptions\nfrom mitmproxy import http\nfrom mitmproxy.net.http import status_codes\nfrom mitmproxy.proxy import mode_specs\nfrom mitmproxy.proxy.layers import modes\n\nREALM = \"mitmproxy\"\n\n\nclass ProxyAuth:\n validator: Validator | None = None\n\n def __init__(self) -> None:\n self.authenticated: MutableMapping[connection.Client, tuple[str, str]] = (\n weakref.WeakKeyDictionary()\n )\n \"\"\"Contains all connections that are permanently authenticated after an HTTP CONNECT\"\"\"\n\n def load(self, loader):\n loader.add_option(\n \"proxyauth\",\n Optional[str],\n None,\n \"\"\"\n Require proxy authentication. Format:\n \"username:pass\",\n \"any\" to accept any user/pass combination,\n \"@path\" to use an Apache htpasswd file,\n or \"ldap[s]:url_server_ldap[:port]:dn_auth:password:dn_subtree[?search_filter_key=...]\" for LDAP authentication.\n \"\"\",\n )\n\n def configure(self, updated):\n if \"proxyauth\" in updated:\n auth = ctx.options.proxyauth\n if auth:\n if auth == \"any\":\n self.validator = AcceptAll()\n elif auth.startswith(\"@\"):\n self.validator = Htpasswd(auth)\n elif ctx.options.proxyauth.startswith(\"ldap\"):\n self.validator = Ldap(auth)\n elif \":\" in ctx.options.proxyauth:\n self.validator = SingleUser(auth)\n else:\n raise exceptions.OptionsError(\"Invalid proxyauth specification.\")\n else:\n self.validator = None\n\n def socks5_auth(self, data: modes.Socks5AuthData) -> None:\n if self.validator and self.validator(data.username, data.password):\n data.valid = True\n self.authenticated[data.client_conn] = data.username, data.password\n\n def http_connect(self, f: http.HTTPFlow) -> None:\n if self.validator and self.authenticate_http(f):\n # Make a note that all further requests over this connection are ok.\n self.authenticated[f.client_conn] = f.metadata[\"proxyauth\"]\n\n def requestheaders(self, f: http.HTTPFlow) -> None:\n if self.validator:\n # Is this connection authenticated by a previous HTTP CONNECT?\n if f.client_conn in self.authenticated:\n f.metadata[\"proxyauth\"] = self.authenticated[f.client_conn]\n elif f.is_replay:\n pass\n else:\n self.authenticate_http(f)\n\n def authenticate_http(self, f: http.HTTPFlow) -> bool:\n \"\"\"\n Authenticate an HTTP request, returns if authentication was successful.\n\n If valid credentials are found, the matching authentication header is removed.\n In no or invalid credentials are found, flow.response is set to an error page.\n \"\"\"\n assert self.validator\n username = None\n password = None\n is_valid = False\n\n is_proxy = is_http_proxy(f)\n auth_header = http_auth_header(is_proxy)\n try:\n auth_value = f.request.headers.get(auth_header, \"\")\n scheme, username, password = parse_http_basic_auth(auth_value)\n is_valid = self.validator(username, password)\n except Exception:\n pass\n\n if is_valid:\n f.metadata[\"proxyauth\"] = (username, password)\n del f.request.headers[auth_header]\n return True\n else:\n f.response = make_auth_required_response(is_proxy)\n return False\n\n\ndef make_auth_required_response(is_proxy: bool) -> http.Response:\n if is_proxy:\n status_code = status_codes.PROXY_AUTH_REQUIRED\n headers = {\"Proxy-Authenticate\": f'Basic realm=\"{REALM}\"'}\n else:\n status_code = status_codes.UNAUTHORIZED\n headers = {\"WWW-Authenticate\": f'Basic realm=\"{REALM}\"'}\n\n reason = http.status_codes.RESPONSES[status_code]\n return http.Response.make(\n status_code,\n (\n f\"<html>\"\n f\"<head><title>{status_code} {reason}</title></head>\"\n f\"<body><h1>{status_code} {reason}</h1></body>\"\n f\"</html>\"\n ),\n headers,\n )\n\n\ndef http_auth_header(is_proxy: bool) -> str:\n if is_proxy:\n return \"Proxy-Authorization\"\n else:\n return \"Authorization\"\n\n\ndef is_http_proxy(f: http.HTTPFlow) -> bool:\n \"\"\"\n Returns:\n - True, if authentication is done as if mitmproxy is a proxy\n - False, if authentication is done as if mitmproxy is an HTTP server\n \"\"\"\n return isinstance(\n f.client_conn.proxy_mode, (mode_specs.RegularMode, mode_specs.UpstreamMode)\n )\n\n\ndef mkauth(username: str, password: str, scheme: str = \"basic\") -> str:\n \"\"\"\n Craft a basic auth string\n \"\"\"\n v = binascii.b2a_base64((username + \":\" + password).encode(\"utf8\")).decode(\"ascii\")\n return scheme + \" \" + v\n\n\ndef parse_http_basic_auth(s: str) -> tuple[str, str, str]:\n \"\"\"\n Parse a basic auth header.\n Raises a ValueError if the input is invalid.\n \"\"\"\n scheme, authinfo = s.split()\n if scheme.lower() != \"basic\":\n raise ValueError(\"Unknown scheme\")\n try:\n user, password = (\n binascii.a2b_base64(authinfo.encode()).decode(\"utf8\", \"replace\").split(\":\")\n )\n except binascii.Error as e:\n raise ValueError(str(e))\n return scheme, user, password\n\n\nclass Validator(ABC):\n \"\"\"Base class for all username/password validators.\"\"\"\n\n @abstractmethod\n def __call__(self, username: str, password: str) -> bool:\n raise NotImplementedError\n\n\nclass AcceptAll(Validator):\n def __call__(self, username: str, password: str) -> bool:\n return True\n\n\nclass SingleUser(Validator):\n def __init__(self, proxyauth: str):\n try:\n self.username, self.password = proxyauth.split(\":\")\n except ValueError:\n raise exceptions.OptionsError(\"Invalid single-user auth specification.\")\n\n def __call__(self, username: str, password: str) -> bool:\n return self.username == username and self.password == password\n\n\nclass Htpasswd(Validator):\n def __init__(self, proxyauth: str):\n path = proxyauth[1:]\n try:\n self.htpasswd = passlib.apache.HtpasswdFile(path)\n except (ValueError, OSError):\n raise exceptions.OptionsError(f\"Could not open htpasswd file: {path}\")\n\n def __call__(self, username: str, password: str) -> bool:\n return self.htpasswd.check_password(username, password)\n\n\nclass Ldap(Validator):\n conn: ldap3.Connection\n server: ldap3.Server\n dn_subtree: str\n filter_key: str\n\n def __init__(self, proxyauth: str):\n (\n use_ssl,\n url,\n port,\n ldap_user,\n ldap_pass,\n self.dn_subtree,\n self.filter_key,\n ) = self.parse_spec(proxyauth)\n server = ldap3.Server(url, port=port, use_ssl=use_ssl)\n conn = ldap3.Connection(server, ldap_user, ldap_pass, auto_bind=True)\n self.conn = conn\n self.server = server\n\n @staticmethod\n def parse_spec(spec: str) -> tuple[bool, str, int | None, str, str, str, str]:\n try:\n if spec.count(\":\") > 4:\n (\n security,\n url,\n port_str,\n ldap_user,\n ldap_pass,\n dn_subtree,\n ) = spec.split(\":\")\n port = int(port_str)\n else:\n security, url, ldap_user, ldap_pass, dn_subtree = spec.split(\":\")\n port = None\n\n if \"?\" in dn_subtree:\n dn_subtree, search_str = dn_subtree.split(\"?\")\n key, value = search_str.split(\"=\")\n if key == \"search_filter_key\":\n search_filter_key = value\n else:\n raise ValueError\n else:\n search_filter_key = \"cn\"\n\n if security == \"ldaps\":\n use_ssl = True\n elif security == \"ldap\":\n use_ssl = False\n else:\n raise ValueError\n\n return (\n use_ssl,\n url,\n port,\n ldap_user,\n ldap_pass,\n dn_subtree,\n search_filter_key,\n )\n except ValueError:\n raise exceptions.OptionsError(f\"Invalid LDAP specification: {spec}\")\n\n def __call__(self, username: str, password: str) -> bool:\n if not username or not password:\n return False\n self.conn.search(self.dn_subtree, f\"({self.filter_key}={username})\")\n if self.conn.response:\n c = ldap3.Connection(\n self.server, self.conn.response[0][\"dn\"], password, auto_bind=True\n )\n if c:\n return True\n return False\n", "path": "mitmproxy/addons/proxyauth.py"}]} |
gh_patches_debug_1588 | rasdani/github-patches | git_diff | bridgecrewio__checkov-2740 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Check Azure Front Door WAF enabled fails even when a WAF is correctly assigned
**Describe the issue**
[`CKV_AZURE_121`](https://github.com/bridgecrewio/checkov/blob/master/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py) fails despite a Web Application Firewall policy being correctly applied.
WAF policies are applied by specifying a value for `web_application_firewall_policy_link_id` inside a `frontend_endpoint` block within the `azurerm_frontdoor` resource itself.
The [documentation](https://docs.bridgecrew.io/docs/ensure-that-azure-front-door-enables-waf) seems to expect that the `web_application_firewall_policy_link_id` attribute is defined in the resource block itself, rather than in a sub-block (`frontend_endpoint`).
- [`azurerm_frontdoor` resource documentation reference](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/frontdoor#web_application_firewall_policy_link_id)
**Examples**
```terraform
resource "azurerm_frontdoor" "test" {
name = "test-front-door"
resource_group_name = var.resource_group_name
enforce_backend_pools_certificate_name_check = false
tags = var.tags
frontend_endpoint {
name = "DefaultFrontend"
host_name = "test-front-door.azurefd.net"
web_application_firewall_policy_link_id = azurerm_frontdoor_firewall_policy.test.id
}
# ...
```
**Version (please complete the following information):**
- Checkov Version: 2.0.930
**Additional context**
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py`
Content:
```
1 from checkov.common.models.consts import ANY_VALUE
2 from checkov.common.models.enums import CheckCategories
3 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
4
5
6 class AzureFrontDoorEnablesWAF(BaseResourceValueCheck):
7 def __init__(self):
8 name = "Ensure that Azure Front Door enables WAF"
9 id = "CKV_AZURE_121"
10 supported_resources = ['azurerm_frontdoor']
11 categories = [CheckCategories.NETWORKING]
12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
13
14 def get_inspected_key(self):
15 return "web_application_firewall_policy_link_id"
16
17 def get_expected_value(self):
18 return ANY_VALUE
19
20
21 check = AzureFrontDoorEnablesWAF()
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py b/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py
--- a/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py
+++ b/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py
@@ -12,7 +12,7 @@
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self):
- return "web_application_firewall_policy_link_id"
+ return "frontend_endpoint/[0]/web_application_firewall_policy_link_id"
def get_expected_value(self):
return ANY_VALUE
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py b/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py\n--- a/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py\n+++ b/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py\n@@ -12,7 +12,7 @@\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n def get_inspected_key(self):\n- return \"web_application_firewall_policy_link_id\"\n+ return \"frontend_endpoint/[0]/web_application_firewall_policy_link_id\"\n \n def get_expected_value(self):\n return ANY_VALUE\n", "issue": "Check Azure Front Door WAF enabled fails even when a WAF is correctly assigned\n**Describe the issue**\r\n[`CKV_AZURE_121`](https://github.com/bridgecrewio/checkov/blob/master/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py) fails despite a Web Application Firewall policy being correctly applied. \r\n\r\nWAF policies are applied by specifying a value for `web_application_firewall_policy_link_id` inside a `frontend_endpoint` block within the `azurerm_frontdoor` resource itself.\r\n\r\nThe [documentation](https://docs.bridgecrew.io/docs/ensure-that-azure-front-door-enables-waf) seems to expect that the `web_application_firewall_policy_link_id` attribute is defined in the resource block itself, rather than in a sub-block (`frontend_endpoint`).\r\n\r\n- [`azurerm_frontdoor` resource documentation reference](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/frontdoor#web_application_firewall_policy_link_id)\r\n\r\n**Examples**\r\n```terraform\r\nresource \"azurerm_frontdoor\" \"test\" {\r\n name = \"test-front-door\"\r\n resource_group_name = var.resource_group_name\r\n enforce_backend_pools_certificate_name_check = false\r\n tags = var.tags\r\n\r\n frontend_endpoint {\r\n name = \"DefaultFrontend\"\r\n host_name = \"test-front-door.azurefd.net\"\r\n web_application_firewall_policy_link_id = azurerm_frontdoor_firewall_policy.test.id\r\n }\r\n\r\n # ... \r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version: 2.0.930\r\n\r\n**Additional context**\r\n\n", "before_files": [{"content": "from checkov.common.models.consts import ANY_VALUE\nfrom checkov.common.models.enums import CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass AzureFrontDoorEnablesWAF(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that Azure Front Door enables WAF\"\n id = \"CKV_AZURE_121\"\n supported_resources = ['azurerm_frontdoor']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"web_application_firewall_policy_link_id\"\n\n def get_expected_value(self):\n return ANY_VALUE\n\n\ncheck = AzureFrontDoorEnablesWAF()\n", "path": "checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py"}], "after_files": [{"content": "from checkov.common.models.consts import ANY_VALUE\nfrom checkov.common.models.enums import CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass AzureFrontDoorEnablesWAF(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that Azure Front Door enables WAF\"\n id = \"CKV_AZURE_121\"\n supported_resources = ['azurerm_frontdoor']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"frontend_endpoint/[0]/web_application_firewall_policy_link_id\"\n\n def get_expected_value(self):\n return ANY_VALUE\n\n\ncheck = AzureFrontDoorEnablesWAF()\n", "path": "checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py"}]} |
gh_patches_debug_1589 | rasdani/github-patches | git_diff | data-for-change__anyway-1244 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Set default values of 0 to elements in head_on_collisions_comparison
Example for the element that is missing

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `anyway/infographics_utils.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import logging
3 import datetime
4 import json
5 import pandas as pd
6 from collections import defaultdict
7 from sqlalchemy import func
8 from sqlalchemy import cast, Numeric
9 from sqlalchemy import desc
10 from flask import Response
11 from .constants import CONST
12 from .models import (NewsFlash, AccidentMarkerView, InvolvedMarkerView, RoadSegments)
13 from .parsers import resolution_dict
14 from .app_and_db import db
15
16 '''
17 Widget structure:
18 {
19 'name': str,
20 'rank': int (Integer),
21 'data': {
22 'items': list (Array) | dictionary (Object),
23 'text': dictionary (Object) - can be empty
24 }
25 'meta': dictionary (Object) - can be empty
26 }
27 '''
28 class Widget():
29 def __init__(self, name, rank, items, text=None, meta=None):
30 self.name = name
31 self.rank = rank
32 self.items = items
33 self.text = text
34 self.meta = meta
35
36 def serialize(self):
37 output = {}
38 output['name'] = self.name
39 output['rank'] = self.rank
40 output['data'] = {}
41 output['data']['items'] = self.items
42 if self.text:
43 output['data']['text'] = self.text
44 if self.meta:
45 output['meta'] = self.meta
46 return output
47
48
49 def extract_news_flash_location(news_flash_id):
50 news_flash_obj = db.session.query(NewsFlash).filter(
51 NewsFlash.id == news_flash_id).first()
52 if not news_flash_obj:
53 logging.warn('could not find news flash id {}'.format(news_flash_id))
54 return None
55 resolution = news_flash_obj.resolution if news_flash_obj.resolution else None
56 if not news_flash_obj or not resolution or resolution not in resolution_dict:
57 logging.warn(
58 'could not find valid resolution for news flash id {}'.format(news_flash_id))
59 return None
60 data = {'resolution': resolution}
61 for field in resolution_dict[resolution]:
62 curr_field = getattr(news_flash_obj, field)
63 if curr_field is not None:
64 data[field] = curr_field
65 gps = {}
66 for field in ['lon', 'lat']:
67 gps[field] = getattr(news_flash_obj, field)
68 return {'name': 'location', 'data': data, 'gps': gps}
69
70
71 def get_query(table_obj, filters, start_time, end_time):
72 query = db.session.query(table_obj)
73 if start_time:
74 query = query.filter(
75 getattr(table_obj, 'accident_timestamp') >= start_time)
76 if end_time:
77 query = query.filter(
78 getattr(table_obj, 'accident_timestamp') <= end_time)
79 if filters:
80 for field_name, value in filters.items():
81 if isinstance(value, list):
82 values = value
83 else:
84 values = [value]
85 query = query.filter((getattr(table_obj, field_name)).in_(values))
86 return query
87
88 def get_accident_count_by_accident_type(location_info, start_time, end_time):
89 all_accident_type_count = get_accidents_stats(table_obj=AccidentMarkerView,
90 filters=location_info,
91 group_by='accident_type_hebrew',
92 count='accident_type_hebrew',
93 start_time=start_time,
94 end_time=end_time)
95 merged_accident_type_count = [{'accident_type': 'התנגשות', 'count': 0}]
96 for item in all_accident_type_count:
97 if 'התנגשות' in item['accident_type']:
98 merged_accident_type_count[0]['count'] += item['count']
99 else:
100 merged_accident_type_count.append(item)
101 return merged_accident_type_count
102
103 def get_top_road_segments_accidents_per_km(resolution, location_info, start_time=None, end_time=None, limit=5):
104 if resolution != 'כביש בינעירוני': # relevent for non urban roads only
105 return {}
106
107 query = get_query(table_obj=AccidentMarkerView, filters=None,
108 start_time=start_time, end_time=end_time)
109
110 query = query.with_entities(
111 AccidentMarkerView.road_segment_name,
112 func.count(AccidentMarkerView.road_segment_name).label(
113 'total_accidents'),
114 (RoadSegments.to_km - RoadSegments.from_km).label('segment_length'),
115 cast((func.count(AccidentMarkerView.road_segment_name) / (RoadSegments.to_km - RoadSegments.from_km)),
116 Numeric(10, 4)).label(
117 'accidents_per_km')) \
118 .filter(AccidentMarkerView.road1 == RoadSegments.road) \
119 .filter(AccidentMarkerView.road_segment_number == RoadSegments.segment) \
120 .filter(AccidentMarkerView.road1 == location_info['road1']) \
121 .filter(AccidentMarkerView.road_segment_name is not None) \
122 .group_by(AccidentMarkerView.road_segment_name, RoadSegments.from_km, RoadSegments.to_km) \
123 .order_by(desc('accidents_per_km')) \
124 .limit(limit)
125
126 result = pd.read_sql_query(query.statement, query.session.bind)
127 return result.to_dict(orient='records') # pylint: disable=no-member
128
129
130 def get_accidents_stats(table_obj, filters=None, group_by=None, count=None, start_time=None, end_time=None):
131 filters = filters or {}
132 filters['provider_code'] = [
133 CONST.CBS_ACCIDENT_TYPE_1_CODE, CONST.CBS_ACCIDENT_TYPE_3_CODE]
134 # get stats
135 query = get_query(table_obj, filters, start_time, end_time)
136 if group_by:
137 query = query.group_by(group_by)
138 query = query.with_entities(group_by, func.count(count))
139 df = pd.read_sql_query(query.statement, query.session.bind)
140 df.rename(columns={'count_1': 'count'}, inplace=True) # pylint: disable=no-member
141 df.columns = [c.replace('_hebrew', '') for c in df.columns]
142 return df.to_dict(orient='records') if group_by or count else df.to_dict() # pylint: disable=no-member
143
144
145 def get_injured_filters(location_info):
146 new_filters = {}
147 for curr_filter, curr_values in location_info.items():
148 if curr_filter in ['region_hebrew', 'district_hebrew', 'district_hebrew', 'yishuv_name']:
149 new_filter_name = 'accident_' + curr_filter
150 new_filters[new_filter_name] = curr_values
151 else:
152 new_filters[curr_filter] = curr_values
153 new_filters['injury_severity'] = [1, 2, 3, 4, 5]
154 return new_filters
155
156
157 def get_most_severe_accidents_with_entities(table_obj, filters, entities, start_time, end_time, limit=10):
158 filters = filters or {}
159 filters['provider_code'] = [
160 CONST.CBS_ACCIDENT_TYPE_1_CODE, CONST.CBS_ACCIDENT_TYPE_3_CODE]
161 query = get_query(table_obj, filters, start_time, end_time)
162 query = query.with_entities(*entities)
163 query = query.order_by(getattr(table_obj, "accident_severity"), getattr(
164 table_obj, "accident_timestamp").desc())
165 query = query.limit(limit)
166 df = pd.read_sql_query(query.statement, query.session.bind)
167 df.columns = [c.replace('_hebrew', '') for c in df.columns]
168 return df.to_dict(orient='records') # pylint: disable=no-member
169
170
171 def get_most_severe_accidents(table_obj, filters, start_time, end_time, limit=10):
172 entities = 'longitude', 'latitude', 'accident_severity_hebrew', 'accident_timestamp', 'accident_type_hebrew'
173 return get_most_severe_accidents_with_entities(table_obj, filters, entities, start_time, end_time, limit)
174
175
176 def get_accidents_heat_map(table_obj, filters, start_time, end_time):
177 filters = filters or {}
178 filters['provider_code'] = [
179 CONST.CBS_ACCIDENT_TYPE_1_CODE, CONST.CBS_ACCIDENT_TYPE_3_CODE]
180 query = get_query(table_obj, filters, start_time, end_time)
181 query = query.with_entities('longitude', 'latitude')
182 df = pd.read_sql_query(query.statement, query.session.bind)
183 return df.to_dict(orient='records') # pylint: disable=no-member
184
185
186 def filter_and_group_injured_count_per_age_group(data_of_ages):
187 import re
188 range_dict = {0: 14, 15: 24, 25: 64, 65: 200}
189 return_dict_by_required_age_group = defaultdict(int)
190
191 for age_range_and_count in data_of_ages:
192 age_range = age_range_and_count['age_group']
193 count = age_range_and_count['count']
194
195 # Parse the db age range
196 match_parsing = re.match("([0-9]{2})\\-([0-9]{2})", age_range)
197 if match_parsing:
198 regex_age_matches = match_parsing.groups()
199 if len(regex_age_matches) != 2:
200 return_dict_by_required_age_group["unknown"] += count
201 continue
202 min_age_raw, max_age_raw = regex_age_matches
203 else:
204 match_parsing = re.match("([0-9]{2})\\+", age_range) # e.g 85+
205 if match_parsing:
206 # We assume that no body live beyond age 200
207 min_age_raw, max_age_raw = match_parsing.group(1), 200
208 else:
209 return_dict_by_required_age_group["unknown"] += count
210 continue
211
212 # Find to what "bucket" to aggregate the data
213 min_age = int(min_age_raw)
214 max_age = int(max_age_raw)
215 for item in range_dict.items():
216 item_min_range, item_max_range = item
217 if item_min_range <= min_age <= item_max_range and item_min_range <= max_age <= item_max_range:
218 string_age_range = f'{item_min_range:02}-{item_max_range:02}'
219 return_dict_by_required_age_group[string_age_range] += count
220 break
221
222 # Rename the last key
223 return_dict_by_required_age_group["65+"] = return_dict_by_required_age_group["65-200"]
224 del return_dict_by_required_age_group["65-200"]
225
226 return return_dict_by_required_age_group
227
228
229 def get_most_severe_accidents_table_title(location_text):
230 return 'תאונות חמורות ב' + location_text
231
232
233 def get_accident_count_by_severity(location_info, location_text, start_time, end_time):
234 count_by_severity = get_accidents_stats(table_obj=AccidentMarkerView,
235 filters=location_info,
236 group_by='accident_severity_hebrew',
237 count='accident_severity_hebrew',
238 start_time=start_time,
239 end_time=end_time)
240 severity_dict = {'קטלנית': 'fatal',
241 'קשה': 'severe',
242 'קלה': 'light'}
243 items = {}
244 total_accidents_count = 0
245 start_year = start_time.year
246 end_year = end_time.year
247 for severity_and_count in count_by_severity:
248 accident_severity_hebrew = severity_and_count['accident_severity']
249 severity_english = severity_dict[accident_severity_hebrew]
250 severity_count_text = 'severity_{}_count'.format(severity_english)
251 items[severity_count_text] = severity_and_count['count']
252 total_accidents_count += severity_and_count['count']
253 items['start_year'] = start_year
254 items['end_year'] = end_year
255 items['total_accidents_count'] = total_accidents_count
256 return items
257
258
259 def get_most_severe_accidents_table(location_info, start_time, end_time):
260 entities = 'id', 'provider_code', 'accident_timestamp', 'accident_type_hebrew', 'accident_year'
261 accidents = get_most_severe_accidents_with_entities(
262 table_obj=AccidentMarkerView,
263 filters=location_info,
264 entities=entities,
265 start_time=start_time,
266 end_time=end_time)
267 # Add casualties
268 for accident in accidents:
269 accident['type'] = accident['accident_type']
270 dt = accident['accident_timestamp'].to_pydatetime()
271 accident['date'] = dt.strftime("%d/%m/%y")
272 accident['hour'] = dt.strftime("%H:%M")
273 num = get_casualties_count_in_accident(
274 accident['id'], accident['provider_code'], 1, accident['accident_year'])
275 accident['killed_count'] = num
276 num = get_casualties_count_in_accident(
277 accident['id'], accident['provider_code'], [2, 3], accident['accident_year'])
278 accident['injured_count'] = num
279 del accident['accident_timestamp'], accident['accident_type'], accident['id'], accident['provider_code']
280 return accidents
281
282
283 # count of dead and severely injured
284 def get_casualties_count_in_accident(accident_id, provider_code, injury_severity, accident_year):
285 filters = {'accident_id': accident_id,
286 'provider_code': provider_code,
287 'injury_severity': injury_severity,
288 'accident_year': accident_year}
289 casualties = get_accidents_stats(table_obj=InvolvedMarkerView, filters=filters,
290 group_by='injury_severity', count='injury_severity')
291 res = 0
292 for ca in casualties:
293 res += ca['count']
294 return res
295
296
297 # generate text describing location or road segment of news flash
298 # to be used by most severe accidents additional info widget
299 def get_news_flash_location_text(news_flash_id):
300 news_flash_item = db.session.query(NewsFlash).filter(
301 NewsFlash.id == news_flash_id).first()
302 nf = news_flash_item.serialize()
303 resolution = nf['resolution'] if nf['resolution'] else ''
304 yishuv_name = nf['yishuv_name'] if nf['yishuv_name'] else ''
305 road1 = str(int(nf['road1'])) if nf['road1'] else ''
306 road2 = str(int(nf['road2'])) if nf['road2'] else ''
307 street1_hebrew = nf['street1_hebrew'] if nf['street1_hebrew'] else ''
308 road_segment_name = nf['road_segment_name'] if nf['road_segment_name'] else ''
309 if resolution == 'כביש בינעירוני' and road1 and road_segment_name:
310 res = 'כביש ' + road1 + ' במקטע ' + road_segment_name
311 elif resolution == 'עיר' and not yishuv_name:
312 res = nf['location']
313 elif resolution == 'עיר' and yishuv_name:
314 res = nf['yishuv_name']
315 elif resolution == 'צומת בינעירוני' and road1 and road2:
316 res = 'צומת כביש ' + road1 + ' עם כביש ' + road2
317 elif resolution == 'צומת בינעירוני' and road1 and road_segment_name:
318 res = 'כביש ' + road1 + ' במקטע ' + road_segment_name
319 elif resolution == 'רחוב' and yishuv_name and street1_hebrew:
320 res = ' רחוב ' + street1_hebrew + ' ב' + yishuv_name
321 else:
322 logging.warning(
323 "Did not found quality resolution. Using location field. News Flash id:{}".format(nf['id']))
324 res = nf['location']
325 return res
326
327
328 def extract_news_flash_obj(news_flash_id):
329 news_flash_obj = db.session.query(NewsFlash).filter(
330 NewsFlash.id == news_flash_id).first()
331
332 if not news_flash_obj:
333 logging.warning('Could not find news flash id {}'.format(news_flash_id))
334 return None
335
336 return news_flash_obj
337
338
339 def sum_road_accidents_by_specific_type(road_data, field_name):
340 dict_merge = defaultdict(int)
341 for accident_data in road_data:
342 if accident_data['accident_type'] == field_name:
343 dict_merge[field_name] += accident_data['count']
344 else:
345 dict_merge['תאונות אחרות'] += accident_data['count']
346 return dict_merge
347
348
349 def convert_roads_fatal_accidents_to_frontend_view(data_dict):
350 data_list = []
351 for key, value in data_dict.items():
352 data_list.append({'desc': key, 'count': value})
353 return data_list
354
355
356 def get_head_to_head_stat(news_flash_id, start_time, end_time):
357 news_flash_obj = extract_news_flash_obj(news_flash_id)
358 road_data = {}
359 filter_dict = {'road_type': CONST.ROAD_TYPE_NOT_IN_CITY_NOT_IN_INTERSECTION,
360 'accident_severity': CONST.ACCIDENT_SEVERITY_DEADLY}
361 all_roads_data = get_accidents_stats(table_obj=AccidentMarkerView,
362 filters=filter_dict,
363 group_by='accident_type_hebrew', count='accident_type_hebrew',
364 start_time=start_time, end_time=end_time)
365
366 if news_flash_obj.road1 and news_flash_obj.road_segment_name:
367 filter_dict.update({'road1': news_flash_obj.road1, 'road_segment_name': news_flash_obj.road_segment_name})
368 road_data = get_accidents_stats(table_obj=AccidentMarkerView,
369 filters=filter_dict,
370 group_by='accident_type_hebrew', count='accident_type_hebrew',
371 start_time=start_time, end_time=end_time)
372
373 road_data_dict = sum_road_accidents_by_specific_type(road_data, 'התנגשות חזית בחזית')
374 all_roads_data_dict = sum_road_accidents_by_specific_type(all_roads_data, 'התנגשות חזית בחזית')
375
376 return {'specific_road_segment_fatal_accidents': convert_roads_fatal_accidents_to_frontend_view(road_data_dict),
377 'all_roads_fatal_accidents': convert_roads_fatal_accidents_to_frontend_view(all_roads_data_dict)}
378
379 #gets the latest date an accident has occured
380 def get_latest_accident_date(table_obj, filters):
381 filters= filters or {}
382 filters['provider_code'] = [CONST.CBS_ACCIDENT_TYPE_1_CODE, CONST.CBS_ACCIDENT_TYPE_3_CODE]
383 query = db.session.query(func.max(table_obj.accident_timestamp))
384 df = pd.read_sql_query(query.statement, query.session.bind)
385 return (df.to_dict(orient='records'))[0].get("max_1") # pylint: disable=no-member
386
387 def create_infographics_data(news_flash_id, number_of_years_ago):
388 output = {}
389 try:
390 number_of_years_ago = int(number_of_years_ago)
391 except ValueError:
392 return Response({})
393 if number_of_years_ago < 0 or number_of_years_ago > 100:
394 return Response({})
395 location_info = extract_news_flash_location(news_flash_id)
396 if location_info is None:
397 return Response({})
398 logging.debug('location_info:{}'.format(location_info))
399 location_text = get_news_flash_location_text(news_flash_id)
400 logging.debug('location_text:{}'.format(location_text))
401 gps = location_info['gps']
402 location_info = location_info['data']
403 output['meta'] = {'location_info': location_info.copy(),
404 'location_text': location_text}
405 output['widgets'] = []
406 resolution = location_info.pop('resolution')
407 if resolution is None:
408 return Response({})
409
410 if all(value is None for value in location_info.values()):
411 return Response({})
412
413 last_accident_date=get_latest_accident_date(table_obj=AccidentMarkerView, filters=None)
414 #converting to datetime object to get the date
415 end_time=last_accident_date.to_pydatetime().date()
416
417 start_time = datetime.date(
418 end_time.year + 1 - number_of_years_ago, 1, 1)
419
420 #accident_severity count
421 items = get_accident_count_by_severity(location_info=location_info,
422 location_text=location_text,
423 start_time=start_time,
424 end_time=end_time)
425
426 accident_count_by_severity = Widget(name='accident_count_by_severity',
427 rank=1,
428 items=items)
429 output['widgets'].append(accident_count_by_severity.serialize())
430
431 # most severe accidents table
432 most_severe_accidents_table = Widget(name='most_severe_accidents_table',
433 rank=2,
434 items=get_most_severe_accidents_table(location_info, start_time, end_time),
435 text={'title':get_most_severe_accidents_table_title(location_text)})
436 output['widgets'].append(most_severe_accidents_table.serialize())
437
438 # most severe accidents
439 most_severe_accidents = Widget(name='most_severe_accidents',
440 rank=3,
441 items=get_most_severe_accidents(table_obj=AccidentMarkerView,
442 filters=location_info,
443 start_time=start_time,
444 end_time=end_time))
445 output['widgets'].append(most_severe_accidents.serialize())
446
447 # street view
448 street_view = Widget(name='street_view',
449 rank=4,
450 items={'longitude': gps['lon'],
451 'latitude': gps['lat']})
452 output['widgets'].append(street_view.serialize())
453
454 # head to head accidents
455 head_on_collisions_comparison = Widget(name='head_on_collisions_comparison',
456 rank=5,
457 items=get_head_to_head_stat(news_flash_id=news_flash_id,
458 start_time=start_time,
459 end_time=end_time))
460 output['widgets'].append(head_on_collisions_comparison.serialize())
461
462 # accident_type count
463 accident_count_by_accident_type = Widget(name='accident_count_by_accident_type',
464 rank=6,
465 items=get_accident_count_by_accident_type(location_info=location_info,
466 start_time=start_time,
467 end_time=end_time))
468 output['widgets'].append(accident_count_by_accident_type.serialize())
469
470 # accidents heat map
471 accidents_heat_map = Widget(name='accidents_heat_map',
472 rank=7,
473 items=get_accidents_heat_map(table_obj=AccidentMarkerView,
474 filters=location_info,
475 start_time=start_time,
476 end_time=end_time))
477 output['widgets'].append(accidents_heat_map.serialize())
478
479 # accident count by accident year
480 accident_count_by_accident_year = Widget(name='accident_count_by_accident_year',
481 rank=8,
482 items=get_accidents_stats(table_obj=AccidentMarkerView,
483 filters=location_info,
484 group_by='accident_year',
485 count='accident_year',
486 start_time=start_time,
487 end_time=end_time),
488 text={'title':'כמות תאונות'})
489 output['widgets'].append(accident_count_by_accident_year.serialize())
490
491 # injured count by accident year
492 injured_count_by_accident_year = Widget(name='injured_count_by_accident_year',
493 rank=9,
494 items=get_accidents_stats(table_obj=InvolvedMarkerView,
495 filters=get_injured_filters(location_info),
496 group_by='accident_year',
497 count='accident_year',
498 start_time=start_time,
499 end_time=end_time),
500 text={'title':'כמות פצועים'})
501 output['widgets'].append(injured_count_by_accident_year.serialize())
502
503 # accident count on day light
504 accident_count_by_day_night = Widget(name='accident_count_by_day_night',
505 rank=10,
506 items=get_accidents_stats(table_obj=AccidentMarkerView,
507 filters=location_info,
508 group_by='day_night_hebrew',
509 count='day_night_hebrew',
510 start_time=start_time,
511 end_time=end_time),
512 text={'title':'כמות תאונות ביום ובלילה'})
513 output['widgets'].append(accident_count_by_day_night.serialize())
514
515 # accidents distribution count by hour
516 accidents_count_by_hour = Widget(name='accidents_count_by_hour',
517 rank=11,
518 items=get_accidents_stats(table_obj=AccidentMarkerView,
519 filters=location_info,
520 group_by='accident_hour',
521 count='accident_hour',
522 start_time=start_time,
523 end_time=end_time),
524 text={'title':'כמות תאונות לפי שעה'})
525 output['widgets'].append(accidents_count_by_hour.serialize())
526
527 # accident count by road_light
528 accident_count_by_road_light = Widget(name='accident_count_by_road_light',
529 rank=12,
530 items=get_accidents_stats(table_obj=AccidentMarkerView,
531 filters=location_info,
532 group_by='road_light_hebrew',
533 count='road_light_hebrew',
534 start_time=start_time,
535 end_time=end_time),
536 text={'title':'כמות תאונות לפי תאורה'})
537 output['widgets'].append(accident_count_by_road_light.serialize())
538
539 # accident count by road_segment
540 top_road_segments_accidents_per_km = Widget(name='top_road_segments_accidents_per_km',
541 rank=13,
542 items=get_top_road_segments_accidents_per_km(resolution=resolution,
543 location_info=location_info,
544 start_time=start_time,
545 end_time=end_time))
546 output['widgets'].append(top_road_segments_accidents_per_km.serialize())
547
548 # injured count per age group
549 data_of_injured_count_per_age_group_raw = get_accidents_stats(table_obj=InvolvedMarkerView,
550 filters=get_injured_filters(location_info),
551 group_by='age_group_hebrew',
552 count='age_group_hebrew',
553 start_time=start_time,
554 end_time=end_time)
555 data_of_injured_count_per_age_group = filter_and_group_injured_count_per_age_group(data_of_injured_count_per_age_group_raw)
556 injured_count_per_age_group = Widget(name='injured_count_per_age_group',
557 rank=14,
558 items=data_of_injured_count_per_age_group)
559 output['widgets'].append(injured_count_per_age_group.serialize())
560
561 # vision zero
562 vision_zero = Widget(name='vision_zero',
563 rank=15,
564 items=['vision_zero_2_plus_1'])
565 output['widgets'].append(vision_zero.serialize())
566
567 return Response(json.dumps(output, default=str), mimetype="application/json")
568
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/anyway/infographics_utils.py b/anyway/infographics_utils.py
--- a/anyway/infographics_utils.py
+++ b/anyway/infographics_utils.py
@@ -338,6 +338,9 @@
def sum_road_accidents_by_specific_type(road_data, field_name):
dict_merge = defaultdict(int)
+ dict_merge[field_name] = 0
+ dict_merge['תאונות אחרות'] = 0
+
for accident_data in road_data:
if accident_data['accident_type'] == field_name:
dict_merge[field_name] += accident_data['count']
| {"golden_diff": "diff --git a/anyway/infographics_utils.py b/anyway/infographics_utils.py\n--- a/anyway/infographics_utils.py\n+++ b/anyway/infographics_utils.py\n@@ -338,6 +338,9 @@\n \n def sum_road_accidents_by_specific_type(road_data, field_name):\n dict_merge = defaultdict(int)\n+ dict_merge[field_name] = 0\n+ dict_merge['\u05ea\u05d0\u05d5\u05e0\u05d5\u05ea \u05d0\u05d7\u05e8\u05d5\u05ea'] = 0\n+\n for accident_data in road_data:\n if accident_data['accident_type'] == field_name:\n dict_merge[field_name] += accident_data['count']\n", "issue": "Set default values of 0 to elements in head_on_collisions_comparison\nExample for the element that is missing\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport logging\nimport datetime\nimport json\nimport pandas as pd\nfrom collections import defaultdict\nfrom sqlalchemy import func\nfrom sqlalchemy import cast, Numeric\nfrom sqlalchemy import desc\nfrom flask import Response\nfrom .constants import CONST\nfrom .models import (NewsFlash, AccidentMarkerView, InvolvedMarkerView, RoadSegments)\nfrom .parsers import resolution_dict\nfrom .app_and_db import db\n\n'''\n Widget structure:\n {\n 'name': str,\n 'rank': int (Integer),\n 'data': {\n 'items': list (Array) | dictionary (Object),\n 'text': dictionary (Object) - can be empty\n }\n 'meta': dictionary (Object) - can be empty\n }\n'''\nclass Widget():\n def __init__(self, name, rank, items, text=None, meta=None):\n self.name = name\n self.rank = rank\n self.items = items\n self.text = text\n self.meta = meta\n\n def serialize(self):\n output = {}\n output['name'] = self.name\n output['rank'] = self.rank\n output['data'] = {}\n output['data']['items'] = self.items\n if self.text:\n output['data']['text'] = self.text\n if self.meta:\n output['meta'] = self.meta\n return output\n\n\ndef extract_news_flash_location(news_flash_id):\n news_flash_obj = db.session.query(NewsFlash).filter(\n NewsFlash.id == news_flash_id).first()\n if not news_flash_obj:\n logging.warn('could not find news flash id {}'.format(news_flash_id))\n return None\n resolution = news_flash_obj.resolution if news_flash_obj.resolution else None\n if not news_flash_obj or not resolution or resolution not in resolution_dict:\n logging.warn(\n 'could not find valid resolution for news flash id {}'.format(news_flash_id))\n return None\n data = {'resolution': resolution}\n for field in resolution_dict[resolution]:\n curr_field = getattr(news_flash_obj, field)\n if curr_field is not None:\n data[field] = curr_field\n gps = {}\n for field in ['lon', 'lat']:\n gps[field] = getattr(news_flash_obj, field)\n return {'name': 'location', 'data': data, 'gps': gps}\n\n\ndef get_query(table_obj, filters, start_time, end_time):\n query = db.session.query(table_obj)\n if start_time:\n query = query.filter(\n getattr(table_obj, 'accident_timestamp') >= start_time)\n if end_time:\n query = query.filter(\n getattr(table_obj, 'accident_timestamp') <= end_time)\n if filters:\n for field_name, value in filters.items():\n if isinstance(value, list):\n values = value\n else:\n values = [value]\n query = query.filter((getattr(table_obj, field_name)).in_(values))\n return query\n\ndef get_accident_count_by_accident_type(location_info, start_time, end_time):\n all_accident_type_count = get_accidents_stats(table_obj=AccidentMarkerView,\n filters=location_info,\n group_by='accident_type_hebrew',\n count='accident_type_hebrew',\n start_time=start_time,\n end_time=end_time)\n merged_accident_type_count = [{'accident_type': '\u05d4\u05ea\u05e0\u05d2\u05e9\u05d5\u05ea', 'count': 0}]\n for item in all_accident_type_count:\n if '\u05d4\u05ea\u05e0\u05d2\u05e9\u05d5\u05ea' in item['accident_type']:\n merged_accident_type_count[0]['count'] += item['count']\n else:\n merged_accident_type_count.append(item)\n return merged_accident_type_count\n\ndef get_top_road_segments_accidents_per_km(resolution, location_info, start_time=None, end_time=None, limit=5):\n if resolution != '\u05db\u05d1\u05d9\u05e9 \u05d1\u05d9\u05e0\u05e2\u05d9\u05e8\u05d5\u05e0\u05d9': # relevent for non urban roads only\n return {}\n\n query = get_query(table_obj=AccidentMarkerView, filters=None,\n start_time=start_time, end_time=end_time)\n\n query = query.with_entities(\n AccidentMarkerView.road_segment_name,\n func.count(AccidentMarkerView.road_segment_name).label(\n 'total_accidents'),\n (RoadSegments.to_km - RoadSegments.from_km).label('segment_length'),\n cast((func.count(AccidentMarkerView.road_segment_name) / (RoadSegments.to_km - RoadSegments.from_km)),\n Numeric(10, 4)).label(\n 'accidents_per_km')) \\\n .filter(AccidentMarkerView.road1 == RoadSegments.road) \\\n .filter(AccidentMarkerView.road_segment_number == RoadSegments.segment) \\\n .filter(AccidentMarkerView.road1 == location_info['road1']) \\\n .filter(AccidentMarkerView.road_segment_name is not None) \\\n .group_by(AccidentMarkerView.road_segment_name, RoadSegments.from_km, RoadSegments.to_km) \\\n .order_by(desc('accidents_per_km')) \\\n .limit(limit)\n\n result = pd.read_sql_query(query.statement, query.session.bind)\n return result.to_dict(orient='records') # pylint: disable=no-member\n\n\ndef get_accidents_stats(table_obj, filters=None, group_by=None, count=None, start_time=None, end_time=None):\n filters = filters or {}\n filters['provider_code'] = [\n CONST.CBS_ACCIDENT_TYPE_1_CODE, CONST.CBS_ACCIDENT_TYPE_3_CODE]\n # get stats\n query = get_query(table_obj, filters, start_time, end_time)\n if group_by:\n query = query.group_by(group_by)\n query = query.with_entities(group_by, func.count(count))\n df = pd.read_sql_query(query.statement, query.session.bind)\n df.rename(columns={'count_1': 'count'}, inplace=True) # pylint: disable=no-member\n df.columns = [c.replace('_hebrew', '') for c in df.columns]\n return df.to_dict(orient='records') if group_by or count else df.to_dict() # pylint: disable=no-member\n\n\ndef get_injured_filters(location_info):\n new_filters = {}\n for curr_filter, curr_values in location_info.items():\n if curr_filter in ['region_hebrew', 'district_hebrew', 'district_hebrew', 'yishuv_name']:\n new_filter_name = 'accident_' + curr_filter\n new_filters[new_filter_name] = curr_values\n else:\n new_filters[curr_filter] = curr_values\n new_filters['injury_severity'] = [1, 2, 3, 4, 5]\n return new_filters\n\n\ndef get_most_severe_accidents_with_entities(table_obj, filters, entities, start_time, end_time, limit=10):\n filters = filters or {}\n filters['provider_code'] = [\n CONST.CBS_ACCIDENT_TYPE_1_CODE, CONST.CBS_ACCIDENT_TYPE_3_CODE]\n query = get_query(table_obj, filters, start_time, end_time)\n query = query.with_entities(*entities)\n query = query.order_by(getattr(table_obj, \"accident_severity\"), getattr(\n table_obj, \"accident_timestamp\").desc())\n query = query.limit(limit)\n df = pd.read_sql_query(query.statement, query.session.bind)\n df.columns = [c.replace('_hebrew', '') for c in df.columns]\n return df.to_dict(orient='records') # pylint: disable=no-member\n\n\ndef get_most_severe_accidents(table_obj, filters, start_time, end_time, limit=10):\n entities = 'longitude', 'latitude', 'accident_severity_hebrew', 'accident_timestamp', 'accident_type_hebrew'\n return get_most_severe_accidents_with_entities(table_obj, filters, entities, start_time, end_time, limit)\n\n\ndef get_accidents_heat_map(table_obj, filters, start_time, end_time):\n filters = filters or {}\n filters['provider_code'] = [\n CONST.CBS_ACCIDENT_TYPE_1_CODE, CONST.CBS_ACCIDENT_TYPE_3_CODE]\n query = get_query(table_obj, filters, start_time, end_time)\n query = query.with_entities('longitude', 'latitude')\n df = pd.read_sql_query(query.statement, query.session.bind)\n return df.to_dict(orient='records') # pylint: disable=no-member\n\n\ndef filter_and_group_injured_count_per_age_group(data_of_ages):\n import re\n range_dict = {0: 14, 15: 24, 25: 64, 65: 200}\n return_dict_by_required_age_group = defaultdict(int)\n\n for age_range_and_count in data_of_ages:\n age_range = age_range_and_count['age_group']\n count = age_range_and_count['count']\n\n # Parse the db age range\n match_parsing = re.match(\"([0-9]{2})\\\\-([0-9]{2})\", age_range)\n if match_parsing:\n regex_age_matches = match_parsing.groups()\n if len(regex_age_matches) != 2:\n return_dict_by_required_age_group[\"unknown\"] += count\n continue\n min_age_raw, max_age_raw = regex_age_matches\n else:\n match_parsing = re.match(\"([0-9]{2})\\\\+\", age_range) # e.g 85+\n if match_parsing:\n # We assume that no body live beyond age 200\n min_age_raw, max_age_raw = match_parsing.group(1), 200\n else:\n return_dict_by_required_age_group[\"unknown\"] += count\n continue\n\n # Find to what \"bucket\" to aggregate the data\n min_age = int(min_age_raw)\n max_age = int(max_age_raw)\n for item in range_dict.items():\n item_min_range, item_max_range = item\n if item_min_range <= min_age <= item_max_range and item_min_range <= max_age <= item_max_range:\n string_age_range = f'{item_min_range:02}-{item_max_range:02}'\n return_dict_by_required_age_group[string_age_range] += count\n break\n\n # Rename the last key\n return_dict_by_required_age_group[\"65+\"] = return_dict_by_required_age_group[\"65-200\"]\n del return_dict_by_required_age_group[\"65-200\"]\n\n return return_dict_by_required_age_group\n\n\ndef get_most_severe_accidents_table_title(location_text):\n return '\u05ea\u05d0\u05d5\u05e0\u05d5\u05ea \u05d7\u05de\u05d5\u05e8\u05d5\u05ea \u05d1' + location_text\n\n\ndef get_accident_count_by_severity(location_info, location_text, start_time, end_time):\n count_by_severity = get_accidents_stats(table_obj=AccidentMarkerView,\n filters=location_info,\n group_by='accident_severity_hebrew',\n count='accident_severity_hebrew',\n start_time=start_time,\n end_time=end_time)\n severity_dict = {'\u05e7\u05d8\u05dc\u05e0\u05d9\u05ea': 'fatal',\n '\u05e7\u05e9\u05d4': 'severe',\n '\u05e7\u05dc\u05d4': 'light'}\n items = {}\n total_accidents_count = 0\n start_year = start_time.year\n end_year = end_time.year\n for severity_and_count in count_by_severity:\n accident_severity_hebrew = severity_and_count['accident_severity']\n severity_english = severity_dict[accident_severity_hebrew]\n severity_count_text = 'severity_{}_count'.format(severity_english)\n items[severity_count_text] = severity_and_count['count']\n total_accidents_count += severity_and_count['count']\n items['start_year'] = start_year\n items['end_year'] = end_year\n items['total_accidents_count'] = total_accidents_count\n return items\n\n\ndef get_most_severe_accidents_table(location_info, start_time, end_time):\n entities = 'id', 'provider_code', 'accident_timestamp', 'accident_type_hebrew', 'accident_year'\n accidents = get_most_severe_accidents_with_entities(\n table_obj=AccidentMarkerView,\n filters=location_info,\n entities=entities,\n start_time=start_time,\n end_time=end_time)\n # Add casualties\n for accident in accidents:\n accident['type'] = accident['accident_type']\n dt = accident['accident_timestamp'].to_pydatetime()\n accident['date'] = dt.strftime(\"%d/%m/%y\")\n accident['hour'] = dt.strftime(\"%H:%M\")\n num = get_casualties_count_in_accident(\n accident['id'], accident['provider_code'], 1, accident['accident_year'])\n accident['killed_count'] = num\n num = get_casualties_count_in_accident(\n accident['id'], accident['provider_code'], [2, 3], accident['accident_year'])\n accident['injured_count'] = num\n del accident['accident_timestamp'], accident['accident_type'], accident['id'], accident['provider_code']\n return accidents\n\n\n# count of dead and severely injured\ndef get_casualties_count_in_accident(accident_id, provider_code, injury_severity, accident_year):\n filters = {'accident_id': accident_id,\n 'provider_code': provider_code,\n 'injury_severity': injury_severity,\n 'accident_year': accident_year}\n casualties = get_accidents_stats(table_obj=InvolvedMarkerView, filters=filters,\n group_by='injury_severity', count='injury_severity')\n res = 0\n for ca in casualties:\n res += ca['count']\n return res\n\n\n# generate text describing location or road segment of news flash\n# to be used by most severe accidents additional info widget\ndef get_news_flash_location_text(news_flash_id):\n news_flash_item = db.session.query(NewsFlash).filter(\n NewsFlash.id == news_flash_id).first()\n nf = news_flash_item.serialize()\n resolution = nf['resolution'] if nf['resolution'] else ''\n yishuv_name = nf['yishuv_name'] if nf['yishuv_name'] else ''\n road1 = str(int(nf['road1'])) if nf['road1'] else ''\n road2 = str(int(nf['road2'])) if nf['road2'] else ''\n street1_hebrew = nf['street1_hebrew'] if nf['street1_hebrew'] else ''\n road_segment_name = nf['road_segment_name'] if nf['road_segment_name'] else ''\n if resolution == '\u05db\u05d1\u05d9\u05e9 \u05d1\u05d9\u05e0\u05e2\u05d9\u05e8\u05d5\u05e0\u05d9' and road1 and road_segment_name:\n res = '\u05db\u05d1\u05d9\u05e9 ' + road1 + ' \u05d1\u05de\u05e7\u05d8\u05e2 ' + road_segment_name\n elif resolution == '\u05e2\u05d9\u05e8' and not yishuv_name:\n res = nf['location']\n elif resolution == '\u05e2\u05d9\u05e8' and yishuv_name:\n res = nf['yishuv_name']\n elif resolution == '\u05e6\u05d5\u05de\u05ea \u05d1\u05d9\u05e0\u05e2\u05d9\u05e8\u05d5\u05e0\u05d9' and road1 and road2:\n res = '\u05e6\u05d5\u05de\u05ea \u05db\u05d1\u05d9\u05e9 ' + road1 + ' \u05e2\u05dd \u05db\u05d1\u05d9\u05e9 ' + road2\n elif resolution == '\u05e6\u05d5\u05de\u05ea \u05d1\u05d9\u05e0\u05e2\u05d9\u05e8\u05d5\u05e0\u05d9' and road1 and road_segment_name:\n res = '\u05db\u05d1\u05d9\u05e9 ' + road1 + ' \u05d1\u05de\u05e7\u05d8\u05e2 ' + road_segment_name\n elif resolution == '\u05e8\u05d7\u05d5\u05d1' and yishuv_name and street1_hebrew:\n res = ' \u05e8\u05d7\u05d5\u05d1 ' + street1_hebrew + ' \u05d1' + yishuv_name\n else:\n logging.warning(\n \"Did not found quality resolution. Using location field. News Flash id:{}\".format(nf['id']))\n res = nf['location']\n return res\n\n\ndef extract_news_flash_obj(news_flash_id):\n news_flash_obj = db.session.query(NewsFlash).filter(\n NewsFlash.id == news_flash_id).first()\n\n if not news_flash_obj:\n logging.warning('Could not find news flash id {}'.format(news_flash_id))\n return None\n\n return news_flash_obj\n\n\ndef sum_road_accidents_by_specific_type(road_data, field_name):\n dict_merge = defaultdict(int)\n for accident_data in road_data:\n if accident_data['accident_type'] == field_name:\n dict_merge[field_name] += accident_data['count']\n else:\n dict_merge['\u05ea\u05d0\u05d5\u05e0\u05d5\u05ea \u05d0\u05d7\u05e8\u05d5\u05ea'] += accident_data['count']\n return dict_merge\n\n\ndef convert_roads_fatal_accidents_to_frontend_view(data_dict):\n data_list = []\n for key, value in data_dict.items():\n data_list.append({'desc': key, 'count': value})\n return data_list\n\n\ndef get_head_to_head_stat(news_flash_id, start_time, end_time):\n news_flash_obj = extract_news_flash_obj(news_flash_id)\n road_data = {}\n filter_dict = {'road_type': CONST.ROAD_TYPE_NOT_IN_CITY_NOT_IN_INTERSECTION,\n 'accident_severity': CONST.ACCIDENT_SEVERITY_DEADLY}\n all_roads_data = get_accidents_stats(table_obj=AccidentMarkerView,\n filters=filter_dict,\n group_by='accident_type_hebrew', count='accident_type_hebrew',\n start_time=start_time, end_time=end_time)\n\n if news_flash_obj.road1 and news_flash_obj.road_segment_name:\n filter_dict.update({'road1': news_flash_obj.road1, 'road_segment_name': news_flash_obj.road_segment_name})\n road_data = get_accidents_stats(table_obj=AccidentMarkerView,\n filters=filter_dict,\n group_by='accident_type_hebrew', count='accident_type_hebrew',\n start_time=start_time, end_time=end_time)\n\n road_data_dict = sum_road_accidents_by_specific_type(road_data, '\u05d4\u05ea\u05e0\u05d2\u05e9\u05d5\u05ea \u05d7\u05d6\u05d9\u05ea \u05d1\u05d7\u05d6\u05d9\u05ea')\n all_roads_data_dict = sum_road_accidents_by_specific_type(all_roads_data, '\u05d4\u05ea\u05e0\u05d2\u05e9\u05d5\u05ea \u05d7\u05d6\u05d9\u05ea \u05d1\u05d7\u05d6\u05d9\u05ea')\n\n return {'specific_road_segment_fatal_accidents': convert_roads_fatal_accidents_to_frontend_view(road_data_dict),\n 'all_roads_fatal_accidents': convert_roads_fatal_accidents_to_frontend_view(all_roads_data_dict)}\n\n#gets the latest date an accident has occured\ndef get_latest_accident_date(table_obj, filters):\n filters= filters or {}\n filters['provider_code'] = [CONST.CBS_ACCIDENT_TYPE_1_CODE, CONST.CBS_ACCIDENT_TYPE_3_CODE]\n query = db.session.query(func.max(table_obj.accident_timestamp))\n df = pd.read_sql_query(query.statement, query.session.bind)\n return (df.to_dict(orient='records'))[0].get(\"max_1\") # pylint: disable=no-member\n\ndef create_infographics_data(news_flash_id, number_of_years_ago):\n output = {}\n try:\n number_of_years_ago = int(number_of_years_ago)\n except ValueError:\n return Response({})\n if number_of_years_ago < 0 or number_of_years_ago > 100:\n return Response({})\n location_info = extract_news_flash_location(news_flash_id)\n if location_info is None:\n return Response({})\n logging.debug('location_info:{}'.format(location_info))\n location_text = get_news_flash_location_text(news_flash_id)\n logging.debug('location_text:{}'.format(location_text))\n gps = location_info['gps']\n location_info = location_info['data']\n output['meta'] = {'location_info': location_info.copy(),\n 'location_text': location_text}\n output['widgets'] = []\n resolution = location_info.pop('resolution')\n if resolution is None:\n return Response({})\n\n if all(value is None for value in location_info.values()):\n return Response({})\n\n last_accident_date=get_latest_accident_date(table_obj=AccidentMarkerView, filters=None)\n #converting to datetime object to get the date\n end_time=last_accident_date.to_pydatetime().date()\n\n start_time = datetime.date(\n end_time.year + 1 - number_of_years_ago, 1, 1)\n\n #accident_severity count\n items = get_accident_count_by_severity(location_info=location_info,\n location_text=location_text,\n start_time=start_time,\n end_time=end_time)\n\n accident_count_by_severity = Widget(name='accident_count_by_severity',\n rank=1,\n items=items)\n output['widgets'].append(accident_count_by_severity.serialize())\n\n # most severe accidents table\n most_severe_accidents_table = Widget(name='most_severe_accidents_table',\n rank=2,\n items=get_most_severe_accidents_table(location_info, start_time, end_time),\n text={'title':get_most_severe_accidents_table_title(location_text)})\n output['widgets'].append(most_severe_accidents_table.serialize())\n\n # most severe accidents\n most_severe_accidents = Widget(name='most_severe_accidents',\n rank=3,\n items=get_most_severe_accidents(table_obj=AccidentMarkerView,\n filters=location_info,\n start_time=start_time,\n end_time=end_time))\n output['widgets'].append(most_severe_accidents.serialize())\n\n # street view\n street_view = Widget(name='street_view',\n rank=4,\n items={'longitude': gps['lon'],\n 'latitude': gps['lat']})\n output['widgets'].append(street_view.serialize())\n\n # head to head accidents\n head_on_collisions_comparison = Widget(name='head_on_collisions_comparison',\n rank=5,\n items=get_head_to_head_stat(news_flash_id=news_flash_id,\n start_time=start_time,\n end_time=end_time))\n output['widgets'].append(head_on_collisions_comparison.serialize())\n\n # accident_type count\n accident_count_by_accident_type = Widget(name='accident_count_by_accident_type',\n rank=6,\n items=get_accident_count_by_accident_type(location_info=location_info,\n start_time=start_time,\n end_time=end_time))\n output['widgets'].append(accident_count_by_accident_type.serialize())\n\n # accidents heat map\n accidents_heat_map = Widget(name='accidents_heat_map',\n rank=7,\n items=get_accidents_heat_map(table_obj=AccidentMarkerView,\n filters=location_info,\n start_time=start_time,\n end_time=end_time))\n output['widgets'].append(accidents_heat_map.serialize())\n\n # accident count by accident year\n accident_count_by_accident_year = Widget(name='accident_count_by_accident_year',\n rank=8,\n items=get_accidents_stats(table_obj=AccidentMarkerView,\n filters=location_info,\n group_by='accident_year',\n count='accident_year',\n start_time=start_time,\n end_time=end_time),\n text={'title':'\u05db\u05de\u05d5\u05ea \u05ea\u05d0\u05d5\u05e0\u05d5\u05ea'})\n output['widgets'].append(accident_count_by_accident_year.serialize())\n\n # injured count by accident year\n injured_count_by_accident_year = Widget(name='injured_count_by_accident_year',\n rank=9,\n items=get_accidents_stats(table_obj=InvolvedMarkerView,\n filters=get_injured_filters(location_info),\n group_by='accident_year',\n count='accident_year',\n start_time=start_time,\n end_time=end_time),\n text={'title':'\u05db\u05de\u05d5\u05ea \u05e4\u05e6\u05d5\u05e2\u05d9\u05dd'})\n output['widgets'].append(injured_count_by_accident_year.serialize())\n\n # accident count on day light\n accident_count_by_day_night = Widget(name='accident_count_by_day_night',\n rank=10,\n items=get_accidents_stats(table_obj=AccidentMarkerView,\n filters=location_info,\n group_by='day_night_hebrew',\n count='day_night_hebrew',\n start_time=start_time,\n end_time=end_time),\n text={'title':'\u05db\u05de\u05d5\u05ea \u05ea\u05d0\u05d5\u05e0\u05d5\u05ea \u05d1\u05d9\u05d5\u05dd \u05d5\u05d1\u05dc\u05d9\u05dc\u05d4'})\n output['widgets'].append(accident_count_by_day_night.serialize())\n\n # accidents distribution count by hour\n accidents_count_by_hour = Widget(name='accidents_count_by_hour',\n rank=11,\n items=get_accidents_stats(table_obj=AccidentMarkerView,\n filters=location_info,\n group_by='accident_hour',\n count='accident_hour',\n start_time=start_time,\n end_time=end_time),\n text={'title':'\u05db\u05de\u05d5\u05ea \u05ea\u05d0\u05d5\u05e0\u05d5\u05ea \u05dc\u05e4\u05d9 \u05e9\u05e2\u05d4'})\n output['widgets'].append(accidents_count_by_hour.serialize())\n\n # accident count by road_light\n accident_count_by_road_light = Widget(name='accident_count_by_road_light',\n rank=12,\n items=get_accidents_stats(table_obj=AccidentMarkerView,\n filters=location_info,\n group_by='road_light_hebrew',\n count='road_light_hebrew',\n start_time=start_time,\n end_time=end_time),\n text={'title':'\u05db\u05de\u05d5\u05ea \u05ea\u05d0\u05d5\u05e0\u05d5\u05ea \u05dc\u05e4\u05d9 \u05ea\u05d0\u05d5\u05e8\u05d4'})\n output['widgets'].append(accident_count_by_road_light.serialize())\n\n # accident count by road_segment\n top_road_segments_accidents_per_km = Widget(name='top_road_segments_accidents_per_km',\n rank=13,\n items=get_top_road_segments_accidents_per_km(resolution=resolution,\n location_info=location_info,\n start_time=start_time,\n end_time=end_time))\n output['widgets'].append(top_road_segments_accidents_per_km.serialize())\n\n # injured count per age group\n data_of_injured_count_per_age_group_raw = get_accidents_stats(table_obj=InvolvedMarkerView,\n filters=get_injured_filters(location_info),\n group_by='age_group_hebrew',\n count='age_group_hebrew',\n start_time=start_time,\n end_time=end_time)\n data_of_injured_count_per_age_group = filter_and_group_injured_count_per_age_group(data_of_injured_count_per_age_group_raw)\n injured_count_per_age_group = Widget(name='injured_count_per_age_group',\n rank=14,\n items=data_of_injured_count_per_age_group)\n output['widgets'].append(injured_count_per_age_group.serialize())\n\n # vision zero\n vision_zero = Widget(name='vision_zero',\n rank=15,\n items=['vision_zero_2_plus_1'])\n output['widgets'].append(vision_zero.serialize())\n\n return Response(json.dumps(output, default=str), mimetype=\"application/json\")\n", "path": "anyway/infographics_utils.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport logging\nimport datetime\nimport json\nimport pandas as pd\nfrom collections import defaultdict\nfrom sqlalchemy import func\nfrom sqlalchemy import cast, Numeric\nfrom sqlalchemy import desc\nfrom flask import Response\nfrom .constants import CONST\nfrom .models import (NewsFlash, AccidentMarkerView, InvolvedMarkerView, RoadSegments)\nfrom .parsers import resolution_dict\nfrom .app_and_db import db\n\n'''\n Widget structure:\n {\n 'name': str,\n 'rank': int (Integer),\n 'data': {\n 'items': list (Array) | dictionary (Object),\n 'text': dictionary (Object) - can be empty\n }\n 'meta': dictionary (Object) - can be empty\n }\n'''\nclass Widget():\n def __init__(self, name, rank, items, text=None, meta=None):\n self.name = name\n self.rank = rank\n self.items = items\n self.text = text\n self.meta = meta\n\n def serialize(self):\n output = {}\n output['name'] = self.name\n output['rank'] = self.rank\n output['data'] = {}\n output['data']['items'] = self.items\n if self.text:\n output['data']['text'] = self.text\n if self.meta:\n output['meta'] = self.meta\n return output\n\n\ndef extract_news_flash_location(news_flash_id):\n news_flash_obj = db.session.query(NewsFlash).filter(\n NewsFlash.id == news_flash_id).first()\n if not news_flash_obj:\n logging.warn('could not find news flash id {}'.format(news_flash_id))\n return None\n resolution = news_flash_obj.resolution if news_flash_obj.resolution else None\n if not news_flash_obj or not resolution or resolution not in resolution_dict:\n logging.warn(\n 'could not find valid resolution for news flash id {}'.format(news_flash_id))\n return None\n data = {'resolution': resolution}\n for field in resolution_dict[resolution]:\n curr_field = getattr(news_flash_obj, field)\n if curr_field is not None:\n data[field] = curr_field\n gps = {}\n for field in ['lon', 'lat']:\n gps[field] = getattr(news_flash_obj, field)\n return {'name': 'location', 'data': data, 'gps': gps}\n\n\ndef get_query(table_obj, filters, start_time, end_time):\n query = db.session.query(table_obj)\n if start_time:\n query = query.filter(\n getattr(table_obj, 'accident_timestamp') >= start_time)\n if end_time:\n query = query.filter(\n getattr(table_obj, 'accident_timestamp') <= end_time)\n if filters:\n for field_name, value in filters.items():\n if isinstance(value, list):\n values = value\n else:\n values = [value]\n query = query.filter((getattr(table_obj, field_name)).in_(values))\n return query\n\ndef get_accident_count_by_accident_type(location_info, start_time, end_time):\n all_accident_type_count = get_accidents_stats(table_obj=AccidentMarkerView,\n filters=location_info,\n group_by='accident_type_hebrew',\n count='accident_type_hebrew',\n start_time=start_time,\n end_time=end_time)\n merged_accident_type_count = [{'accident_type': '\u05d4\u05ea\u05e0\u05d2\u05e9\u05d5\u05ea', 'count': 0}]\n for item in all_accident_type_count:\n if '\u05d4\u05ea\u05e0\u05d2\u05e9\u05d5\u05ea' in item['accident_type']:\n merged_accident_type_count[0]['count'] += item['count']\n else:\n merged_accident_type_count.append(item)\n return merged_accident_type_count\n\ndef get_top_road_segments_accidents_per_km(resolution, location_info, start_time=None, end_time=None, limit=5):\n if resolution != '\u05db\u05d1\u05d9\u05e9 \u05d1\u05d9\u05e0\u05e2\u05d9\u05e8\u05d5\u05e0\u05d9': # relevent for non urban roads only\n return {}\n\n query = get_query(table_obj=AccidentMarkerView, filters=None,\n start_time=start_time, end_time=end_time)\n\n query = query.with_entities(\n AccidentMarkerView.road_segment_name,\n func.count(AccidentMarkerView.road_segment_name).label(\n 'total_accidents'),\n (RoadSegments.to_km - RoadSegments.from_km).label('segment_length'),\n cast((func.count(AccidentMarkerView.road_segment_name) / (RoadSegments.to_km - RoadSegments.from_km)),\n Numeric(10, 4)).label(\n 'accidents_per_km')) \\\n .filter(AccidentMarkerView.road1 == RoadSegments.road) \\\n .filter(AccidentMarkerView.road_segment_number == RoadSegments.segment) \\\n .filter(AccidentMarkerView.road1 == location_info['road1']) \\\n .filter(AccidentMarkerView.road_segment_name is not None) \\\n .group_by(AccidentMarkerView.road_segment_name, RoadSegments.from_km, RoadSegments.to_km) \\\n .order_by(desc('accidents_per_km')) \\\n .limit(limit)\n\n result = pd.read_sql_query(query.statement, query.session.bind)\n return result.to_dict(orient='records') # pylint: disable=no-member\n\n\ndef get_accidents_stats(table_obj, filters=None, group_by=None, count=None, start_time=None, end_time=None):\n filters = filters or {}\n filters['provider_code'] = [\n CONST.CBS_ACCIDENT_TYPE_1_CODE, CONST.CBS_ACCIDENT_TYPE_3_CODE]\n # get stats\n query = get_query(table_obj, filters, start_time, end_time)\n if group_by:\n query = query.group_by(group_by)\n query = query.with_entities(group_by, func.count(count))\n df = pd.read_sql_query(query.statement, query.session.bind)\n df.rename(columns={'count_1': 'count'}, inplace=True) # pylint: disable=no-member\n df.columns = [c.replace('_hebrew', '') for c in df.columns]\n return df.to_dict(orient='records') if group_by or count else df.to_dict() # pylint: disable=no-member\n\n\ndef get_injured_filters(location_info):\n new_filters = {}\n for curr_filter, curr_values in location_info.items():\n if curr_filter in ['region_hebrew', 'district_hebrew', 'district_hebrew', 'yishuv_name']:\n new_filter_name = 'accident_' + curr_filter\n new_filters[new_filter_name] = curr_values\n else:\n new_filters[curr_filter] = curr_values\n new_filters['injury_severity'] = [1, 2, 3, 4, 5]\n return new_filters\n\n\ndef get_most_severe_accidents_with_entities(table_obj, filters, entities, start_time, end_time, limit=10):\n filters = filters or {}\n filters['provider_code'] = [\n CONST.CBS_ACCIDENT_TYPE_1_CODE, CONST.CBS_ACCIDENT_TYPE_3_CODE]\n query = get_query(table_obj, filters, start_time, end_time)\n query = query.with_entities(*entities)\n query = query.order_by(getattr(table_obj, \"accident_severity\"), getattr(\n table_obj, \"accident_timestamp\").desc())\n query = query.limit(limit)\n df = pd.read_sql_query(query.statement, query.session.bind)\n df.columns = [c.replace('_hebrew', '') for c in df.columns]\n return df.to_dict(orient='records') # pylint: disable=no-member\n\n\ndef get_most_severe_accidents(table_obj, filters, start_time, end_time, limit=10):\n entities = 'longitude', 'latitude', 'accident_severity_hebrew', 'accident_timestamp', 'accident_type_hebrew'\n return get_most_severe_accidents_with_entities(table_obj, filters, entities, start_time, end_time, limit)\n\n\ndef get_accidents_heat_map(table_obj, filters, start_time, end_time):\n filters = filters or {}\n filters['provider_code'] = [\n CONST.CBS_ACCIDENT_TYPE_1_CODE, CONST.CBS_ACCIDENT_TYPE_3_CODE]\n query = get_query(table_obj, filters, start_time, end_time)\n query = query.with_entities('longitude', 'latitude')\n df = pd.read_sql_query(query.statement, query.session.bind)\n return df.to_dict(orient='records') # pylint: disable=no-member\n\n\ndef filter_and_group_injured_count_per_age_group(data_of_ages):\n import re\n range_dict = {0: 14, 15: 24, 25: 64, 65: 200}\n return_dict_by_required_age_group = defaultdict(int)\n\n for age_range_and_count in data_of_ages:\n age_range = age_range_and_count['age_group']\n count = age_range_and_count['count']\n\n # Parse the db age range\n match_parsing = re.match(\"([0-9]{2})\\\\-([0-9]{2})\", age_range)\n if match_parsing:\n regex_age_matches = match_parsing.groups()\n if len(regex_age_matches) != 2:\n return_dict_by_required_age_group[\"unknown\"] += count\n continue\n min_age_raw, max_age_raw = regex_age_matches\n else:\n match_parsing = re.match(\"([0-9]{2})\\\\+\", age_range) # e.g 85+\n if match_parsing:\n # We assume that no body live beyond age 200\n min_age_raw, max_age_raw = match_parsing.group(1), 200\n else:\n return_dict_by_required_age_group[\"unknown\"] += count\n continue\n\n # Find to what \"bucket\" to aggregate the data\n min_age = int(min_age_raw)\n max_age = int(max_age_raw)\n for item in range_dict.items():\n item_min_range, item_max_range = item\n if item_min_range <= min_age <= item_max_range and item_min_range <= max_age <= item_max_range:\n string_age_range = f'{item_min_range:02}-{item_max_range:02}'\n return_dict_by_required_age_group[string_age_range] += count\n break\n\n # Rename the last key\n return_dict_by_required_age_group[\"65+\"] = return_dict_by_required_age_group[\"65-200\"]\n del return_dict_by_required_age_group[\"65-200\"]\n\n return return_dict_by_required_age_group\n\n\ndef get_most_severe_accidents_table_title(location_text):\n return '\u05ea\u05d0\u05d5\u05e0\u05d5\u05ea \u05d7\u05de\u05d5\u05e8\u05d5\u05ea \u05d1' + location_text\n\n\ndef get_accident_count_by_severity(location_info, location_text, start_time, end_time):\n count_by_severity = get_accidents_stats(table_obj=AccidentMarkerView,\n filters=location_info,\n group_by='accident_severity_hebrew',\n count='accident_severity_hebrew',\n start_time=start_time,\n end_time=end_time)\n severity_dict = {'\u05e7\u05d8\u05dc\u05e0\u05d9\u05ea': 'fatal',\n '\u05e7\u05e9\u05d4': 'severe',\n '\u05e7\u05dc\u05d4': 'light'}\n items = {}\n total_accidents_count = 0\n start_year = start_time.year\n end_year = end_time.year\n for severity_and_count in count_by_severity:\n accident_severity_hebrew = severity_and_count['accident_severity']\n severity_english = severity_dict[accident_severity_hebrew]\n severity_count_text = 'severity_{}_count'.format(severity_english)\n items[severity_count_text] = severity_and_count['count']\n total_accidents_count += severity_and_count['count']\n items['start_year'] = start_year\n items['end_year'] = end_year\n items['total_accidents_count'] = total_accidents_count\n return items\n\n\ndef get_most_severe_accidents_table(location_info, start_time, end_time):\n entities = 'id', 'provider_code', 'accident_timestamp', 'accident_type_hebrew', 'accident_year'\n accidents = get_most_severe_accidents_with_entities(\n table_obj=AccidentMarkerView,\n filters=location_info,\n entities=entities,\n start_time=start_time,\n end_time=end_time)\n # Add casualties\n for accident in accidents:\n accident['type'] = accident['accident_type']\n dt = accident['accident_timestamp'].to_pydatetime()\n accident['date'] = dt.strftime(\"%d/%m/%y\")\n accident['hour'] = dt.strftime(\"%H:%M\")\n num = get_casualties_count_in_accident(\n accident['id'], accident['provider_code'], 1, accident['accident_year'])\n accident['killed_count'] = num\n num = get_casualties_count_in_accident(\n accident['id'], accident['provider_code'], [2, 3], accident['accident_year'])\n accident['injured_count'] = num\n del accident['accident_timestamp'], accident['accident_type'], accident['id'], accident['provider_code']\n return accidents\n\n\n# count of dead and severely injured\ndef get_casualties_count_in_accident(accident_id, provider_code, injury_severity, accident_year):\n filters = {'accident_id': accident_id,\n 'provider_code': provider_code,\n 'injury_severity': injury_severity,\n 'accident_year': accident_year}\n casualties = get_accidents_stats(table_obj=InvolvedMarkerView, filters=filters,\n group_by='injury_severity', count='injury_severity')\n res = 0\n for ca in casualties:\n res += ca['count']\n return res\n\n\n# generate text describing location or road segment of news flash\n# to be used by most severe accidents additional info widget\ndef get_news_flash_location_text(news_flash_id):\n news_flash_item = db.session.query(NewsFlash).filter(\n NewsFlash.id == news_flash_id).first()\n nf = news_flash_item.serialize()\n resolution = nf['resolution'] if nf['resolution'] else ''\n yishuv_name = nf['yishuv_name'] if nf['yishuv_name'] else ''\n road1 = str(int(nf['road1'])) if nf['road1'] else ''\n road2 = str(int(nf['road2'])) if nf['road2'] else ''\n street1_hebrew = nf['street1_hebrew'] if nf['street1_hebrew'] else ''\n road_segment_name = nf['road_segment_name'] if nf['road_segment_name'] else ''\n if resolution == '\u05db\u05d1\u05d9\u05e9 \u05d1\u05d9\u05e0\u05e2\u05d9\u05e8\u05d5\u05e0\u05d9' and road1 and road_segment_name:\n res = '\u05db\u05d1\u05d9\u05e9 ' + road1 + ' \u05d1\u05de\u05e7\u05d8\u05e2 ' + road_segment_name\n elif resolution == '\u05e2\u05d9\u05e8' and not yishuv_name:\n res = nf['location']\n elif resolution == '\u05e2\u05d9\u05e8' and yishuv_name:\n res = nf['yishuv_name']\n elif resolution == '\u05e6\u05d5\u05de\u05ea \u05d1\u05d9\u05e0\u05e2\u05d9\u05e8\u05d5\u05e0\u05d9' and road1 and road2:\n res = '\u05e6\u05d5\u05de\u05ea \u05db\u05d1\u05d9\u05e9 ' + road1 + ' \u05e2\u05dd \u05db\u05d1\u05d9\u05e9 ' + road2\n elif resolution == '\u05e6\u05d5\u05de\u05ea \u05d1\u05d9\u05e0\u05e2\u05d9\u05e8\u05d5\u05e0\u05d9' and road1 and road_segment_name:\n res = '\u05db\u05d1\u05d9\u05e9 ' + road1 + ' \u05d1\u05de\u05e7\u05d8\u05e2 ' + road_segment_name\n elif resolution == '\u05e8\u05d7\u05d5\u05d1' and yishuv_name and street1_hebrew:\n res = ' \u05e8\u05d7\u05d5\u05d1 ' + street1_hebrew + ' \u05d1' + yishuv_name\n else:\n logging.warning(\n \"Did not found quality resolution. Using location field. News Flash id:{}\".format(nf['id']))\n res = nf['location']\n return res\n\n\ndef extract_news_flash_obj(news_flash_id):\n news_flash_obj = db.session.query(NewsFlash).filter(\n NewsFlash.id == news_flash_id).first()\n\n if not news_flash_obj:\n logging.warning('Could not find news flash id {}'.format(news_flash_id))\n return None\n\n return news_flash_obj\n\n\ndef sum_road_accidents_by_specific_type(road_data, field_name):\n dict_merge = defaultdict(int)\n dict_merge[field_name] = 0\n dict_merge['\u05ea\u05d0\u05d5\u05e0\u05d5\u05ea \u05d0\u05d7\u05e8\u05d5\u05ea'] = 0\n\n for accident_data in road_data:\n if accident_data['accident_type'] == field_name:\n dict_merge[field_name] += accident_data['count']\n else:\n dict_merge['\u05ea\u05d0\u05d5\u05e0\u05d5\u05ea \u05d0\u05d7\u05e8\u05d5\u05ea'] += accident_data['count']\n return dict_merge\n\n\ndef convert_roads_fatal_accidents_to_frontend_view(data_dict):\n data_list = []\n for key, value in data_dict.items():\n data_list.append({'desc': key, 'count': value})\n return data_list\n\n\ndef get_head_to_head_stat(news_flash_id, start_time, end_time):\n news_flash_obj = extract_news_flash_obj(news_flash_id)\n road_data = {}\n filter_dict = {'road_type': CONST.ROAD_TYPE_NOT_IN_CITY_NOT_IN_INTERSECTION,\n 'accident_severity': CONST.ACCIDENT_SEVERITY_DEADLY}\n all_roads_data = get_accidents_stats(table_obj=AccidentMarkerView,\n filters=filter_dict,\n group_by='accident_type_hebrew', count='accident_type_hebrew',\n start_time=start_time, end_time=end_time)\n\n if news_flash_obj.road1 and news_flash_obj.road_segment_name:\n filter_dict.update({'road1': news_flash_obj.road1, 'road_segment_name': news_flash_obj.road_segment_name})\n road_data = get_accidents_stats(table_obj=AccidentMarkerView,\n filters=filter_dict,\n group_by='accident_type_hebrew', count='accident_type_hebrew',\n start_time=start_time, end_time=end_time)\n\n road_data_dict = sum_road_accidents_by_specific_type(road_data, '\u05d4\u05ea\u05e0\u05d2\u05e9\u05d5\u05ea \u05d7\u05d6\u05d9\u05ea \u05d1\u05d7\u05d6\u05d9\u05ea')\n all_roads_data_dict = sum_road_accidents_by_specific_type(all_roads_data, '\u05d4\u05ea\u05e0\u05d2\u05e9\u05d5\u05ea \u05d7\u05d6\u05d9\u05ea \u05d1\u05d7\u05d6\u05d9\u05ea')\n\n return {'specific_road_segment_fatal_accidents': convert_roads_fatal_accidents_to_frontend_view(road_data_dict),\n 'all_roads_fatal_accidents': convert_roads_fatal_accidents_to_frontend_view(all_roads_data_dict)}\n\n#gets the latest date an accident has occured\ndef get_latest_accident_date(table_obj, filters):\n filters= filters or {}\n filters['provider_code'] = [CONST.CBS_ACCIDENT_TYPE_1_CODE, CONST.CBS_ACCIDENT_TYPE_3_CODE]\n query = db.session.query(func.max(table_obj.accident_timestamp))\n df = pd.read_sql_query(query.statement, query.session.bind)\n return (df.to_dict(orient='records'))[0].get(\"max_1\") # pylint: disable=no-member\n\ndef create_infographics_data(news_flash_id, number_of_years_ago):\n output = {}\n try:\n number_of_years_ago = int(number_of_years_ago)\n except ValueError:\n return Response({})\n if number_of_years_ago < 0 or number_of_years_ago > 100:\n return Response({})\n location_info = extract_news_flash_location(news_flash_id)\n if location_info is None:\n return Response({})\n logging.debug('location_info:{}'.format(location_info))\n location_text = get_news_flash_location_text(news_flash_id)\n logging.debug('location_text:{}'.format(location_text))\n gps = location_info['gps']\n location_info = location_info['data']\n output['meta'] = {'location_info': location_info.copy(),\n 'location_text': location_text}\n output['widgets'] = []\n resolution = location_info.pop('resolution')\n if resolution is None:\n return Response({})\n\n if all(value is None for value in location_info.values()):\n return Response({})\n\n last_accident_date=get_latest_accident_date(table_obj=AccidentMarkerView, filters=None)\n #converting to datetime object to get the date\n end_time=last_accident_date.to_pydatetime().date()\n\n start_time = datetime.date(\n end_time.year + 1 - number_of_years_ago, 1, 1)\n\n #accident_severity count\n items = get_accident_count_by_severity(location_info=location_info,\n location_text=location_text,\n start_time=start_time,\n end_time=end_time)\n\n accident_count_by_severity = Widget(name='accident_count_by_severity',\n rank=1,\n items=items)\n output['widgets'].append(accident_count_by_severity.serialize())\n\n # most severe accidents table\n most_severe_accidents_table = Widget(name='most_severe_accidents_table',\n rank=2,\n items=get_most_severe_accidents_table(location_info, start_time, end_time),\n text={'title':get_most_severe_accidents_table_title(location_text)})\n output['widgets'].append(most_severe_accidents_table.serialize())\n\n # most severe accidents\n most_severe_accidents = Widget(name='most_severe_accidents',\n rank=3,\n items=get_most_severe_accidents(table_obj=AccidentMarkerView,\n filters=location_info,\n start_time=start_time,\n end_time=end_time))\n output['widgets'].append(most_severe_accidents.serialize())\n\n # street view\n street_view = Widget(name='street_view',\n rank=4,\n items={'longitude': gps['lon'],\n 'latitude': gps['lat']})\n output['widgets'].append(street_view.serialize())\n\n # head to head accidents\n head_on_collisions_comparison = Widget(name='head_on_collisions_comparison',\n rank=5,\n items=get_head_to_head_stat(news_flash_id=news_flash_id,\n start_time=start_time,\n end_time=end_time))\n output['widgets'].append(head_on_collisions_comparison.serialize())\n\n # accident_type count\n accident_count_by_accident_type = Widget(name='accident_count_by_accident_type',\n rank=6,\n items=get_accident_count_by_accident_type(location_info=location_info,\n start_time=start_time,\n end_time=end_time))\n output['widgets'].append(accident_count_by_accident_type.serialize())\n\n # accidents heat map\n accidents_heat_map = Widget(name='accidents_heat_map',\n rank=7,\n items=get_accidents_heat_map(table_obj=AccidentMarkerView,\n filters=location_info,\n start_time=start_time,\n end_time=end_time))\n output['widgets'].append(accidents_heat_map.serialize())\n\n # accident count by accident year\n accident_count_by_accident_year = Widget(name='accident_count_by_accident_year',\n rank=8,\n items=get_accidents_stats(table_obj=AccidentMarkerView,\n filters=location_info,\n group_by='accident_year',\n count='accident_year',\n start_time=start_time,\n end_time=end_time),\n text={'title':'\u05db\u05de\u05d5\u05ea \u05ea\u05d0\u05d5\u05e0\u05d5\u05ea'})\n output['widgets'].append(accident_count_by_accident_year.serialize())\n\n # injured count by accident year\n injured_count_by_accident_year = Widget(name='injured_count_by_accident_year',\n rank=9,\n items=get_accidents_stats(table_obj=InvolvedMarkerView,\n filters=get_injured_filters(location_info),\n group_by='accident_year',\n count='accident_year',\n start_time=start_time,\n end_time=end_time),\n text={'title':'\u05db\u05de\u05d5\u05ea \u05e4\u05e6\u05d5\u05e2\u05d9\u05dd'})\n output['widgets'].append(injured_count_by_accident_year.serialize())\n\n # accident count on day light\n accident_count_by_day_night = Widget(name='accident_count_by_day_night',\n rank=10,\n items=get_accidents_stats(table_obj=AccidentMarkerView,\n filters=location_info,\n group_by='day_night_hebrew',\n count='day_night_hebrew',\n start_time=start_time,\n end_time=end_time),\n text={'title':'\u05db\u05de\u05d5\u05ea \u05ea\u05d0\u05d5\u05e0\u05d5\u05ea \u05d1\u05d9\u05d5\u05dd \u05d5\u05d1\u05dc\u05d9\u05dc\u05d4'})\n output['widgets'].append(accident_count_by_day_night.serialize())\n\n # accidents distribution count by hour\n accidents_count_by_hour = Widget(name='accidents_count_by_hour',\n rank=11,\n items=get_accidents_stats(table_obj=AccidentMarkerView,\n filters=location_info,\n group_by='accident_hour',\n count='accident_hour',\n start_time=start_time,\n end_time=end_time),\n text={'title':'\u05db\u05de\u05d5\u05ea \u05ea\u05d0\u05d5\u05e0\u05d5\u05ea \u05dc\u05e4\u05d9 \u05e9\u05e2\u05d4'})\n output['widgets'].append(accidents_count_by_hour.serialize())\n\n # accident count by road_light\n accident_count_by_road_light = Widget(name='accident_count_by_road_light',\n rank=12,\n items=get_accidents_stats(table_obj=AccidentMarkerView,\n filters=location_info,\n group_by='road_light_hebrew',\n count='road_light_hebrew',\n start_time=start_time,\n end_time=end_time),\n text={'title':'\u05db\u05de\u05d5\u05ea \u05ea\u05d0\u05d5\u05e0\u05d5\u05ea \u05dc\u05e4\u05d9 \u05ea\u05d0\u05d5\u05e8\u05d4'})\n output['widgets'].append(accident_count_by_road_light.serialize())\n\n # accident count by road_segment\n top_road_segments_accidents_per_km = Widget(name='top_road_segments_accidents_per_km',\n rank=13,\n items=get_top_road_segments_accidents_per_km(resolution=resolution,\n location_info=location_info,\n start_time=start_time,\n end_time=end_time))\n output['widgets'].append(top_road_segments_accidents_per_km.serialize())\n\n # injured count per age group\n data_of_injured_count_per_age_group_raw = get_accidents_stats(table_obj=InvolvedMarkerView,\n filters=get_injured_filters(location_info),\n group_by='age_group_hebrew',\n count='age_group_hebrew',\n start_time=start_time,\n end_time=end_time)\n data_of_injured_count_per_age_group = filter_and_group_injured_count_per_age_group(data_of_injured_count_per_age_group_raw)\n injured_count_per_age_group = Widget(name='injured_count_per_age_group',\n rank=14,\n items=data_of_injured_count_per_age_group)\n output['widgets'].append(injured_count_per_age_group.serialize())\n\n # vision zero\n vision_zero = Widget(name='vision_zero',\n rank=15,\n items=['vision_zero_2_plus_1'])\n output['widgets'].append(vision_zero.serialize())\n\n return Response(json.dumps(output, default=str), mimetype=\"application/json\")\n", "path": "anyway/infographics_utils.py"}]} |
gh_patches_debug_1590 | rasdani/github-patches | git_diff | pyca__cryptography-7406 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
release.py should link to GH create PAT page
We can pre-fill what permissions are needed to improve the UX of doing a release. Example URL: https://github.com/settings/tokens/new?description=foo&scopes=repo,workflow
@reaperhulk do you know what scopes are required?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `release.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 import getpass
6 import glob
7 import io
8 import os
9 import subprocess
10 import time
11 import zipfile
12
13 import click
14
15 import requests
16
17
18 def run(*args, **kwargs):
19 print("[running] {0}".format(list(args)))
20 subprocess.check_call(list(args), **kwargs)
21
22
23 def wait_for_build_complete_github_actions(session, token, run_url):
24 while True:
25 response = session.get(
26 run_url,
27 headers={
28 "Content-Type": "application/json",
29 "Authorization": "token {}".format(token),
30 },
31 )
32 response.raise_for_status()
33 if response.json()["conclusion"] is not None:
34 break
35 time.sleep(3)
36
37
38 def download_artifacts_github_actions(session, token, run_url):
39 response = session.get(
40 run_url,
41 headers={
42 "Content-Type": "application/json",
43 "Authorization": "token {}".format(token),
44 },
45 )
46 response.raise_for_status()
47
48 response = session.get(
49 response.json()["artifacts_url"],
50 headers={
51 "Content-Type": "application/json",
52 "Authorization": "token {}".format(token),
53 },
54 )
55 response.raise_for_status()
56 paths = []
57 for artifact in response.json()["artifacts"]:
58 response = session.get(
59 artifact["archive_download_url"],
60 headers={
61 "Content-Type": "application/json",
62 "Authorization": "token {}".format(token),
63 },
64 )
65 with zipfile.ZipFile(io.BytesIO(response.content)) as z:
66 for name in z.namelist():
67 if not name.endswith(".whl"):
68 continue
69 p = z.open(name)
70 out_path = os.path.join(
71 os.path.dirname(__file__),
72 "dist",
73 os.path.basename(name),
74 )
75 with open(out_path, "wb") as f:
76 f.write(p.read())
77 paths.append(out_path)
78 return paths
79
80
81 def fetch_github_actions_wheels(token, version):
82 session = requests.Session()
83
84 response = session.get(
85 (
86 "https://api.github.com/repos/pyca/cryptography/actions/workflows/"
87 "wheel-builder.yml/runs?event=push"
88 ),
89 headers={
90 "Content-Type": "application/json",
91 "Authorization": "token {}".format(token),
92 },
93 )
94 response.raise_for_status()
95 run_url = response.json()["workflow_runs"][0]["url"]
96 wait_for_build_complete_github_actions(session, token, run_url)
97 return download_artifacts_github_actions(session, token, run_url)
98
99
100 @click.command()
101 @click.argument("version")
102 def release(version):
103 """
104 ``version`` should be a string like '0.4' or '1.0'.
105 """
106 github_token = getpass.getpass("Github person access token: ")
107
108 # Tag and push the tag (this will trigger the wheel builder in Actions)
109 run("git", "tag", "-s", version, "-m", "{0} release".format(version))
110 run("git", "push", "--tags")
111
112 # Generate and upload vector packages
113 run("python", "setup.py", "sdist", "bdist_wheel", cwd="vectors/")
114 packages = glob.glob(
115 "vectors/dist/cryptography_vectors-{0}*".format(version)
116 )
117 run("twine", "upload", "-s", *packages)
118
119 # Generate sdist for upload
120 run("python", "setup.py", "sdist")
121 sdist = glob.glob("dist/cryptography-{0}*".format(version))
122
123 # Wait for Actions to complete and download the wheels
124 github_actions_wheel_paths = fetch_github_actions_wheels(
125 github_token, version
126 )
127
128 # Upload sdist and wheels
129 run("twine", "upload", "-s", *sdist)
130 run("twine", "upload", *github_actions_wheel_paths)
131
132
133 if __name__ == "__main__":
134 release()
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/release.py b/release.py
--- a/release.py
+++ b/release.py
@@ -103,6 +103,11 @@
"""
``version`` should be a string like '0.4' or '1.0'.
"""
+ print(
+ f"Create a new GH PAT at: "
+ f"https://github.com/settings/tokens/new?"
+ f"description={version}&scopes=repo"
+ )
github_token = getpass.getpass("Github person access token: ")
# Tag and push the tag (this will trigger the wheel builder in Actions)
| {"golden_diff": "diff --git a/release.py b/release.py\n--- a/release.py\n+++ b/release.py\n@@ -103,6 +103,11 @@\n \"\"\"\n ``version`` should be a string like '0.4' or '1.0'.\n \"\"\"\n+ print(\n+ f\"Create a new GH PAT at: \"\n+ f\"https://github.com/settings/tokens/new?\"\n+ f\"description={version}&scopes=repo\"\n+ )\n github_token = getpass.getpass(\"Github person access token: \")\n \n # Tag and push the tag (this will trigger the wheel builder in Actions)\n", "issue": "release.py should link to GH create PAT page\nWe can pre-fill what permissions are needed to improve the UX of doing a release. Example URL: https://github.com/settings/tokens/new?description=foo&scopes=repo,workflow\r\n\r\n@reaperhulk do you know what scopes are required?\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nimport getpass\nimport glob\nimport io\nimport os\nimport subprocess\nimport time\nimport zipfile\n\nimport click\n\nimport requests\n\n\ndef run(*args, **kwargs):\n print(\"[running] {0}\".format(list(args)))\n subprocess.check_call(list(args), **kwargs)\n\n\ndef wait_for_build_complete_github_actions(session, token, run_url):\n while True:\n response = session.get(\n run_url,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n response.raise_for_status()\n if response.json()[\"conclusion\"] is not None:\n break\n time.sleep(3)\n\n\ndef download_artifacts_github_actions(session, token, run_url):\n response = session.get(\n run_url,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n response.raise_for_status()\n\n response = session.get(\n response.json()[\"artifacts_url\"],\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n response.raise_for_status()\n paths = []\n for artifact in response.json()[\"artifacts\"]:\n response = session.get(\n artifact[\"archive_download_url\"],\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n with zipfile.ZipFile(io.BytesIO(response.content)) as z:\n for name in z.namelist():\n if not name.endswith(\".whl\"):\n continue\n p = z.open(name)\n out_path = os.path.join(\n os.path.dirname(__file__),\n \"dist\",\n os.path.basename(name),\n )\n with open(out_path, \"wb\") as f:\n f.write(p.read())\n paths.append(out_path)\n return paths\n\n\ndef fetch_github_actions_wheels(token, version):\n session = requests.Session()\n\n response = session.get(\n (\n \"https://api.github.com/repos/pyca/cryptography/actions/workflows/\"\n \"wheel-builder.yml/runs?event=push\"\n ),\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n response.raise_for_status()\n run_url = response.json()[\"workflow_runs\"][0][\"url\"]\n wait_for_build_complete_github_actions(session, token, run_url)\n return download_artifacts_github_actions(session, token, run_url)\n\n\[email protected]()\[email protected](\"version\")\ndef release(version):\n \"\"\"\n ``version`` should be a string like '0.4' or '1.0'.\n \"\"\"\n github_token = getpass.getpass(\"Github person access token: \")\n\n # Tag and push the tag (this will trigger the wheel builder in Actions)\n run(\"git\", \"tag\", \"-s\", version, \"-m\", \"{0} release\".format(version))\n run(\"git\", \"push\", \"--tags\")\n\n # Generate and upload vector packages\n run(\"python\", \"setup.py\", \"sdist\", \"bdist_wheel\", cwd=\"vectors/\")\n packages = glob.glob(\n \"vectors/dist/cryptography_vectors-{0}*\".format(version)\n )\n run(\"twine\", \"upload\", \"-s\", *packages)\n\n # Generate sdist for upload\n run(\"python\", \"setup.py\", \"sdist\")\n sdist = glob.glob(\"dist/cryptography-{0}*\".format(version))\n\n # Wait for Actions to complete and download the wheels\n github_actions_wheel_paths = fetch_github_actions_wheels(\n github_token, version\n )\n\n # Upload sdist and wheels\n run(\"twine\", \"upload\", \"-s\", *sdist)\n run(\"twine\", \"upload\", *github_actions_wheel_paths)\n\n\nif __name__ == \"__main__\":\n release()\n", "path": "release.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nimport getpass\nimport glob\nimport io\nimport os\nimport subprocess\nimport time\nimport zipfile\n\nimport click\n\nimport requests\n\n\ndef run(*args, **kwargs):\n print(\"[running] {0}\".format(list(args)))\n subprocess.check_call(list(args), **kwargs)\n\n\ndef wait_for_build_complete_github_actions(session, token, run_url):\n while True:\n response = session.get(\n run_url,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n response.raise_for_status()\n if response.json()[\"conclusion\"] is not None:\n break\n time.sleep(3)\n\n\ndef download_artifacts_github_actions(session, token, run_url):\n response = session.get(\n run_url,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n response.raise_for_status()\n\n response = session.get(\n response.json()[\"artifacts_url\"],\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n response.raise_for_status()\n paths = []\n for artifact in response.json()[\"artifacts\"]:\n response = session.get(\n artifact[\"archive_download_url\"],\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n with zipfile.ZipFile(io.BytesIO(response.content)) as z:\n for name in z.namelist():\n if not name.endswith(\".whl\"):\n continue\n p = z.open(name)\n out_path = os.path.join(\n os.path.dirname(__file__),\n \"dist\",\n os.path.basename(name),\n )\n with open(out_path, \"wb\") as f:\n f.write(p.read())\n paths.append(out_path)\n return paths\n\n\ndef fetch_github_actions_wheels(token, version):\n session = requests.Session()\n\n response = session.get(\n (\n \"https://api.github.com/repos/pyca/cryptography/actions/workflows/\"\n \"wheel-builder.yml/runs?event=push\"\n ),\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n response.raise_for_status()\n run_url = response.json()[\"workflow_runs\"][0][\"url\"]\n wait_for_build_complete_github_actions(session, token, run_url)\n return download_artifacts_github_actions(session, token, run_url)\n\n\[email protected]()\[email protected](\"version\")\ndef release(version):\n \"\"\"\n ``version`` should be a string like '0.4' or '1.0'.\n \"\"\"\n print(\n f\"Create a new GH PAT at: \"\n f\"https://github.com/settings/tokens/new?\"\n f\"description={version}&scopes=repo\"\n )\n github_token = getpass.getpass(\"Github person access token: \")\n\n # Tag and push the tag (this will trigger the wheel builder in Actions)\n run(\"git\", \"tag\", \"-s\", version, \"-m\", \"{0} release\".format(version))\n run(\"git\", \"push\", \"--tags\")\n\n # Generate and upload vector packages\n run(\"python\", \"setup.py\", \"sdist\", \"bdist_wheel\", cwd=\"vectors/\")\n packages = glob.glob(\n \"vectors/dist/cryptography_vectors-{0}*\".format(version)\n )\n run(\"twine\", \"upload\", \"-s\", *packages)\n\n # Generate sdist for upload\n run(\"python\", \"setup.py\", \"sdist\")\n sdist = glob.glob(\"dist/cryptography-{0}*\".format(version))\n\n # Wait for Actions to complete and download the wheels\n github_actions_wheel_paths = fetch_github_actions_wheels(\n github_token, version\n )\n\n # Upload sdist and wheels\n run(\"twine\", \"upload\", \"-s\", *sdist)\n run(\"twine\", \"upload\", *github_actions_wheel_paths)\n\n\nif __name__ == \"__main__\":\n release()\n", "path": "release.py"}]} |
gh_patches_debug_1591 | rasdani/github-patches | git_diff | Project-MONAI__MONAI-2375 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
error in multi-process accessing of the same non-exist persistent cache
**Is your feature request related to a problem? Please describe.**
the persistent dataset will first check the existence of a cache directory and the create a new one if needed:
https://github.com/Project-MONAI/MONAI/blob/feb3a334b7bbf302b13a6da80e0b022a4cf75a4e/monai/data/dataset.py#L163-L165
these steps may run into a race condition in a multiprocess context.
```py
python -m tests.test_persistentdataset
persistent 1
persistent 0
create /var/folders/6f/fdkl7m0x7sz3nj_t7p3ccgz00000gp/T/tmpu578spse/test
create /var/folders/6f/fdkl7m0x7sz3nj_t7p3ccgz00000gp/T/tmpu578spse/test
Process Process-2:
Traceback (most recent call last):
File "/usr/local/anaconda3/envs/py37/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "/usr/local/anaconda3/envs/py37/lib/python3.7/multiprocessing/process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "MONAI/tests/utils.py", line 296, in run_process
raise e
File "MONAI/tests/utils.py", line 287, in run_process
func(*args, **kwargs)
File "MONAI/tests/utils.py", line 471, in _call_original_func
return f(*args, **kwargs)
File "MONAI/tests/test_persistentdataset.py", line 166, in test_mp_dataset
ds = PersistentDataset(items, transform=_InplaceXform(), cache_dir=cache_dir)
File "MONAI/monai/data/dataset.py", line 172, in __init__
self.cache_dir.mkdir(parents=True)
File "/usr/local/anaconda3/envs/py37/lib/python3.7/pathlib.py", line 1273, in mkdir
self._accessor.mkdir(self, mode)
FileExistsError: [Errno 17] File exists: '/var/folders/6f/fdkl7m0x7sz3nj_t7p3ccgz00000gp/T/tmpu578spse/test'
F
======================================================================
FAIL: test_mp_dataset (__main__.TestDistCreateDataset)
----------------------------------------------------------------------
Traceback (most recent call last):
File MONAI/tests/utils.py", line 343, in _wrapper
assert results.get(), "Distributed call failed."
AssertionError: Distributed call failed.
----------------------------------------------------------------------
```
error in multi-process accessing of the same non-exist persistent cache
**Is your feature request related to a problem? Please describe.**
the persistent dataset will first check the existence of a cache directory and the create a new one if needed:
https://github.com/Project-MONAI/MONAI/blob/feb3a334b7bbf302b13a6da80e0b022a4cf75a4e/monai/data/dataset.py#L163-L165
these steps may run into a race condition in a multiprocess context.
```py
python -m tests.test_persistentdataset
persistent 1
persistent 0
create /var/folders/6f/fdkl7m0x7sz3nj_t7p3ccgz00000gp/T/tmpu578spse/test
create /var/folders/6f/fdkl7m0x7sz3nj_t7p3ccgz00000gp/T/tmpu578spse/test
Process Process-2:
Traceback (most recent call last):
File "/usr/local/anaconda3/envs/py37/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "/usr/local/anaconda3/envs/py37/lib/python3.7/multiprocessing/process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "MONAI/tests/utils.py", line 296, in run_process
raise e
File "MONAI/tests/utils.py", line 287, in run_process
func(*args, **kwargs)
File "MONAI/tests/utils.py", line 471, in _call_original_func
return f(*args, **kwargs)
File "MONAI/tests/test_persistentdataset.py", line 166, in test_mp_dataset
ds = PersistentDataset(items, transform=_InplaceXform(), cache_dir=cache_dir)
File "MONAI/monai/data/dataset.py", line 172, in __init__
self.cache_dir.mkdir(parents=True)
File "/usr/local/anaconda3/envs/py37/lib/python3.7/pathlib.py", line 1273, in mkdir
self._accessor.mkdir(self, mode)
FileExistsError: [Errno 17] File exists: '/var/folders/6f/fdkl7m0x7sz3nj_t7p3ccgz00000gp/T/tmpu578spse/test'
F
======================================================================
FAIL: test_mp_dataset (__main__.TestDistCreateDataset)
----------------------------------------------------------------------
Traceback (most recent call last):
File MONAI/tests/utils.py", line 343, in _wrapper
assert results.get(), "Distributed call failed."
AssertionError: Distributed call failed.
----------------------------------------------------------------------
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `monai/data/dataset.py`
Content:
```
1 # Copyright 2020 - 2021 MONAI Consortium
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 # http://www.apache.org/licenses/LICENSE-2.0
6 # Unless required by applicable law or agreed to in writing, software
7 # distributed under the License is distributed on an "AS IS" BASIS,
8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 # See the License for the specific language governing permissions and
10 # limitations under the License.
11
12
13 import collections.abc
14 import math
15 import pickle
16 import shutil
17 import sys
18 import tempfile
19 import threading
20 import time
21 import warnings
22 from copy import deepcopy
23 from multiprocessing.pool import ThreadPool
24 from pathlib import Path
25 from typing import IO, TYPE_CHECKING, Any, Callable, Dict, List, Optional, Sequence, Union
26
27 import numpy as np
28 import torch
29 from torch.utils.data import Dataset as _TorchDataset
30 from torch.utils.data import Subset
31
32 from monai.data.utils import first, pickle_hashing
33 from monai.transforms import Compose, Randomizable, ThreadUnsafe, Transform, apply_transform
34 from monai.utils import MAX_SEED, get_seed, min_version, optional_import
35
36 if TYPE_CHECKING:
37 from tqdm import tqdm
38
39 has_tqdm = True
40 else:
41 tqdm, has_tqdm = optional_import("tqdm", "4.47.0", min_version, "tqdm")
42
43 lmdb, _ = optional_import("lmdb")
44
45
46 class Dataset(_TorchDataset):
47 """
48 A generic dataset with a length property and an optional callable data transform
49 when fetching a data sample.
50 If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
51 for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
52
53 For example, typical input data can be a list of dictionaries::
54
55 [{ { {
56 'img': 'image1.nii.gz', 'img': 'image2.nii.gz', 'img': 'image3.nii.gz',
57 'seg': 'label1.nii.gz', 'seg': 'label2.nii.gz', 'seg': 'label3.nii.gz',
58 'extra': 123 'extra': 456 'extra': 789
59 }, }, }]
60 """
61
62 def __init__(self, data: Sequence, transform: Optional[Callable] = None) -> None:
63 """
64 Args:
65 data: input data to load and transform to generate dataset for model.
66 transform: a callable data transform on input data.
67
68 """
69 self.data = data
70 self.transform = transform
71
72 def __len__(self) -> int:
73 return len(self.data)
74
75 def _transform(self, index: int):
76 """
77 Fetch single data item from `self.data`.
78 """
79 data_i = self.data[index]
80 return apply_transform(self.transform, data_i) if self.transform is not None else data_i
81
82 def __getitem__(self, index: Union[int, slice, Sequence[int]]):
83 """
84 Returns a `Subset` if `index` is a slice or Sequence, a data item otherwise.
85 """
86 if isinstance(index, slice):
87 # dataset[:42]
88 start, stop, step = index.indices(len(self))
89 indices = range(start, stop, step)
90 return Subset(dataset=self, indices=indices)
91 if isinstance(index, collections.abc.Sequence):
92 # dataset[[1, 3, 4]]
93 return Subset(dataset=self, indices=index)
94 return self._transform(index)
95
96
97 class PersistentDataset(Dataset):
98 """
99 Persistent storage of pre-computed values to efficiently manage larger than memory dictionary format data,
100 it can operate transforms for specific fields. Results from the non-random transform components are computed
101 when first used, and stored in the `cache_dir` for rapid retrieval on subsequent uses.
102 If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
103 for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
104
105 For example, typical input data can be a list of dictionaries::
106
107 [{ { {
108 'img': 'image1.nii.gz', 'img': 'image2.nii.gz', 'img': 'image3.nii.gz',
109 'seg': 'label1.nii.gz', 'seg': 'label2.nii.gz', 'seg': 'label3.nii.gz',
110 'extra': 123 'extra': 456 'extra': 789
111 }, }, }]
112
113 For a composite transform like
114
115 .. code-block:: python
116
117 [ LoadImaged(keys=['image', 'label']),
118 Orientationd(keys=['image', 'label'], axcodes='RAS'),
119 ScaleIntensityRanged(keys=['image'], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),
120 RandCropByPosNegLabeld(keys=['image', 'label'], label_key='label', spatial_size=(96, 96, 96),
121 pos=1, neg=1, num_samples=4, image_key='image', image_threshold=0),
122 ToTensord(keys=['image', 'label'])]
123
124 Upon first use a filename based dataset will be processed by the transform for the
125 [LoadImaged, Orientationd, ScaleIntensityRanged] and the resulting tensor written to
126 the `cache_dir` before applying the remaining random dependant transforms
127 [RandCropByPosNegLabeld, ToTensord] elements for use in the analysis.
128
129 Subsequent uses of a dataset directly read pre-processed results from `cache_dir`
130 followed by applying the random dependant parts of transform processing.
131
132 Note:
133 The input data must be a list of file paths and will hash them as cache keys.
134
135 When loading persistent cache content, it can't guarantee the cached data matches current
136 transform chain, so please make sure to use exactly the same non-random transforms and the
137 args as the cache content, otherwise, it may cause unexpected errors.
138
139 """
140
141 def __init__(
142 self,
143 data: Sequence,
144 transform: Union[Sequence[Callable], Callable],
145 cache_dir: Optional[Union[Path, str]],
146 hash_func: Callable[..., bytes] = pickle_hashing,
147 ) -> None:
148 """
149 Args:
150 data: input data file paths to load and transform to generate dataset for model.
151 `PersistentDataset` expects input data to be a list of serializable
152 and hashes them as cache keys using `hash_func`.
153 transform: transforms to execute operations on input data.
154 cache_dir: If specified, this is the location for persistent storage
155 of pre-computed transformed data tensors. The cache_dir is computed once, and
156 persists on disk until explicitly removed. Different runs, programs, experiments
157 may share a common cache dir provided that the transforms pre-processing is consistent.
158 If `cache_dir` doesn't exist, will automatically create it.
159 If `cache_dir` is `None`, there is effectively no caching.
160 hash_func: a callable to compute hash from data items to be cached.
161 defaults to `monai.data.utils.pickle_hashing`.
162
163 """
164 if not isinstance(transform, Compose):
165 transform = Compose(transform)
166 super().__init__(data=data, transform=transform)
167 self.cache_dir = Path(cache_dir) if cache_dir is not None else None
168 self.hash_func = hash_func
169 if self.cache_dir is not None:
170 if not self.cache_dir.exists():
171 self.cache_dir.mkdir(parents=True)
172 if not self.cache_dir.is_dir():
173 raise ValueError("cache_dir must be a directory.")
174
175 def _pre_transform(self, item_transformed):
176 """
177 Process the data from original state up to the first random element.
178
179 Args:
180 item_transformed: The data to be transformed
181
182 Returns:
183 the transformed element up to the first identified
184 random transform object
185
186 """
187 for _transform in self.transform.transforms: # type:ignore
188 # execute all the deterministic transforms
189 if isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):
190 break
191 # this is to be consistent with CacheDataset even though it's not in a multi-thread situation.
192 _xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform
193 item_transformed = apply_transform(_xform, item_transformed)
194 return item_transformed
195
196 def _post_transform(self, item_transformed):
197 """
198 Process the data from before the first random transform to the final state ready for evaluation.
199
200 Args:
201 item_transformed: The data to be transformed (already processed up to the first random transform)
202
203 Returns:
204 the transformed element through the random transforms
205
206 """
207 if not isinstance(self.transform, Compose):
208 raise ValueError("transform must be an instance of monai.transforms.Compose.")
209 start_post_randomize_run = False
210 for _transform in self.transform.transforms:
211 if (
212 start_post_randomize_run
213 or isinstance(_transform, Randomizable)
214 or not isinstance(_transform, Transform)
215 ):
216 start_post_randomize_run = True
217 item_transformed = apply_transform(_transform, item_transformed)
218 return item_transformed
219
220 def _cachecheck(self, item_transformed):
221 """
222 A function to cache the expensive input data transform operations
223 so that huge data sets (larger than computer memory) can be processed
224 on the fly as needed, and intermediate results written to disk for
225 future use.
226
227 Args:
228 item_transformed: The current data element to be mutated into transformed representation
229
230 Returns:
231 The transformed data_element, either from cache, or explicitly computing it.
232
233 Warning:
234 The current implementation does not encode transform information as part of the
235 hashing mechanism used for generating cache names. If the transforms applied are
236 changed in any way, the objects in the cache dir will be invalid. The hash for the
237 cache is ONLY dependant on the input filename paths.
238
239 """
240 hashfile = None
241 if self.cache_dir is not None:
242 data_item_md5 = self.hash_func(item_transformed).decode("utf-8")
243 hashfile = self.cache_dir / f"{data_item_md5}.pt"
244
245 if hashfile is not None and hashfile.is_file(): # cache hit
246 return torch.load(hashfile)
247
248 _item_transformed = self._pre_transform(deepcopy(item_transformed)) # keep the original hashed
249 if hashfile is not None:
250 # NOTE: Writing to a temporary directory and then using a nearly atomic rename operation
251 # to make the cache more robust to manual killing of parent process
252 # which may leave partially written cache files in an incomplete state
253 with tempfile.TemporaryDirectory() as tmpdirname:
254 temp_hash_file = Path(tmpdirname) / hashfile.name
255 torch.save(_item_transformed, temp_hash_file)
256 if temp_hash_file.is_file() and not hashfile.is_file():
257 # On Unix, if target exists and is a file, it will be replaced silently if the user has permission.
258 # for more details: https://docs.python.org/3/library/shutil.html#shutil.move.
259 try:
260 shutil.move(temp_hash_file, hashfile)
261 except FileExistsError:
262 pass
263 return _item_transformed
264
265 def _transform(self, index: int):
266 pre_random_item = self._cachecheck(self.data[index])
267 return self._post_transform(pre_random_item)
268
269
270 class CacheNTransDataset(PersistentDataset):
271 """
272 Extension of `PersistentDataset`, tt can also cache the result of first N transforms, no matter it's random or not.
273
274 """
275
276 def __init__(
277 self,
278 data: Sequence,
279 transform: Union[Sequence[Callable], Callable],
280 cache_n_trans: int,
281 cache_dir: Optional[Union[Path, str]],
282 hash_func: Callable[..., bytes] = pickle_hashing,
283 ) -> None:
284 """
285 Args:
286 data: input data file paths to load and transform to generate dataset for model.
287 `PersistentDataset` expects input data to be a list of serializable
288 and hashes them as cache keys using `hash_func`.
289 transform: transforms to execute operations on input data.
290 cache_n_trans: cache the result of first N transforms.
291 cache_dir: If specified, this is the location for persistent storage
292 of pre-computed transformed data tensors. The cache_dir is computed once, and
293 persists on disk until explicitly removed. Different runs, programs, experiments
294 may share a common cache dir provided that the transforms pre-processing is consistent.
295 If `cache_dir` doesn't exist, will automatically create it.
296 If `cache_dir` is `None`, there is effectively no caching.
297 hash_func: a callable to compute hash from data items to be cached.
298 defaults to `monai.data.utils.pickle_hashing`.
299
300 """
301 super().__init__(data=data, transform=transform, cache_dir=cache_dir, hash_func=hash_func)
302 self.cache_n_trans = cache_n_trans
303
304 def _pre_transform(self, item_transformed):
305 """
306 Process the data from original state up to the N element.
307
308 Args:
309 item_transformed: The data to be transformed
310
311 Returns:
312 the transformed element up to the N transform object
313 """
314 if not isinstance(self.transform, Compose):
315 raise ValueError("transform must be an instance of monai.transforms.Compose.")
316 for i, _transform in enumerate(self.transform.transforms):
317 if i == self.cache_n_trans:
318 break
319 _xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform
320 item_transformed = apply_transform(_xform, item_transformed)
321 return item_transformed
322
323 def _post_transform(self, item_transformed):
324 """
325 Process the data from before the N + 1 transform to the final state ready for evaluation.
326
327 Args:
328 item_transformed: The data to be transformed (already processed up to the first N transform)
329
330 Returns:
331 the final transformed result
332 """
333 if not isinstance(self.transform, Compose):
334 raise ValueError("transform must be an instance of monai.transforms.Compose.")
335 for i, _transform in enumerate(self.transform.transforms):
336 if i >= self.cache_n_trans:
337 item_transformed = apply_transform(_transform, item_transformed)
338 return item_transformed
339
340
341 class LMDBDataset(PersistentDataset):
342 """
343 Extension of `PersistentDataset` using LMDB as the backend.
344
345 See Also:
346 :py:class:`monai.data.PersistentDataset`
347
348 Examples:
349
350 >>> items = [{"data": i} for i in range(5)]
351 # [{'data': 0}, {'data': 1}, {'data': 2}, {'data': 3}, {'data': 4}]
352 >>> lmdb_ds = monai.data.LMDBDataset(items, transform=monai.transforms.SimulateDelayd("data", delay_time=1))
353 >>> print(list(lmdb_ds)) # using the cached results
354
355 """
356
357 def __init__(
358 self,
359 data: Sequence,
360 transform: Union[Sequence[Callable], Callable],
361 cache_dir: Union[Path, str] = "cache",
362 hash_func: Callable[..., bytes] = pickle_hashing,
363 db_name: str = "monai_cache",
364 progress: bool = True,
365 pickle_protocol=pickle.HIGHEST_PROTOCOL,
366 lmdb_kwargs: Optional[dict] = None,
367 ) -> None:
368 """
369 Args:
370 data: input data file paths to load and transform to generate dataset for model.
371 `LMDBDataset` expects input data to be a list of serializable
372 and hashes them as cache keys using `hash_func`.
373 transform: transforms to execute operations on input data.
374 cache_dir: if specified, this is the location for persistent storage
375 of pre-computed transformed data tensors. The cache_dir is computed once, and
376 persists on disk until explicitly removed. Different runs, programs, experiments
377 may share a common cache dir provided that the transforms pre-processing is consistent.
378 If the cache_dir doesn't exist, will automatically create it. Defaults to "./cache".
379 hash_func: a callable to compute hash from data items to be cached.
380 defaults to `monai.data.utils.pickle_hashing`.
381 db_name: lmdb database file name. Defaults to "monai_cache".
382 progress: whether to display a progress bar.
383 pickle_protocol: pickle protocol version. Defaults to pickle.HIGHEST_PROTOCOL.
384 https://docs.python.org/3/library/pickle.html#pickle-protocols
385 lmdb_kwargs: additional keyword arguments to the lmdb environment.
386 for more details please visit: https://lmdb.readthedocs.io/en/release/#environment-class
387 """
388 super().__init__(data=data, transform=transform, cache_dir=cache_dir, hash_func=hash_func)
389 self.progress = progress
390 if not self.cache_dir:
391 raise ValueError("cache_dir must be specified.")
392 self.db_file = self.cache_dir / f"{db_name}.lmdb"
393 self.pickle_protocol = pickle_protocol
394 self.lmdb_kwargs = lmdb_kwargs or {}
395 if not self.lmdb_kwargs.get("map_size", 0):
396 self.lmdb_kwargs["map_size"] = 1024 ** 4 # default map_size
397 self._read_env = None
398 print(f"Accessing lmdb file: {self.db_file.absolute()}.")
399
400 def _fill_cache_start_reader(self):
401 # create cache
402 self.lmdb_kwargs["readonly"] = False
403 env = lmdb.open(path=f"{self.db_file}", subdir=False, **self.lmdb_kwargs)
404 if self.progress and not has_tqdm:
405 warnings.warn("LMDBDataset: tqdm is not installed. not displaying the caching progress.")
406 for item in tqdm(self.data) if has_tqdm and self.progress else self.data:
407 key = self.hash_func(item)
408 done, retry, val = False, 5, None
409 while not done and retry > 0:
410 try:
411 with env.begin(write=True) as txn:
412 with txn.cursor() as cursor:
413 done = cursor.set_key(key)
414 if done:
415 continue
416 if val is None:
417 val = self._pre_transform(deepcopy(item)) # keep the original hashed
418 val = pickle.dumps(val, protocol=self.pickle_protocol)
419 txn.put(key, val)
420 done = True
421 except lmdb.MapFullError:
422 done, retry = False, retry - 1
423 size = env.info()["map_size"]
424 new_size = size * 2
425 warnings.warn(f"Resizing the cache database from {int(size) >> 20}MB to {int(new_size) >> 20}MB.")
426 env.set_mapsize(new_size)
427 except lmdb.MapResizedError:
428 # the mapsize is increased by another process
429 # set_mapsize with a size of 0 to adopt the new size,
430 env.set_mapsize(0)
431 if not done: # still has the map full error
432 size = env.info()["map_size"]
433 env.close()
434 raise ValueError(f"LMDB map size reached, increase size above current size of {size}.")
435 size = env.info()["map_size"]
436 env.close()
437 # read-only database env
438 self.lmdb_kwargs["readonly"] = True
439 self.lmdb_kwargs["map_size"] = size
440 if self.lmdb_kwargs.get("lock", None) is None:
441 self.lmdb_kwargs["lock"] = False
442 if self.lmdb_kwargs.get("readahead", None) is None:
443 self.lmdb_kwargs["readahead"] = False
444 return lmdb.open(path=f"{self.db_file}", subdir=False, **self.lmdb_kwargs)
445
446 def _cachecheck(self, item_transformed):
447 """
448 if the item is not found in the lmdb file, resolves to the persistent cache default behaviour.
449
450 """
451 if self._read_env is None:
452 self._read_env = self._fill_cache_start_reader()
453 with self._read_env.begin(write=False) as txn:
454 data = txn.get(self.hash_func(item_transformed))
455 if data is None:
456 warnings.warn("LMDBDataset: cache key not found, running fallback caching.")
457 return super()._cachecheck(item_transformed)
458 try:
459 return pickle.loads(data)
460 except Exception as err:
461 raise RuntimeError("Invalid cache value, corrupted lmdb file?") from err
462
463 def info(self):
464 """
465 Returns: dataset info dictionary.
466
467 """
468 if self._read_env is None:
469 self._read_env = self._fill_cache_start_reader()
470 out = dict(self._read_env.info())
471 out["size"] = len(self.data)
472 out["filename"] = f"{self.db_file.absolute()}"
473 return out
474
475
476 class CacheDataset(Dataset):
477 """
478 Dataset with cache mechanism that can load data and cache deterministic transforms' result during training.
479
480 By caching the results of non-random preprocessing transforms, it accelerates the training data pipeline.
481 If the requested data is not in the cache, all transforms will run normally
482 (see also :py:class:`monai.data.dataset.Dataset`).
483
484 Users can set the cache rate or number of items to cache.
485 It is recommended to experiment with different `cache_num` or `cache_rate` to identify the best training speed.
486
487 To improve the caching efficiency, please always put as many as possible non-random transforms
488 before the randomized ones when composing the chain of transforms.
489 If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
490 for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
491
492 For example, if the transform is a `Compose` of::
493
494 transforms = Compose([
495 LoadImaged(),
496 AddChanneld(),
497 Spacingd(),
498 Orientationd(),
499 ScaleIntensityRanged(),
500 RandCropByPosNegLabeld(),
501 ToTensord()
502 ])
503
504 when `transforms` is used in a multi-epoch training pipeline, before the first training epoch,
505 this dataset will cache the results up to ``ScaleIntensityRanged``, as
506 all non-random transforms `LoadImaged`, `AddChanneld`, `Spacingd`, `Orientationd`, `ScaleIntensityRanged`
507 can be cached. During training, the dataset will load the cached results and run
508 ``RandCropByPosNegLabeld`` and ``ToTensord``, as ``RandCropByPosNegLabeld`` is a randomized transform
509 and the outcome not cached.
510
511 Note:
512 `CacheDataset` executes non-random transforms and prepares cache content in the main process before
513 the first epoch, then all the subprocesses of DataLoader will read the same cache content in the main process
514 during training. it may take a long time to prepare cache content according to the size of expected cache data.
515 So to debug or verify the program before real training, users can set `cache_rate=0.0` or `cache_num=0` to
516 temporarily skip caching.
517
518 """
519
520 def __init__(
521 self,
522 data: Sequence,
523 transform: Union[Sequence[Callable], Callable],
524 cache_num: int = sys.maxsize,
525 cache_rate: float = 1.0,
526 num_workers: Optional[int] = None,
527 progress: bool = True,
528 ) -> None:
529 """
530 Args:
531 data: input data to load and transform to generate dataset for model.
532 transform: transforms to execute operations on input data.
533 cache_num: number of items to be cached. Default is `sys.maxsize`.
534 will take the minimum of (cache_num, data_length x cache_rate, data_length).
535 cache_rate: percentage of cached data in total, default is 1.0 (cache all).
536 will take the minimum of (cache_num, data_length x cache_rate, data_length).
537 num_workers: the number of worker processes to use.
538 If num_workers is None then the number returned by os.cpu_count() is used.
539 progress: whether to display a progress bar.
540 """
541 if not isinstance(transform, Compose):
542 transform = Compose(transform)
543 super().__init__(data=data, transform=transform)
544 self.progress = progress
545 self.cache_num = min(int(cache_num), int(len(data) * cache_rate), len(data))
546 self.num_workers = num_workers
547 if self.num_workers is not None:
548 self.num_workers = max(int(self.num_workers), 1)
549 self._cache: List = self._fill_cache()
550
551 def _fill_cache(self) -> List:
552 if self.cache_num <= 0:
553 return []
554 if self.progress and not has_tqdm:
555 warnings.warn("tqdm is not installed, will not show the caching progress bar.")
556 with ThreadPool(self.num_workers) as p:
557 if self.progress and has_tqdm:
558 return list(
559 tqdm(
560 p.imap(self._load_cache_item, range(self.cache_num)),
561 total=self.cache_num,
562 desc="Loading dataset",
563 )
564 )
565 return list(p.imap(self._load_cache_item, range(self.cache_num)))
566
567 def _load_cache_item(self, idx: int):
568 """
569 Args:
570 idx: the index of the input data sequence.
571 """
572 item = self.data[idx]
573 for _transform in self.transform.transforms: # type:ignore
574 # execute all the deterministic transforms
575 if isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):
576 break
577 _xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform
578 item = apply_transform(_xform, item)
579 return item
580
581 def _transform(self, index: int):
582 if index % len(self) >= self.cache_num: # support negative index
583 # no cache for this index, execute all the transforms directly
584 return super()._transform(index)
585 # load data from cache and execute from the first random transform
586 start_run = False
587 if self._cache is None:
588 self._cache = self._fill_cache()
589 data = self._cache[index]
590 if not isinstance(self.transform, Compose):
591 raise ValueError("transform must be an instance of monai.transforms.Compose.")
592 for _transform in self.transform.transforms:
593 if start_run or isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):
594 # only need to deep copy data on first non-deterministic transform
595 if not start_run:
596 start_run = True
597 data = deepcopy(data)
598 data = apply_transform(_transform, data)
599 return data
600
601
602 class SmartCacheDataset(Randomizable, CacheDataset):
603 """
604 Re-implementation of the SmartCache mechanism in NVIDIA Clara-train SDK.
605 At any time, the cache pool only keeps a subset of the whole dataset. In each epoch, only the items
606 in the cache are used for training. This ensures that data needed for training is readily available,
607 keeping GPU resources busy. Note that cached items may still have to go through a non-deterministic
608 transform sequence before being fed to GPU. At the same time, another thread is preparing replacement
609 items by applying the transform sequence to items not in cache. Once one epoch is completed, Smart
610 Cache replaces the same number of items with replacement items.
611 Smart Cache uses a simple `running window` algorithm to determine the cache content and replacement items.
612 Let N be the configured number of objects in cache; and R be the number of replacement objects (R = ceil(N * r),
613 where r is the configured replace rate).
614 For more details, please refer to:
615 https://docs.nvidia.com/clara/tlt-mi/clara-train-sdk-v3.0/nvmidl/additional_features/smart_cache.html#smart-cache
616 If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
617 for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
618
619 For example, if we have 5 images: `[image1, image2, image3, image4, image5]`, and `cache_num=4`, `replace_rate=0.25`.
620 so the actual training images cached and replaced for every epoch are as below::
621
622 epoch 1: [image1, image2, image3, image4]
623 epoch 2: [image2, image3, image4, image5]
624 epoch 3: [image3, image4, image5, image1]
625 epoch 3: [image4, image5, image1, image2]
626 epoch N: [image[N % 5] ...]
627
628 The usage of `SmartCacheDataset` contains 4 steps:
629
630 1. Initialize `SmartCacheDataset` object and cache for the first epoch.
631 2. Call `start()` to run replacement thread in background.
632 3. Call `update_cache()` before every epoch to replace training items.
633 4. Call `shutdown()` when training ends.
634
635 Note:
636 This replacement will not work for below cases:
637 1. Set the `multiprocessing_context` of DataLoader to `spawn`.
638 2. Run on windows(the default multiprocessing method is `spawn`) with `num_workers` greater than 0.
639 3. Set the `persistent_workers` of DataLoader to `True` with `num_workers` greater than 0.
640
641 If using MONAI workflows, please add `SmartCacheHandler` to the handler list of trainer,
642 otherwise, please make sure to call `start()`, `update_cache()`, `shutdown()` during training.
643
644 Args:
645 data: input data to load and transform to generate dataset for model.
646 transform: transforms to execute operations on input data.
647 replace_rate: percentage of the cached items to be replaced in every epoch.
648 cache_num: number of items to be cached. Default is `sys.maxsize`.
649 will take the minimum of (cache_num, data_length x cache_rate, data_length).
650 cache_rate: percentage of cached data in total, default is 1.0 (cache all).
651 will take the minimum of (cache_num, data_length x cache_rate, data_length).
652 num_init_workers: the number of worker threads to initialize the cache for first epoch.
653 If num_init_workers is None then the number returned by os.cpu_count() is used.
654 num_replace_workers: the number of worker threads to prepare the replacement cache for every epoch.
655 If num_replace_workers is None then the number returned by os.cpu_count() is used.
656 progress: whether to display a progress bar when caching for the first epoch.
657 shuffle: whether to shuffle the whole data list before preparing the cache content for first epoch.
658 seed: random seed if shuffle is `True`, default to `0`.
659 """
660
661 def __init__(
662 self,
663 data: Sequence,
664 transform: Union[Sequence[Callable], Callable],
665 replace_rate: float,
666 cache_num: int = sys.maxsize,
667 cache_rate: float = 1.0,
668 num_init_workers: Optional[int] = None,
669 num_replace_workers: Optional[int] = None,
670 progress: bool = True,
671 shuffle: bool = True,
672 seed: int = 0,
673 ) -> None:
674 if shuffle:
675 self.set_random_state(seed=seed)
676 self.randomize(data)
677
678 super().__init__(data, transform, cache_num, cache_rate, num_init_workers, progress)
679 if self._cache is None:
680 self._cache = self._fill_cache()
681 if self.cache_num >= len(data):
682 warnings.warn(
683 "cache_num is greater or equal than dataset length, fall back to regular monai.data.CacheDataset."
684 )
685 if replace_rate <= 0:
686 raise ValueError("replace_rate must be greater than 0, otherwise, please use monai.data.CacheDataset.")
687
688 self.num_replace_workers: Optional[int] = num_replace_workers
689 if self.num_replace_workers is not None:
690 self.num_replace_workers = max(int(self.num_replace_workers), 1)
691
692 self._total_num: int = len(data)
693 self._replace_num: int = min(math.ceil(self.cache_num * replace_rate), len(data) - self.cache_num)
694 self._replacements: List[Any] = [None for _ in range(self._replace_num)]
695 self._replace_data_idx: List[int] = list(range(self._replace_num))
696
697 self._start_pos: int = 0
698 self._update_lock: threading.Lock = threading.Lock()
699 self._round: int = 1
700 self._replace_done: bool = False
701 self._replace_mgr: Optional[threading.Thread] = None
702
703 self._compute_data_idx()
704
705 def randomize(self, data: Sequence) -> None:
706 try:
707 self.R.shuffle(data)
708 except TypeError as e:
709 warnings.warn(f"input data can't be shuffled in SmartCacheDataset with numpy.random.shuffle(): {e}.")
710
711 def _compute_data_idx(self):
712 """
713 Update the replacement data position in the total data.
714
715 """
716 for i in range(self._replace_num):
717 pos: int = self._start_pos + self.cache_num + i
718 if pos >= self._total_num:
719 pos -= self._total_num
720 self._replace_data_idx[i] = pos
721
722 def is_started(self):
723 """
724 Check whether the replacement thread is already started.
725
726 """
727 if self._replace_mgr is None:
728 return False
729 return self._replace_mgr.is_alive()
730
731 def start(self):
732 """
733 Start the background thread to replace training items for every epoch.
734
735 """
736 if self._replace_mgr is None or not self.is_started():
737 self._restart()
738
739 def _restart(self):
740 """
741 Restart background thread if killed for some reason.
742
743 """
744 self._round = 1
745 self._replace_mgr = threading.Thread(target=self.manage_replacement, daemon=True)
746 self._replace_mgr.start()
747
748 def _try_update_cache(self):
749 """
750 Update the cache items with new replacement for current epoch.
751
752 """
753 with self._update_lock:
754 if not self._replace_done:
755 return False
756
757 del self._cache[: self._replace_num]
758 self._cache.extend(self._replacements)
759
760 self._start_pos += self._replace_num
761 if self._start_pos >= self._total_num:
762 self._start_pos -= self._total_num
763
764 self._compute_data_idx()
765
766 # ready for next round
767 self._round += 1
768 self._replace_done = False
769 return True
770
771 def update_cache(self):
772 """
773 Update cache items for current epoch, need to call this function before every epoch.
774 If the cache has been shutdown before, need to restart the `_replace_mgr` thread.
775
776 """
777 if not self._replace_mgr.is_alive():
778 self._restart()
779
780 # make sure update is done
781 while not self._try_update_cache():
782 time.sleep(0.01)
783
784 def _try_shutdown(self):
785 """
786 Wait for thread lock to shut down the background thread.
787
788 """
789 with self._update_lock:
790 if self._replace_done:
791 self._round = 0
792 self._replace_done = False
793 return True
794 return False
795
796 def shutdown(self):
797 """
798 Shut down the background thread for replacement.
799
800 """
801 if not self.is_started():
802 return
803
804 # wait until replace mgr is done the current round
805 while not self._try_shutdown():
806 time.sleep(0.01)
807 self._replace_mgr.join()
808
809 def _replace_cache_thread(self, index: int):
810 """
811 Execute deterministic transforms on the new data for replacement.
812
813 """
814 pos: int = self._replace_data_idx[index]
815 self._replacements[index] = self._load_cache_item(pos)
816
817 def _compute_replacements(self):
818 """
819 Compute expected items for the replacement of next epoch, execute deterministic transforms.
820 It can support multi-threads to accelerate the computation progress.
821
822 """
823 with ThreadPool(self.num_replace_workers) as p:
824 p.map(self._replace_cache_thread, list(range(self._replace_num)))
825
826 self._replace_done = True
827
828 def _try_manage_replacement(self, check_round):
829 """
830 Wait thread lock and replace training items in the background thread.
831
832 """
833 with self._update_lock:
834 if self._round <= 0:
835 # shutdown replacement
836 self._replace_done = True
837 return True, -1
838
839 if self._round != check_round:
840 self._compute_replacements()
841 return False, self._round
842
843 def manage_replacement(self):
844 """
845 Background thread for replacement.
846
847 """
848 check_round: int = -1
849 done = False
850 while not done:
851 done, check_round = self._try_manage_replacement(check_round)
852 time.sleep(0.01)
853
854 def __len__(self):
855 """
856 The dataset length is given by cache_num instead of len(data).
857
858 """
859 return self.cache_num
860
861
862 class ZipDataset(Dataset):
863 """
864 Zip several PyTorch datasets and output data(with the same index) together in a tuple.
865 If the output of single dataset is already a tuple, flatten it and extend to the result.
866 For example: if datasetA returns (img, imgmeta), datasetB returns (seg, segmeta),
867 finally return (img, imgmeta, seg, segmeta).
868 And if the datasets don't have same length, use the minimum length of them as the length
869 of ZipDataset.
870 If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
871 for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
872
873 Examples::
874
875 >>> zip_data = ZipDataset([[1, 2, 3], [4, 5]])
876 >>> print(len(zip_data))
877 2
878 >>> for item in zip_data:
879 >>> print(item)
880 [1, 4]
881 [2, 5]
882
883 """
884
885 def __init__(self, datasets: Sequence, transform: Optional[Callable] = None) -> None:
886 """
887 Args:
888 datasets: list of datasets to zip together.
889 transform: a callable data transform operates on the zipped item from `datasets`.
890 """
891 super().__init__(list(datasets), transform=transform)
892
893 def __len__(self) -> int:
894 return min((len(dataset) for dataset in self.data))
895
896 def _transform(self, index: int):
897 def to_list(x):
898 return list(x) if isinstance(x, (tuple, list)) else [x]
899
900 data = []
901 for dataset in self.data:
902 data.extend(to_list(dataset[index]))
903 if self.transform is not None:
904 data = apply_transform(self.transform, data, map_items=False) # transform the list data
905 # use tuple instead of list as the default collate_fn callback of MONAI DataLoader flattens nested lists
906 return tuple(data)
907
908
909 class ArrayDataset(Randomizable, _TorchDataset):
910 """
911 Dataset for segmentation and classification tasks based on array format input data and transforms.
912 It ensures the same random seeds in the randomized transforms defined for image, segmentation and label.
913 The `transform` can be :py:class:`monai.transforms.Compose` or any other callable object.
914 For example:
915 If train based on Nifti format images without metadata, all transforms can be composed::
916
917 img_transform = Compose(
918 [
919 LoadImage(image_only=True),
920 AddChannel(),
921 RandAdjustContrast()
922 ]
923 )
924 ArrayDataset(img_file_list, img_transform=img_transform)
925
926 If training based on images and the metadata, the array transforms can not be composed
927 because several transforms receives multiple parameters or return multiple values. Then Users need
928 to define their own callable method to parse metadata from `LoadImage` or set `affine` matrix
929 to `Spacing` transform::
930
931 class TestCompose(Compose):
932 def __call__(self, input_):
933 img, metadata = self.transforms[0](input_)
934 img = self.transforms[1](img)
935 img, _, _ = self.transforms[2](img, metadata["affine"])
936 return self.transforms[3](img), metadata
937 img_transform = TestCompose(
938 [
939 LoadImage(image_only=False),
940 AddChannel(),
941 Spacing(pixdim=(1.5, 1.5, 3.0)),
942 RandAdjustContrast()
943 ]
944 )
945 ArrayDataset(img_file_list, img_transform=img_transform)
946
947 Examples::
948
949 >>> ds = ArrayDataset([1, 2, 3, 4], lambda x: x + 0.1)
950 >>> print(ds[0])
951 1.1
952
953 >>> ds = ArrayDataset(img=[1, 2, 3, 4], seg=[5, 6, 7, 8])
954 >>> print(ds[0])
955 [1, 5]
956
957 """
958
959 def __init__(
960 self,
961 img: Sequence,
962 img_transform: Optional[Callable] = None,
963 seg: Optional[Sequence] = None,
964 seg_transform: Optional[Callable] = None,
965 labels: Optional[Sequence] = None,
966 label_transform: Optional[Callable] = None,
967 ) -> None:
968 """
969 Initializes the dataset with the filename lists. The transform `img_transform` is applied
970 to the images and `seg_transform` to the segmentations.
971
972 Args:
973 img: sequence of images.
974 img_transform: transform to apply to each element in `img`.
975 seg: sequence of segmentations.
976 seg_transform: transform to apply to each element in `seg`.
977 labels: sequence of labels.
978 label_transform: transform to apply to each element in `labels`.
979
980 """
981 items = [(img, img_transform), (seg, seg_transform), (labels, label_transform)]
982 self.set_random_state(seed=get_seed())
983 datasets = [Dataset(x[0], x[1]) for x in items if x[0] is not None]
984 self.dataset = datasets[0] if len(datasets) == 1 else ZipDataset(datasets)
985
986 self._seed = 0 # transform synchronization seed
987
988 def __len__(self) -> int:
989 return len(self.dataset)
990
991 def randomize(self, data: Optional[Any] = None) -> None:
992 self._seed = self.R.randint(MAX_SEED, dtype="uint32")
993
994 def __getitem__(self, index: int):
995 self.randomize()
996 if isinstance(self.dataset, ZipDataset):
997 # set transforms of each zip component
998 for dataset in self.dataset.data:
999 transform = getattr(dataset, "transform", None)
1000 if isinstance(transform, Randomizable):
1001 transform.set_random_state(seed=self._seed)
1002 transform = getattr(self.dataset, "transform", None)
1003 if isinstance(transform, Randomizable):
1004 transform.set_random_state(seed=self._seed)
1005 return self.dataset[index]
1006
1007
1008 class NPZDictItemDataset(Dataset):
1009 """
1010 Represents a dataset from a loaded NPZ file. The members of the file to load are named in the keys of `keys` and
1011 stored under the keyed name. All loaded arrays must have the same 0-dimension (batch) size. Items are always dicts
1012 mapping names to an item extracted from the loaded arrays.
1013 If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
1014 for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
1015
1016 Args:
1017 npzfile: Path to .npz file or stream containing .npz file data
1018 keys: Maps keys to load from file to name to store in dataset
1019 transform: Transform to apply to batch dict
1020 other_keys: secondary data to load from file and store in dict `other_keys`, not returned by __getitem__
1021 """
1022
1023 def __init__(
1024 self,
1025 npzfile: Union[str, IO],
1026 keys: Dict[str, str],
1027 transform: Optional[Callable] = None,
1028 other_keys: Optional[Sequence[str]] = (),
1029 ):
1030 self.npzfile: Union[str, IO] = npzfile if isinstance(npzfile, str) else "STREAM"
1031 self.keys: Dict[str, str] = dict(keys)
1032 dat = np.load(npzfile)
1033
1034 self.arrays = {storedk: dat[datak] for datak, storedk in self.keys.items()}
1035 self.length = self.arrays[first(self.keys.values())].shape[0]
1036
1037 self.other_keys = {} if other_keys is None else {k: dat[k] for k in other_keys}
1038
1039 for k, v in self.arrays.items():
1040 if v.shape[0] != self.length:
1041 raise ValueError(
1042 "All loaded arrays must have the same first dimension "
1043 f"size {self.length}, array `{k}` has size {v.shape[0]}"
1044 )
1045
1046 super().__init__([], transform)
1047
1048 def __len__(self):
1049 return self.length
1050
1051 def _transform(self, index: int):
1052 data = {k: v[index] for k, v in self.arrays.items()}
1053
1054 if self.transform is not None:
1055 data = apply_transform(self.transform, data)
1056
1057 return data
1058
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/monai/data/dataset.py b/monai/data/dataset.py
--- a/monai/data/dataset.py
+++ b/monai/data/dataset.py
@@ -168,7 +168,7 @@
self.hash_func = hash_func
if self.cache_dir is not None:
if not self.cache_dir.exists():
- self.cache_dir.mkdir(parents=True)
+ self.cache_dir.mkdir(parents=True, exist_ok=True)
if not self.cache_dir.is_dir():
raise ValueError("cache_dir must be a directory.")
| {"golden_diff": "diff --git a/monai/data/dataset.py b/monai/data/dataset.py\n--- a/monai/data/dataset.py\n+++ b/monai/data/dataset.py\n@@ -168,7 +168,7 @@\n self.hash_func = hash_func\n if self.cache_dir is not None:\n if not self.cache_dir.exists():\n- self.cache_dir.mkdir(parents=True)\n+ self.cache_dir.mkdir(parents=True, exist_ok=True)\n if not self.cache_dir.is_dir():\n raise ValueError(\"cache_dir must be a directory.\")\n", "issue": "error in multi-process accessing of the same non-exist persistent cache\n**Is your feature request related to a problem? Please describe.**\r\nthe persistent dataset will first check the existence of a cache directory and the create a new one if needed:\r\nhttps://github.com/Project-MONAI/MONAI/blob/feb3a334b7bbf302b13a6da80e0b022a4cf75a4e/monai/data/dataset.py#L163-L165\r\nthese steps may run into a race condition in a multiprocess context.\r\n\r\n```py\r\npython -m tests.test_persistentdataset\r\npersistent 1\r\npersistent 0\r\ncreate /var/folders/6f/fdkl7m0x7sz3nj_t7p3ccgz00000gp/T/tmpu578spse/test\r\ncreate /var/folders/6f/fdkl7m0x7sz3nj_t7p3ccgz00000gp/T/tmpu578spse/test\r\nProcess Process-2:\r\nTraceback (most recent call last):\r\n File \"/usr/local/anaconda3/envs/py37/lib/python3.7/multiprocessing/process.py\", line 297, in _bootstrap\r\n self.run()\r\n File \"/usr/local/anaconda3/envs/py37/lib/python3.7/multiprocessing/process.py\", line 99, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"MONAI/tests/utils.py\", line 296, in run_process\r\n raise e\r\n File \"MONAI/tests/utils.py\", line 287, in run_process\r\n func(*args, **kwargs)\r\n File \"MONAI/tests/utils.py\", line 471, in _call_original_func\r\n return f(*args, **kwargs)\r\n File \"MONAI/tests/test_persistentdataset.py\", line 166, in test_mp_dataset\r\n ds = PersistentDataset(items, transform=_InplaceXform(), cache_dir=cache_dir)\r\n File \"MONAI/monai/data/dataset.py\", line 172, in __init__\r\n self.cache_dir.mkdir(parents=True)\r\n File \"/usr/local/anaconda3/envs/py37/lib/python3.7/pathlib.py\", line 1273, in mkdir\r\n self._accessor.mkdir(self, mode)\r\nFileExistsError: [Errno 17] File exists: '/var/folders/6f/fdkl7m0x7sz3nj_t7p3ccgz00000gp/T/tmpu578spse/test'\r\nF\r\n======================================================================\r\nFAIL: test_mp_dataset (__main__.TestDistCreateDataset)\r\n----------------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File MONAI/tests/utils.py\", line 343, in _wrapper\r\n assert results.get(), \"Distributed call failed.\"\r\nAssertionError: Distributed call failed.\r\n\r\n----------------------------------------------------------------------\r\n```\nerror in multi-process accessing of the same non-exist persistent cache\n**Is your feature request related to a problem? Please describe.**\r\nthe persistent dataset will first check the existence of a cache directory and the create a new one if needed:\r\nhttps://github.com/Project-MONAI/MONAI/blob/feb3a334b7bbf302b13a6da80e0b022a4cf75a4e/monai/data/dataset.py#L163-L165\r\nthese steps may run into a race condition in a multiprocess context.\r\n\r\n```py\r\npython -m tests.test_persistentdataset\r\npersistent 1\r\npersistent 0\r\ncreate /var/folders/6f/fdkl7m0x7sz3nj_t7p3ccgz00000gp/T/tmpu578spse/test\r\ncreate /var/folders/6f/fdkl7m0x7sz3nj_t7p3ccgz00000gp/T/tmpu578spse/test\r\nProcess Process-2:\r\nTraceback (most recent call last):\r\n File \"/usr/local/anaconda3/envs/py37/lib/python3.7/multiprocessing/process.py\", line 297, in _bootstrap\r\n self.run()\r\n File \"/usr/local/anaconda3/envs/py37/lib/python3.7/multiprocessing/process.py\", line 99, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"MONAI/tests/utils.py\", line 296, in run_process\r\n raise e\r\n File \"MONAI/tests/utils.py\", line 287, in run_process\r\n func(*args, **kwargs)\r\n File \"MONAI/tests/utils.py\", line 471, in _call_original_func\r\n return f(*args, **kwargs)\r\n File \"MONAI/tests/test_persistentdataset.py\", line 166, in test_mp_dataset\r\n ds = PersistentDataset(items, transform=_InplaceXform(), cache_dir=cache_dir)\r\n File \"MONAI/monai/data/dataset.py\", line 172, in __init__\r\n self.cache_dir.mkdir(parents=True)\r\n File \"/usr/local/anaconda3/envs/py37/lib/python3.7/pathlib.py\", line 1273, in mkdir\r\n self._accessor.mkdir(self, mode)\r\nFileExistsError: [Errno 17] File exists: '/var/folders/6f/fdkl7m0x7sz3nj_t7p3ccgz00000gp/T/tmpu578spse/test'\r\nF\r\n======================================================================\r\nFAIL: test_mp_dataset (__main__.TestDistCreateDataset)\r\n----------------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File MONAI/tests/utils.py\", line 343, in _wrapper\r\n assert results.get(), \"Distributed call failed.\"\r\nAssertionError: Distributed call failed.\r\n\r\n----------------------------------------------------------------------\r\n```\n", "before_files": [{"content": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport collections.abc\nimport math\nimport pickle\nimport shutil\nimport sys\nimport tempfile\nimport threading\nimport time\nimport warnings\nfrom copy import deepcopy\nfrom multiprocessing.pool import ThreadPool\nfrom pathlib import Path\nfrom typing import IO, TYPE_CHECKING, Any, Callable, Dict, List, Optional, Sequence, Union\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset as _TorchDataset\nfrom torch.utils.data import Subset\n\nfrom monai.data.utils import first, pickle_hashing\nfrom monai.transforms import Compose, Randomizable, ThreadUnsafe, Transform, apply_transform\nfrom monai.utils import MAX_SEED, get_seed, min_version, optional_import\n\nif TYPE_CHECKING:\n from tqdm import tqdm\n\n has_tqdm = True\nelse:\n tqdm, has_tqdm = optional_import(\"tqdm\", \"4.47.0\", min_version, \"tqdm\")\n\nlmdb, _ = optional_import(\"lmdb\")\n\n\nclass Dataset(_TorchDataset):\n \"\"\"\n A generic dataset with a length property and an optional callable data transform\n when fetching a data sample.\n If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,\n for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset\n\n For example, typical input data can be a list of dictionaries::\n\n [{ { {\n 'img': 'image1.nii.gz', 'img': 'image2.nii.gz', 'img': 'image3.nii.gz',\n 'seg': 'label1.nii.gz', 'seg': 'label2.nii.gz', 'seg': 'label3.nii.gz',\n 'extra': 123 'extra': 456 'extra': 789\n }, }, }]\n \"\"\"\n\n def __init__(self, data: Sequence, transform: Optional[Callable] = None) -> None:\n \"\"\"\n Args:\n data: input data to load and transform to generate dataset for model.\n transform: a callable data transform on input data.\n\n \"\"\"\n self.data = data\n self.transform = transform\n\n def __len__(self) -> int:\n return len(self.data)\n\n def _transform(self, index: int):\n \"\"\"\n Fetch single data item from `self.data`.\n \"\"\"\n data_i = self.data[index]\n return apply_transform(self.transform, data_i) if self.transform is not None else data_i\n\n def __getitem__(self, index: Union[int, slice, Sequence[int]]):\n \"\"\"\n Returns a `Subset` if `index` is a slice or Sequence, a data item otherwise.\n \"\"\"\n if isinstance(index, slice):\n # dataset[:42]\n start, stop, step = index.indices(len(self))\n indices = range(start, stop, step)\n return Subset(dataset=self, indices=indices)\n if isinstance(index, collections.abc.Sequence):\n # dataset[[1, 3, 4]]\n return Subset(dataset=self, indices=index)\n return self._transform(index)\n\n\nclass PersistentDataset(Dataset):\n \"\"\"\n Persistent storage of pre-computed values to efficiently manage larger than memory dictionary format data,\n it can operate transforms for specific fields. Results from the non-random transform components are computed\n when first used, and stored in the `cache_dir` for rapid retrieval on subsequent uses.\n If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,\n for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset\n\n For example, typical input data can be a list of dictionaries::\n\n [{ { {\n 'img': 'image1.nii.gz', 'img': 'image2.nii.gz', 'img': 'image3.nii.gz',\n 'seg': 'label1.nii.gz', 'seg': 'label2.nii.gz', 'seg': 'label3.nii.gz',\n 'extra': 123 'extra': 456 'extra': 789\n }, }, }]\n\n For a composite transform like\n\n .. code-block:: python\n\n [ LoadImaged(keys=['image', 'label']),\n Orientationd(keys=['image', 'label'], axcodes='RAS'),\n ScaleIntensityRanged(keys=['image'], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),\n RandCropByPosNegLabeld(keys=['image', 'label'], label_key='label', spatial_size=(96, 96, 96),\n pos=1, neg=1, num_samples=4, image_key='image', image_threshold=0),\n ToTensord(keys=['image', 'label'])]\n\n Upon first use a filename based dataset will be processed by the transform for the\n [LoadImaged, Orientationd, ScaleIntensityRanged] and the resulting tensor written to\n the `cache_dir` before applying the remaining random dependant transforms\n [RandCropByPosNegLabeld, ToTensord] elements for use in the analysis.\n\n Subsequent uses of a dataset directly read pre-processed results from `cache_dir`\n followed by applying the random dependant parts of transform processing.\n\n Note:\n The input data must be a list of file paths and will hash them as cache keys.\n\n When loading persistent cache content, it can't guarantee the cached data matches current\n transform chain, so please make sure to use exactly the same non-random transforms and the\n args as the cache content, otherwise, it may cause unexpected errors.\n\n \"\"\"\n\n def __init__(\n self,\n data: Sequence,\n transform: Union[Sequence[Callable], Callable],\n cache_dir: Optional[Union[Path, str]],\n hash_func: Callable[..., bytes] = pickle_hashing,\n ) -> None:\n \"\"\"\n Args:\n data: input data file paths to load and transform to generate dataset for model.\n `PersistentDataset` expects input data to be a list of serializable\n and hashes them as cache keys using `hash_func`.\n transform: transforms to execute operations on input data.\n cache_dir: If specified, this is the location for persistent storage\n of pre-computed transformed data tensors. The cache_dir is computed once, and\n persists on disk until explicitly removed. Different runs, programs, experiments\n may share a common cache dir provided that the transforms pre-processing is consistent.\n If `cache_dir` doesn't exist, will automatically create it.\n If `cache_dir` is `None`, there is effectively no caching.\n hash_func: a callable to compute hash from data items to be cached.\n defaults to `monai.data.utils.pickle_hashing`.\n\n \"\"\"\n if not isinstance(transform, Compose):\n transform = Compose(transform)\n super().__init__(data=data, transform=transform)\n self.cache_dir = Path(cache_dir) if cache_dir is not None else None\n self.hash_func = hash_func\n if self.cache_dir is not None:\n if not self.cache_dir.exists():\n self.cache_dir.mkdir(parents=True)\n if not self.cache_dir.is_dir():\n raise ValueError(\"cache_dir must be a directory.\")\n\n def _pre_transform(self, item_transformed):\n \"\"\"\n Process the data from original state up to the first random element.\n\n Args:\n item_transformed: The data to be transformed\n\n Returns:\n the transformed element up to the first identified\n random transform object\n\n \"\"\"\n for _transform in self.transform.transforms: # type:ignore\n # execute all the deterministic transforms\n if isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):\n break\n # this is to be consistent with CacheDataset even though it's not in a multi-thread situation.\n _xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform\n item_transformed = apply_transform(_xform, item_transformed)\n return item_transformed\n\n def _post_transform(self, item_transformed):\n \"\"\"\n Process the data from before the first random transform to the final state ready for evaluation.\n\n Args:\n item_transformed: The data to be transformed (already processed up to the first random transform)\n\n Returns:\n the transformed element through the random transforms\n\n \"\"\"\n if not isinstance(self.transform, Compose):\n raise ValueError(\"transform must be an instance of monai.transforms.Compose.\")\n start_post_randomize_run = False\n for _transform in self.transform.transforms:\n if (\n start_post_randomize_run\n or isinstance(_transform, Randomizable)\n or not isinstance(_transform, Transform)\n ):\n start_post_randomize_run = True\n item_transformed = apply_transform(_transform, item_transformed)\n return item_transformed\n\n def _cachecheck(self, item_transformed):\n \"\"\"\n A function to cache the expensive input data transform operations\n so that huge data sets (larger than computer memory) can be processed\n on the fly as needed, and intermediate results written to disk for\n future use.\n\n Args:\n item_transformed: The current data element to be mutated into transformed representation\n\n Returns:\n The transformed data_element, either from cache, or explicitly computing it.\n\n Warning:\n The current implementation does not encode transform information as part of the\n hashing mechanism used for generating cache names. If the transforms applied are\n changed in any way, the objects in the cache dir will be invalid. The hash for the\n cache is ONLY dependant on the input filename paths.\n\n \"\"\"\n hashfile = None\n if self.cache_dir is not None:\n data_item_md5 = self.hash_func(item_transformed).decode(\"utf-8\")\n hashfile = self.cache_dir / f\"{data_item_md5}.pt\"\n\n if hashfile is not None and hashfile.is_file(): # cache hit\n return torch.load(hashfile)\n\n _item_transformed = self._pre_transform(deepcopy(item_transformed)) # keep the original hashed\n if hashfile is not None:\n # NOTE: Writing to a temporary directory and then using a nearly atomic rename operation\n # to make the cache more robust to manual killing of parent process\n # which may leave partially written cache files in an incomplete state\n with tempfile.TemporaryDirectory() as tmpdirname:\n temp_hash_file = Path(tmpdirname) / hashfile.name\n torch.save(_item_transformed, temp_hash_file)\n if temp_hash_file.is_file() and not hashfile.is_file():\n # On Unix, if target exists and is a file, it will be replaced silently if the user has permission.\n # for more details: https://docs.python.org/3/library/shutil.html#shutil.move.\n try:\n shutil.move(temp_hash_file, hashfile)\n except FileExistsError:\n pass\n return _item_transformed\n\n def _transform(self, index: int):\n pre_random_item = self._cachecheck(self.data[index])\n return self._post_transform(pre_random_item)\n\n\nclass CacheNTransDataset(PersistentDataset):\n \"\"\"\n Extension of `PersistentDataset`, tt can also cache the result of first N transforms, no matter it's random or not.\n\n \"\"\"\n\n def __init__(\n self,\n data: Sequence,\n transform: Union[Sequence[Callable], Callable],\n cache_n_trans: int,\n cache_dir: Optional[Union[Path, str]],\n hash_func: Callable[..., bytes] = pickle_hashing,\n ) -> None:\n \"\"\"\n Args:\n data: input data file paths to load and transform to generate dataset for model.\n `PersistentDataset` expects input data to be a list of serializable\n and hashes them as cache keys using `hash_func`.\n transform: transforms to execute operations on input data.\n cache_n_trans: cache the result of first N transforms.\n cache_dir: If specified, this is the location for persistent storage\n of pre-computed transformed data tensors. The cache_dir is computed once, and\n persists on disk until explicitly removed. Different runs, programs, experiments\n may share a common cache dir provided that the transforms pre-processing is consistent.\n If `cache_dir` doesn't exist, will automatically create it.\n If `cache_dir` is `None`, there is effectively no caching.\n hash_func: a callable to compute hash from data items to be cached.\n defaults to `monai.data.utils.pickle_hashing`.\n\n \"\"\"\n super().__init__(data=data, transform=transform, cache_dir=cache_dir, hash_func=hash_func)\n self.cache_n_trans = cache_n_trans\n\n def _pre_transform(self, item_transformed):\n \"\"\"\n Process the data from original state up to the N element.\n\n Args:\n item_transformed: The data to be transformed\n\n Returns:\n the transformed element up to the N transform object\n \"\"\"\n if not isinstance(self.transform, Compose):\n raise ValueError(\"transform must be an instance of monai.transforms.Compose.\")\n for i, _transform in enumerate(self.transform.transforms):\n if i == self.cache_n_trans:\n break\n _xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform\n item_transformed = apply_transform(_xform, item_transformed)\n return item_transformed\n\n def _post_transform(self, item_transformed):\n \"\"\"\n Process the data from before the N + 1 transform to the final state ready for evaluation.\n\n Args:\n item_transformed: The data to be transformed (already processed up to the first N transform)\n\n Returns:\n the final transformed result\n \"\"\"\n if not isinstance(self.transform, Compose):\n raise ValueError(\"transform must be an instance of monai.transforms.Compose.\")\n for i, _transform in enumerate(self.transform.transforms):\n if i >= self.cache_n_trans:\n item_transformed = apply_transform(_transform, item_transformed)\n return item_transformed\n\n\nclass LMDBDataset(PersistentDataset):\n \"\"\"\n Extension of `PersistentDataset` using LMDB as the backend.\n\n See Also:\n :py:class:`monai.data.PersistentDataset`\n\n Examples:\n\n >>> items = [{\"data\": i} for i in range(5)]\n # [{'data': 0}, {'data': 1}, {'data': 2}, {'data': 3}, {'data': 4}]\n >>> lmdb_ds = monai.data.LMDBDataset(items, transform=monai.transforms.SimulateDelayd(\"data\", delay_time=1))\n >>> print(list(lmdb_ds)) # using the cached results\n\n \"\"\"\n\n def __init__(\n self,\n data: Sequence,\n transform: Union[Sequence[Callable], Callable],\n cache_dir: Union[Path, str] = \"cache\",\n hash_func: Callable[..., bytes] = pickle_hashing,\n db_name: str = \"monai_cache\",\n progress: bool = True,\n pickle_protocol=pickle.HIGHEST_PROTOCOL,\n lmdb_kwargs: Optional[dict] = None,\n ) -> None:\n \"\"\"\n Args:\n data: input data file paths to load and transform to generate dataset for model.\n `LMDBDataset` expects input data to be a list of serializable\n and hashes them as cache keys using `hash_func`.\n transform: transforms to execute operations on input data.\n cache_dir: if specified, this is the location for persistent storage\n of pre-computed transformed data tensors. The cache_dir is computed once, and\n persists on disk until explicitly removed. Different runs, programs, experiments\n may share a common cache dir provided that the transforms pre-processing is consistent.\n If the cache_dir doesn't exist, will automatically create it. Defaults to \"./cache\".\n hash_func: a callable to compute hash from data items to be cached.\n defaults to `monai.data.utils.pickle_hashing`.\n db_name: lmdb database file name. Defaults to \"monai_cache\".\n progress: whether to display a progress bar.\n pickle_protocol: pickle protocol version. Defaults to pickle.HIGHEST_PROTOCOL.\n https://docs.python.org/3/library/pickle.html#pickle-protocols\n lmdb_kwargs: additional keyword arguments to the lmdb environment.\n for more details please visit: https://lmdb.readthedocs.io/en/release/#environment-class\n \"\"\"\n super().__init__(data=data, transform=transform, cache_dir=cache_dir, hash_func=hash_func)\n self.progress = progress\n if not self.cache_dir:\n raise ValueError(\"cache_dir must be specified.\")\n self.db_file = self.cache_dir / f\"{db_name}.lmdb\"\n self.pickle_protocol = pickle_protocol\n self.lmdb_kwargs = lmdb_kwargs or {}\n if not self.lmdb_kwargs.get(\"map_size\", 0):\n self.lmdb_kwargs[\"map_size\"] = 1024 ** 4 # default map_size\n self._read_env = None\n print(f\"Accessing lmdb file: {self.db_file.absolute()}.\")\n\n def _fill_cache_start_reader(self):\n # create cache\n self.lmdb_kwargs[\"readonly\"] = False\n env = lmdb.open(path=f\"{self.db_file}\", subdir=False, **self.lmdb_kwargs)\n if self.progress and not has_tqdm:\n warnings.warn(\"LMDBDataset: tqdm is not installed. not displaying the caching progress.\")\n for item in tqdm(self.data) if has_tqdm and self.progress else self.data:\n key = self.hash_func(item)\n done, retry, val = False, 5, None\n while not done and retry > 0:\n try:\n with env.begin(write=True) as txn:\n with txn.cursor() as cursor:\n done = cursor.set_key(key)\n if done:\n continue\n if val is None:\n val = self._pre_transform(deepcopy(item)) # keep the original hashed\n val = pickle.dumps(val, protocol=self.pickle_protocol)\n txn.put(key, val)\n done = True\n except lmdb.MapFullError:\n done, retry = False, retry - 1\n size = env.info()[\"map_size\"]\n new_size = size * 2\n warnings.warn(f\"Resizing the cache database from {int(size) >> 20}MB to {int(new_size) >> 20}MB.\")\n env.set_mapsize(new_size)\n except lmdb.MapResizedError:\n # the mapsize is increased by another process\n # set_mapsize with a size of 0 to adopt the new size,\n env.set_mapsize(0)\n if not done: # still has the map full error\n size = env.info()[\"map_size\"]\n env.close()\n raise ValueError(f\"LMDB map size reached, increase size above current size of {size}.\")\n size = env.info()[\"map_size\"]\n env.close()\n # read-only database env\n self.lmdb_kwargs[\"readonly\"] = True\n self.lmdb_kwargs[\"map_size\"] = size\n if self.lmdb_kwargs.get(\"lock\", None) is None:\n self.lmdb_kwargs[\"lock\"] = False\n if self.lmdb_kwargs.get(\"readahead\", None) is None:\n self.lmdb_kwargs[\"readahead\"] = False\n return lmdb.open(path=f\"{self.db_file}\", subdir=False, **self.lmdb_kwargs)\n\n def _cachecheck(self, item_transformed):\n \"\"\"\n if the item is not found in the lmdb file, resolves to the persistent cache default behaviour.\n\n \"\"\"\n if self._read_env is None:\n self._read_env = self._fill_cache_start_reader()\n with self._read_env.begin(write=False) as txn:\n data = txn.get(self.hash_func(item_transformed))\n if data is None:\n warnings.warn(\"LMDBDataset: cache key not found, running fallback caching.\")\n return super()._cachecheck(item_transformed)\n try:\n return pickle.loads(data)\n except Exception as err:\n raise RuntimeError(\"Invalid cache value, corrupted lmdb file?\") from err\n\n def info(self):\n \"\"\"\n Returns: dataset info dictionary.\n\n \"\"\"\n if self._read_env is None:\n self._read_env = self._fill_cache_start_reader()\n out = dict(self._read_env.info())\n out[\"size\"] = len(self.data)\n out[\"filename\"] = f\"{self.db_file.absolute()}\"\n return out\n\n\nclass CacheDataset(Dataset):\n \"\"\"\n Dataset with cache mechanism that can load data and cache deterministic transforms' result during training.\n\n By caching the results of non-random preprocessing transforms, it accelerates the training data pipeline.\n If the requested data is not in the cache, all transforms will run normally\n (see also :py:class:`monai.data.dataset.Dataset`).\n\n Users can set the cache rate or number of items to cache.\n It is recommended to experiment with different `cache_num` or `cache_rate` to identify the best training speed.\n\n To improve the caching efficiency, please always put as many as possible non-random transforms\n before the randomized ones when composing the chain of transforms.\n If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,\n for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset\n\n For example, if the transform is a `Compose` of::\n\n transforms = Compose([\n LoadImaged(),\n AddChanneld(),\n Spacingd(),\n Orientationd(),\n ScaleIntensityRanged(),\n RandCropByPosNegLabeld(),\n ToTensord()\n ])\n\n when `transforms` is used in a multi-epoch training pipeline, before the first training epoch,\n this dataset will cache the results up to ``ScaleIntensityRanged``, as\n all non-random transforms `LoadImaged`, `AddChanneld`, `Spacingd`, `Orientationd`, `ScaleIntensityRanged`\n can be cached. During training, the dataset will load the cached results and run\n ``RandCropByPosNegLabeld`` and ``ToTensord``, as ``RandCropByPosNegLabeld`` is a randomized transform\n and the outcome not cached.\n\n Note:\n `CacheDataset` executes non-random transforms and prepares cache content in the main process before\n the first epoch, then all the subprocesses of DataLoader will read the same cache content in the main process\n during training. it may take a long time to prepare cache content according to the size of expected cache data.\n So to debug or verify the program before real training, users can set `cache_rate=0.0` or `cache_num=0` to\n temporarily skip caching.\n\n \"\"\"\n\n def __init__(\n self,\n data: Sequence,\n transform: Union[Sequence[Callable], Callable],\n cache_num: int = sys.maxsize,\n cache_rate: float = 1.0,\n num_workers: Optional[int] = None,\n progress: bool = True,\n ) -> None:\n \"\"\"\n Args:\n data: input data to load and transform to generate dataset for model.\n transform: transforms to execute operations on input data.\n cache_num: number of items to be cached. Default is `sys.maxsize`.\n will take the minimum of (cache_num, data_length x cache_rate, data_length).\n cache_rate: percentage of cached data in total, default is 1.0 (cache all).\n will take the minimum of (cache_num, data_length x cache_rate, data_length).\n num_workers: the number of worker processes to use.\n If num_workers is None then the number returned by os.cpu_count() is used.\n progress: whether to display a progress bar.\n \"\"\"\n if not isinstance(transform, Compose):\n transform = Compose(transform)\n super().__init__(data=data, transform=transform)\n self.progress = progress\n self.cache_num = min(int(cache_num), int(len(data) * cache_rate), len(data))\n self.num_workers = num_workers\n if self.num_workers is not None:\n self.num_workers = max(int(self.num_workers), 1)\n self._cache: List = self._fill_cache()\n\n def _fill_cache(self) -> List:\n if self.cache_num <= 0:\n return []\n if self.progress and not has_tqdm:\n warnings.warn(\"tqdm is not installed, will not show the caching progress bar.\")\n with ThreadPool(self.num_workers) as p:\n if self.progress and has_tqdm:\n return list(\n tqdm(\n p.imap(self._load_cache_item, range(self.cache_num)),\n total=self.cache_num,\n desc=\"Loading dataset\",\n )\n )\n return list(p.imap(self._load_cache_item, range(self.cache_num)))\n\n def _load_cache_item(self, idx: int):\n \"\"\"\n Args:\n idx: the index of the input data sequence.\n \"\"\"\n item = self.data[idx]\n for _transform in self.transform.transforms: # type:ignore\n # execute all the deterministic transforms\n if isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):\n break\n _xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform\n item = apply_transform(_xform, item)\n return item\n\n def _transform(self, index: int):\n if index % len(self) >= self.cache_num: # support negative index\n # no cache for this index, execute all the transforms directly\n return super()._transform(index)\n # load data from cache and execute from the first random transform\n start_run = False\n if self._cache is None:\n self._cache = self._fill_cache()\n data = self._cache[index]\n if not isinstance(self.transform, Compose):\n raise ValueError(\"transform must be an instance of monai.transforms.Compose.\")\n for _transform in self.transform.transforms:\n if start_run or isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):\n # only need to deep copy data on first non-deterministic transform\n if not start_run:\n start_run = True\n data = deepcopy(data)\n data = apply_transform(_transform, data)\n return data\n\n\nclass SmartCacheDataset(Randomizable, CacheDataset):\n \"\"\"\n Re-implementation of the SmartCache mechanism in NVIDIA Clara-train SDK.\n At any time, the cache pool only keeps a subset of the whole dataset. In each epoch, only the items\n in the cache are used for training. This ensures that data needed for training is readily available,\n keeping GPU resources busy. Note that cached items may still have to go through a non-deterministic\n transform sequence before being fed to GPU. At the same time, another thread is preparing replacement\n items by applying the transform sequence to items not in cache. Once one epoch is completed, Smart\n Cache replaces the same number of items with replacement items.\n Smart Cache uses a simple `running window` algorithm to determine the cache content and replacement items.\n Let N be the configured number of objects in cache; and R be the number of replacement objects (R = ceil(N * r),\n where r is the configured replace rate).\n For more details, please refer to:\n https://docs.nvidia.com/clara/tlt-mi/clara-train-sdk-v3.0/nvmidl/additional_features/smart_cache.html#smart-cache\n If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,\n for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset\n\n For example, if we have 5 images: `[image1, image2, image3, image4, image5]`, and `cache_num=4`, `replace_rate=0.25`.\n so the actual training images cached and replaced for every epoch are as below::\n\n epoch 1: [image1, image2, image3, image4]\n epoch 2: [image2, image3, image4, image5]\n epoch 3: [image3, image4, image5, image1]\n epoch 3: [image4, image5, image1, image2]\n epoch N: [image[N % 5] ...]\n\n The usage of `SmartCacheDataset` contains 4 steps:\n\n 1. Initialize `SmartCacheDataset` object and cache for the first epoch.\n 2. Call `start()` to run replacement thread in background.\n 3. Call `update_cache()` before every epoch to replace training items.\n 4. Call `shutdown()` when training ends.\n\n Note:\n This replacement will not work for below cases:\n 1. Set the `multiprocessing_context` of DataLoader to `spawn`.\n 2. Run on windows(the default multiprocessing method is `spawn`) with `num_workers` greater than 0.\n 3. Set the `persistent_workers` of DataLoader to `True` with `num_workers` greater than 0.\n\n If using MONAI workflows, please add `SmartCacheHandler` to the handler list of trainer,\n otherwise, please make sure to call `start()`, `update_cache()`, `shutdown()` during training.\n\n Args:\n data: input data to load and transform to generate dataset for model.\n transform: transforms to execute operations on input data.\n replace_rate: percentage of the cached items to be replaced in every epoch.\n cache_num: number of items to be cached. Default is `sys.maxsize`.\n will take the minimum of (cache_num, data_length x cache_rate, data_length).\n cache_rate: percentage of cached data in total, default is 1.0 (cache all).\n will take the minimum of (cache_num, data_length x cache_rate, data_length).\n num_init_workers: the number of worker threads to initialize the cache for first epoch.\n If num_init_workers is None then the number returned by os.cpu_count() is used.\n num_replace_workers: the number of worker threads to prepare the replacement cache for every epoch.\n If num_replace_workers is None then the number returned by os.cpu_count() is used.\n progress: whether to display a progress bar when caching for the first epoch.\n shuffle: whether to shuffle the whole data list before preparing the cache content for first epoch.\n seed: random seed if shuffle is `True`, default to `0`.\n \"\"\"\n\n def __init__(\n self,\n data: Sequence,\n transform: Union[Sequence[Callable], Callable],\n replace_rate: float,\n cache_num: int = sys.maxsize,\n cache_rate: float = 1.0,\n num_init_workers: Optional[int] = None,\n num_replace_workers: Optional[int] = None,\n progress: bool = True,\n shuffle: bool = True,\n seed: int = 0,\n ) -> None:\n if shuffle:\n self.set_random_state(seed=seed)\n self.randomize(data)\n\n super().__init__(data, transform, cache_num, cache_rate, num_init_workers, progress)\n if self._cache is None:\n self._cache = self._fill_cache()\n if self.cache_num >= len(data):\n warnings.warn(\n \"cache_num is greater or equal than dataset length, fall back to regular monai.data.CacheDataset.\"\n )\n if replace_rate <= 0:\n raise ValueError(\"replace_rate must be greater than 0, otherwise, please use monai.data.CacheDataset.\")\n\n self.num_replace_workers: Optional[int] = num_replace_workers\n if self.num_replace_workers is not None:\n self.num_replace_workers = max(int(self.num_replace_workers), 1)\n\n self._total_num: int = len(data)\n self._replace_num: int = min(math.ceil(self.cache_num * replace_rate), len(data) - self.cache_num)\n self._replacements: List[Any] = [None for _ in range(self._replace_num)]\n self._replace_data_idx: List[int] = list(range(self._replace_num))\n\n self._start_pos: int = 0\n self._update_lock: threading.Lock = threading.Lock()\n self._round: int = 1\n self._replace_done: bool = False\n self._replace_mgr: Optional[threading.Thread] = None\n\n self._compute_data_idx()\n\n def randomize(self, data: Sequence) -> None:\n try:\n self.R.shuffle(data)\n except TypeError as e:\n warnings.warn(f\"input data can't be shuffled in SmartCacheDataset with numpy.random.shuffle(): {e}.\")\n\n def _compute_data_idx(self):\n \"\"\"\n Update the replacement data position in the total data.\n\n \"\"\"\n for i in range(self._replace_num):\n pos: int = self._start_pos + self.cache_num + i\n if pos >= self._total_num:\n pos -= self._total_num\n self._replace_data_idx[i] = pos\n\n def is_started(self):\n \"\"\"\n Check whether the replacement thread is already started.\n\n \"\"\"\n if self._replace_mgr is None:\n return False\n return self._replace_mgr.is_alive()\n\n def start(self):\n \"\"\"\n Start the background thread to replace training items for every epoch.\n\n \"\"\"\n if self._replace_mgr is None or not self.is_started():\n self._restart()\n\n def _restart(self):\n \"\"\"\n Restart background thread if killed for some reason.\n\n \"\"\"\n self._round = 1\n self._replace_mgr = threading.Thread(target=self.manage_replacement, daemon=True)\n self._replace_mgr.start()\n\n def _try_update_cache(self):\n \"\"\"\n Update the cache items with new replacement for current epoch.\n\n \"\"\"\n with self._update_lock:\n if not self._replace_done:\n return False\n\n del self._cache[: self._replace_num]\n self._cache.extend(self._replacements)\n\n self._start_pos += self._replace_num\n if self._start_pos >= self._total_num:\n self._start_pos -= self._total_num\n\n self._compute_data_idx()\n\n # ready for next round\n self._round += 1\n self._replace_done = False\n return True\n\n def update_cache(self):\n \"\"\"\n Update cache items for current epoch, need to call this function before every epoch.\n If the cache has been shutdown before, need to restart the `_replace_mgr` thread.\n\n \"\"\"\n if not self._replace_mgr.is_alive():\n self._restart()\n\n # make sure update is done\n while not self._try_update_cache():\n time.sleep(0.01)\n\n def _try_shutdown(self):\n \"\"\"\n Wait for thread lock to shut down the background thread.\n\n \"\"\"\n with self._update_lock:\n if self._replace_done:\n self._round = 0\n self._replace_done = False\n return True\n return False\n\n def shutdown(self):\n \"\"\"\n Shut down the background thread for replacement.\n\n \"\"\"\n if not self.is_started():\n return\n\n # wait until replace mgr is done the current round\n while not self._try_shutdown():\n time.sleep(0.01)\n self._replace_mgr.join()\n\n def _replace_cache_thread(self, index: int):\n \"\"\"\n Execute deterministic transforms on the new data for replacement.\n\n \"\"\"\n pos: int = self._replace_data_idx[index]\n self._replacements[index] = self._load_cache_item(pos)\n\n def _compute_replacements(self):\n \"\"\"\n Compute expected items for the replacement of next epoch, execute deterministic transforms.\n It can support multi-threads to accelerate the computation progress.\n\n \"\"\"\n with ThreadPool(self.num_replace_workers) as p:\n p.map(self._replace_cache_thread, list(range(self._replace_num)))\n\n self._replace_done = True\n\n def _try_manage_replacement(self, check_round):\n \"\"\"\n Wait thread lock and replace training items in the background thread.\n\n \"\"\"\n with self._update_lock:\n if self._round <= 0:\n # shutdown replacement\n self._replace_done = True\n return True, -1\n\n if self._round != check_round:\n self._compute_replacements()\n return False, self._round\n\n def manage_replacement(self):\n \"\"\"\n Background thread for replacement.\n\n \"\"\"\n check_round: int = -1\n done = False\n while not done:\n done, check_round = self._try_manage_replacement(check_round)\n time.sleep(0.01)\n\n def __len__(self):\n \"\"\"\n The dataset length is given by cache_num instead of len(data).\n\n \"\"\"\n return self.cache_num\n\n\nclass ZipDataset(Dataset):\n \"\"\"\n Zip several PyTorch datasets and output data(with the same index) together in a tuple.\n If the output of single dataset is already a tuple, flatten it and extend to the result.\n For example: if datasetA returns (img, imgmeta), datasetB returns (seg, segmeta),\n finally return (img, imgmeta, seg, segmeta).\n And if the datasets don't have same length, use the minimum length of them as the length\n of ZipDataset.\n If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,\n for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset\n\n Examples::\n\n >>> zip_data = ZipDataset([[1, 2, 3], [4, 5]])\n >>> print(len(zip_data))\n 2\n >>> for item in zip_data:\n >>> print(item)\n [1, 4]\n [2, 5]\n\n \"\"\"\n\n def __init__(self, datasets: Sequence, transform: Optional[Callable] = None) -> None:\n \"\"\"\n Args:\n datasets: list of datasets to zip together.\n transform: a callable data transform operates on the zipped item from `datasets`.\n \"\"\"\n super().__init__(list(datasets), transform=transform)\n\n def __len__(self) -> int:\n return min((len(dataset) for dataset in self.data))\n\n def _transform(self, index: int):\n def to_list(x):\n return list(x) if isinstance(x, (tuple, list)) else [x]\n\n data = []\n for dataset in self.data:\n data.extend(to_list(dataset[index]))\n if self.transform is not None:\n data = apply_transform(self.transform, data, map_items=False) # transform the list data\n # use tuple instead of list as the default collate_fn callback of MONAI DataLoader flattens nested lists\n return tuple(data)\n\n\nclass ArrayDataset(Randomizable, _TorchDataset):\n \"\"\"\n Dataset for segmentation and classification tasks based on array format input data and transforms.\n It ensures the same random seeds in the randomized transforms defined for image, segmentation and label.\n The `transform` can be :py:class:`monai.transforms.Compose` or any other callable object.\n For example:\n If train based on Nifti format images without metadata, all transforms can be composed::\n\n img_transform = Compose(\n [\n LoadImage(image_only=True),\n AddChannel(),\n RandAdjustContrast()\n ]\n )\n ArrayDataset(img_file_list, img_transform=img_transform)\n\n If training based on images and the metadata, the array transforms can not be composed\n because several transforms receives multiple parameters or return multiple values. Then Users need\n to define their own callable method to parse metadata from `LoadImage` or set `affine` matrix\n to `Spacing` transform::\n\n class TestCompose(Compose):\n def __call__(self, input_):\n img, metadata = self.transforms[0](input_)\n img = self.transforms[1](img)\n img, _, _ = self.transforms[2](img, metadata[\"affine\"])\n return self.transforms[3](img), metadata\n img_transform = TestCompose(\n [\n LoadImage(image_only=False),\n AddChannel(),\n Spacing(pixdim=(1.5, 1.5, 3.0)),\n RandAdjustContrast()\n ]\n )\n ArrayDataset(img_file_list, img_transform=img_transform)\n\n Examples::\n\n >>> ds = ArrayDataset([1, 2, 3, 4], lambda x: x + 0.1)\n >>> print(ds[0])\n 1.1\n\n >>> ds = ArrayDataset(img=[1, 2, 3, 4], seg=[5, 6, 7, 8])\n >>> print(ds[0])\n [1, 5]\n\n \"\"\"\n\n def __init__(\n self,\n img: Sequence,\n img_transform: Optional[Callable] = None,\n seg: Optional[Sequence] = None,\n seg_transform: Optional[Callable] = None,\n labels: Optional[Sequence] = None,\n label_transform: Optional[Callable] = None,\n ) -> None:\n \"\"\"\n Initializes the dataset with the filename lists. The transform `img_transform` is applied\n to the images and `seg_transform` to the segmentations.\n\n Args:\n img: sequence of images.\n img_transform: transform to apply to each element in `img`.\n seg: sequence of segmentations.\n seg_transform: transform to apply to each element in `seg`.\n labels: sequence of labels.\n label_transform: transform to apply to each element in `labels`.\n\n \"\"\"\n items = [(img, img_transform), (seg, seg_transform), (labels, label_transform)]\n self.set_random_state(seed=get_seed())\n datasets = [Dataset(x[0], x[1]) for x in items if x[0] is not None]\n self.dataset = datasets[0] if len(datasets) == 1 else ZipDataset(datasets)\n\n self._seed = 0 # transform synchronization seed\n\n def __len__(self) -> int:\n return len(self.dataset)\n\n def randomize(self, data: Optional[Any] = None) -> None:\n self._seed = self.R.randint(MAX_SEED, dtype=\"uint32\")\n\n def __getitem__(self, index: int):\n self.randomize()\n if isinstance(self.dataset, ZipDataset):\n # set transforms of each zip component\n for dataset in self.dataset.data:\n transform = getattr(dataset, \"transform\", None)\n if isinstance(transform, Randomizable):\n transform.set_random_state(seed=self._seed)\n transform = getattr(self.dataset, \"transform\", None)\n if isinstance(transform, Randomizable):\n transform.set_random_state(seed=self._seed)\n return self.dataset[index]\n\n\nclass NPZDictItemDataset(Dataset):\n \"\"\"\n Represents a dataset from a loaded NPZ file. The members of the file to load are named in the keys of `keys` and\n stored under the keyed name. All loaded arrays must have the same 0-dimension (batch) size. Items are always dicts\n mapping names to an item extracted from the loaded arrays.\n If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,\n for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset\n\n Args:\n npzfile: Path to .npz file or stream containing .npz file data\n keys: Maps keys to load from file to name to store in dataset\n transform: Transform to apply to batch dict\n other_keys: secondary data to load from file and store in dict `other_keys`, not returned by __getitem__\n \"\"\"\n\n def __init__(\n self,\n npzfile: Union[str, IO],\n keys: Dict[str, str],\n transform: Optional[Callable] = None,\n other_keys: Optional[Sequence[str]] = (),\n ):\n self.npzfile: Union[str, IO] = npzfile if isinstance(npzfile, str) else \"STREAM\"\n self.keys: Dict[str, str] = dict(keys)\n dat = np.load(npzfile)\n\n self.arrays = {storedk: dat[datak] for datak, storedk in self.keys.items()}\n self.length = self.arrays[first(self.keys.values())].shape[0]\n\n self.other_keys = {} if other_keys is None else {k: dat[k] for k in other_keys}\n\n for k, v in self.arrays.items():\n if v.shape[0] != self.length:\n raise ValueError(\n \"All loaded arrays must have the same first dimension \"\n f\"size {self.length}, array `{k}` has size {v.shape[0]}\"\n )\n\n super().__init__([], transform)\n\n def __len__(self):\n return self.length\n\n def _transform(self, index: int):\n data = {k: v[index] for k, v in self.arrays.items()}\n\n if self.transform is not None:\n data = apply_transform(self.transform, data)\n\n return data\n", "path": "monai/data/dataset.py"}], "after_files": [{"content": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport collections.abc\nimport math\nimport pickle\nimport shutil\nimport sys\nimport tempfile\nimport threading\nimport time\nimport warnings\nfrom copy import deepcopy\nfrom multiprocessing.pool import ThreadPool\nfrom pathlib import Path\nfrom typing import IO, TYPE_CHECKING, Any, Callable, Dict, List, Optional, Sequence, Union\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset as _TorchDataset\nfrom torch.utils.data import Subset\n\nfrom monai.data.utils import first, pickle_hashing\nfrom monai.transforms import Compose, Randomizable, ThreadUnsafe, Transform, apply_transform\nfrom monai.utils import MAX_SEED, get_seed, min_version, optional_import\n\nif TYPE_CHECKING:\n from tqdm import tqdm\n\n has_tqdm = True\nelse:\n tqdm, has_tqdm = optional_import(\"tqdm\", \"4.47.0\", min_version, \"tqdm\")\n\nlmdb, _ = optional_import(\"lmdb\")\n\n\nclass Dataset(_TorchDataset):\n \"\"\"\n A generic dataset with a length property and an optional callable data transform\n when fetching a data sample.\n If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,\n for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset\n\n For example, typical input data can be a list of dictionaries::\n\n [{ { {\n 'img': 'image1.nii.gz', 'img': 'image2.nii.gz', 'img': 'image3.nii.gz',\n 'seg': 'label1.nii.gz', 'seg': 'label2.nii.gz', 'seg': 'label3.nii.gz',\n 'extra': 123 'extra': 456 'extra': 789\n }, }, }]\n \"\"\"\n\n def __init__(self, data: Sequence, transform: Optional[Callable] = None) -> None:\n \"\"\"\n Args:\n data: input data to load and transform to generate dataset for model.\n transform: a callable data transform on input data.\n\n \"\"\"\n self.data = data\n self.transform = transform\n\n def __len__(self) -> int:\n return len(self.data)\n\n def _transform(self, index: int):\n \"\"\"\n Fetch single data item from `self.data`.\n \"\"\"\n data_i = self.data[index]\n return apply_transform(self.transform, data_i) if self.transform is not None else data_i\n\n def __getitem__(self, index: Union[int, slice, Sequence[int]]):\n \"\"\"\n Returns a `Subset` if `index` is a slice or Sequence, a data item otherwise.\n \"\"\"\n if isinstance(index, slice):\n # dataset[:42]\n start, stop, step = index.indices(len(self))\n indices = range(start, stop, step)\n return Subset(dataset=self, indices=indices)\n if isinstance(index, collections.abc.Sequence):\n # dataset[[1, 3, 4]]\n return Subset(dataset=self, indices=index)\n return self._transform(index)\n\n\nclass PersistentDataset(Dataset):\n \"\"\"\n Persistent storage of pre-computed values to efficiently manage larger than memory dictionary format data,\n it can operate transforms for specific fields. Results from the non-random transform components are computed\n when first used, and stored in the `cache_dir` for rapid retrieval on subsequent uses.\n If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,\n for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset\n\n For example, typical input data can be a list of dictionaries::\n\n [{ { {\n 'img': 'image1.nii.gz', 'img': 'image2.nii.gz', 'img': 'image3.nii.gz',\n 'seg': 'label1.nii.gz', 'seg': 'label2.nii.gz', 'seg': 'label3.nii.gz',\n 'extra': 123 'extra': 456 'extra': 789\n }, }, }]\n\n For a composite transform like\n\n .. code-block:: python\n\n [ LoadImaged(keys=['image', 'label']),\n Orientationd(keys=['image', 'label'], axcodes='RAS'),\n ScaleIntensityRanged(keys=['image'], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),\n RandCropByPosNegLabeld(keys=['image', 'label'], label_key='label', spatial_size=(96, 96, 96),\n pos=1, neg=1, num_samples=4, image_key='image', image_threshold=0),\n ToTensord(keys=['image', 'label'])]\n\n Upon first use a filename based dataset will be processed by the transform for the\n [LoadImaged, Orientationd, ScaleIntensityRanged] and the resulting tensor written to\n the `cache_dir` before applying the remaining random dependant transforms\n [RandCropByPosNegLabeld, ToTensord] elements for use in the analysis.\n\n Subsequent uses of a dataset directly read pre-processed results from `cache_dir`\n followed by applying the random dependant parts of transform processing.\n\n Note:\n The input data must be a list of file paths and will hash them as cache keys.\n\n When loading persistent cache content, it can't guarantee the cached data matches current\n transform chain, so please make sure to use exactly the same non-random transforms and the\n args as the cache content, otherwise, it may cause unexpected errors.\n\n \"\"\"\n\n def __init__(\n self,\n data: Sequence,\n transform: Union[Sequence[Callable], Callable],\n cache_dir: Optional[Union[Path, str]],\n hash_func: Callable[..., bytes] = pickle_hashing,\n ) -> None:\n \"\"\"\n Args:\n data: input data file paths to load and transform to generate dataset for model.\n `PersistentDataset` expects input data to be a list of serializable\n and hashes them as cache keys using `hash_func`.\n transform: transforms to execute operations on input data.\n cache_dir: If specified, this is the location for persistent storage\n of pre-computed transformed data tensors. The cache_dir is computed once, and\n persists on disk until explicitly removed. Different runs, programs, experiments\n may share a common cache dir provided that the transforms pre-processing is consistent.\n If `cache_dir` doesn't exist, will automatically create it.\n If `cache_dir` is `None`, there is effectively no caching.\n hash_func: a callable to compute hash from data items to be cached.\n defaults to `monai.data.utils.pickle_hashing`.\n\n \"\"\"\n if not isinstance(transform, Compose):\n transform = Compose(transform)\n super().__init__(data=data, transform=transform)\n self.cache_dir = Path(cache_dir) if cache_dir is not None else None\n self.hash_func = hash_func\n if self.cache_dir is not None:\n if not self.cache_dir.exists():\n self.cache_dir.mkdir(parents=True, exist_ok=True)\n if not self.cache_dir.is_dir():\n raise ValueError(\"cache_dir must be a directory.\")\n\n def _pre_transform(self, item_transformed):\n \"\"\"\n Process the data from original state up to the first random element.\n\n Args:\n item_transformed: The data to be transformed\n\n Returns:\n the transformed element up to the first identified\n random transform object\n\n \"\"\"\n for _transform in self.transform.transforms: # type:ignore\n # execute all the deterministic transforms\n if isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):\n break\n # this is to be consistent with CacheDataset even though it's not in a multi-thread situation.\n _xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform\n item_transformed = apply_transform(_xform, item_transformed)\n return item_transformed\n\n def _post_transform(self, item_transformed):\n \"\"\"\n Process the data from before the first random transform to the final state ready for evaluation.\n\n Args:\n item_transformed: The data to be transformed (already processed up to the first random transform)\n\n Returns:\n the transformed element through the random transforms\n\n \"\"\"\n if not isinstance(self.transform, Compose):\n raise ValueError(\"transform must be an instance of monai.transforms.Compose.\")\n start_post_randomize_run = False\n for _transform in self.transform.transforms:\n if (\n start_post_randomize_run\n or isinstance(_transform, Randomizable)\n or not isinstance(_transform, Transform)\n ):\n start_post_randomize_run = True\n item_transformed = apply_transform(_transform, item_transformed)\n return item_transformed\n\n def _cachecheck(self, item_transformed):\n \"\"\"\n A function to cache the expensive input data transform operations\n so that huge data sets (larger than computer memory) can be processed\n on the fly as needed, and intermediate results written to disk for\n future use.\n\n Args:\n item_transformed: The current data element to be mutated into transformed representation\n\n Returns:\n The transformed data_element, either from cache, or explicitly computing it.\n\n Warning:\n The current implementation does not encode transform information as part of the\n hashing mechanism used for generating cache names. If the transforms applied are\n changed in any way, the objects in the cache dir will be invalid. The hash for the\n cache is ONLY dependant on the input filename paths.\n\n \"\"\"\n hashfile = None\n if self.cache_dir is not None:\n data_item_md5 = self.hash_func(item_transformed).decode(\"utf-8\")\n hashfile = self.cache_dir / f\"{data_item_md5}.pt\"\n\n if hashfile is not None and hashfile.is_file(): # cache hit\n return torch.load(hashfile)\n\n _item_transformed = self._pre_transform(deepcopy(item_transformed)) # keep the original hashed\n if hashfile is not None:\n # NOTE: Writing to a temporary directory and then using a nearly atomic rename operation\n # to make the cache more robust to manual killing of parent process\n # which may leave partially written cache files in an incomplete state\n with tempfile.TemporaryDirectory() as tmpdirname:\n temp_hash_file = Path(tmpdirname) / hashfile.name\n torch.save(_item_transformed, temp_hash_file)\n if temp_hash_file.is_file() and not hashfile.is_file():\n # On Unix, if target exists and is a file, it will be replaced silently if the user has permission.\n # for more details: https://docs.python.org/3/library/shutil.html#shutil.move.\n try:\n shutil.move(temp_hash_file, hashfile)\n except FileExistsError:\n pass\n return _item_transformed\n\n def _transform(self, index: int):\n pre_random_item = self._cachecheck(self.data[index])\n return self._post_transform(pre_random_item)\n\n\nclass CacheNTransDataset(PersistentDataset):\n \"\"\"\n Extension of `PersistentDataset`, tt can also cache the result of first N transforms, no matter it's random or not.\n\n \"\"\"\n\n def __init__(\n self,\n data: Sequence,\n transform: Union[Sequence[Callable], Callable],\n cache_n_trans: int,\n cache_dir: Optional[Union[Path, str]],\n hash_func: Callable[..., bytes] = pickle_hashing,\n ) -> None:\n \"\"\"\n Args:\n data: input data file paths to load and transform to generate dataset for model.\n `PersistentDataset` expects input data to be a list of serializable\n and hashes them as cache keys using `hash_func`.\n transform: transforms to execute operations on input data.\n cache_n_trans: cache the result of first N transforms.\n cache_dir: If specified, this is the location for persistent storage\n of pre-computed transformed data tensors. The cache_dir is computed once, and\n persists on disk until explicitly removed. Different runs, programs, experiments\n may share a common cache dir provided that the transforms pre-processing is consistent.\n If `cache_dir` doesn't exist, will automatically create it.\n If `cache_dir` is `None`, there is effectively no caching.\n hash_func: a callable to compute hash from data items to be cached.\n defaults to `monai.data.utils.pickle_hashing`.\n\n \"\"\"\n super().__init__(data=data, transform=transform, cache_dir=cache_dir, hash_func=hash_func)\n self.cache_n_trans = cache_n_trans\n\n def _pre_transform(self, item_transformed):\n \"\"\"\n Process the data from original state up to the N element.\n\n Args:\n item_transformed: The data to be transformed\n\n Returns:\n the transformed element up to the N transform object\n \"\"\"\n if not isinstance(self.transform, Compose):\n raise ValueError(\"transform must be an instance of monai.transforms.Compose.\")\n for i, _transform in enumerate(self.transform.transforms):\n if i == self.cache_n_trans:\n break\n _xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform\n item_transformed = apply_transform(_xform, item_transformed)\n return item_transformed\n\n def _post_transform(self, item_transformed):\n \"\"\"\n Process the data from before the N + 1 transform to the final state ready for evaluation.\n\n Args:\n item_transformed: The data to be transformed (already processed up to the first N transform)\n\n Returns:\n the final transformed result\n \"\"\"\n if not isinstance(self.transform, Compose):\n raise ValueError(\"transform must be an instance of monai.transforms.Compose.\")\n for i, _transform in enumerate(self.transform.transforms):\n if i >= self.cache_n_trans:\n item_transformed = apply_transform(_transform, item_transformed)\n return item_transformed\n\n\nclass LMDBDataset(PersistentDataset):\n \"\"\"\n Extension of `PersistentDataset` using LMDB as the backend.\n\n See Also:\n :py:class:`monai.data.PersistentDataset`\n\n Examples:\n\n >>> items = [{\"data\": i} for i in range(5)]\n # [{'data': 0}, {'data': 1}, {'data': 2}, {'data': 3}, {'data': 4}]\n >>> lmdb_ds = monai.data.LMDBDataset(items, transform=monai.transforms.SimulateDelayd(\"data\", delay_time=1))\n >>> print(list(lmdb_ds)) # using the cached results\n\n \"\"\"\n\n def __init__(\n self,\n data: Sequence,\n transform: Union[Sequence[Callable], Callable],\n cache_dir: Union[Path, str] = \"cache\",\n hash_func: Callable[..., bytes] = pickle_hashing,\n db_name: str = \"monai_cache\",\n progress: bool = True,\n pickle_protocol=pickle.HIGHEST_PROTOCOL,\n lmdb_kwargs: Optional[dict] = None,\n ) -> None:\n \"\"\"\n Args:\n data: input data file paths to load and transform to generate dataset for model.\n `LMDBDataset` expects input data to be a list of serializable\n and hashes them as cache keys using `hash_func`.\n transform: transforms to execute operations on input data.\n cache_dir: if specified, this is the location for persistent storage\n of pre-computed transformed data tensors. The cache_dir is computed once, and\n persists on disk until explicitly removed. Different runs, programs, experiments\n may share a common cache dir provided that the transforms pre-processing is consistent.\n If the cache_dir doesn't exist, will automatically create it. Defaults to \"./cache\".\n hash_func: a callable to compute hash from data items to be cached.\n defaults to `monai.data.utils.pickle_hashing`.\n db_name: lmdb database file name. Defaults to \"monai_cache\".\n progress: whether to display a progress bar.\n pickle_protocol: pickle protocol version. Defaults to pickle.HIGHEST_PROTOCOL.\n https://docs.python.org/3/library/pickle.html#pickle-protocols\n lmdb_kwargs: additional keyword arguments to the lmdb environment.\n for more details please visit: https://lmdb.readthedocs.io/en/release/#environment-class\n \"\"\"\n super().__init__(data=data, transform=transform, cache_dir=cache_dir, hash_func=hash_func)\n self.progress = progress\n if not self.cache_dir:\n raise ValueError(\"cache_dir must be specified.\")\n self.db_file = self.cache_dir / f\"{db_name}.lmdb\"\n self.pickle_protocol = pickle_protocol\n self.lmdb_kwargs = lmdb_kwargs or {}\n if not self.lmdb_kwargs.get(\"map_size\", 0):\n self.lmdb_kwargs[\"map_size\"] = 1024 ** 4 # default map_size\n self._read_env = None\n print(f\"Accessing lmdb file: {self.db_file.absolute()}.\")\n\n def _fill_cache_start_reader(self):\n # create cache\n self.lmdb_kwargs[\"readonly\"] = False\n env = lmdb.open(path=f\"{self.db_file}\", subdir=False, **self.lmdb_kwargs)\n if self.progress and not has_tqdm:\n warnings.warn(\"LMDBDataset: tqdm is not installed. not displaying the caching progress.\")\n for item in tqdm(self.data) if has_tqdm and self.progress else self.data:\n key = self.hash_func(item)\n done, retry, val = False, 5, None\n while not done and retry > 0:\n try:\n with env.begin(write=True) as txn:\n with txn.cursor() as cursor:\n done = cursor.set_key(key)\n if done:\n continue\n if val is None:\n val = self._pre_transform(deepcopy(item)) # keep the original hashed\n val = pickle.dumps(val, protocol=self.pickle_protocol)\n txn.put(key, val)\n done = True\n except lmdb.MapFullError:\n done, retry = False, retry - 1\n size = env.info()[\"map_size\"]\n new_size = size * 2\n warnings.warn(f\"Resizing the cache database from {int(size) >> 20}MB to {int(new_size) >> 20}MB.\")\n env.set_mapsize(new_size)\n except lmdb.MapResizedError:\n # the mapsize is increased by another process\n # set_mapsize with a size of 0 to adopt the new size,\n env.set_mapsize(0)\n if not done: # still has the map full error\n size = env.info()[\"map_size\"]\n env.close()\n raise ValueError(f\"LMDB map size reached, increase size above current size of {size}.\")\n size = env.info()[\"map_size\"]\n env.close()\n # read-only database env\n self.lmdb_kwargs[\"readonly\"] = True\n self.lmdb_kwargs[\"map_size\"] = size\n if self.lmdb_kwargs.get(\"lock\", None) is None:\n self.lmdb_kwargs[\"lock\"] = False\n if self.lmdb_kwargs.get(\"readahead\", None) is None:\n self.lmdb_kwargs[\"readahead\"] = False\n return lmdb.open(path=f\"{self.db_file}\", subdir=False, **self.lmdb_kwargs)\n\n def _cachecheck(self, item_transformed):\n \"\"\"\n if the item is not found in the lmdb file, resolves to the persistent cache default behaviour.\n\n \"\"\"\n if self._read_env is None:\n self._read_env = self._fill_cache_start_reader()\n with self._read_env.begin(write=False) as txn:\n data = txn.get(self.hash_func(item_transformed))\n if data is None:\n warnings.warn(\"LMDBDataset: cache key not found, running fallback caching.\")\n return super()._cachecheck(item_transformed)\n try:\n return pickle.loads(data)\n except Exception as err:\n raise RuntimeError(\"Invalid cache value, corrupted lmdb file?\") from err\n\n def info(self):\n \"\"\"\n Returns: dataset info dictionary.\n\n \"\"\"\n if self._read_env is None:\n self._read_env = self._fill_cache_start_reader()\n out = dict(self._read_env.info())\n out[\"size\"] = len(self.data)\n out[\"filename\"] = f\"{self.db_file.absolute()}\"\n return out\n\n\nclass CacheDataset(Dataset):\n \"\"\"\n Dataset with cache mechanism that can load data and cache deterministic transforms' result during training.\n\n By caching the results of non-random preprocessing transforms, it accelerates the training data pipeline.\n If the requested data is not in the cache, all transforms will run normally\n (see also :py:class:`monai.data.dataset.Dataset`).\n\n Users can set the cache rate or number of items to cache.\n It is recommended to experiment with different `cache_num` or `cache_rate` to identify the best training speed.\n\n To improve the caching efficiency, please always put as many as possible non-random transforms\n before the randomized ones when composing the chain of transforms.\n If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,\n for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset\n\n For example, if the transform is a `Compose` of::\n\n transforms = Compose([\n LoadImaged(),\n AddChanneld(),\n Spacingd(),\n Orientationd(),\n ScaleIntensityRanged(),\n RandCropByPosNegLabeld(),\n ToTensord()\n ])\n\n when `transforms` is used in a multi-epoch training pipeline, before the first training epoch,\n this dataset will cache the results up to ``ScaleIntensityRanged``, as\n all non-random transforms `LoadImaged`, `AddChanneld`, `Spacingd`, `Orientationd`, `ScaleIntensityRanged`\n can be cached. During training, the dataset will load the cached results and run\n ``RandCropByPosNegLabeld`` and ``ToTensord``, as ``RandCropByPosNegLabeld`` is a randomized transform\n and the outcome not cached.\n\n Note:\n `CacheDataset` executes non-random transforms and prepares cache content in the main process before\n the first epoch, then all the subprocesses of DataLoader will read the same cache content in the main process\n during training. it may take a long time to prepare cache content according to the size of expected cache data.\n So to debug or verify the program before real training, users can set `cache_rate=0.0` or `cache_num=0` to\n temporarily skip caching.\n\n \"\"\"\n\n def __init__(\n self,\n data: Sequence,\n transform: Union[Sequence[Callable], Callable],\n cache_num: int = sys.maxsize,\n cache_rate: float = 1.0,\n num_workers: Optional[int] = None,\n progress: bool = True,\n ) -> None:\n \"\"\"\n Args:\n data: input data to load and transform to generate dataset for model.\n transform: transforms to execute operations on input data.\n cache_num: number of items to be cached. Default is `sys.maxsize`.\n will take the minimum of (cache_num, data_length x cache_rate, data_length).\n cache_rate: percentage of cached data in total, default is 1.0 (cache all).\n will take the minimum of (cache_num, data_length x cache_rate, data_length).\n num_workers: the number of worker processes to use.\n If num_workers is None then the number returned by os.cpu_count() is used.\n progress: whether to display a progress bar.\n \"\"\"\n if not isinstance(transform, Compose):\n transform = Compose(transform)\n super().__init__(data=data, transform=transform)\n self.progress = progress\n self.cache_num = min(int(cache_num), int(len(data) * cache_rate), len(data))\n self.num_workers = num_workers\n if self.num_workers is not None:\n self.num_workers = max(int(self.num_workers), 1)\n self._cache: List = self._fill_cache()\n\n def _fill_cache(self) -> List:\n if self.cache_num <= 0:\n return []\n if self.progress and not has_tqdm:\n warnings.warn(\"tqdm is not installed, will not show the caching progress bar.\")\n with ThreadPool(self.num_workers) as p:\n if self.progress and has_tqdm:\n return list(\n tqdm(\n p.imap(self._load_cache_item, range(self.cache_num)),\n total=self.cache_num,\n desc=\"Loading dataset\",\n )\n )\n return list(p.imap(self._load_cache_item, range(self.cache_num)))\n\n def _load_cache_item(self, idx: int):\n \"\"\"\n Args:\n idx: the index of the input data sequence.\n \"\"\"\n item = self.data[idx]\n for _transform in self.transform.transforms: # type:ignore\n # execute all the deterministic transforms\n if isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):\n break\n _xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform\n item = apply_transform(_xform, item)\n return item\n\n def _transform(self, index: int):\n if index % len(self) >= self.cache_num: # support negative index\n # no cache for this index, execute all the transforms directly\n return super()._transform(index)\n # load data from cache and execute from the first random transform\n start_run = False\n if self._cache is None:\n self._cache = self._fill_cache()\n data = self._cache[index]\n if not isinstance(self.transform, Compose):\n raise ValueError(\"transform must be an instance of monai.transforms.Compose.\")\n for _transform in self.transform.transforms:\n if start_run or isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):\n # only need to deep copy data on first non-deterministic transform\n if not start_run:\n start_run = True\n data = deepcopy(data)\n data = apply_transform(_transform, data)\n return data\n\n\nclass SmartCacheDataset(Randomizable, CacheDataset):\n \"\"\"\n Re-implementation of the SmartCache mechanism in NVIDIA Clara-train SDK.\n At any time, the cache pool only keeps a subset of the whole dataset. In each epoch, only the items\n in the cache are used for training. This ensures that data needed for training is readily available,\n keeping GPU resources busy. Note that cached items may still have to go through a non-deterministic\n transform sequence before being fed to GPU. At the same time, another thread is preparing replacement\n items by applying the transform sequence to items not in cache. Once one epoch is completed, Smart\n Cache replaces the same number of items with replacement items.\n Smart Cache uses a simple `running window` algorithm to determine the cache content and replacement items.\n Let N be the configured number of objects in cache; and R be the number of replacement objects (R = ceil(N * r),\n where r is the configured replace rate).\n For more details, please refer to:\n https://docs.nvidia.com/clara/tlt-mi/clara-train-sdk-v3.0/nvmidl/additional_features/smart_cache.html#smart-cache\n If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,\n for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset\n\n For example, if we have 5 images: `[image1, image2, image3, image4, image5]`, and `cache_num=4`, `replace_rate=0.25`.\n so the actual training images cached and replaced for every epoch are as below::\n\n epoch 1: [image1, image2, image3, image4]\n epoch 2: [image2, image3, image4, image5]\n epoch 3: [image3, image4, image5, image1]\n epoch 3: [image4, image5, image1, image2]\n epoch N: [image[N % 5] ...]\n\n The usage of `SmartCacheDataset` contains 4 steps:\n\n 1. Initialize `SmartCacheDataset` object and cache for the first epoch.\n 2. Call `start()` to run replacement thread in background.\n 3. Call `update_cache()` before every epoch to replace training items.\n 4. Call `shutdown()` when training ends.\n\n Note:\n This replacement will not work for below cases:\n 1. Set the `multiprocessing_context` of DataLoader to `spawn`.\n 2. Run on windows(the default multiprocessing method is `spawn`) with `num_workers` greater than 0.\n 3. Set the `persistent_workers` of DataLoader to `True` with `num_workers` greater than 0.\n\n If using MONAI workflows, please add `SmartCacheHandler` to the handler list of trainer,\n otherwise, please make sure to call `start()`, `update_cache()`, `shutdown()` during training.\n\n Args:\n data: input data to load and transform to generate dataset for model.\n transform: transforms to execute operations on input data.\n replace_rate: percentage of the cached items to be replaced in every epoch.\n cache_num: number of items to be cached. Default is `sys.maxsize`.\n will take the minimum of (cache_num, data_length x cache_rate, data_length).\n cache_rate: percentage of cached data in total, default is 1.0 (cache all).\n will take the minimum of (cache_num, data_length x cache_rate, data_length).\n num_init_workers: the number of worker threads to initialize the cache for first epoch.\n If num_init_workers is None then the number returned by os.cpu_count() is used.\n num_replace_workers: the number of worker threads to prepare the replacement cache for every epoch.\n If num_replace_workers is None then the number returned by os.cpu_count() is used.\n progress: whether to display a progress bar when caching for the first epoch.\n shuffle: whether to shuffle the whole data list before preparing the cache content for first epoch.\n seed: random seed if shuffle is `True`, default to `0`.\n \"\"\"\n\n def __init__(\n self,\n data: Sequence,\n transform: Union[Sequence[Callable], Callable],\n replace_rate: float,\n cache_num: int = sys.maxsize,\n cache_rate: float = 1.0,\n num_init_workers: Optional[int] = None,\n num_replace_workers: Optional[int] = None,\n progress: bool = True,\n shuffle: bool = True,\n seed: int = 0,\n ) -> None:\n if shuffle:\n self.set_random_state(seed=seed)\n self.randomize(data)\n\n super().__init__(data, transform, cache_num, cache_rate, num_init_workers, progress)\n if self._cache is None:\n self._cache = self._fill_cache()\n if self.cache_num >= len(data):\n warnings.warn(\n \"cache_num is greater or equal than dataset length, fall back to regular monai.data.CacheDataset.\"\n )\n if replace_rate <= 0:\n raise ValueError(\"replace_rate must be greater than 0, otherwise, please use monai.data.CacheDataset.\")\n\n self.num_replace_workers: Optional[int] = num_replace_workers\n if self.num_replace_workers is not None:\n self.num_replace_workers = max(int(self.num_replace_workers), 1)\n\n self._total_num: int = len(data)\n self._replace_num: int = min(math.ceil(self.cache_num * replace_rate), len(data) - self.cache_num)\n self._replacements: List[Any] = [None for _ in range(self._replace_num)]\n self._replace_data_idx: List[int] = list(range(self._replace_num))\n\n self._start_pos: int = 0\n self._update_lock: threading.Lock = threading.Lock()\n self._round: int = 1\n self._replace_done: bool = False\n self._replace_mgr: Optional[threading.Thread] = None\n\n self._compute_data_idx()\n\n def randomize(self, data: Sequence) -> None:\n try:\n self.R.shuffle(data)\n except TypeError as e:\n warnings.warn(f\"input data can't be shuffled in SmartCacheDataset with numpy.random.shuffle(): {e}.\")\n\n def _compute_data_idx(self):\n \"\"\"\n Update the replacement data position in the total data.\n\n \"\"\"\n for i in range(self._replace_num):\n pos: int = self._start_pos + self.cache_num + i\n if pos >= self._total_num:\n pos -= self._total_num\n self._replace_data_idx[i] = pos\n\n def is_started(self):\n \"\"\"\n Check whether the replacement thread is already started.\n\n \"\"\"\n if self._replace_mgr is None:\n return False\n return self._replace_mgr.is_alive()\n\n def start(self):\n \"\"\"\n Start the background thread to replace training items for every epoch.\n\n \"\"\"\n if self._replace_mgr is None or not self.is_started():\n self._restart()\n\n def _restart(self):\n \"\"\"\n Restart background thread if killed for some reason.\n\n \"\"\"\n self._round = 1\n self._replace_mgr = threading.Thread(target=self.manage_replacement, daemon=True)\n self._replace_mgr.start()\n\n def _try_update_cache(self):\n \"\"\"\n Update the cache items with new replacement for current epoch.\n\n \"\"\"\n with self._update_lock:\n if not self._replace_done:\n return False\n\n del self._cache[: self._replace_num]\n self._cache.extend(self._replacements)\n\n self._start_pos += self._replace_num\n if self._start_pos >= self._total_num:\n self._start_pos -= self._total_num\n\n self._compute_data_idx()\n\n # ready for next round\n self._round += 1\n self._replace_done = False\n return True\n\n def update_cache(self):\n \"\"\"\n Update cache items for current epoch, need to call this function before every epoch.\n If the cache has been shutdown before, need to restart the `_replace_mgr` thread.\n\n \"\"\"\n if not self._replace_mgr.is_alive():\n self._restart()\n\n # make sure update is done\n while not self._try_update_cache():\n time.sleep(0.01)\n\n def _try_shutdown(self):\n \"\"\"\n Wait for thread lock to shut down the background thread.\n\n \"\"\"\n with self._update_lock:\n if self._replace_done:\n self._round = 0\n self._replace_done = False\n return True\n return False\n\n def shutdown(self):\n \"\"\"\n Shut down the background thread for replacement.\n\n \"\"\"\n if not self.is_started():\n return\n\n # wait until replace mgr is done the current round\n while not self._try_shutdown():\n time.sleep(0.01)\n self._replace_mgr.join()\n\n def _replace_cache_thread(self, index: int):\n \"\"\"\n Execute deterministic transforms on the new data for replacement.\n\n \"\"\"\n pos: int = self._replace_data_idx[index]\n self._replacements[index] = self._load_cache_item(pos)\n\n def _compute_replacements(self):\n \"\"\"\n Compute expected items for the replacement of next epoch, execute deterministic transforms.\n It can support multi-threads to accelerate the computation progress.\n\n \"\"\"\n with ThreadPool(self.num_replace_workers) as p:\n p.map(self._replace_cache_thread, list(range(self._replace_num)))\n\n self._replace_done = True\n\n def _try_manage_replacement(self, check_round):\n \"\"\"\n Wait thread lock and replace training items in the background thread.\n\n \"\"\"\n with self._update_lock:\n if self._round <= 0:\n # shutdown replacement\n self._replace_done = True\n return True, -1\n\n if self._round != check_round:\n self._compute_replacements()\n return False, self._round\n\n def manage_replacement(self):\n \"\"\"\n Background thread for replacement.\n\n \"\"\"\n check_round: int = -1\n done = False\n while not done:\n done, check_round = self._try_manage_replacement(check_round)\n time.sleep(0.01)\n\n def __len__(self):\n \"\"\"\n The dataset length is given by cache_num instead of len(data).\n\n \"\"\"\n return self.cache_num\n\n\nclass ZipDataset(Dataset):\n \"\"\"\n Zip several PyTorch datasets and output data(with the same index) together in a tuple.\n If the output of single dataset is already a tuple, flatten it and extend to the result.\n For example: if datasetA returns (img, imgmeta), datasetB returns (seg, segmeta),\n finally return (img, imgmeta, seg, segmeta).\n And if the datasets don't have same length, use the minimum length of them as the length\n of ZipDataset.\n If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,\n for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset\n\n Examples::\n\n >>> zip_data = ZipDataset([[1, 2, 3], [4, 5]])\n >>> print(len(zip_data))\n 2\n >>> for item in zip_data:\n >>> print(item)\n [1, 4]\n [2, 5]\n\n \"\"\"\n\n def __init__(self, datasets: Sequence, transform: Optional[Callable] = None) -> None:\n \"\"\"\n Args:\n datasets: list of datasets to zip together.\n transform: a callable data transform operates on the zipped item from `datasets`.\n \"\"\"\n super().__init__(list(datasets), transform=transform)\n\n def __len__(self) -> int:\n return min((len(dataset) for dataset in self.data))\n\n def _transform(self, index: int):\n def to_list(x):\n return list(x) if isinstance(x, (tuple, list)) else [x]\n\n data = []\n for dataset in self.data:\n data.extend(to_list(dataset[index]))\n if self.transform is not None:\n data = apply_transform(self.transform, data, map_items=False) # transform the list data\n # use tuple instead of list as the default collate_fn callback of MONAI DataLoader flattens nested lists\n return tuple(data)\n\n\nclass ArrayDataset(Randomizable, _TorchDataset):\n \"\"\"\n Dataset for segmentation and classification tasks based on array format input data and transforms.\n It ensures the same random seeds in the randomized transforms defined for image, segmentation and label.\n The `transform` can be :py:class:`monai.transforms.Compose` or any other callable object.\n For example:\n If train based on Nifti format images without metadata, all transforms can be composed::\n\n img_transform = Compose(\n [\n LoadImage(image_only=True),\n AddChannel(),\n RandAdjustContrast()\n ]\n )\n ArrayDataset(img_file_list, img_transform=img_transform)\n\n If training based on images and the metadata, the array transforms can not be composed\n because several transforms receives multiple parameters or return multiple values. Then Users need\n to define their own callable method to parse metadata from `LoadImage` or set `affine` matrix\n to `Spacing` transform::\n\n class TestCompose(Compose):\n def __call__(self, input_):\n img, metadata = self.transforms[0](input_)\n img = self.transforms[1](img)\n img, _, _ = self.transforms[2](img, metadata[\"affine\"])\n return self.transforms[3](img), metadata\n img_transform = TestCompose(\n [\n LoadImage(image_only=False),\n AddChannel(),\n Spacing(pixdim=(1.5, 1.5, 3.0)),\n RandAdjustContrast()\n ]\n )\n ArrayDataset(img_file_list, img_transform=img_transform)\n\n Examples::\n\n >>> ds = ArrayDataset([1, 2, 3, 4], lambda x: x + 0.1)\n >>> print(ds[0])\n 1.1\n\n >>> ds = ArrayDataset(img=[1, 2, 3, 4], seg=[5, 6, 7, 8])\n >>> print(ds[0])\n [1, 5]\n\n \"\"\"\n\n def __init__(\n self,\n img: Sequence,\n img_transform: Optional[Callable] = None,\n seg: Optional[Sequence] = None,\n seg_transform: Optional[Callable] = None,\n labels: Optional[Sequence] = None,\n label_transform: Optional[Callable] = None,\n ) -> None:\n \"\"\"\n Initializes the dataset with the filename lists. The transform `img_transform` is applied\n to the images and `seg_transform` to the segmentations.\n\n Args:\n img: sequence of images.\n img_transform: transform to apply to each element in `img`.\n seg: sequence of segmentations.\n seg_transform: transform to apply to each element in `seg`.\n labels: sequence of labels.\n label_transform: transform to apply to each element in `labels`.\n\n \"\"\"\n items = [(img, img_transform), (seg, seg_transform), (labels, label_transform)]\n self.set_random_state(seed=get_seed())\n datasets = [Dataset(x[0], x[1]) for x in items if x[0] is not None]\n self.dataset = datasets[0] if len(datasets) == 1 else ZipDataset(datasets)\n\n self._seed = 0 # transform synchronization seed\n\n def __len__(self) -> int:\n return len(self.dataset)\n\n def randomize(self, data: Optional[Any] = None) -> None:\n self._seed = self.R.randint(MAX_SEED, dtype=\"uint32\")\n\n def __getitem__(self, index: int):\n self.randomize()\n if isinstance(self.dataset, ZipDataset):\n # set transforms of each zip component\n for dataset in self.dataset.data:\n transform = getattr(dataset, \"transform\", None)\n if isinstance(transform, Randomizable):\n transform.set_random_state(seed=self._seed)\n transform = getattr(self.dataset, \"transform\", None)\n if isinstance(transform, Randomizable):\n transform.set_random_state(seed=self._seed)\n return self.dataset[index]\n\n\nclass NPZDictItemDataset(Dataset):\n \"\"\"\n Represents a dataset from a loaded NPZ file. The members of the file to load are named in the keys of `keys` and\n stored under the keyed name. All loaded arrays must have the same 0-dimension (batch) size. Items are always dicts\n mapping names to an item extracted from the loaded arrays.\n If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,\n for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset\n\n Args:\n npzfile: Path to .npz file or stream containing .npz file data\n keys: Maps keys to load from file to name to store in dataset\n transform: Transform to apply to batch dict\n other_keys: secondary data to load from file and store in dict `other_keys`, not returned by __getitem__\n \"\"\"\n\n def __init__(\n self,\n npzfile: Union[str, IO],\n keys: Dict[str, str],\n transform: Optional[Callable] = None,\n other_keys: Optional[Sequence[str]] = (),\n ):\n self.npzfile: Union[str, IO] = npzfile if isinstance(npzfile, str) else \"STREAM\"\n self.keys: Dict[str, str] = dict(keys)\n dat = np.load(npzfile)\n\n self.arrays = {storedk: dat[datak] for datak, storedk in self.keys.items()}\n self.length = self.arrays[first(self.keys.values())].shape[0]\n\n self.other_keys = {} if other_keys is None else {k: dat[k] for k in other_keys}\n\n for k, v in self.arrays.items():\n if v.shape[0] != self.length:\n raise ValueError(\n \"All loaded arrays must have the same first dimension \"\n f\"size {self.length}, array `{k}` has size {v.shape[0]}\"\n )\n\n super().__init__([], transform)\n\n def __len__(self):\n return self.length\n\n def _transform(self, index: int):\n data = {k: v[index] for k, v in self.arrays.items()}\n\n if self.transform is not None:\n data = apply_transform(self.transform, data)\n\n return data\n", "path": "monai/data/dataset.py"}]} |
gh_patches_debug_1592 | rasdani/github-patches | git_diff | nonebot__nonebot2-61 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
修复阻止事件传递信号无效的问题
抛出的异常应为 StopPropagation 的一个实例
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nonebot/message.py`
Content:
```
1 """
2 事件处理
3 ========
4
5 NoneBot 内部处理并按优先级分发事件给所有事件响应器,提供了多个插槽以进行事件的预处理等。
6 """
7
8 import asyncio
9 from datetime import datetime
10
11 from nonebot.log import logger
12 from nonebot.rule import TrieRule
13 from nonebot.utils import escape_tag
14 from nonebot.matcher import matchers, Matcher
15 from nonebot.typing import Set, Type, Union, Optional, Iterable, NoReturn, Bot, Event
16 from nonebot.exception import IgnoredException, StopPropagation
17 from nonebot.typing import EventPreProcessor, RunPreProcessor, EventPostProcessor, RunPostProcessor
18
19 _event_preprocessors: Set[EventPreProcessor] = set()
20 _event_postprocessors: Set[EventPostProcessor] = set()
21 _run_preprocessors: Set[RunPreProcessor] = set()
22 _run_postprocessors: Set[RunPostProcessor] = set()
23
24
25 def event_preprocessor(func: EventPreProcessor) -> EventPreProcessor:
26 """
27 :说明:
28 事件预处理。装饰一个函数,使它在每次接收到事件并分发给各响应器之前执行。
29 :参数:
30 事件预处理函数接收三个参数。
31
32 * ``bot: Bot``: Bot 对象
33 * ``event: Event``: Event 对象
34 * ``state: dict``: 当前 State
35 """
36 _event_preprocessors.add(func)
37 return func
38
39
40 def event_postprocessor(func: EventPostProcessor) -> EventPostProcessor:
41 """
42 :说明:
43 事件后处理。装饰一个函数,使它在每次接收到事件并分发给各响应器之后执行。
44 :参数:
45 事件后处理函数接收三个参数。
46
47 * ``bot: Bot``: Bot 对象
48 * ``event: Event``: Event 对象
49 * ``state: dict``: 当前事件运行前 State
50 """
51 _event_postprocessors.add(func)
52 return func
53
54
55 def run_preprocessor(func: RunPreProcessor) -> RunPreProcessor:
56 """
57 :说明:
58 运行预处理。装饰一个函数,使它在每次事件响应器运行前执行。
59 :参数:
60 运行预处理函数接收四个参数。
61
62 * ``matcher: Matcher``: 当前要运行的事件响应器
63 * ``bot: Bot``: Bot 对象
64 * ``event: Event``: Event 对象
65 * ``state: dict``: 当前 State
66 """
67 _run_preprocessors.add(func)
68 return func
69
70
71 def run_postprocessor(func: RunPostProcessor) -> RunPostProcessor:
72 """
73 :说明:
74 运行后处理。装饰一个函数,使它在每次事件响应器运行后执行。
75 :参数:
76 运行后处理函数接收五个参数。
77
78 * ``matcher: Matcher``: 运行完毕的事件响应器
79 * ``exception: Optional[Exception]``: 事件响应器运行错误(如果存在)
80 * ``bot: Bot``: Bot 对象
81 * ``event: Event``: Event 对象
82 * ``state: dict``: 当前 State
83 """
84 _run_postprocessors.add(func)
85 return func
86
87
88 async def _check_matcher(priority: int, bot: Bot, event: Event,
89 state: dict) -> Iterable[Type[Matcher]]:
90 current_matchers = matchers[priority].copy()
91
92 async def _check(Matcher: Type[Matcher], bot: Bot, event: Event,
93 state: dict) -> Optional[Type[Matcher]]:
94 try:
95 if await Matcher.check_perm(
96 bot, event) and await Matcher.check_rule(bot, event, state):
97 return Matcher
98 except Exception as e:
99 logger.opt(colors=True, exception=e).error(
100 f"<r><bg #f8bbd0>Rule check failed for {Matcher}.</bg #f8bbd0></r>"
101 )
102 return None
103
104 async def _check_expire(Matcher: Type[Matcher]) -> Optional[Type[Matcher]]:
105 if Matcher.temp or (Matcher.expire_time and
106 datetime.now() > Matcher.expire_time):
107 return Matcher
108 return None
109
110 checking_tasks = [
111 _check(Matcher, bot, event, state) for Matcher in current_matchers
112 ]
113 checking_expire_tasks = [
114 _check_expire(Matcher) for Matcher in current_matchers
115 ]
116 results = await asyncio.gather(*checking_tasks, return_exceptions=True)
117 expired = await asyncio.gather(*checking_expire_tasks)
118 for expired_matcher in filter(lambda x: x and x in results, expired):
119 try:
120 matchers[priority].remove(expired_matcher)
121 except Exception:
122 pass
123 return filter(lambda x: x, results)
124
125
126 async def _run_matcher(Matcher: Type[Matcher], bot: Bot, event: Event,
127 state: dict) -> Union[None, NoReturn]:
128 logger.info(f"Event will be handled by {Matcher}")
129
130 matcher = Matcher()
131
132 coros = list(
133 map(lambda x: x(matcher, bot, event, state), _run_preprocessors))
134 if coros:
135 try:
136 await asyncio.gather(*coros)
137 except IgnoredException:
138 logger.opt(colors=True).info(
139 f"Matcher {matcher} running is <b>cancelled</b>")
140 return
141 except Exception as e:
142 logger.opt(colors=True, exception=e).error(
143 "<r><bg #f8bbd0>Error when running RunPreProcessors. "
144 "Running cancelled!</bg #f8bbd0></r>")
145 return
146
147 exception = None
148
149 try:
150 logger.debug(f"Running matcher {matcher}")
151 await matcher.run(bot, event, state)
152 except Exception as e:
153 logger.opt(colors=True, exception=e).error(
154 f"<r><bg #f8bbd0>Running matcher {matcher} failed.</bg #f8bbd0></r>"
155 )
156 exception = e
157
158 coros = list(
159 map(lambda x: x(matcher, exception, bot, event, state),
160 _run_postprocessors))
161 if coros:
162 try:
163 await asyncio.gather(*coros)
164 except Exception as e:
165 logger.opt(colors=True, exception=e).error(
166 "<r><bg #f8bbd0>Error when running RunPostProcessors</bg #f8bbd0></r>"
167 )
168
169 if matcher.block:
170 raise StopPropagation
171
172
173 async def handle_event(bot: Bot, event: Event):
174 """
175 :说明:
176 处理一个事件。调用该函数以实现分发事件。
177 :参数:
178 * ``bot: Bot``: Bot 对象
179 * ``event: Event``: Event 对象
180 :示例:
181
182 .. code-block:: python
183
184 import asyncio
185 asyncio.create_task(handle_event(bot, event))
186 """
187 show_log = True
188 log_msg = f"<m>{bot.type.upper()} </m>| {event.self_id} [{event.name}]: "
189 if event.type == "message":
190 log_msg += f"Message {event.id} from "
191 log_msg += str(event.user_id)
192 if event.detail_type == "group":
193 log_msg += f"@[群:{event.group_id}]:"
194
195 log_msg += ' "' + "".join(
196 map(
197 lambda x: escape_tag(str(x))
198 if x.type == "text" else f"<le>{escape_tag(str(x))}</le>",
199 event.message)) + '"' # type: ignore
200 elif event.type == "notice":
201 log_msg += f"Notice {event.raw_event}"
202 elif event.type == "request":
203 log_msg += f"Request {event.raw_event}"
204 elif event.type == "meta_event":
205 # log_msg += f"MetaEvent {event.detail_type}"
206 show_log = False
207 if show_log:
208 logger.opt(colors=True).info(log_msg)
209
210 state = {}
211 coros = list(map(lambda x: x(bot, event, state), _event_preprocessors))
212 if coros:
213 try:
214 logger.debug("Running PreProcessors...")
215 await asyncio.gather(*coros)
216 except IgnoredException:
217 logger.opt(
218 colors=True).info(f"Event {event.name} is <b>ignored</b>")
219 return
220 except Exception as e:
221 logger.opt(colors=True, exception=e).error(
222 "<r><bg #f8bbd0>Error when running EventPreProcessors. "
223 "Event ignored!</bg #f8bbd0></r>")
224 return
225
226 # Trie Match
227 _, _ = TrieRule.get_value(bot, event, state)
228
229 break_flag = False
230 for priority in sorted(matchers.keys()):
231 if break_flag:
232 break
233
234 if show_log:
235 logger.debug(f"Checking for matchers in priority {priority}...")
236
237 run_matchers = await _check_matcher(priority, bot, event, state)
238
239 pending_tasks = [
240 _run_matcher(matcher, bot, event, state.copy())
241 for matcher in run_matchers
242 ]
243
244 results = await asyncio.gather(*pending_tasks, return_exceptions=True)
245
246 for result in results:
247 if result is StopPropagation:
248 if not break_flag:
249 break_flag = True
250 logger.debug("Stop event propagation")
251
252 coros = list(map(lambda x: x(bot, event, state), _event_postprocessors))
253 if coros:
254 try:
255 logger.debug("Running PostProcessors...")
256 await asyncio.gather(*coros)
257 except Exception as e:
258 logger.opt(colors=True, exception=e).error(
259 "<r><bg #f8bbd0>Error when running EventPostProcessors</bg #f8bbd0></r>"
260 )
261
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nonebot/message.py b/nonebot/message.py
--- a/nonebot/message.py
+++ b/nonebot/message.py
@@ -244,7 +244,7 @@
results = await asyncio.gather(*pending_tasks, return_exceptions=True)
for result in results:
- if result is StopPropagation:
+ if isinstance(result, StopPropagation):
if not break_flag:
break_flag = True
logger.debug("Stop event propagation")
| {"golden_diff": "diff --git a/nonebot/message.py b/nonebot/message.py\n--- a/nonebot/message.py\n+++ b/nonebot/message.py\n@@ -244,7 +244,7 @@\n results = await asyncio.gather(*pending_tasks, return_exceptions=True)\n \n for result in results:\n- if result is StopPropagation:\n+ if isinstance(result, StopPropagation):\n if not break_flag:\n break_flag = True\n logger.debug(\"Stop event propagation\")\n", "issue": "\u4fee\u590d\u963b\u6b62\u4e8b\u4ef6\u4f20\u9012\u4fe1\u53f7\u65e0\u6548\u7684\u95ee\u9898\n\u629b\u51fa\u7684\u5f02\u5e38\u5e94\u4e3a StopPropagation \u7684\u4e00\u4e2a\u5b9e\u4f8b\n", "before_files": [{"content": "\"\"\"\n\u4e8b\u4ef6\u5904\u7406\n========\n\nNoneBot \u5185\u90e8\u5904\u7406\u5e76\u6309\u4f18\u5148\u7ea7\u5206\u53d1\u4e8b\u4ef6\u7ed9\u6240\u6709\u4e8b\u4ef6\u54cd\u5e94\u5668\uff0c\u63d0\u4f9b\u4e86\u591a\u4e2a\u63d2\u69fd\u4ee5\u8fdb\u884c\u4e8b\u4ef6\u7684\u9884\u5904\u7406\u7b49\u3002\n\"\"\"\n\nimport asyncio\nfrom datetime import datetime\n\nfrom nonebot.log import logger\nfrom nonebot.rule import TrieRule\nfrom nonebot.utils import escape_tag\nfrom nonebot.matcher import matchers, Matcher\nfrom nonebot.typing import Set, Type, Union, Optional, Iterable, NoReturn, Bot, Event\nfrom nonebot.exception import IgnoredException, StopPropagation\nfrom nonebot.typing import EventPreProcessor, RunPreProcessor, EventPostProcessor, RunPostProcessor\n\n_event_preprocessors: Set[EventPreProcessor] = set()\n_event_postprocessors: Set[EventPostProcessor] = set()\n_run_preprocessors: Set[RunPreProcessor] = set()\n_run_postprocessors: Set[RunPostProcessor] = set()\n\n\ndef event_preprocessor(func: EventPreProcessor) -> EventPreProcessor:\n \"\"\"\n :\u8bf4\u660e:\n \u4e8b\u4ef6\u9884\u5904\u7406\u3002\u88c5\u9970\u4e00\u4e2a\u51fd\u6570\uff0c\u4f7f\u5b83\u5728\u6bcf\u6b21\u63a5\u6536\u5230\u4e8b\u4ef6\u5e76\u5206\u53d1\u7ed9\u5404\u54cd\u5e94\u5668\u4e4b\u524d\u6267\u884c\u3002\n :\u53c2\u6570:\n \u4e8b\u4ef6\u9884\u5904\u7406\u51fd\u6570\u63a5\u6536\u4e09\u4e2a\u53c2\u6570\u3002\n\n * ``bot: Bot``: Bot \u5bf9\u8c61\n * ``event: Event``: Event \u5bf9\u8c61\n * ``state: dict``: \u5f53\u524d State\n \"\"\"\n _event_preprocessors.add(func)\n return func\n\n\ndef event_postprocessor(func: EventPostProcessor) -> EventPostProcessor:\n \"\"\"\n :\u8bf4\u660e:\n \u4e8b\u4ef6\u540e\u5904\u7406\u3002\u88c5\u9970\u4e00\u4e2a\u51fd\u6570\uff0c\u4f7f\u5b83\u5728\u6bcf\u6b21\u63a5\u6536\u5230\u4e8b\u4ef6\u5e76\u5206\u53d1\u7ed9\u5404\u54cd\u5e94\u5668\u4e4b\u540e\u6267\u884c\u3002\n :\u53c2\u6570:\n \u4e8b\u4ef6\u540e\u5904\u7406\u51fd\u6570\u63a5\u6536\u4e09\u4e2a\u53c2\u6570\u3002\n\n * ``bot: Bot``: Bot \u5bf9\u8c61\n * ``event: Event``: Event \u5bf9\u8c61\n * ``state: dict``: \u5f53\u524d\u4e8b\u4ef6\u8fd0\u884c\u524d State\n \"\"\"\n _event_postprocessors.add(func)\n return func\n\n\ndef run_preprocessor(func: RunPreProcessor) -> RunPreProcessor:\n \"\"\"\n :\u8bf4\u660e:\n \u8fd0\u884c\u9884\u5904\u7406\u3002\u88c5\u9970\u4e00\u4e2a\u51fd\u6570\uff0c\u4f7f\u5b83\u5728\u6bcf\u6b21\u4e8b\u4ef6\u54cd\u5e94\u5668\u8fd0\u884c\u524d\u6267\u884c\u3002\n :\u53c2\u6570:\n \u8fd0\u884c\u9884\u5904\u7406\u51fd\u6570\u63a5\u6536\u56db\u4e2a\u53c2\u6570\u3002\n\n * ``matcher: Matcher``: \u5f53\u524d\u8981\u8fd0\u884c\u7684\u4e8b\u4ef6\u54cd\u5e94\u5668\n * ``bot: Bot``: Bot \u5bf9\u8c61\n * ``event: Event``: Event \u5bf9\u8c61\n * ``state: dict``: \u5f53\u524d State\n \"\"\"\n _run_preprocessors.add(func)\n return func\n\n\ndef run_postprocessor(func: RunPostProcessor) -> RunPostProcessor:\n \"\"\"\n :\u8bf4\u660e:\n \u8fd0\u884c\u540e\u5904\u7406\u3002\u88c5\u9970\u4e00\u4e2a\u51fd\u6570\uff0c\u4f7f\u5b83\u5728\u6bcf\u6b21\u4e8b\u4ef6\u54cd\u5e94\u5668\u8fd0\u884c\u540e\u6267\u884c\u3002\n :\u53c2\u6570:\n \u8fd0\u884c\u540e\u5904\u7406\u51fd\u6570\u63a5\u6536\u4e94\u4e2a\u53c2\u6570\u3002\n\n * ``matcher: Matcher``: \u8fd0\u884c\u5b8c\u6bd5\u7684\u4e8b\u4ef6\u54cd\u5e94\u5668\n * ``exception: Optional[Exception]``: \u4e8b\u4ef6\u54cd\u5e94\u5668\u8fd0\u884c\u9519\u8bef\uff08\u5982\u679c\u5b58\u5728\uff09\n * ``bot: Bot``: Bot \u5bf9\u8c61\n * ``event: Event``: Event \u5bf9\u8c61\n * ``state: dict``: \u5f53\u524d State\n \"\"\"\n _run_postprocessors.add(func)\n return func\n\n\nasync def _check_matcher(priority: int, bot: Bot, event: Event,\n state: dict) -> Iterable[Type[Matcher]]:\n current_matchers = matchers[priority].copy()\n\n async def _check(Matcher: Type[Matcher], bot: Bot, event: Event,\n state: dict) -> Optional[Type[Matcher]]:\n try:\n if await Matcher.check_perm(\n bot, event) and await Matcher.check_rule(bot, event, state):\n return Matcher\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n f\"<r><bg #f8bbd0>Rule check failed for {Matcher}.</bg #f8bbd0></r>\"\n )\n return None\n\n async def _check_expire(Matcher: Type[Matcher]) -> Optional[Type[Matcher]]:\n if Matcher.temp or (Matcher.expire_time and\n datetime.now() > Matcher.expire_time):\n return Matcher\n return None\n\n checking_tasks = [\n _check(Matcher, bot, event, state) for Matcher in current_matchers\n ]\n checking_expire_tasks = [\n _check_expire(Matcher) for Matcher in current_matchers\n ]\n results = await asyncio.gather(*checking_tasks, return_exceptions=True)\n expired = await asyncio.gather(*checking_expire_tasks)\n for expired_matcher in filter(lambda x: x and x in results, expired):\n try:\n matchers[priority].remove(expired_matcher)\n except Exception:\n pass\n return filter(lambda x: x, results)\n\n\nasync def _run_matcher(Matcher: Type[Matcher], bot: Bot, event: Event,\n state: dict) -> Union[None, NoReturn]:\n logger.info(f\"Event will be handled by {Matcher}\")\n\n matcher = Matcher()\n\n coros = list(\n map(lambda x: x(matcher, bot, event, state), _run_preprocessors))\n if coros:\n try:\n await asyncio.gather(*coros)\n except IgnoredException:\n logger.opt(colors=True).info(\n f\"Matcher {matcher} running is <b>cancelled</b>\")\n return\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running RunPreProcessors. \"\n \"Running cancelled!</bg #f8bbd0></r>\")\n return\n\n exception = None\n\n try:\n logger.debug(f\"Running matcher {matcher}\")\n await matcher.run(bot, event, state)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n f\"<r><bg #f8bbd0>Running matcher {matcher} failed.</bg #f8bbd0></r>\"\n )\n exception = e\n\n coros = list(\n map(lambda x: x(matcher, exception, bot, event, state),\n _run_postprocessors))\n if coros:\n try:\n await asyncio.gather(*coros)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running RunPostProcessors</bg #f8bbd0></r>\"\n )\n\n if matcher.block:\n raise StopPropagation\n\n\nasync def handle_event(bot: Bot, event: Event):\n \"\"\"\n :\u8bf4\u660e:\n \u5904\u7406\u4e00\u4e2a\u4e8b\u4ef6\u3002\u8c03\u7528\u8be5\u51fd\u6570\u4ee5\u5b9e\u73b0\u5206\u53d1\u4e8b\u4ef6\u3002\n :\u53c2\u6570:\n * ``bot: Bot``: Bot \u5bf9\u8c61\n * ``event: Event``: Event \u5bf9\u8c61\n :\u793a\u4f8b:\n\n .. code-block:: python\n\n import asyncio\n asyncio.create_task(handle_event(bot, event))\n \"\"\"\n show_log = True\n log_msg = f\"<m>{bot.type.upper()} </m>| {event.self_id} [{event.name}]: \"\n if event.type == \"message\":\n log_msg += f\"Message {event.id} from \"\n log_msg += str(event.user_id)\n if event.detail_type == \"group\":\n log_msg += f\"@[\u7fa4:{event.group_id}]:\"\n\n log_msg += ' \"' + \"\".join(\n map(\n lambda x: escape_tag(str(x))\n if x.type == \"text\" else f\"<le>{escape_tag(str(x))}</le>\",\n event.message)) + '\"' # type: ignore\n elif event.type == \"notice\":\n log_msg += f\"Notice {event.raw_event}\"\n elif event.type == \"request\":\n log_msg += f\"Request {event.raw_event}\"\n elif event.type == \"meta_event\":\n # log_msg += f\"MetaEvent {event.detail_type}\"\n show_log = False\n if show_log:\n logger.opt(colors=True).info(log_msg)\n\n state = {}\n coros = list(map(lambda x: x(bot, event, state), _event_preprocessors))\n if coros:\n try:\n logger.debug(\"Running PreProcessors...\")\n await asyncio.gather(*coros)\n except IgnoredException:\n logger.opt(\n colors=True).info(f\"Event {event.name} is <b>ignored</b>\")\n return\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running EventPreProcessors. \"\n \"Event ignored!</bg #f8bbd0></r>\")\n return\n\n # Trie Match\n _, _ = TrieRule.get_value(bot, event, state)\n\n break_flag = False\n for priority in sorted(matchers.keys()):\n if break_flag:\n break\n\n if show_log:\n logger.debug(f\"Checking for matchers in priority {priority}...\")\n\n run_matchers = await _check_matcher(priority, bot, event, state)\n\n pending_tasks = [\n _run_matcher(matcher, bot, event, state.copy())\n for matcher in run_matchers\n ]\n\n results = await asyncio.gather(*pending_tasks, return_exceptions=True)\n\n for result in results:\n if result is StopPropagation:\n if not break_flag:\n break_flag = True\n logger.debug(\"Stop event propagation\")\n\n coros = list(map(lambda x: x(bot, event, state), _event_postprocessors))\n if coros:\n try:\n logger.debug(\"Running PostProcessors...\")\n await asyncio.gather(*coros)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running EventPostProcessors</bg #f8bbd0></r>\"\n )\n", "path": "nonebot/message.py"}], "after_files": [{"content": "\"\"\"\n\u4e8b\u4ef6\u5904\u7406\n========\n\nNoneBot \u5185\u90e8\u5904\u7406\u5e76\u6309\u4f18\u5148\u7ea7\u5206\u53d1\u4e8b\u4ef6\u7ed9\u6240\u6709\u4e8b\u4ef6\u54cd\u5e94\u5668\uff0c\u63d0\u4f9b\u4e86\u591a\u4e2a\u63d2\u69fd\u4ee5\u8fdb\u884c\u4e8b\u4ef6\u7684\u9884\u5904\u7406\u7b49\u3002\n\"\"\"\n\nimport asyncio\nfrom datetime import datetime\n\nfrom nonebot.log import logger\nfrom nonebot.rule import TrieRule\nfrom nonebot.utils import escape_tag\nfrom nonebot.matcher import matchers, Matcher\nfrom nonebot.typing import Set, Type, Union, Optional, Iterable, NoReturn, Bot, Event\nfrom nonebot.exception import IgnoredException, StopPropagation\nfrom nonebot.typing import EventPreProcessor, RunPreProcessor, EventPostProcessor, RunPostProcessor\n\n_event_preprocessors: Set[EventPreProcessor] = set()\n_event_postprocessors: Set[EventPostProcessor] = set()\n_run_preprocessors: Set[RunPreProcessor] = set()\n_run_postprocessors: Set[RunPostProcessor] = set()\n\n\ndef event_preprocessor(func: EventPreProcessor) -> EventPreProcessor:\n \"\"\"\n :\u8bf4\u660e:\n \u4e8b\u4ef6\u9884\u5904\u7406\u3002\u88c5\u9970\u4e00\u4e2a\u51fd\u6570\uff0c\u4f7f\u5b83\u5728\u6bcf\u6b21\u63a5\u6536\u5230\u4e8b\u4ef6\u5e76\u5206\u53d1\u7ed9\u5404\u54cd\u5e94\u5668\u4e4b\u524d\u6267\u884c\u3002\n :\u53c2\u6570:\n \u4e8b\u4ef6\u9884\u5904\u7406\u51fd\u6570\u63a5\u6536\u4e09\u4e2a\u53c2\u6570\u3002\n\n * ``bot: Bot``: Bot \u5bf9\u8c61\n * ``event: Event``: Event \u5bf9\u8c61\n * ``state: dict``: \u5f53\u524d State\n \"\"\"\n _event_preprocessors.add(func)\n return func\n\n\ndef event_postprocessor(func: EventPostProcessor) -> EventPostProcessor:\n \"\"\"\n :\u8bf4\u660e:\n \u4e8b\u4ef6\u540e\u5904\u7406\u3002\u88c5\u9970\u4e00\u4e2a\u51fd\u6570\uff0c\u4f7f\u5b83\u5728\u6bcf\u6b21\u63a5\u6536\u5230\u4e8b\u4ef6\u5e76\u5206\u53d1\u7ed9\u5404\u54cd\u5e94\u5668\u4e4b\u540e\u6267\u884c\u3002\n :\u53c2\u6570:\n \u4e8b\u4ef6\u540e\u5904\u7406\u51fd\u6570\u63a5\u6536\u4e09\u4e2a\u53c2\u6570\u3002\n\n * ``bot: Bot``: Bot \u5bf9\u8c61\n * ``event: Event``: Event \u5bf9\u8c61\n * ``state: dict``: \u5f53\u524d\u4e8b\u4ef6\u8fd0\u884c\u524d State\n \"\"\"\n _event_postprocessors.add(func)\n return func\n\n\ndef run_preprocessor(func: RunPreProcessor) -> RunPreProcessor:\n \"\"\"\n :\u8bf4\u660e:\n \u8fd0\u884c\u9884\u5904\u7406\u3002\u88c5\u9970\u4e00\u4e2a\u51fd\u6570\uff0c\u4f7f\u5b83\u5728\u6bcf\u6b21\u4e8b\u4ef6\u54cd\u5e94\u5668\u8fd0\u884c\u524d\u6267\u884c\u3002\n :\u53c2\u6570:\n \u8fd0\u884c\u9884\u5904\u7406\u51fd\u6570\u63a5\u6536\u56db\u4e2a\u53c2\u6570\u3002\n\n * ``matcher: Matcher``: \u5f53\u524d\u8981\u8fd0\u884c\u7684\u4e8b\u4ef6\u54cd\u5e94\u5668\n * ``bot: Bot``: Bot \u5bf9\u8c61\n * ``event: Event``: Event \u5bf9\u8c61\n * ``state: dict``: \u5f53\u524d State\n \"\"\"\n _run_preprocessors.add(func)\n return func\n\n\ndef run_postprocessor(func: RunPostProcessor) -> RunPostProcessor:\n \"\"\"\n :\u8bf4\u660e:\n \u8fd0\u884c\u540e\u5904\u7406\u3002\u88c5\u9970\u4e00\u4e2a\u51fd\u6570\uff0c\u4f7f\u5b83\u5728\u6bcf\u6b21\u4e8b\u4ef6\u54cd\u5e94\u5668\u8fd0\u884c\u540e\u6267\u884c\u3002\n :\u53c2\u6570:\n \u8fd0\u884c\u540e\u5904\u7406\u51fd\u6570\u63a5\u6536\u4e94\u4e2a\u53c2\u6570\u3002\n\n * ``matcher: Matcher``: \u8fd0\u884c\u5b8c\u6bd5\u7684\u4e8b\u4ef6\u54cd\u5e94\u5668\n * ``exception: Optional[Exception]``: \u4e8b\u4ef6\u54cd\u5e94\u5668\u8fd0\u884c\u9519\u8bef\uff08\u5982\u679c\u5b58\u5728\uff09\n * ``bot: Bot``: Bot \u5bf9\u8c61\n * ``event: Event``: Event \u5bf9\u8c61\n * ``state: dict``: \u5f53\u524d State\n \"\"\"\n _run_postprocessors.add(func)\n return func\n\n\nasync def _check_matcher(priority: int, bot: Bot, event: Event,\n state: dict) -> Iterable[Type[Matcher]]:\n current_matchers = matchers[priority].copy()\n\n async def _check(Matcher: Type[Matcher], bot: Bot, event: Event,\n state: dict) -> Optional[Type[Matcher]]:\n try:\n if await Matcher.check_perm(\n bot, event) and await Matcher.check_rule(bot, event, state):\n return Matcher\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n f\"<r><bg #f8bbd0>Rule check failed for {Matcher}.</bg #f8bbd0></r>\"\n )\n return None\n\n async def _check_expire(Matcher: Type[Matcher]) -> Optional[Type[Matcher]]:\n if Matcher.temp or (Matcher.expire_time and\n datetime.now() > Matcher.expire_time):\n return Matcher\n return None\n\n checking_tasks = [\n _check(Matcher, bot, event, state) for Matcher in current_matchers\n ]\n checking_expire_tasks = [\n _check_expire(Matcher) for Matcher in current_matchers\n ]\n results = await asyncio.gather(*checking_tasks, return_exceptions=True)\n expired = await asyncio.gather(*checking_expire_tasks)\n for expired_matcher in filter(lambda x: x and x in results, expired):\n try:\n matchers[priority].remove(expired_matcher)\n except Exception:\n pass\n return filter(lambda x: x, results)\n\n\nasync def _run_matcher(Matcher: Type[Matcher], bot: Bot, event: Event,\n state: dict) -> Union[None, NoReturn]:\n logger.info(f\"Event will be handled by {Matcher}\")\n\n matcher = Matcher()\n\n coros = list(\n map(lambda x: x(matcher, bot, event, state), _run_preprocessors))\n if coros:\n try:\n await asyncio.gather(*coros)\n except IgnoredException:\n logger.opt(colors=True).info(\n f\"Matcher {matcher} running is <b>cancelled</b>\")\n return\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running RunPreProcessors. \"\n \"Running cancelled!</bg #f8bbd0></r>\")\n return\n\n exception = None\n\n try:\n logger.debug(f\"Running matcher {matcher}\")\n await matcher.run(bot, event, state)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n f\"<r><bg #f8bbd0>Running matcher {matcher} failed.</bg #f8bbd0></r>\"\n )\n exception = e\n\n coros = list(\n map(lambda x: x(matcher, exception, bot, event, state),\n _run_postprocessors))\n if coros:\n try:\n await asyncio.gather(*coros)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running RunPostProcessors</bg #f8bbd0></r>\"\n )\n\n if matcher.block:\n raise StopPropagation\n\n\nasync def handle_event(bot: Bot, event: Event):\n \"\"\"\n :\u8bf4\u660e:\n \u5904\u7406\u4e00\u4e2a\u4e8b\u4ef6\u3002\u8c03\u7528\u8be5\u51fd\u6570\u4ee5\u5b9e\u73b0\u5206\u53d1\u4e8b\u4ef6\u3002\n :\u53c2\u6570:\n * ``bot: Bot``: Bot \u5bf9\u8c61\n * ``event: Event``: Event \u5bf9\u8c61\n :\u793a\u4f8b:\n\n .. code-block:: python\n\n import asyncio\n asyncio.create_task(handle_event(bot, event))\n \"\"\"\n show_log = True\n log_msg = f\"<m>{bot.type.upper()} </m>| {event.self_id} [{event.name}]: \"\n if event.type == \"message\":\n log_msg += f\"Message {event.id} from \"\n log_msg += str(event.user_id)\n if event.detail_type == \"group\":\n log_msg += f\"@[\u7fa4:{event.group_id}]:\"\n\n log_msg += ' \"' + \"\".join(\n map(\n lambda x: escape_tag(str(x))\n if x.type == \"text\" else f\"<le>{escape_tag(str(x))}</le>\",\n event.message)) + '\"' # type: ignore\n elif event.type == \"notice\":\n log_msg += f\"Notice {event.raw_event}\"\n elif event.type == \"request\":\n log_msg += f\"Request {event.raw_event}\"\n elif event.type == \"meta_event\":\n # log_msg += f\"MetaEvent {event.detail_type}\"\n show_log = False\n if show_log:\n logger.opt(colors=True).info(log_msg)\n\n state = {}\n coros = list(map(lambda x: x(bot, event, state), _event_preprocessors))\n if coros:\n try:\n logger.debug(\"Running PreProcessors...\")\n await asyncio.gather(*coros)\n except IgnoredException:\n logger.opt(\n colors=True).info(f\"Event {event.name} is <b>ignored</b>\")\n return\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running EventPreProcessors. \"\n \"Event ignored!</bg #f8bbd0></r>\")\n return\n\n # Trie Match\n _, _ = TrieRule.get_value(bot, event, state)\n\n break_flag = False\n for priority in sorted(matchers.keys()):\n if break_flag:\n break\n\n if show_log:\n logger.debug(f\"Checking for matchers in priority {priority}...\")\n\n run_matchers = await _check_matcher(priority, bot, event, state)\n\n pending_tasks = [\n _run_matcher(matcher, bot, event, state.copy())\n for matcher in run_matchers\n ]\n\n results = await asyncio.gather(*pending_tasks, return_exceptions=True)\n\n for result in results:\n if isinstance(result, StopPropagation):\n if not break_flag:\n break_flag = True\n logger.debug(\"Stop event propagation\")\n\n coros = list(map(lambda x: x(bot, event, state), _event_postprocessors))\n if coros:\n try:\n logger.debug(\"Running PostProcessors...\")\n await asyncio.gather(*coros)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running EventPostProcessors</bg #f8bbd0></r>\"\n )\n", "path": "nonebot/message.py"}]} |
gh_patches_debug_1593 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-1551 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
User/Contact bug: Signals double saving, overwriting data for test cases
### Current Behavior
A bug was introduced [with a PR](https://github.com/cisagov/manage.get.gov/pull/1491) that fixed our oidc login clearing out user information. To fix this, the Contact object was linked to the User object, such that they would remain in sync.
However, this introduced a sneaky double-save bug wherein if signals get called more than once for the same contact object (as we found happens in our test cases), it overrides information when it should not do so.


### Expected Behavior
When a `Contact` object is saved, the `save()` function checks to see if `self.user` is not None. When it is not, it will update the `self.user` object with whatever value is located at `self`. It appears that in certain situations, (such as our test cases), the `User` field is behaving as if it were a one-to-many relationship (one Contact object to many User objects). This should not be the case, and `Contact` should only update one `User` field.
### Steps to Reproduce
1. Create a new test case in `test_admin.py`, and create 3 fake User objects. Populate them with unique data. Then, log their values. Note that their data seems to get overwritten.
### Environment
_No response_
### Additional Context
This bug seems to be originating in the `signals.py` file in the ` handle_profile` class. By passing in a flag that disables the save behavior (or just commenting it out), this issue seems to resolve.

### Issue Links
🔄 Relates to: [#1464 / #1468](https://github.com/cisagov/manage.get.gov/pull/1491)
🔄 Relates to: [this PR](https://github.com/cisagov/manage.get.gov/pull/1543) (pinpoints where the behavior is occurring but does not solve it)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/registrar/models/contact.py`
Content:
```
1 from django.db import models
2
3 from phonenumber_field.modelfields import PhoneNumberField # type: ignore
4
5 from .utility.time_stamped_model import TimeStampedModel
6
7
8 class Contact(TimeStampedModel):
9
10 """Contact information follows a similar pattern for each contact."""
11
12 user = models.OneToOneField(
13 "registrar.User",
14 null=True,
15 blank=True,
16 on_delete=models.SET_NULL,
17 )
18
19 first_name = models.TextField(
20 null=True,
21 blank=True,
22 help_text="First name",
23 verbose_name="first name / given name",
24 db_index=True,
25 )
26 middle_name = models.TextField(
27 null=True,
28 blank=True,
29 help_text="Middle name (optional)",
30 )
31 last_name = models.TextField(
32 null=True,
33 blank=True,
34 help_text="Last name",
35 verbose_name="last name / family name",
36 db_index=True,
37 )
38 title = models.TextField(
39 null=True,
40 blank=True,
41 help_text="Title",
42 verbose_name="title or role in your organization",
43 )
44 email = models.EmailField(
45 null=True,
46 blank=True,
47 help_text="Email",
48 db_index=True,
49 )
50 phone = PhoneNumberField(
51 null=True,
52 blank=True,
53 help_text="Phone",
54 db_index=True,
55 )
56
57 def get_formatted_name(self):
58 """Returns the contact's name in Western order."""
59 names = [n for n in [self.first_name, self.middle_name, self.last_name] if n]
60 return " ".join(names) if names else "Unknown"
61
62 def save(self, *args, **kwargs):
63 # Call the parent class's save method to perform the actual save
64 super().save(*args, **kwargs)
65
66 # Update the related User object's first_name and last_name
67 if self.user:
68 self.user.first_name = self.first_name
69 self.user.last_name = self.last_name
70 self.user.save()
71
72 def __str__(self):
73 if self.first_name or self.last_name:
74 return self.get_formatted_name()
75 elif self.email:
76 return self.email
77 elif self.pk:
78 return str(self.pk)
79 else:
80 return ""
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/registrar/models/contact.py b/src/registrar/models/contact.py
--- a/src/registrar/models/contact.py
+++ b/src/registrar/models/contact.py
@@ -64,7 +64,7 @@
super().save(*args, **kwargs)
# Update the related User object's first_name and last_name
- if self.user:
+ if self.user and (not self.user.first_name or not self.user.last_name):
self.user.first_name = self.first_name
self.user.last_name = self.last_name
self.user.save()
| {"golden_diff": "diff --git a/src/registrar/models/contact.py b/src/registrar/models/contact.py\n--- a/src/registrar/models/contact.py\n+++ b/src/registrar/models/contact.py\n@@ -64,7 +64,7 @@\n super().save(*args, **kwargs)\n \n # Update the related User object's first_name and last_name\n- if self.user:\n+ if self.user and (not self.user.first_name or not self.user.last_name):\n self.user.first_name = self.first_name\n self.user.last_name = self.last_name\n self.user.save()\n", "issue": "User/Contact bug: Signals double saving, overwriting data for test cases\n### Current Behavior\r\n\r\nA bug was introduced [with a PR](https://github.com/cisagov/manage.get.gov/pull/1491) that fixed our oidc login clearing out user information. To fix this, the Contact object was linked to the User object, such that they would remain in sync.\r\n\r\nHowever, this introduced a sneaky double-save bug wherein if signals get called more than once for the same contact object (as we found happens in our test cases), it overrides information when it should not do so.\r\n\r\n\r\n\r\n\r\n### Expected Behavior\r\n\r\nWhen a `Contact` object is saved, the `save()` function checks to see if `self.user` is not None. When it is not, it will update the `self.user` object with whatever value is located at `self`. It appears that in certain situations, (such as our test cases), the `User` field is behaving as if it were a one-to-many relationship (one Contact object to many User objects). This should not be the case, and `Contact` should only update one `User` field.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Create a new test case in `test_admin.py`, and create 3 fake User objects. Populate them with unique data. Then, log their values. Note that their data seems to get overwritten.\r\n\r\n\r\n### Environment\r\n\r\n_No response_\r\n\r\n### Additional Context\r\n\r\nThis bug seems to be originating in the `signals.py` file in the ` handle_profile` class. By passing in a flag that disables the save behavior (or just commenting it out), this issue seems to resolve. \r\n\r\n\r\n\r\n### Issue Links\r\n\r\n\ud83d\udd04 Relates to: [#1464 / #1468](https://github.com/cisagov/manage.get.gov/pull/1491)\r\n\ud83d\udd04 Relates to: [this PR](https://github.com/cisagov/manage.get.gov/pull/1543) (pinpoints where the behavior is occurring but does not solve it)\n", "before_files": [{"content": "from django.db import models\n\nfrom phonenumber_field.modelfields import PhoneNumberField # type: ignore\n\nfrom .utility.time_stamped_model import TimeStampedModel\n\n\nclass Contact(TimeStampedModel):\n\n \"\"\"Contact information follows a similar pattern for each contact.\"\"\"\n\n user = models.OneToOneField(\n \"registrar.User\",\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n )\n\n first_name = models.TextField(\n null=True,\n blank=True,\n help_text=\"First name\",\n verbose_name=\"first name / given name\",\n db_index=True,\n )\n middle_name = models.TextField(\n null=True,\n blank=True,\n help_text=\"Middle name (optional)\",\n )\n last_name = models.TextField(\n null=True,\n blank=True,\n help_text=\"Last name\",\n verbose_name=\"last name / family name\",\n db_index=True,\n )\n title = models.TextField(\n null=True,\n blank=True,\n help_text=\"Title\",\n verbose_name=\"title or role in your organization\",\n )\n email = models.EmailField(\n null=True,\n blank=True,\n help_text=\"Email\",\n db_index=True,\n )\n phone = PhoneNumberField(\n null=True,\n blank=True,\n help_text=\"Phone\",\n db_index=True,\n )\n\n def get_formatted_name(self):\n \"\"\"Returns the contact's name in Western order.\"\"\"\n names = [n for n in [self.first_name, self.middle_name, self.last_name] if n]\n return \" \".join(names) if names else \"Unknown\"\n\n def save(self, *args, **kwargs):\n # Call the parent class's save method to perform the actual save\n super().save(*args, **kwargs)\n\n # Update the related User object's first_name and last_name\n if self.user:\n self.user.first_name = self.first_name\n self.user.last_name = self.last_name\n self.user.save()\n\n def __str__(self):\n if self.first_name or self.last_name:\n return self.get_formatted_name()\n elif self.email:\n return self.email\n elif self.pk:\n return str(self.pk)\n else:\n return \"\"\n", "path": "src/registrar/models/contact.py"}], "after_files": [{"content": "from django.db import models\n\nfrom phonenumber_field.modelfields import PhoneNumberField # type: ignore\n\nfrom .utility.time_stamped_model import TimeStampedModel\n\n\nclass Contact(TimeStampedModel):\n\n \"\"\"Contact information follows a similar pattern for each contact.\"\"\"\n\n user = models.OneToOneField(\n \"registrar.User\",\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n )\n\n first_name = models.TextField(\n null=True,\n blank=True,\n help_text=\"First name\",\n verbose_name=\"first name / given name\",\n db_index=True,\n )\n middle_name = models.TextField(\n null=True,\n blank=True,\n help_text=\"Middle name (optional)\",\n )\n last_name = models.TextField(\n null=True,\n blank=True,\n help_text=\"Last name\",\n verbose_name=\"last name / family name\",\n db_index=True,\n )\n title = models.TextField(\n null=True,\n blank=True,\n help_text=\"Title\",\n verbose_name=\"title or role in your organization\",\n )\n email = models.EmailField(\n null=True,\n blank=True,\n help_text=\"Email\",\n db_index=True,\n )\n phone = PhoneNumberField(\n null=True,\n blank=True,\n help_text=\"Phone\",\n db_index=True,\n )\n\n def get_formatted_name(self):\n \"\"\"Returns the contact's name in Western order.\"\"\"\n names = [n for n in [self.first_name, self.middle_name, self.last_name] if n]\n return \" \".join(names) if names else \"Unknown\"\n\n def save(self, *args, **kwargs):\n # Call the parent class's save method to perform the actual save\n super().save(*args, **kwargs)\n\n # Update the related User object's first_name and last_name\n if self.user and (not self.user.first_name or not self.user.last_name):\n self.user.first_name = self.first_name\n self.user.last_name = self.last_name\n self.user.save()\n\n def __str__(self):\n if self.first_name or self.last_name:\n return self.get_formatted_name()\n elif self.email:\n return self.email\n elif self.pk:\n return str(self.pk)\n else:\n return \"\"\n", "path": "src/registrar/models/contact.py"}]} |
gh_patches_debug_1594 | rasdani/github-patches | git_diff | mars-project__mars-679 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Web session doesn't work with large data source
**Describe the bug**
The default `max_buffer_size` of tornado HTTP server is 100M, when constructing dataframes from large pandas dataframes it will raise `Content-Length too long`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mars/web/server.py`
Content:
```
1 # Copyright 1999-2018 Alibaba Group Holding Ltd.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import functools
16 import json
17 import logging
18 import threading
19 import os
20 from collections import defaultdict
21
22 import numpy as np
23 import pyarrow
24 from bokeh.application import Application
25 from bokeh.application.handlers import FunctionHandler
26 from bokeh.server.server import Server
27 import jinja2
28 from tornado import web, ioloop
29
30 from ..compat import six
31 from ..utils import get_next_port
32 from ..scheduler import ResourceActor, SessionActor
33 from ..api import MarsAPI
34
35 logger = logging.getLogger(__name__)
36
37
38 def get_jinja_env():
39 from datetime import datetime
40 from ..utils import readable_size
41
42 _jinja_env = jinja2.Environment(
43 loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
44 )
45
46 def format_ts(value):
47 if value is None or np.isnan(value):
48 return None
49 return datetime.fromtimestamp(value).strftime('%Y-%m-%d %H:%M:%S')
50
51 _jinja_env.filters['format_ts'] = format_ts
52 _jinja_env.filters['readable_size'] = readable_size
53 return _jinja_env
54
55
56 class BokehStaticFileHandler(web.StaticFileHandler):
57 @classmethod
58 def get_absolute_path(cls, root, path):
59 from bokeh import server
60 path_parts = path.rsplit('/', 1)
61 if 'bokeh' in path_parts[-1]:
62 root = os.path.join(os.path.dirname(server.__file__), "static")
63 return super(BokehStaticFileHandler, cls).get_absolute_path(root, path)
64
65 def validate_absolute_path(self, root, absolute_path):
66 from bokeh import server
67 path_parts = absolute_path.rsplit('/', 1)
68 if 'bokeh' in path_parts[-1]:
69 root = os.path.join(os.path.dirname(server.__file__), "static")
70 return super(BokehStaticFileHandler, self).validate_absolute_path(root, absolute_path)
71
72
73 class MarsRequestHandler(web.RequestHandler):
74 def initialize(self, scheduler_ip):
75 self._scheduler = scheduler_ip
76 self.web_api = MarsWebAPI(scheduler_ip)
77
78
79 class MarsWebAPI(MarsAPI):
80 def __init__(self, scheduler_ip):
81 super(MarsWebAPI, self).__init__(scheduler_ip)
82
83 def get_tasks_info(self, select_session_id=None):
84 from ..scheduler import GraphState
85
86 sessions = defaultdict(dict)
87 for session_id, session_ref in six.iteritems(self.session_manager.get_sessions()):
88 if select_session_id and session_id != select_session_id:
89 continue
90 session_desc = sessions[session_id]
91 session_desc['id'] = session_id
92 session_desc['name'] = session_id
93 session_desc['tasks'] = dict()
94 session_ref = self.actor_client.actor_ref(session_ref)
95 for graph_key, graph_meta_ref in six.iteritems(session_ref.get_graph_meta_refs()):
96 task_desc = dict()
97
98 state = self.get_graph_state(session_id, graph_key)
99 if state == GraphState.PREPARING:
100 task_desc['state'] = state.name.lower()
101 session_desc['tasks'][graph_key] = task_desc
102 continue
103
104 graph_meta_ref = self.actor_client.actor_ref(graph_meta_ref)
105 task_desc['id'] = graph_key
106 task_desc['state'] = graph_meta_ref.get_state().value
107 start_time, end_time, graph_size = graph_meta_ref.get_graph_info()
108 task_desc['start_time'] = start_time
109 task_desc['end_time'] = end_time
110 task_desc['graph_size'] = graph_size
111
112 session_desc['tasks'][graph_key] = task_desc
113 return sessions
114
115 def get_task_detail(self, session_id, task_id):
116 graph_meta_ref = self.get_graph_meta_ref(session_id, task_id)
117 return graph_meta_ref.calc_stats()
118
119 def get_operand_info(self, session_id, task_id, state=None):
120 graph_meta_ref = self.get_graph_meta_ref(session_id, task_id)
121 return graph_meta_ref.get_operand_info(state=state)
122
123 def get_workers_meta(self):
124 resource_uid = ResourceActor.default_uid()
125 resource_ref = self.get_actor_ref(resource_uid)
126 return resource_ref.get_workers_meta()
127
128 def query_worker_events(self, endpoint, category, time_start=None, time_end=None):
129 from ..worker import EventsActor
130 ref = self.actor_client.actor_ref(EventsActor.default_uid(), address=endpoint)
131 return ref.query_by_time(category, time_start=time_start, time_end=time_end)
132
133 def write_mutable_tensor(self, session_id, name, payload_type, body):
134 from ..serialize import dataserializer
135 from ..tensor.core import Indexes
136 session_uid = SessionActor.gen_uid(session_id)
137 session_ref = self.get_actor_ref(session_uid)
138
139 index_json_size = np.asscalar(np.frombuffer(body[0:8], dtype=np.int64))
140 index_json = json.loads(body[8:8+index_json_size].decode('ascii'))
141 index = Indexes.from_json(index_json).indexes
142 if payload_type is None:
143 value = dataserializer.loads(body[8+index_json_size:], raw=False)
144 elif payload_type == 'tensor':
145 tensor_chunk_offset = 8 + index_json_size
146 with pyarrow.BufferReader(body[tensor_chunk_offset:]) as reader:
147 value = pyarrow.read_tensor(reader).to_numpy()
148 elif payload_type == 'record_batch':
149 schema_size = np.asscalar(np.frombuffer(body[8+index_json_size:8+index_json_size+8], dtype=np.int64))
150 schema_offset = 8 + index_json_size + 8
151 with pyarrow.BufferReader(body[schema_offset:schema_offset+schema_size]) as reader:
152 schema = pyarrow.read_schema(reader)
153 record_batch_offset = schema_offset + schema_size
154 with pyarrow.BufferReader(body[record_batch_offset:]) as reader:
155 record_batch = pyarrow.read_record_batch(reader, schema)
156 value = record_batch.to_pandas().to_records(index=False)
157 else:
158 raise ValueError('Not supported payload type: %s' % payload_type)
159 return session_ref.write_mutable_tensor(name, index, value)
160
161
162 class MarsWeb(object):
163 def __init__(self, port=None, scheduler_ip=None):
164 self._port = port
165 self._scheduler_ip = scheduler_ip
166 self._server = None
167 self._server_thread = None
168
169 @property
170 def port(self):
171 return self._port
172
173 @staticmethod
174 def _configure_loop():
175 try:
176 ioloop.IOLoop.current()
177 except RuntimeError:
178 if six.PY3:
179 import asyncio
180 asyncio.set_event_loop(asyncio.new_event_loop())
181 loop = None
182 try:
183 loop = ioloop.IOLoop.current()
184 except: # noqa: E722
185 pass
186 if loop is None:
187 raise
188 else:
189 raise
190
191 def _try_start_web_server(self):
192 static_path = os.path.join(os.path.dirname(__file__), 'static')
193
194 handlers = dict()
195 for p, h in _bokeh_apps.items():
196 handlers[p] = Application(FunctionHandler(functools.partial(h, self._scheduler_ip)))
197
198 handler_kwargs = {'scheduler_ip': self._scheduler_ip}
199 extra_patterns = [
200 ('/static/(.*)', BokehStaticFileHandler, {'path': static_path})
201 ]
202 for p, h in _web_handlers.items():
203 extra_patterns.append((p, h, handler_kwargs))
204
205 retrial = 5
206 while retrial:
207 try:
208 if self._port is None:
209 use_port = get_next_port()
210 else:
211 use_port = self._port
212
213 self._server = Server(
214 handlers, allow_websocket_origin=['*'],
215 address='0.0.0.0', port=use_port,
216 extra_patterns=extra_patterns,
217 )
218 self._server.start()
219 self._port = use_port
220 logger.info('Mars UI started at 0.0.0.0:%d', self._port)
221 break
222 except OSError:
223 if self._port is not None:
224 raise
225 retrial -= 1
226 if retrial == 0:
227 raise
228
229 def start(self, event=None, block=False):
230 self._configure_loop()
231 self._try_start_web_server()
232
233 if not block:
234 self._server_thread = threading.Thread(target=self._server.io_loop.start)
235 self._server_thread.daemon = True
236 self._server_thread.start()
237
238 if event:
239 event.set()
240 else:
241 if event:
242 event.set()
243
244 self._server.io_loop.start()
245
246 def stop(self):
247 if self._server is not None:
248 self._server.io_loop.stop()
249 self._server.stop()
250
251
252 _bokeh_apps = dict()
253 _web_handlers = dict()
254
255
256 def register_bokeh_app(pattern, handler):
257 _bokeh_apps[pattern] = handler
258
259
260 def register_web_handler(pattern, handler):
261 _web_handlers[pattern] = handler
262
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mars/web/server.py b/mars/web/server.py
--- a/mars/web/server.py
+++ b/mars/web/server.py
@@ -214,6 +214,7 @@
handlers, allow_websocket_origin=['*'],
address='0.0.0.0', port=use_port,
extra_patterns=extra_patterns,
+ http_server_kwargs={'max_buffer_size': 2 ** 32},
)
self._server.start()
self._port = use_port
| {"golden_diff": "diff --git a/mars/web/server.py b/mars/web/server.py\n--- a/mars/web/server.py\n+++ b/mars/web/server.py\n@@ -214,6 +214,7 @@\n handlers, allow_websocket_origin=['*'],\n address='0.0.0.0', port=use_port,\n extra_patterns=extra_patterns,\n+ http_server_kwargs={'max_buffer_size': 2 ** 32},\n )\n self._server.start()\n self._port = use_port\n", "issue": "[BUG] Web session doesn't work with large data source\n**Describe the bug**\r\n\r\nThe default `max_buffer_size` of tornado HTTP server is 100M, when constructing dataframes from large pandas dataframes it will raise `Content-Length too long`.\r\n\r\n\n", "before_files": [{"content": "# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nimport json\nimport logging\nimport threading\nimport os\nfrom collections import defaultdict\n\nimport numpy as np\nimport pyarrow\nfrom bokeh.application import Application\nfrom bokeh.application.handlers import FunctionHandler\nfrom bokeh.server.server import Server\nimport jinja2\nfrom tornado import web, ioloop\n\nfrom ..compat import six\nfrom ..utils import get_next_port\nfrom ..scheduler import ResourceActor, SessionActor\nfrom ..api import MarsAPI\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_jinja_env():\n from datetime import datetime\n from ..utils import readable_size\n\n _jinja_env = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),\n )\n\n def format_ts(value):\n if value is None or np.isnan(value):\n return None\n return datetime.fromtimestamp(value).strftime('%Y-%m-%d %H:%M:%S')\n\n _jinja_env.filters['format_ts'] = format_ts\n _jinja_env.filters['readable_size'] = readable_size\n return _jinja_env\n\n\nclass BokehStaticFileHandler(web.StaticFileHandler):\n @classmethod\n def get_absolute_path(cls, root, path):\n from bokeh import server\n path_parts = path.rsplit('/', 1)\n if 'bokeh' in path_parts[-1]:\n root = os.path.join(os.path.dirname(server.__file__), \"static\")\n return super(BokehStaticFileHandler, cls).get_absolute_path(root, path)\n\n def validate_absolute_path(self, root, absolute_path):\n from bokeh import server\n path_parts = absolute_path.rsplit('/', 1)\n if 'bokeh' in path_parts[-1]:\n root = os.path.join(os.path.dirname(server.__file__), \"static\")\n return super(BokehStaticFileHandler, self).validate_absolute_path(root, absolute_path)\n\n\nclass MarsRequestHandler(web.RequestHandler):\n def initialize(self, scheduler_ip):\n self._scheduler = scheduler_ip\n self.web_api = MarsWebAPI(scheduler_ip)\n\n\nclass MarsWebAPI(MarsAPI):\n def __init__(self, scheduler_ip):\n super(MarsWebAPI, self).__init__(scheduler_ip)\n\n def get_tasks_info(self, select_session_id=None):\n from ..scheduler import GraphState\n\n sessions = defaultdict(dict)\n for session_id, session_ref in six.iteritems(self.session_manager.get_sessions()):\n if select_session_id and session_id != select_session_id:\n continue\n session_desc = sessions[session_id]\n session_desc['id'] = session_id\n session_desc['name'] = session_id\n session_desc['tasks'] = dict()\n session_ref = self.actor_client.actor_ref(session_ref)\n for graph_key, graph_meta_ref in six.iteritems(session_ref.get_graph_meta_refs()):\n task_desc = dict()\n\n state = self.get_graph_state(session_id, graph_key)\n if state == GraphState.PREPARING:\n task_desc['state'] = state.name.lower()\n session_desc['tasks'][graph_key] = task_desc\n continue\n\n graph_meta_ref = self.actor_client.actor_ref(graph_meta_ref)\n task_desc['id'] = graph_key\n task_desc['state'] = graph_meta_ref.get_state().value\n start_time, end_time, graph_size = graph_meta_ref.get_graph_info()\n task_desc['start_time'] = start_time\n task_desc['end_time'] = end_time\n task_desc['graph_size'] = graph_size\n\n session_desc['tasks'][graph_key] = task_desc\n return sessions\n\n def get_task_detail(self, session_id, task_id):\n graph_meta_ref = self.get_graph_meta_ref(session_id, task_id)\n return graph_meta_ref.calc_stats()\n\n def get_operand_info(self, session_id, task_id, state=None):\n graph_meta_ref = self.get_graph_meta_ref(session_id, task_id)\n return graph_meta_ref.get_operand_info(state=state)\n\n def get_workers_meta(self):\n resource_uid = ResourceActor.default_uid()\n resource_ref = self.get_actor_ref(resource_uid)\n return resource_ref.get_workers_meta()\n\n def query_worker_events(self, endpoint, category, time_start=None, time_end=None):\n from ..worker import EventsActor\n ref = self.actor_client.actor_ref(EventsActor.default_uid(), address=endpoint)\n return ref.query_by_time(category, time_start=time_start, time_end=time_end)\n\n def write_mutable_tensor(self, session_id, name, payload_type, body):\n from ..serialize import dataserializer\n from ..tensor.core import Indexes\n session_uid = SessionActor.gen_uid(session_id)\n session_ref = self.get_actor_ref(session_uid)\n\n index_json_size = np.asscalar(np.frombuffer(body[0:8], dtype=np.int64))\n index_json = json.loads(body[8:8+index_json_size].decode('ascii'))\n index = Indexes.from_json(index_json).indexes\n if payload_type is None:\n value = dataserializer.loads(body[8+index_json_size:], raw=False)\n elif payload_type == 'tensor':\n tensor_chunk_offset = 8 + index_json_size\n with pyarrow.BufferReader(body[tensor_chunk_offset:]) as reader:\n value = pyarrow.read_tensor(reader).to_numpy()\n elif payload_type == 'record_batch':\n schema_size = np.asscalar(np.frombuffer(body[8+index_json_size:8+index_json_size+8], dtype=np.int64))\n schema_offset = 8 + index_json_size + 8\n with pyarrow.BufferReader(body[schema_offset:schema_offset+schema_size]) as reader:\n schema = pyarrow.read_schema(reader)\n record_batch_offset = schema_offset + schema_size\n with pyarrow.BufferReader(body[record_batch_offset:]) as reader:\n record_batch = pyarrow.read_record_batch(reader, schema)\n value = record_batch.to_pandas().to_records(index=False)\n else:\n raise ValueError('Not supported payload type: %s' % payload_type)\n return session_ref.write_mutable_tensor(name, index, value)\n\n\nclass MarsWeb(object):\n def __init__(self, port=None, scheduler_ip=None):\n self._port = port\n self._scheduler_ip = scheduler_ip\n self._server = None\n self._server_thread = None\n\n @property\n def port(self):\n return self._port\n\n @staticmethod\n def _configure_loop():\n try:\n ioloop.IOLoop.current()\n except RuntimeError:\n if six.PY3:\n import asyncio\n asyncio.set_event_loop(asyncio.new_event_loop())\n loop = None\n try:\n loop = ioloop.IOLoop.current()\n except: # noqa: E722\n pass\n if loop is None:\n raise\n else:\n raise\n\n def _try_start_web_server(self):\n static_path = os.path.join(os.path.dirname(__file__), 'static')\n\n handlers = dict()\n for p, h in _bokeh_apps.items():\n handlers[p] = Application(FunctionHandler(functools.partial(h, self._scheduler_ip)))\n\n handler_kwargs = {'scheduler_ip': self._scheduler_ip}\n extra_patterns = [\n ('/static/(.*)', BokehStaticFileHandler, {'path': static_path})\n ]\n for p, h in _web_handlers.items():\n extra_patterns.append((p, h, handler_kwargs))\n\n retrial = 5\n while retrial:\n try:\n if self._port is None:\n use_port = get_next_port()\n else:\n use_port = self._port\n\n self._server = Server(\n handlers, allow_websocket_origin=['*'],\n address='0.0.0.0', port=use_port,\n extra_patterns=extra_patterns,\n )\n self._server.start()\n self._port = use_port\n logger.info('Mars UI started at 0.0.0.0:%d', self._port)\n break\n except OSError:\n if self._port is not None:\n raise\n retrial -= 1\n if retrial == 0:\n raise\n\n def start(self, event=None, block=False):\n self._configure_loop()\n self._try_start_web_server()\n\n if not block:\n self._server_thread = threading.Thread(target=self._server.io_loop.start)\n self._server_thread.daemon = True\n self._server_thread.start()\n\n if event:\n event.set()\n else:\n if event:\n event.set()\n\n self._server.io_loop.start()\n\n def stop(self):\n if self._server is not None:\n self._server.io_loop.stop()\n self._server.stop()\n\n\n_bokeh_apps = dict()\n_web_handlers = dict()\n\n\ndef register_bokeh_app(pattern, handler):\n _bokeh_apps[pattern] = handler\n\n\ndef register_web_handler(pattern, handler):\n _web_handlers[pattern] = handler\n", "path": "mars/web/server.py"}], "after_files": [{"content": "# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nimport json\nimport logging\nimport threading\nimport os\nfrom collections import defaultdict\n\nimport numpy as np\nimport pyarrow\nfrom bokeh.application import Application\nfrom bokeh.application.handlers import FunctionHandler\nfrom bokeh.server.server import Server\nimport jinja2\nfrom tornado import web, ioloop\n\nfrom ..compat import six\nfrom ..utils import get_next_port\nfrom ..scheduler import ResourceActor, SessionActor\nfrom ..api import MarsAPI\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_jinja_env():\n from datetime import datetime\n from ..utils import readable_size\n\n _jinja_env = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),\n )\n\n def format_ts(value):\n if value is None or np.isnan(value):\n return None\n return datetime.fromtimestamp(value).strftime('%Y-%m-%d %H:%M:%S')\n\n _jinja_env.filters['format_ts'] = format_ts\n _jinja_env.filters['readable_size'] = readable_size\n return _jinja_env\n\n\nclass BokehStaticFileHandler(web.StaticFileHandler):\n @classmethod\n def get_absolute_path(cls, root, path):\n from bokeh import server\n path_parts = path.rsplit('/', 1)\n if 'bokeh' in path_parts[-1]:\n root = os.path.join(os.path.dirname(server.__file__), \"static\")\n return super(BokehStaticFileHandler, cls).get_absolute_path(root, path)\n\n def validate_absolute_path(self, root, absolute_path):\n from bokeh import server\n path_parts = absolute_path.rsplit('/', 1)\n if 'bokeh' in path_parts[-1]:\n root = os.path.join(os.path.dirname(server.__file__), \"static\")\n return super(BokehStaticFileHandler, self).validate_absolute_path(root, absolute_path)\n\n\nclass MarsRequestHandler(web.RequestHandler):\n def initialize(self, scheduler_ip):\n self._scheduler = scheduler_ip\n self.web_api = MarsWebAPI(scheduler_ip)\n\n\nclass MarsWebAPI(MarsAPI):\n def __init__(self, scheduler_ip):\n super(MarsWebAPI, self).__init__(scheduler_ip)\n\n def get_tasks_info(self, select_session_id=None):\n from ..scheduler import GraphState\n\n sessions = defaultdict(dict)\n for session_id, session_ref in six.iteritems(self.session_manager.get_sessions()):\n if select_session_id and session_id != select_session_id:\n continue\n session_desc = sessions[session_id]\n session_desc['id'] = session_id\n session_desc['name'] = session_id\n session_desc['tasks'] = dict()\n session_ref = self.actor_client.actor_ref(session_ref)\n for graph_key, graph_meta_ref in six.iteritems(session_ref.get_graph_meta_refs()):\n task_desc = dict()\n\n state = self.get_graph_state(session_id, graph_key)\n if state == GraphState.PREPARING:\n task_desc['state'] = state.name.lower()\n session_desc['tasks'][graph_key] = task_desc\n continue\n\n graph_meta_ref = self.actor_client.actor_ref(graph_meta_ref)\n task_desc['id'] = graph_key\n task_desc['state'] = graph_meta_ref.get_state().value\n start_time, end_time, graph_size = graph_meta_ref.get_graph_info()\n task_desc['start_time'] = start_time\n task_desc['end_time'] = end_time\n task_desc['graph_size'] = graph_size\n\n session_desc['tasks'][graph_key] = task_desc\n return sessions\n\n def get_task_detail(self, session_id, task_id):\n graph_meta_ref = self.get_graph_meta_ref(session_id, task_id)\n return graph_meta_ref.calc_stats()\n\n def get_operand_info(self, session_id, task_id, state=None):\n graph_meta_ref = self.get_graph_meta_ref(session_id, task_id)\n return graph_meta_ref.get_operand_info(state=state)\n\n def get_workers_meta(self):\n resource_uid = ResourceActor.default_uid()\n resource_ref = self.get_actor_ref(resource_uid)\n return resource_ref.get_workers_meta()\n\n def query_worker_events(self, endpoint, category, time_start=None, time_end=None):\n from ..worker import EventsActor\n ref = self.actor_client.actor_ref(EventsActor.default_uid(), address=endpoint)\n return ref.query_by_time(category, time_start=time_start, time_end=time_end)\n\n def write_mutable_tensor(self, session_id, name, payload_type, body):\n from ..serialize import dataserializer\n from ..tensor.core import Indexes\n session_uid = SessionActor.gen_uid(session_id)\n session_ref = self.get_actor_ref(session_uid)\n\n index_json_size = np.asscalar(np.frombuffer(body[0:8], dtype=np.int64))\n index_json = json.loads(body[8:8+index_json_size].decode('ascii'))\n index = Indexes.from_json(index_json).indexes\n if payload_type is None:\n value = dataserializer.loads(body[8+index_json_size:], raw=False)\n elif payload_type == 'tensor':\n tensor_chunk_offset = 8 + index_json_size\n with pyarrow.BufferReader(body[tensor_chunk_offset:]) as reader:\n value = pyarrow.read_tensor(reader).to_numpy()\n elif payload_type == 'record_batch':\n schema_size = np.asscalar(np.frombuffer(body[8+index_json_size:8+index_json_size+8], dtype=np.int64))\n schema_offset = 8 + index_json_size + 8\n with pyarrow.BufferReader(body[schema_offset:schema_offset+schema_size]) as reader:\n schema = pyarrow.read_schema(reader)\n record_batch_offset = schema_offset + schema_size\n with pyarrow.BufferReader(body[record_batch_offset:]) as reader:\n record_batch = pyarrow.read_record_batch(reader, schema)\n value = record_batch.to_pandas().to_records(index=False)\n else:\n raise ValueError('Not supported payload type: %s' % payload_type)\n return session_ref.write_mutable_tensor(name, index, value)\n\n\nclass MarsWeb(object):\n def __init__(self, port=None, scheduler_ip=None):\n self._port = port\n self._scheduler_ip = scheduler_ip\n self._server = None\n self._server_thread = None\n\n @property\n def port(self):\n return self._port\n\n @staticmethod\n def _configure_loop():\n try:\n ioloop.IOLoop.current()\n except RuntimeError:\n if six.PY3:\n import asyncio\n asyncio.set_event_loop(asyncio.new_event_loop())\n loop = None\n try:\n loop = ioloop.IOLoop.current()\n except: # noqa: E722\n pass\n if loop is None:\n raise\n else:\n raise\n\n def _try_start_web_server(self):\n static_path = os.path.join(os.path.dirname(__file__), 'static')\n\n handlers = dict()\n for p, h in _bokeh_apps.items():\n handlers[p] = Application(FunctionHandler(functools.partial(h, self._scheduler_ip)))\n\n handler_kwargs = {'scheduler_ip': self._scheduler_ip}\n extra_patterns = [\n ('/static/(.*)', BokehStaticFileHandler, {'path': static_path})\n ]\n for p, h in _web_handlers.items():\n extra_patterns.append((p, h, handler_kwargs))\n\n retrial = 5\n while retrial:\n try:\n if self._port is None:\n use_port = get_next_port()\n else:\n use_port = self._port\n\n self._server = Server(\n handlers, allow_websocket_origin=['*'],\n address='0.0.0.0', port=use_port,\n extra_patterns=extra_patterns,\n http_server_kwargs={'max_buffer_size': 2 ** 32},\n )\n self._server.start()\n self._port = use_port\n logger.info('Mars UI started at 0.0.0.0:%d', self._port)\n break\n except OSError:\n if self._port is not None:\n raise\n retrial -= 1\n if retrial == 0:\n raise\n\n def start(self, event=None, block=False):\n self._configure_loop()\n self._try_start_web_server()\n\n if not block:\n self._server_thread = threading.Thread(target=self._server.io_loop.start)\n self._server_thread.daemon = True\n self._server_thread.start()\n\n if event:\n event.set()\n else:\n if event:\n event.set()\n\n self._server.io_loop.start()\n\n def stop(self):\n if self._server is not None:\n self._server.io_loop.stop()\n self._server.stop()\n\n\n_bokeh_apps = dict()\n_web_handlers = dict()\n\n\ndef register_bokeh_app(pattern, handler):\n _bokeh_apps[pattern] = handler\n\n\ndef register_web_handler(pattern, handler):\n _web_handlers[pattern] = handler\n", "path": "mars/web/server.py"}]} |
gh_patches_debug_1595 | rasdani/github-patches | git_diff | mozmeao__snippets-service-1017 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Create a preview with background (light and dark) for Icons
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `snippets/base/admin/adminmodels.py`
Content:
```
1 import re
2
3 from django.contrib import admin, messages
4 from django.db import transaction
5 from django.db.models import TextField, Q
6 from django.template.loader import get_template
7 from django.utils.safestring import mark_safe
8
9 from reversion.admin import VersionAdmin
10 from django_ace import AceWidget
11 from django_statsd.clients import statsd
12 from jinja2.meta import find_undeclared_variables
13 from django_admin_listfilter_dropdown.filters import (RelatedDropdownFilter,
14 RelatedOnlyDropdownFilter)
15
16 from snippets.base import forms, models, slack
17 from snippets.base.admin import actions, filters
18
19
20 MATCH_LOCALE_REGEX = re.compile(r'(\w+(?:-\w+)*)')
21 RESERVED_VARIABLES = ('_', 'snippet_id')
22
23
24 class ClientMatchRuleAdmin(VersionAdmin, admin.ModelAdmin):
25 list_display = ('description', 'is_exclusion', 'startpage_version', 'name',
26 'version', 'locale', 'appbuildid', 'build_target',
27 'channel', 'os_version', 'distribution',
28 'distribution_version', 'modified')
29 list_filter = ('name', 'version', 'os_version', 'appbuildid',
30 'build_target', 'channel', 'distribution', 'locale')
31 save_on_top = True
32 search_fields = ('description',)
33
34 class Media:
35 js = (
36 'js/admin/jquery.are-you-sure.js',
37 'js/admin/alert-page-leaving.js',
38 )
39
40
41 class LogEntryAdmin(admin.ModelAdmin):
42 list_display = ('user', 'content_type', 'object_id', 'object_repr', 'change_message')
43 list_filter = ('user', 'content_type')
44
45
46 class SnippetTemplateVariableInline(admin.TabularInline):
47 model = models.SnippetTemplateVariable
48 formset = forms.SnippetTemplateVariableInlineFormset
49 max_num = 0
50 can_delete = False
51 readonly_fields = ('name',)
52 fields = ('name', 'type', 'order', 'description')
53
54
55 class SnippetTemplateAdmin(VersionAdmin, admin.ModelAdmin):
56 save_on_top = True
57 list_display = ('name', 'priority', 'hidden')
58 list_filter = ('hidden', 'startpage')
59 inlines = (SnippetTemplateVariableInline,)
60 formfield_overrides = {
61 TextField: {'widget': AceWidget(mode='html', theme='github',
62 width='1200px', height='500px')},
63 }
64
65 def save_related(self, request, form, formsets, change):
66 """
67 After saving the related objects, remove and add
68 SnippetTemplateVariables depending on how the template code changed.
69 """
70 super(SnippetTemplateAdmin, self).save_related(request, form, formsets,
71 change)
72
73 # Parse the template code and find any undefined variables.
74 ast = models.JINJA_ENV.env.parse(form.instance.code)
75 new_vars = find_undeclared_variables(ast)
76 var_manager = form.instance.variable_set
77
78 # Filter out reserved variable names.
79 new_vars = [x for x in new_vars if x not in RESERVED_VARIABLES]
80
81 # Delete variables not in the new set.
82 var_manager.filter(~Q(name__in=new_vars)).delete()
83
84 # Create variables that don't exist.
85 for i, variable in enumerate(new_vars, start=1):
86 obj, _ = models.SnippetTemplateVariable.objects.get_or_create(
87 template=form.instance, name=variable)
88 if obj.order == 0:
89 obj.order = i * 10
90 obj.save()
91
92
93 class AddonAdmin(admin.ModelAdmin):
94 list_display = ('name', 'guid')
95
96 class Media:
97 js = (
98 'js/admin/jquery.are-you-sure.js',
99 'js/admin/alert-page-leaving.js',
100 )
101
102
103 class IconAdmin(admin.ModelAdmin):
104 search_fields = [
105 'name',
106 'image',
107 ]
108 readonly_fields = [
109 'height',
110 'width',
111 'preview',
112 'creator',
113 'created',
114 'snippets',
115 ]
116 list_display_links = [
117 'id',
118 'name',
119 ]
120 list_display = [
121 'id',
122 'name',
123 'width',
124 'height',
125 'preview',
126 ]
127
128 class Media:
129 css = {
130 'all': (
131 'css/admin/ListSnippets.css',
132 )
133 }
134 js = (
135 'js/admin/jquery.are-you-sure.js',
136 'js/admin/alert-page-leaving.js',
137 )
138
139 def save_model(self, request, obj, form, change):
140 if not obj.creator_id:
141 obj.creator = request.user
142 super().save_model(request, obj, form, change)
143
144 def preview(self, obj):
145 text = f'<img style="max-width:120px; max-height:120px;" src="{obj.image.url}"/>'
146 return mark_safe(text)
147
148 def snippets(self, obj):
149 """Snippets using this icon."""
150 template = get_template('base/snippets_related_with_obj.jinja')
151 return mark_safe(template.render({'snippets': obj.snippets, 'type': 'Icon'}))
152
153
154 class SimpleTemplateInline(admin.StackedInline):
155 model = models.SimpleTemplate
156 form = forms.SimpleTemplateForm
157 can_delete = False
158 classes = [
159 'inline-template',
160 'simple_snippet',
161 ]
162 raw_id_fields = [
163 'section_title_icon',
164 'title_icon',
165 'icon',
166 ]
167
168 fieldsets = (
169 ('Title', {
170 'fields': ('title_icon', 'title'),
171 }),
172 ('Section', {
173 'fields': ('section_title_icon', 'section_title_text', 'section_title_url',),
174 }),
175 ('Main', {
176 'fields': ('icon', 'text', 'button_label', 'button_color', 'button_url'),
177 }),
178 ('Extra', {
179 'fields': ('block_button_text', 'tall', 'do_not_autoblock'),
180 })
181
182 )
183
184
185 class FundraisingTemplateInline(admin.StackedInline):
186 model = models.FundraisingTemplate
187 form = forms.FundraisingTemplateForm
188 can_delete = False
189 classes = [
190 'inline-template',
191 'eoy_snippet',
192 ]
193 raw_id_fields = [
194 'title_icon',
195 'icon',
196 ]
197
198 fieldsets = (
199 ('Title', {
200 'fields': (
201 'title_icon',
202 'title'
203 ),
204 }),
205 ('Main', {
206 'fields': (
207 'icon',
208 'text',
209 'text_color',
210 'background_color',
211 'highlight_color',
212 )
213 }),
214 ('Form Configuration', {
215 'fields': (
216 'donation_form_url',
217 'currency_code',
218 'locale',
219 'selected_button',
220 'button_label',
221 'button_color',
222 'button_background_color',
223 'monthly_checkbox_label_text',
224 )
225 }),
226 ('Donation', {
227 'fields': (
228 ('donation_amount_first', 'donation_amount_second',
229 'donation_amount_third', 'donation_amount_fourth',),
230 )
231 }),
232 ('Extra', {
233 'fields': ('block_button_text', 'test', 'do_not_autoblock'),
234 })
235
236 )
237
238
239 class FxASignupTemplateInline(admin.StackedInline):
240 model = models.FxASignupTemplate
241 form = forms.FxASignupTemplateForm
242 can_delete = False
243 classes = [
244 'inline-template',
245 'fxa_signup_snippet',
246 ]
247 raw_id_fields = [
248 'scene1_title_icon',
249 'scene1_icon',
250 ]
251
252 fieldsets = (
253 ('Scene 1 Title', {
254 'fields': (
255 'scene1_title_icon',
256 'scene1_title'
257 ),
258 }),
259 ('Scene 1 Main', {
260 'fields': (
261 'scene1_icon',
262 'scene1_text',
263 'scene1_button_label',
264 'scene1_button_color',
265 'scene1_button_background_color',
266 )
267 }),
268 ('Scene 2 Title', {
269 'fields': ('scene2_title',),
270 }),
271 ('Scene 2 Main', {
272 'fields': (
273 'scene2_text',
274 'scene2_button_label',
275 'scene2_email_placeholder_text',
276 'scene2_dismiss_button_text',
277 )
278 }),
279
280 ('Extra', {
281 'fields': (
282 'utm_term',
283 'utm_campaign',
284 'block_button_text',
285 'do_not_autoblock'
286 ),
287 })
288 )
289
290
291 class NewsletterTemplateInline(admin.StackedInline):
292 model = models.NewsletterTemplate
293 form = forms.NewsletterTemplateForm
294 can_delete = False
295 classes = [
296 'inline-template',
297 'newsletter_snippet',
298 ]
299 raw_id_fields = [
300 'scene1_title_icon',
301 'scene1_icon',
302 ]
303
304 fieldsets = (
305 ('Scene 1 Title', {
306 'fields': (
307 'scene1_title_icon',
308 'scene1_title'
309 ),
310 }),
311 ('Scene 1 Main', {
312 'fields': (
313 'scene1_icon',
314 'scene1_text',
315 'scene1_button_label',
316 'scene1_button_color',
317 'scene1_button_background_color',
318 )
319 }),
320 ('Scene 2 Title', {
321 'fields': ('scene2_title',),
322 }),
323 ('Scene 2 Main', {
324 'fields': (
325 'scene2_text',
326 'scene2_button_label',
327 'scene2_email_placeholder_text',
328 'scene2_privacy_html',
329 'scene2_newsletter',
330 'scene2_dismiss_button_text',
331 'locale',
332 'success_text',
333 'error_text',
334 )
335 }),
336
337 ('Extra', {
338 'fields': (
339 'block_button_text',
340 'do_not_autoblock'
341 ),
342 })
343 )
344
345
346 class SendToDeviceTemplateInline(admin.StackedInline):
347 model = models.SendToDeviceTemplate
348 form = forms.SendToDeviceTemplateForm
349 can_delete = False
350 classes = [
351 'inline-template',
352 'send_to_device_snippet',
353 ]
354 raw_id_fields = [
355 'scene1_title_icon',
356 'scene1_icon',
357 'scene2_icon',
358 ]
359
360 fieldsets = (
361 ('Scene 1 Title', {
362 'fields': (
363 'scene1_title_icon',
364 'scene1_title'
365 ),
366 }),
367 ('Scene 1 Main', {
368 'fields': (
369 'scene1_icon',
370 'scene1_text',
371 'scene1_button_label',
372 'scene1_button_color',
373 'scene1_button_background_color',
374 )
375 }),
376 ('Scene 2 Title', {
377 'fields': ('scene2_title',),
378 }),
379 ('Scene 2 Main', {
380 'fields': (
381 'scene2_icon',
382 'scene2_text',
383
384 'scene2_button_label',
385 'scene2_input_placeholder',
386 'scene2_disclaimer_html',
387 'scene2_dismiss_button_text',
388
389 'locale',
390 'country',
391 ('include_sms', 'message_id_sms',),
392 'message_id_email',
393 'success_title',
394 'success_text',
395 'error_text',
396 )
397 }),
398
399 ('Extra', {
400 'fields': (
401 'block_button_text',
402 'do_not_autoblock'
403 ),
404 })
405 )
406
407
408 class SimpleBelowSearchTemplateInline(admin.StackedInline):
409 model = models.SimpleBelowSearchTemplate
410 form = forms.SimpleBelowSearchTemplateForm
411 can_delete = False
412 classes = [
413 'inline-template',
414 'simple_below_search_snippet',
415 ]
416 raw_id_fields = [
417 'icon',
418 ]
419
420 fieldsets = (
421 ('Main', {
422 'fields': ('icon', 'text'),
423 }),
424 ('Extra', {
425 'fields': ('block_button_text', 'do_not_autoblock'),
426 })
427
428 )
429
430
431 class ASRSnippetAdmin(admin.ModelAdmin):
432 form = forms.ASRSnippetAdminForm
433 inlines = [
434 SimpleTemplateInline,
435 FundraisingTemplateInline,
436 FxASignupTemplateInline,
437 NewsletterTemplateInline,
438 SendToDeviceTemplateInline,
439 SimpleBelowSearchTemplateInline,
440 ]
441 list_display_links = (
442 'id',
443 'name',
444 )
445 list_display = (
446 'id',
447 'name',
448 'status',
449 'locale_list',
450 'modified',
451 )
452 list_filter = (
453 filters.ModifiedFilter,
454 filters.TemplateFilter,
455 ('locales', RelatedOnlyDropdownFilter),
456 ('targets', RelatedOnlyDropdownFilter),
457 'status',
458 filters.ChannelFilter,
459 ('campaign', RelatedDropdownFilter),
460 ('category', RelatedDropdownFilter),
461 filters.ScheduledFilter,
462 )
463 search_fields = (
464 'name',
465 'id',
466 'campaign__name',
467 'targets__name',
468 'category__name',
469 )
470 autocomplete_fields = (
471 'campaign',
472 'category',
473 )
474 preserve_filters = True
475 readonly_fields = (
476 'id',
477 'created',
478 'modified',
479 'uuid',
480 'creator',
481 'preview_url_light_theme',
482 'preview_url_dark_theme',
483
484 )
485 filter_horizontal = (
486 'targets',
487 'locales',
488 )
489 save_on_top = True
490 save_as = True
491 view_on_site = False
492 actions = (
493 actions.duplicate_snippets_action,
494 'make_published',
495 )
496
497 fieldsets = (
498 ('ID', {
499 'fields': (
500 'id',
501 'name',
502 'status',
503 'creator',
504 'preview_url_light_theme',
505 'preview_url_dark_theme',
506 )
507 }),
508 ('Content', {
509 'description': (
510 '''
511 <strong>Available deep links:</strong><br/>
512 <ol>
513 <li><code>special:accounts</code> to open Firefox Accounts</li>
514 <li><code>special:appMenu</code> to open the hamburger menu</li>
515 </ol><br/>
516 <strong>Automatically add Snippet ID:</strong><br/>
517 You can use <code>[[snippet_id]]</code> in any field and it
518 will be automatically replaced by Snippet ID when served to users.
519 <br/>
520 Example: This is a <code><a href="https://example.com?utm_term=[[snippet_id]]">link</a></code>
521 <br/>
522 ''' # noqa
523 ),
524 'fields': ('template_chooser',),
525 'classes': ('template-fieldset',)
526 }),
527 ('Publishing Options', {
528 'fields': (
529 'campaign',
530 'category',
531 'targets',
532 ('publish_start', 'publish_end'),
533 'locales',
534 'weight',)
535 }),
536 ('Other Info', {
537 'fields': ('uuid', ('created', 'modified'), 'for_qa'),
538 'classes': ('collapse',)
539 }),
540 )
541
542 class Media:
543 css = {
544 'all': (
545 'css/admin/ASRSnippetAdmin.css',
546 'css/admin/IDFieldHighlight.css',
547 'css/admin/InlineTemplates.css',
548 )
549 }
550 js = (
551 'js/admin/jquery.are-you-sure.js',
552 'js/admin/alert-page-leaving.js',
553 'js/admin/clipboard.min.js',
554 'js/admin/copy_preview.js',
555 )
556
557 def save_model(self, request, obj, form, change):
558 if not obj.creator_id:
559 obj.creator = request.user
560 statsd.incr('save.asrsnippet')
561 super().save_model(request, obj, form, change)
562
563 def preview_url_light_theme(self, obj):
564 text = f'''
565 <span id="previewLinkUrlLight">{obj.get_preview_url()}</span>
566 <button id="copyPreviewLink" class="btn"
567 data-clipboard-target="#previewLinkUrlLight"
568 originalText="Copy to Clipboard" type="button">
569 Copy to Clipboard
570 </button>
571 '''
572 return mark_safe(text)
573 preview_url_light_theme.short_description = 'Light Themed Preview URL'
574
575 def preview_url_dark_theme(self, obj):
576 text = f'''
577 <span id="previewLinkUrlDark">{obj.get_preview_url(dark=True)}</span>
578 <button id="copyPreviewLink" class="btn"
579 data-clipboard-target="#previewLinkUrlDark"
580 originalText="Copy to Clipboard" type="button">
581 Copy to Clipboard
582 </button>
583 '''
584 return mark_safe(text)
585 preview_url_dark_theme.short_description = 'Dark Themed Preview URL'
586
587 def change_view(self, request, *args, **kwargs):
588 if request.method == 'POST' and '_saveasnew' in request.POST:
589 # Always saved cloned snippets as un-published and un-check ready for review.
590 post_data = request.POST.copy()
591 post_data['status'] = models.STATUS_CHOICES['Draft']
592 request.POST = post_data
593 return super().change_view(request, *args, **kwargs)
594
595 def get_readonly_fields(self, request, obj):
596 if not request.user.is_superuser:
597 return self.readonly_fields + ('for_qa',)
598 return self.readonly_fields
599
600 def get_queryset(self, request):
601 queryset = super().get_queryset(request)
602 if request.user.is_superuser:
603 return queryset
604 return queryset.filter(for_qa=False)
605
606 def get_form(self, request, obj=None, **kwargs):
607 form = super().get_form(request, obj, **kwargs)
608 form.current_user = request.user
609 return form
610
611 def make_published(self, request, queryset):
612 clean_queryset = queryset.exclude(status=models.STATUS_CHOICES['Published'])
613 no_snippets = clean_queryset.count()
614 no_already_published_snippets = queryset.count() - no_snippets
615
616 snippets = []
617 with transaction.atomic():
618 for snippet in clean_queryset:
619 snippet.status = models.STATUS_CHOICES['Published']
620 snippet.save()
621 snippets.append(snippet)
622
623 for snippet in snippets:
624 slack.send_slack('asr_published', snippet)
625
626 if no_already_published_snippets:
627 messages.warning(
628 request, f'Skipped {no_already_published_snippets} already published snippets.')
629 messages.success(request, f'Published {no_snippets} snippets.')
630
631 make_published.short_description = 'Publish selected snippets'
632
633 # Only users with Publishing permissions on all channels are allowed to
634 # mark snippets for publication in bulk.
635 make_published.allowed_permissions = (
636 'global_publish',
637 )
638
639 def has_global_publish_permission(self, request):
640 return request.user.has_perms([
641 'base.%s' % perm for perm in [
642 'publish_on_release',
643 'publish_on_beta',
644 'publish_on_aurora',
645 'publish_on_nightly',
646 'publish_on_esr',
647 ]
648 ])
649
650 def locale_list(self, obj):
651 num_locales = obj.locales.count()
652 locales = obj.locales.all()[:3]
653 active_locales = ', '.join([str(locale) for locale in locales])
654 if num_locales > 3:
655 active_locales += ' and {0} more.'.format(num_locales - 3)
656 return active_locales
657
658
659 class CampaignAdmin(admin.ModelAdmin):
660 readonly_fields = ('created', 'modified', 'creator',)
661 prepopulated_fields = {'slug': ('name',)}
662
663 fieldsets = (
664 ('ID', {'fields': ('name', 'slug')}),
665 ('Other Info', {
666 'fields': ('creator', ('created', 'modified')),
667 }),
668 )
669 search_fields = (
670 'name',
671 )
672
673 class Media:
674 js = (
675 'js/admin/jquery.are-you-sure.js',
676 'js/admin/alert-page-leaving.js',
677 )
678
679 def save_model(self, request, obj, form, change):
680 if not obj.creator_id:
681 obj.creator = request.user
682 statsd.incr('save.campaign')
683 super().save_model(request, obj, form, change)
684
685
686 class CategoryAdmin(admin.ModelAdmin):
687 readonly_fields = ('created', 'modified', 'creator',
688 'published_snippets_in_category', 'total_snippets_in_category')
689
690 fieldsets = (
691 ('ID', {
692 'fields': (
693 'name',
694 'description',
695 'published_snippets_in_category',
696 'total_snippets_in_category',
697 )
698 }),
699 ('Other Info', {
700 'fields': ('creator', ('created', 'modified')),
701 }),
702 )
703 search_fields = (
704 'name',
705 'description',
706 )
707
708 list_display = (
709 'name',
710 'published_snippets_in_category',
711 'total_snippets_in_category',
712 )
713
714 class Media:
715 js = (
716 'js/admin/jquery.are-you-sure.js',
717 'js/admin/alert-page-leaving.js',
718 )
719
720 def save_model(self, request, obj, form, change):
721 if not obj.creator_id:
722 obj.creator = request.user
723 statsd.incr('save.category')
724 super().save_model(request, obj, form, change)
725
726 def published_snippets_in_category(self, obj):
727 return obj.asrsnippets.filter(status=models.STATUS_CHOICES['Published']).count()
728
729 def total_snippets_in_category(self, obj):
730 return obj.asrsnippets.count()
731
732
733 class TargetAdmin(admin.ModelAdmin):
734 form = forms.TargetAdminForm
735 save_on_top = True
736 readonly_fields = (
737 'created',
738 'modified',
739 'creator',
740 'jexl_expr',
741 'snippets',
742 )
743 filter_horizontal = (
744 'client_match_rules',
745 )
746 search_fields = (
747 'name',
748 )
749 list_display = (
750 'name',
751 'number_of_snippets',
752 'number_of_published_snippets',
753 )
754 fieldsets = (
755 ('ID', {'fields': ('name',)}),
756 ('Product channels', {
757 'description': 'What channels will this snippet be available in?',
758 'fields': (('on_release', 'on_beta', 'on_aurora', 'on_nightly', 'on_esr'),)
759 }),
760 ('Targeting', {
761 'fields': (
762 'filtr_is_default_browser',
763 'filtr_updates_enabled',
764 'filtr_updates_autodownload_enabled',
765 'filtr_profile_age_created',
766 'filtr_firefox_version',
767 'filtr_previous_session_end',
768 'filtr_country',
769 'filtr_is_developer',
770 'filtr_current_search_engine',
771 'filtr_browser_addon',
772 'filtr_total_bookmarks_count',
773
774 )
775 }),
776 ('Accounts and Sync', {
777 'fields': (
778 'filtr_uses_firefox_sync',
779 'filtr_desktop_devices_count',
780 'filtr_mobile_devices_count',
781 'filtr_total_devices_count',
782 ),
783 }),
784 ('Advanced Targeting', {
785 'fields': (
786 'client_match_rules',
787 )
788 }),
789 ('Snippets', {
790 'fields': (
791 'snippets',
792 )
793 }),
794 ('Other Info', {
795 'fields': ('creator', ('created', 'modified'), 'jexl_expr'),
796 }),
797 )
798
799 class Media:
800 css = {
801 'all': (
802 'css/admin/ListSnippets.css',
803 )
804 }
805 js = (
806 'js/admin/jquery.are-you-sure.js',
807 'js/admin/alert-page-leaving.js',
808 )
809
810 def save_model(self, request, obj, form, change):
811 if not obj.creator_id:
812 obj.creator = request.user
813 statsd.incr('save.target')
814 super().save_model(request, obj, form, change)
815
816 def number_of_snippets(self, obj):
817 return obj.asrsnippet_set.count()
818
819 def number_of_published_snippets(self, obj):
820 return obj.asrsnippet_set.filter(status=models.STATUS_CHOICES['Published']).count()
821
822 def snippets(self, obj):
823 """Snippets using this Target."""
824 template = get_template('base/snippets_related_with_obj.jinja')
825 return mark_safe(template.render({'snippets': obj.asrsnippet_set.all().order_by('id'),
826 'type': 'Target'}))
827
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/snippets/base/admin/adminmodels.py b/snippets/base/admin/adminmodels.py
--- a/snippets/base/admin/adminmodels.py
+++ b/snippets/base/admin/adminmodels.py
@@ -142,8 +142,8 @@
super().save_model(request, obj, form, change)
def preview(self, obj):
- text = f'<img style="max-width:120px; max-height:120px;" src="{obj.image.url}"/>'
- return mark_safe(text)
+ template = get_template('base/preview_image.jinja')
+ return mark_safe(template.render({'image': obj.image}))
def snippets(self, obj):
"""Snippets using this icon."""
| {"golden_diff": "diff --git a/snippets/base/admin/adminmodels.py b/snippets/base/admin/adminmodels.py\n--- a/snippets/base/admin/adminmodels.py\n+++ b/snippets/base/admin/adminmodels.py\n@@ -142,8 +142,8 @@\n super().save_model(request, obj, form, change)\n \n def preview(self, obj):\n- text = f'<img style=\"max-width:120px; max-height:120px;\" src=\"{obj.image.url}\"/>'\n- return mark_safe(text)\n+ template = get_template('base/preview_image.jinja')\n+ return mark_safe(template.render({'image': obj.image}))\n \n def snippets(self, obj):\n \"\"\"Snippets using this icon.\"\"\"\n", "issue": "Create a preview with background (light and dark) for Icons \n\n", "before_files": [{"content": "import re\n\nfrom django.contrib import admin, messages\nfrom django.db import transaction\nfrom django.db.models import TextField, Q\nfrom django.template.loader import get_template\nfrom django.utils.safestring import mark_safe\n\nfrom reversion.admin import VersionAdmin\nfrom django_ace import AceWidget\nfrom django_statsd.clients import statsd\nfrom jinja2.meta import find_undeclared_variables\nfrom django_admin_listfilter_dropdown.filters import (RelatedDropdownFilter,\n RelatedOnlyDropdownFilter)\n\nfrom snippets.base import forms, models, slack\nfrom snippets.base.admin import actions, filters\n\n\nMATCH_LOCALE_REGEX = re.compile(r'(\\w+(?:-\\w+)*)')\nRESERVED_VARIABLES = ('_', 'snippet_id')\n\n\nclass ClientMatchRuleAdmin(VersionAdmin, admin.ModelAdmin):\n list_display = ('description', 'is_exclusion', 'startpage_version', 'name',\n 'version', 'locale', 'appbuildid', 'build_target',\n 'channel', 'os_version', 'distribution',\n 'distribution_version', 'modified')\n list_filter = ('name', 'version', 'os_version', 'appbuildid',\n 'build_target', 'channel', 'distribution', 'locale')\n save_on_top = True\n search_fields = ('description',)\n\n class Media:\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n\nclass LogEntryAdmin(admin.ModelAdmin):\n list_display = ('user', 'content_type', 'object_id', 'object_repr', 'change_message')\n list_filter = ('user', 'content_type')\n\n\nclass SnippetTemplateVariableInline(admin.TabularInline):\n model = models.SnippetTemplateVariable\n formset = forms.SnippetTemplateVariableInlineFormset\n max_num = 0\n can_delete = False\n readonly_fields = ('name',)\n fields = ('name', 'type', 'order', 'description')\n\n\nclass SnippetTemplateAdmin(VersionAdmin, admin.ModelAdmin):\n save_on_top = True\n list_display = ('name', 'priority', 'hidden')\n list_filter = ('hidden', 'startpage')\n inlines = (SnippetTemplateVariableInline,)\n formfield_overrides = {\n TextField: {'widget': AceWidget(mode='html', theme='github',\n width='1200px', height='500px')},\n }\n\n def save_related(self, request, form, formsets, change):\n \"\"\"\n After saving the related objects, remove and add\n SnippetTemplateVariables depending on how the template code changed.\n \"\"\"\n super(SnippetTemplateAdmin, self).save_related(request, form, formsets,\n change)\n\n # Parse the template code and find any undefined variables.\n ast = models.JINJA_ENV.env.parse(form.instance.code)\n new_vars = find_undeclared_variables(ast)\n var_manager = form.instance.variable_set\n\n # Filter out reserved variable names.\n new_vars = [x for x in new_vars if x not in RESERVED_VARIABLES]\n\n # Delete variables not in the new set.\n var_manager.filter(~Q(name__in=new_vars)).delete()\n\n # Create variables that don't exist.\n for i, variable in enumerate(new_vars, start=1):\n obj, _ = models.SnippetTemplateVariable.objects.get_or_create(\n template=form.instance, name=variable)\n if obj.order == 0:\n obj.order = i * 10\n obj.save()\n\n\nclass AddonAdmin(admin.ModelAdmin):\n list_display = ('name', 'guid')\n\n class Media:\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n\nclass IconAdmin(admin.ModelAdmin):\n search_fields = [\n 'name',\n 'image',\n ]\n readonly_fields = [\n 'height',\n 'width',\n 'preview',\n 'creator',\n 'created',\n 'snippets',\n ]\n list_display_links = [\n 'id',\n 'name',\n ]\n list_display = [\n 'id',\n 'name',\n 'width',\n 'height',\n 'preview',\n ]\n\n class Media:\n css = {\n 'all': (\n 'css/admin/ListSnippets.css',\n )\n }\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n super().save_model(request, obj, form, change)\n\n def preview(self, obj):\n text = f'<img style=\"max-width:120px; max-height:120px;\" src=\"{obj.image.url}\"/>'\n return mark_safe(text)\n\n def snippets(self, obj):\n \"\"\"Snippets using this icon.\"\"\"\n template = get_template('base/snippets_related_with_obj.jinja')\n return mark_safe(template.render({'snippets': obj.snippets, 'type': 'Icon'}))\n\n\nclass SimpleTemplateInline(admin.StackedInline):\n model = models.SimpleTemplate\n form = forms.SimpleTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'simple_snippet',\n ]\n raw_id_fields = [\n 'section_title_icon',\n 'title_icon',\n 'icon',\n ]\n\n fieldsets = (\n ('Title', {\n 'fields': ('title_icon', 'title'),\n }),\n ('Section', {\n 'fields': ('section_title_icon', 'section_title_text', 'section_title_url',),\n }),\n ('Main', {\n 'fields': ('icon', 'text', 'button_label', 'button_color', 'button_url'),\n }),\n ('Extra', {\n 'fields': ('block_button_text', 'tall', 'do_not_autoblock'),\n })\n\n )\n\n\nclass FundraisingTemplateInline(admin.StackedInline):\n model = models.FundraisingTemplate\n form = forms.FundraisingTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'eoy_snippet',\n ]\n raw_id_fields = [\n 'title_icon',\n 'icon',\n ]\n\n fieldsets = (\n ('Title', {\n 'fields': (\n 'title_icon',\n 'title'\n ),\n }),\n ('Main', {\n 'fields': (\n 'icon',\n 'text',\n 'text_color',\n 'background_color',\n 'highlight_color',\n )\n }),\n ('Form Configuration', {\n 'fields': (\n 'donation_form_url',\n 'currency_code',\n 'locale',\n 'selected_button',\n 'button_label',\n 'button_color',\n 'button_background_color',\n 'monthly_checkbox_label_text',\n )\n }),\n ('Donation', {\n 'fields': (\n ('donation_amount_first', 'donation_amount_second',\n 'donation_amount_third', 'donation_amount_fourth',),\n )\n }),\n ('Extra', {\n 'fields': ('block_button_text', 'test', 'do_not_autoblock'),\n })\n\n )\n\n\nclass FxASignupTemplateInline(admin.StackedInline):\n model = models.FxASignupTemplate\n form = forms.FxASignupTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'fxa_signup_snippet',\n ]\n raw_id_fields = [\n 'scene1_title_icon',\n 'scene1_icon',\n ]\n\n fieldsets = (\n ('Scene 1 Title', {\n 'fields': (\n 'scene1_title_icon',\n 'scene1_title'\n ),\n }),\n ('Scene 1 Main', {\n 'fields': (\n 'scene1_icon',\n 'scene1_text',\n 'scene1_button_label',\n 'scene1_button_color',\n 'scene1_button_background_color',\n )\n }),\n ('Scene 2 Title', {\n 'fields': ('scene2_title',),\n }),\n ('Scene 2 Main', {\n 'fields': (\n 'scene2_text',\n 'scene2_button_label',\n 'scene2_email_placeholder_text',\n 'scene2_dismiss_button_text',\n )\n }),\n\n ('Extra', {\n 'fields': (\n 'utm_term',\n 'utm_campaign',\n 'block_button_text',\n 'do_not_autoblock'\n ),\n })\n )\n\n\nclass NewsletterTemplateInline(admin.StackedInline):\n model = models.NewsletterTemplate\n form = forms.NewsletterTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'newsletter_snippet',\n ]\n raw_id_fields = [\n 'scene1_title_icon',\n 'scene1_icon',\n ]\n\n fieldsets = (\n ('Scene 1 Title', {\n 'fields': (\n 'scene1_title_icon',\n 'scene1_title'\n ),\n }),\n ('Scene 1 Main', {\n 'fields': (\n 'scene1_icon',\n 'scene1_text',\n 'scene1_button_label',\n 'scene1_button_color',\n 'scene1_button_background_color',\n )\n }),\n ('Scene 2 Title', {\n 'fields': ('scene2_title',),\n }),\n ('Scene 2 Main', {\n 'fields': (\n 'scene2_text',\n 'scene2_button_label',\n 'scene2_email_placeholder_text',\n 'scene2_privacy_html',\n 'scene2_newsletter',\n 'scene2_dismiss_button_text',\n 'locale',\n 'success_text',\n 'error_text',\n )\n }),\n\n ('Extra', {\n 'fields': (\n 'block_button_text',\n 'do_not_autoblock'\n ),\n })\n )\n\n\nclass SendToDeviceTemplateInline(admin.StackedInline):\n model = models.SendToDeviceTemplate\n form = forms.SendToDeviceTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'send_to_device_snippet',\n ]\n raw_id_fields = [\n 'scene1_title_icon',\n 'scene1_icon',\n 'scene2_icon',\n ]\n\n fieldsets = (\n ('Scene 1 Title', {\n 'fields': (\n 'scene1_title_icon',\n 'scene1_title'\n ),\n }),\n ('Scene 1 Main', {\n 'fields': (\n 'scene1_icon',\n 'scene1_text',\n 'scene1_button_label',\n 'scene1_button_color',\n 'scene1_button_background_color',\n )\n }),\n ('Scene 2 Title', {\n 'fields': ('scene2_title',),\n }),\n ('Scene 2 Main', {\n 'fields': (\n 'scene2_icon',\n 'scene2_text',\n\n 'scene2_button_label',\n 'scene2_input_placeholder',\n 'scene2_disclaimer_html',\n 'scene2_dismiss_button_text',\n\n 'locale',\n 'country',\n ('include_sms', 'message_id_sms',),\n 'message_id_email',\n 'success_title',\n 'success_text',\n 'error_text',\n )\n }),\n\n ('Extra', {\n 'fields': (\n 'block_button_text',\n 'do_not_autoblock'\n ),\n })\n )\n\n\nclass SimpleBelowSearchTemplateInline(admin.StackedInline):\n model = models.SimpleBelowSearchTemplate\n form = forms.SimpleBelowSearchTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'simple_below_search_snippet',\n ]\n raw_id_fields = [\n 'icon',\n ]\n\n fieldsets = (\n ('Main', {\n 'fields': ('icon', 'text'),\n }),\n ('Extra', {\n 'fields': ('block_button_text', 'do_not_autoblock'),\n })\n\n )\n\n\nclass ASRSnippetAdmin(admin.ModelAdmin):\n form = forms.ASRSnippetAdminForm\n inlines = [\n SimpleTemplateInline,\n FundraisingTemplateInline,\n FxASignupTemplateInline,\n NewsletterTemplateInline,\n SendToDeviceTemplateInline,\n SimpleBelowSearchTemplateInline,\n ]\n list_display_links = (\n 'id',\n 'name',\n )\n list_display = (\n 'id',\n 'name',\n 'status',\n 'locale_list',\n 'modified',\n )\n list_filter = (\n filters.ModifiedFilter,\n filters.TemplateFilter,\n ('locales', RelatedOnlyDropdownFilter),\n ('targets', RelatedOnlyDropdownFilter),\n 'status',\n filters.ChannelFilter,\n ('campaign', RelatedDropdownFilter),\n ('category', RelatedDropdownFilter),\n filters.ScheduledFilter,\n )\n search_fields = (\n 'name',\n 'id',\n 'campaign__name',\n 'targets__name',\n 'category__name',\n )\n autocomplete_fields = (\n 'campaign',\n 'category',\n )\n preserve_filters = True\n readonly_fields = (\n 'id',\n 'created',\n 'modified',\n 'uuid',\n 'creator',\n 'preview_url_light_theme',\n 'preview_url_dark_theme',\n\n )\n filter_horizontal = (\n 'targets',\n 'locales',\n )\n save_on_top = True\n save_as = True\n view_on_site = False\n actions = (\n actions.duplicate_snippets_action,\n 'make_published',\n )\n\n fieldsets = (\n ('ID', {\n 'fields': (\n 'id',\n 'name',\n 'status',\n 'creator',\n 'preview_url_light_theme',\n 'preview_url_dark_theme',\n )\n }),\n ('Content', {\n 'description': (\n '''\n <strong>Available deep links:</strong><br/>\n <ol>\n <li><code>special:accounts</code> to open Firefox Accounts</li>\n <li><code>special:appMenu</code> to open the hamburger menu</li>\n </ol><br/>\n <strong>Automatically add Snippet ID:</strong><br/>\n You can use <code>[[snippet_id]]</code> in any field and it\n will be automatically replaced by Snippet ID when served to users.\n <br/>\n Example: This is a <code><a href="https://example.com?utm_term=[[snippet_id]]">link</a></code>\n <br/>\n ''' # noqa\n ),\n 'fields': ('template_chooser',),\n 'classes': ('template-fieldset',)\n }),\n ('Publishing Options', {\n 'fields': (\n 'campaign',\n 'category',\n 'targets',\n ('publish_start', 'publish_end'),\n 'locales',\n 'weight',)\n }),\n ('Other Info', {\n 'fields': ('uuid', ('created', 'modified'), 'for_qa'),\n 'classes': ('collapse',)\n }),\n )\n\n class Media:\n css = {\n 'all': (\n 'css/admin/ASRSnippetAdmin.css',\n 'css/admin/IDFieldHighlight.css',\n 'css/admin/InlineTemplates.css',\n )\n }\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n 'js/admin/clipboard.min.js',\n 'js/admin/copy_preview.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.asrsnippet')\n super().save_model(request, obj, form, change)\n\n def preview_url_light_theme(self, obj):\n text = f'''\n <span id=\"previewLinkUrlLight\">{obj.get_preview_url()}</span>\n <button id=\"copyPreviewLink\" class=\"btn\"\n data-clipboard-target=\"#previewLinkUrlLight\"\n originalText=\"Copy to Clipboard\" type=\"button\">\n Copy to Clipboard\n </button>\n '''\n return mark_safe(text)\n preview_url_light_theme.short_description = 'Light Themed Preview URL'\n\n def preview_url_dark_theme(self, obj):\n text = f'''\n <span id=\"previewLinkUrlDark\">{obj.get_preview_url(dark=True)}</span>\n <button id=\"copyPreviewLink\" class=\"btn\"\n data-clipboard-target=\"#previewLinkUrlDark\"\n originalText=\"Copy to Clipboard\" type=\"button\">\n Copy to Clipboard\n </button>\n '''\n return mark_safe(text)\n preview_url_dark_theme.short_description = 'Dark Themed Preview URL'\n\n def change_view(self, request, *args, **kwargs):\n if request.method == 'POST' and '_saveasnew' in request.POST:\n # Always saved cloned snippets as un-published and un-check ready for review.\n post_data = request.POST.copy()\n post_data['status'] = models.STATUS_CHOICES['Draft']\n request.POST = post_data\n return super().change_view(request, *args, **kwargs)\n\n def get_readonly_fields(self, request, obj):\n if not request.user.is_superuser:\n return self.readonly_fields + ('for_qa',)\n return self.readonly_fields\n\n def get_queryset(self, request):\n queryset = super().get_queryset(request)\n if request.user.is_superuser:\n return queryset\n return queryset.filter(for_qa=False)\n\n def get_form(self, request, obj=None, **kwargs):\n form = super().get_form(request, obj, **kwargs)\n form.current_user = request.user\n return form\n\n def make_published(self, request, queryset):\n clean_queryset = queryset.exclude(status=models.STATUS_CHOICES['Published'])\n no_snippets = clean_queryset.count()\n no_already_published_snippets = queryset.count() - no_snippets\n\n snippets = []\n with transaction.atomic():\n for snippet in clean_queryset:\n snippet.status = models.STATUS_CHOICES['Published']\n snippet.save()\n snippets.append(snippet)\n\n for snippet in snippets:\n slack.send_slack('asr_published', snippet)\n\n if no_already_published_snippets:\n messages.warning(\n request, f'Skipped {no_already_published_snippets} already published snippets.')\n messages.success(request, f'Published {no_snippets} snippets.')\n\n make_published.short_description = 'Publish selected snippets'\n\n # Only users with Publishing permissions on all channels are allowed to\n # mark snippets for publication in bulk.\n make_published.allowed_permissions = (\n 'global_publish',\n )\n\n def has_global_publish_permission(self, request):\n return request.user.has_perms([\n 'base.%s' % perm for perm in [\n 'publish_on_release',\n 'publish_on_beta',\n 'publish_on_aurora',\n 'publish_on_nightly',\n 'publish_on_esr',\n ]\n ])\n\n def locale_list(self, obj):\n num_locales = obj.locales.count()\n locales = obj.locales.all()[:3]\n active_locales = ', '.join([str(locale) for locale in locales])\n if num_locales > 3:\n active_locales += ' and {0} more.'.format(num_locales - 3)\n return active_locales\n\n\nclass CampaignAdmin(admin.ModelAdmin):\n readonly_fields = ('created', 'modified', 'creator',)\n prepopulated_fields = {'slug': ('name',)}\n\n fieldsets = (\n ('ID', {'fields': ('name', 'slug')}),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified')),\n }),\n )\n search_fields = (\n 'name',\n )\n\n class Media:\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.campaign')\n super().save_model(request, obj, form, change)\n\n\nclass CategoryAdmin(admin.ModelAdmin):\n readonly_fields = ('created', 'modified', 'creator',\n 'published_snippets_in_category', 'total_snippets_in_category')\n\n fieldsets = (\n ('ID', {\n 'fields': (\n 'name',\n 'description',\n 'published_snippets_in_category',\n 'total_snippets_in_category',\n )\n }),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified')),\n }),\n )\n search_fields = (\n 'name',\n 'description',\n )\n\n list_display = (\n 'name',\n 'published_snippets_in_category',\n 'total_snippets_in_category',\n )\n\n class Media:\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.category')\n super().save_model(request, obj, form, change)\n\n def published_snippets_in_category(self, obj):\n return obj.asrsnippets.filter(status=models.STATUS_CHOICES['Published']).count()\n\n def total_snippets_in_category(self, obj):\n return obj.asrsnippets.count()\n\n\nclass TargetAdmin(admin.ModelAdmin):\n form = forms.TargetAdminForm\n save_on_top = True\n readonly_fields = (\n 'created',\n 'modified',\n 'creator',\n 'jexl_expr',\n 'snippets',\n )\n filter_horizontal = (\n 'client_match_rules',\n )\n search_fields = (\n 'name',\n )\n list_display = (\n 'name',\n 'number_of_snippets',\n 'number_of_published_snippets',\n )\n fieldsets = (\n ('ID', {'fields': ('name',)}),\n ('Product channels', {\n 'description': 'What channels will this snippet be available in?',\n 'fields': (('on_release', 'on_beta', 'on_aurora', 'on_nightly', 'on_esr'),)\n }),\n ('Targeting', {\n 'fields': (\n 'filtr_is_default_browser',\n 'filtr_updates_enabled',\n 'filtr_updates_autodownload_enabled',\n 'filtr_profile_age_created',\n 'filtr_firefox_version',\n 'filtr_previous_session_end',\n 'filtr_country',\n 'filtr_is_developer',\n 'filtr_current_search_engine',\n 'filtr_browser_addon',\n 'filtr_total_bookmarks_count',\n\n )\n }),\n ('Accounts and Sync', {\n 'fields': (\n 'filtr_uses_firefox_sync',\n 'filtr_desktop_devices_count',\n 'filtr_mobile_devices_count',\n 'filtr_total_devices_count',\n ),\n }),\n ('Advanced Targeting', {\n 'fields': (\n 'client_match_rules',\n )\n }),\n ('Snippets', {\n 'fields': (\n 'snippets',\n )\n }),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified'), 'jexl_expr'),\n }),\n )\n\n class Media:\n css = {\n 'all': (\n 'css/admin/ListSnippets.css',\n )\n }\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.target')\n super().save_model(request, obj, form, change)\n\n def number_of_snippets(self, obj):\n return obj.asrsnippet_set.count()\n\n def number_of_published_snippets(self, obj):\n return obj.asrsnippet_set.filter(status=models.STATUS_CHOICES['Published']).count()\n\n def snippets(self, obj):\n \"\"\"Snippets using this Target.\"\"\"\n template = get_template('base/snippets_related_with_obj.jinja')\n return mark_safe(template.render({'snippets': obj.asrsnippet_set.all().order_by('id'),\n 'type': 'Target'}))\n", "path": "snippets/base/admin/adminmodels.py"}], "after_files": [{"content": "import re\n\nfrom django.contrib import admin, messages\nfrom django.db import transaction\nfrom django.db.models import TextField, Q\nfrom django.template.loader import get_template\nfrom django.utils.safestring import mark_safe\n\nfrom reversion.admin import VersionAdmin\nfrom django_ace import AceWidget\nfrom django_statsd.clients import statsd\nfrom jinja2.meta import find_undeclared_variables\nfrom django_admin_listfilter_dropdown.filters import (RelatedDropdownFilter,\n RelatedOnlyDropdownFilter)\n\nfrom snippets.base import forms, models, slack\nfrom snippets.base.admin import actions, filters\n\n\nMATCH_LOCALE_REGEX = re.compile(r'(\\w+(?:-\\w+)*)')\nRESERVED_VARIABLES = ('_', 'snippet_id')\n\n\nclass ClientMatchRuleAdmin(VersionAdmin, admin.ModelAdmin):\n list_display = ('description', 'is_exclusion', 'startpage_version', 'name',\n 'version', 'locale', 'appbuildid', 'build_target',\n 'channel', 'os_version', 'distribution',\n 'distribution_version', 'modified')\n list_filter = ('name', 'version', 'os_version', 'appbuildid',\n 'build_target', 'channel', 'distribution', 'locale')\n save_on_top = True\n search_fields = ('description',)\n\n class Media:\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n\nclass LogEntryAdmin(admin.ModelAdmin):\n list_display = ('user', 'content_type', 'object_id', 'object_repr', 'change_message')\n list_filter = ('user', 'content_type')\n\n\nclass SnippetTemplateVariableInline(admin.TabularInline):\n model = models.SnippetTemplateVariable\n formset = forms.SnippetTemplateVariableInlineFormset\n max_num = 0\n can_delete = False\n readonly_fields = ('name',)\n fields = ('name', 'type', 'order', 'description')\n\n\nclass SnippetTemplateAdmin(VersionAdmin, admin.ModelAdmin):\n save_on_top = True\n list_display = ('name', 'priority', 'hidden')\n list_filter = ('hidden', 'startpage')\n inlines = (SnippetTemplateVariableInline,)\n formfield_overrides = {\n TextField: {'widget': AceWidget(mode='html', theme='github',\n width='1200px', height='500px')},\n }\n\n def save_related(self, request, form, formsets, change):\n \"\"\"\n After saving the related objects, remove and add\n SnippetTemplateVariables depending on how the template code changed.\n \"\"\"\n super(SnippetTemplateAdmin, self).save_related(request, form, formsets,\n change)\n\n # Parse the template code and find any undefined variables.\n ast = models.JINJA_ENV.env.parse(form.instance.code)\n new_vars = find_undeclared_variables(ast)\n var_manager = form.instance.variable_set\n\n # Filter out reserved variable names.\n new_vars = [x for x in new_vars if x not in RESERVED_VARIABLES]\n\n # Delete variables not in the new set.\n var_manager.filter(~Q(name__in=new_vars)).delete()\n\n # Create variables that don't exist.\n for i, variable in enumerate(new_vars, start=1):\n obj, _ = models.SnippetTemplateVariable.objects.get_or_create(\n template=form.instance, name=variable)\n if obj.order == 0:\n obj.order = i * 10\n obj.save()\n\n\nclass AddonAdmin(admin.ModelAdmin):\n list_display = ('name', 'guid')\n\n class Media:\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n\nclass IconAdmin(admin.ModelAdmin):\n search_fields = [\n 'name',\n 'image',\n ]\n readonly_fields = [\n 'height',\n 'width',\n 'preview',\n 'creator',\n 'created',\n 'snippets',\n ]\n list_display_links = [\n 'id',\n 'name',\n ]\n list_display = [\n 'id',\n 'name',\n 'width',\n 'height',\n 'preview',\n ]\n\n class Media:\n css = {\n 'all': (\n 'css/admin/ListSnippets.css',\n )\n }\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n super().save_model(request, obj, form, change)\n\n def preview(self, obj):\n template = get_template('base/preview_image.jinja')\n return mark_safe(template.render({'image': obj.image}))\n\n def snippets(self, obj):\n \"\"\"Snippets using this icon.\"\"\"\n template = get_template('base/snippets_related_with_obj.jinja')\n return mark_safe(template.render({'snippets': obj.snippets, 'type': 'Icon'}))\n\n\nclass SimpleTemplateInline(admin.StackedInline):\n model = models.SimpleTemplate\n form = forms.SimpleTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'simple_snippet',\n ]\n raw_id_fields = [\n 'section_title_icon',\n 'title_icon',\n 'icon',\n ]\n\n fieldsets = (\n ('Title', {\n 'fields': ('title_icon', 'title'),\n }),\n ('Section', {\n 'fields': ('section_title_icon', 'section_title_text', 'section_title_url',),\n }),\n ('Main', {\n 'fields': ('icon', 'text', 'button_label', 'button_color', 'button_url'),\n }),\n ('Extra', {\n 'fields': ('block_button_text', 'tall', 'do_not_autoblock'),\n })\n\n )\n\n\nclass FundraisingTemplateInline(admin.StackedInline):\n model = models.FundraisingTemplate\n form = forms.FundraisingTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'eoy_snippet',\n ]\n raw_id_fields = [\n 'title_icon',\n 'icon',\n ]\n\n fieldsets = (\n ('Title', {\n 'fields': (\n 'title_icon',\n 'title'\n ),\n }),\n ('Main', {\n 'fields': (\n 'icon',\n 'text',\n 'text_color',\n 'background_color',\n 'highlight_color',\n )\n }),\n ('Form Configuration', {\n 'fields': (\n 'donation_form_url',\n 'currency_code',\n 'locale',\n 'selected_button',\n 'button_label',\n 'button_color',\n 'button_background_color',\n 'monthly_checkbox_label_text',\n )\n }),\n ('Donation', {\n 'fields': (\n ('donation_amount_first', 'donation_amount_second',\n 'donation_amount_third', 'donation_amount_fourth',),\n )\n }),\n ('Extra', {\n 'fields': ('block_button_text', 'test', 'do_not_autoblock'),\n })\n\n )\n\n\nclass FxASignupTemplateInline(admin.StackedInline):\n model = models.FxASignupTemplate\n form = forms.FxASignupTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'fxa_signup_snippet',\n ]\n raw_id_fields = [\n 'scene1_title_icon',\n 'scene1_icon',\n ]\n\n fieldsets = (\n ('Scene 1 Title', {\n 'fields': (\n 'scene1_title_icon',\n 'scene1_title'\n ),\n }),\n ('Scene 1 Main', {\n 'fields': (\n 'scene1_icon',\n 'scene1_text',\n 'scene1_button_label',\n 'scene1_button_color',\n 'scene1_button_background_color',\n )\n }),\n ('Scene 2 Title', {\n 'fields': ('scene2_title',),\n }),\n ('Scene 2 Main', {\n 'fields': (\n 'scene2_text',\n 'scene2_button_label',\n 'scene2_email_placeholder_text',\n 'scene2_dismiss_button_text',\n )\n }),\n\n ('Extra', {\n 'fields': (\n 'utm_term',\n 'utm_campaign',\n 'block_button_text',\n 'do_not_autoblock'\n ),\n })\n )\n\n\nclass NewsletterTemplateInline(admin.StackedInline):\n model = models.NewsletterTemplate\n form = forms.NewsletterTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'newsletter_snippet',\n ]\n raw_id_fields = [\n 'scene1_title_icon',\n 'scene1_icon',\n ]\n\n fieldsets = (\n ('Scene 1 Title', {\n 'fields': (\n 'scene1_title_icon',\n 'scene1_title'\n ),\n }),\n ('Scene 1 Main', {\n 'fields': (\n 'scene1_icon',\n 'scene1_text',\n 'scene1_button_label',\n 'scene1_button_color',\n 'scene1_button_background_color',\n )\n }),\n ('Scene 2 Title', {\n 'fields': ('scene2_title',),\n }),\n ('Scene 2 Main', {\n 'fields': (\n 'scene2_text',\n 'scene2_button_label',\n 'scene2_email_placeholder_text',\n 'scene2_privacy_html',\n 'scene2_newsletter',\n 'scene2_dismiss_button_text',\n 'locale',\n 'success_text',\n 'error_text',\n )\n }),\n\n ('Extra', {\n 'fields': (\n 'block_button_text',\n 'do_not_autoblock'\n ),\n })\n )\n\n\nclass SendToDeviceTemplateInline(admin.StackedInline):\n model = models.SendToDeviceTemplate\n form = forms.SendToDeviceTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'send_to_device_snippet',\n ]\n raw_id_fields = [\n 'scene1_title_icon',\n 'scene1_icon',\n 'scene2_icon',\n ]\n\n fieldsets = (\n ('Scene 1 Title', {\n 'fields': (\n 'scene1_title_icon',\n 'scene1_title'\n ),\n }),\n ('Scene 1 Main', {\n 'fields': (\n 'scene1_icon',\n 'scene1_text',\n 'scene1_button_label',\n 'scene1_button_color',\n 'scene1_button_background_color',\n )\n }),\n ('Scene 2 Title', {\n 'fields': ('scene2_title',),\n }),\n ('Scene 2 Main', {\n 'fields': (\n 'scene2_icon',\n 'scene2_text',\n\n 'scene2_button_label',\n 'scene2_input_placeholder',\n 'scene2_disclaimer_html',\n 'scene2_dismiss_button_text',\n\n 'locale',\n 'country',\n ('include_sms', 'message_id_sms',),\n 'message_id_email',\n 'success_title',\n 'success_text',\n 'error_text',\n )\n }),\n\n ('Extra', {\n 'fields': (\n 'block_button_text',\n 'do_not_autoblock'\n ),\n })\n )\n\n\nclass SimpleBelowSearchTemplateInline(admin.StackedInline):\n model = models.SimpleBelowSearchTemplate\n form = forms.SimpleBelowSearchTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'simple_below_search_snippet',\n ]\n raw_id_fields = [\n 'icon',\n ]\n\n fieldsets = (\n ('Main', {\n 'fields': ('icon', 'text'),\n }),\n ('Extra', {\n 'fields': ('block_button_text', 'do_not_autoblock'),\n })\n\n )\n\n\nclass ASRSnippetAdmin(admin.ModelAdmin):\n form = forms.ASRSnippetAdminForm\n inlines = [\n SimpleTemplateInline,\n FundraisingTemplateInline,\n FxASignupTemplateInline,\n NewsletterTemplateInline,\n SendToDeviceTemplateInline,\n SimpleBelowSearchTemplateInline,\n ]\n list_display_links = (\n 'id',\n 'name',\n )\n list_display = (\n 'id',\n 'name',\n 'status',\n 'locale_list',\n 'modified',\n )\n list_filter = (\n filters.ModifiedFilter,\n filters.TemplateFilter,\n ('locales', RelatedOnlyDropdownFilter),\n ('targets', RelatedOnlyDropdownFilter),\n 'status',\n filters.ChannelFilter,\n ('campaign', RelatedDropdownFilter),\n ('category', RelatedDropdownFilter),\n filters.ScheduledFilter,\n )\n search_fields = (\n 'name',\n 'id',\n 'campaign__name',\n 'targets__name',\n 'category__name',\n )\n autocomplete_fields = (\n 'campaign',\n 'category',\n )\n preserve_filters = True\n readonly_fields = (\n 'id',\n 'created',\n 'modified',\n 'uuid',\n 'creator',\n 'preview_url_light_theme',\n 'preview_url_dark_theme',\n\n )\n filter_horizontal = (\n 'targets',\n 'locales',\n )\n save_on_top = True\n save_as = True\n view_on_site = False\n actions = (\n actions.duplicate_snippets_action,\n 'make_published',\n )\n\n fieldsets = (\n ('ID', {\n 'fields': (\n 'id',\n 'name',\n 'status',\n 'creator',\n 'preview_url_light_theme',\n 'preview_url_dark_theme',\n )\n }),\n ('Content', {\n 'description': (\n '''\n <strong>Available deep links:</strong><br/>\n <ol>\n <li><code>special:accounts</code> to open Firefox Accounts</li>\n <li><code>special:appMenu</code> to open the hamburger menu</li>\n </ol><br/>\n <strong>Automatically add Snippet ID:</strong><br/>\n You can use <code>[[snippet_id]]</code> in any field and it\n will be automatically replaced by Snippet ID when served to users.\n <br/>\n Example: This is a <code><a href="https://example.com?utm_term=[[snippet_id]]">link</a></code>\n <br/>\n ''' # noqa\n ),\n 'fields': ('template_chooser',),\n 'classes': ('template-fieldset',)\n }),\n ('Publishing Options', {\n 'fields': (\n 'campaign',\n 'category',\n 'targets',\n ('publish_start', 'publish_end'),\n 'locales',\n 'weight',)\n }),\n ('Other Info', {\n 'fields': ('uuid', ('created', 'modified'), 'for_qa'),\n 'classes': ('collapse',)\n }),\n )\n\n class Media:\n css = {\n 'all': (\n 'css/admin/ASRSnippetAdmin.css',\n 'css/admin/IDFieldHighlight.css',\n 'css/admin/InlineTemplates.css',\n )\n }\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n 'js/admin/clipboard.min.js',\n 'js/admin/copy_preview.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.asrsnippet')\n super().save_model(request, obj, form, change)\n\n def preview_url_light_theme(self, obj):\n text = f'''\n <span id=\"previewLinkUrlLight\">{obj.get_preview_url()}</span>\n <button id=\"copyPreviewLink\" class=\"btn\"\n data-clipboard-target=\"#previewLinkUrlLight\"\n originalText=\"Copy to Clipboard\" type=\"button\">\n Copy to Clipboard\n </button>\n '''\n return mark_safe(text)\n preview_url_light_theme.short_description = 'Light Themed Preview URL'\n\n def preview_url_dark_theme(self, obj):\n text = f'''\n <span id=\"previewLinkUrlDark\">{obj.get_preview_url(dark=True)}</span>\n <button id=\"copyPreviewLink\" class=\"btn\"\n data-clipboard-target=\"#previewLinkUrlDark\"\n originalText=\"Copy to Clipboard\" type=\"button\">\n Copy to Clipboard\n </button>\n '''\n return mark_safe(text)\n preview_url_dark_theme.short_description = 'Dark Themed Preview URL'\n\n def change_view(self, request, *args, **kwargs):\n if request.method == 'POST' and '_saveasnew' in request.POST:\n # Always saved cloned snippets as un-published and un-check ready for review.\n post_data = request.POST.copy()\n post_data['status'] = models.STATUS_CHOICES['Draft']\n request.POST = post_data\n return super().change_view(request, *args, **kwargs)\n\n def get_readonly_fields(self, request, obj):\n if not request.user.is_superuser:\n return self.readonly_fields + ('for_qa',)\n return self.readonly_fields\n\n def get_queryset(self, request):\n queryset = super().get_queryset(request)\n if request.user.is_superuser:\n return queryset\n return queryset.filter(for_qa=False)\n\n def get_form(self, request, obj=None, **kwargs):\n form = super().get_form(request, obj, **kwargs)\n form.current_user = request.user\n return form\n\n def make_published(self, request, queryset):\n clean_queryset = queryset.exclude(status=models.STATUS_CHOICES['Published'])\n no_snippets = clean_queryset.count()\n no_already_published_snippets = queryset.count() - no_snippets\n\n snippets = []\n with transaction.atomic():\n for snippet in clean_queryset:\n snippet.status = models.STATUS_CHOICES['Published']\n snippet.save()\n snippets.append(snippet)\n\n for snippet in snippets:\n slack.send_slack('asr_published', snippet)\n\n if no_already_published_snippets:\n messages.warning(\n request, f'Skipped {no_already_published_snippets} already published snippets.')\n messages.success(request, f'Published {no_snippets} snippets.')\n\n make_published.short_description = 'Publish selected snippets'\n\n # Only users with Publishing permissions on all channels are allowed to\n # mark snippets for publication in bulk.\n make_published.allowed_permissions = (\n 'global_publish',\n )\n\n def has_global_publish_permission(self, request):\n return request.user.has_perms([\n 'base.%s' % perm for perm in [\n 'publish_on_release',\n 'publish_on_beta',\n 'publish_on_aurora',\n 'publish_on_nightly',\n 'publish_on_esr',\n ]\n ])\n\n def locale_list(self, obj):\n num_locales = obj.locales.count()\n locales = obj.locales.all()[:3]\n active_locales = ', '.join([str(locale) for locale in locales])\n if num_locales > 3:\n active_locales += ' and {0} more.'.format(num_locales - 3)\n return active_locales\n\n\nclass CampaignAdmin(admin.ModelAdmin):\n readonly_fields = ('created', 'modified', 'creator',)\n prepopulated_fields = {'slug': ('name',)}\n\n fieldsets = (\n ('ID', {'fields': ('name', 'slug')}),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified')),\n }),\n )\n search_fields = (\n 'name',\n )\n\n class Media:\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.campaign')\n super().save_model(request, obj, form, change)\n\n\nclass CategoryAdmin(admin.ModelAdmin):\n readonly_fields = ('created', 'modified', 'creator',\n 'published_snippets_in_category', 'total_snippets_in_category')\n\n fieldsets = (\n ('ID', {\n 'fields': (\n 'name',\n 'description',\n 'published_snippets_in_category',\n 'total_snippets_in_category',\n )\n }),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified')),\n }),\n )\n search_fields = (\n 'name',\n 'description',\n )\n\n list_display = (\n 'name',\n 'published_snippets_in_category',\n 'total_snippets_in_category',\n )\n\n class Media:\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.category')\n super().save_model(request, obj, form, change)\n\n def published_snippets_in_category(self, obj):\n return obj.asrsnippets.filter(status=models.STATUS_CHOICES['Published']).count()\n\n def total_snippets_in_category(self, obj):\n return obj.asrsnippets.count()\n\n\nclass TargetAdmin(admin.ModelAdmin):\n form = forms.TargetAdminForm\n save_on_top = True\n readonly_fields = (\n 'created',\n 'modified',\n 'creator',\n 'jexl_expr',\n 'snippets',\n )\n filter_horizontal = (\n 'client_match_rules',\n )\n search_fields = (\n 'name',\n )\n list_display = (\n 'name',\n 'number_of_snippets',\n 'number_of_published_snippets',\n )\n fieldsets = (\n ('ID', {'fields': ('name',)}),\n ('Product channels', {\n 'description': 'What channels will this snippet be available in?',\n 'fields': (('on_release', 'on_beta', 'on_aurora', 'on_nightly', 'on_esr'),)\n }),\n ('Targeting', {\n 'fields': (\n 'filtr_is_default_browser',\n 'filtr_updates_enabled',\n 'filtr_updates_autodownload_enabled',\n 'filtr_profile_age_created',\n 'filtr_firefox_version',\n 'filtr_previous_session_end',\n 'filtr_country',\n 'filtr_is_developer',\n 'filtr_current_search_engine',\n 'filtr_browser_addon',\n 'filtr_total_bookmarks_count',\n\n )\n }),\n ('Accounts and Sync', {\n 'fields': (\n 'filtr_uses_firefox_sync',\n 'filtr_desktop_devices_count',\n 'filtr_mobile_devices_count',\n 'filtr_total_devices_count',\n ),\n }),\n ('Advanced Targeting', {\n 'fields': (\n 'client_match_rules',\n )\n }),\n ('Snippets', {\n 'fields': (\n 'snippets',\n )\n }),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified'), 'jexl_expr'),\n }),\n )\n\n class Media:\n css = {\n 'all': (\n 'css/admin/ListSnippets.css',\n )\n }\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.target')\n super().save_model(request, obj, form, change)\n\n def number_of_snippets(self, obj):\n return obj.asrsnippet_set.count()\n\n def number_of_published_snippets(self, obj):\n return obj.asrsnippet_set.filter(status=models.STATUS_CHOICES['Published']).count()\n\n def snippets(self, obj):\n \"\"\"Snippets using this Target.\"\"\"\n template = get_template('base/snippets_related_with_obj.jinja')\n return mark_safe(template.render({'snippets': obj.asrsnippet_set.all().order_by('id'),\n 'type': 'Target'}))\n", "path": "snippets/base/admin/adminmodels.py"}]} |
gh_patches_debug_1596 | rasdani/github-patches | git_diff | awslabs__gluonts-1132 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update pandas dependency
As documented in #967, pandas will be fixing the breaking change that led us to fix the dependency to `<1.1`, see pandas-dev/pandas#37267
Once that is resolved, we could remove the constraint.
*Edit:* we should also make sure to find a solution to #965 first
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/gluonts/time_feature/_base.py`
Content:
```
1 # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License").
4 # You may not use this file except in compliance with the License.
5 # A copy of the License is located at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # or in the "license" file accompanying this file. This file is distributed
10 # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
11 # express or implied. See the License for the specific language governing
12 # permissions and limitations under the License.
13
14 from typing import List
15
16 # Third-party imports
17 import numpy as np
18 import pandas as pd
19 from pandas.tseries import offsets
20 from pandas.tseries.frequencies import to_offset
21
22 # First-party imports
23 from gluonts.core.component import validated
24
25
26 class TimeFeature:
27 """
28 Base class for features that only depend on time.
29 """
30
31 @validated()
32 def __init__(self, normalized: bool = True):
33 self.normalized = normalized
34
35 def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
36 pass
37
38 def __repr__(self):
39 return self.__class__.__name__ + "()"
40
41
42 class MinuteOfHour(TimeFeature):
43 """
44 Minute of hour encoded as value between [-0.5, 0.5]
45 """
46
47 def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
48 if self.normalized:
49 return index.minute / 59.0 - 0.5
50 else:
51 return index.minute.map(float)
52
53
54 class HourOfDay(TimeFeature):
55 """
56 Hour of day encoded as value between [-0.5, 0.5]
57 """
58
59 def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
60 if self.normalized:
61 return index.hour / 23.0 - 0.5
62 else:
63 return index.hour.map(float)
64
65
66 class DayOfWeek(TimeFeature):
67 """
68 Hour of day encoded as value between [-0.5, 0.5]
69 """
70
71 def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
72 if self.normalized:
73 return index.dayofweek / 6.0 - 0.5
74 else:
75 return index.dayofweek.map(float)
76
77
78 class DayOfMonth(TimeFeature):
79 """
80 Day of month encoded as value between [-0.5, 0.5]
81 """
82
83 def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
84 if self.normalized:
85 return index.day / 30.0 - 0.5
86 else:
87 return index.day.map(float)
88
89
90 class DayOfYear(TimeFeature):
91 """
92 Day of year encoded as value between [-0.5, 0.5]
93 """
94
95 def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
96 if self.normalized:
97 return index.dayofyear / 364.0 - 0.5
98 else:
99 return index.dayofyear.map(float)
100
101
102 class MonthOfYear(TimeFeature):
103 """
104 Month of year encoded as value between [-0.5, 0.5]
105 """
106
107 def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
108 if self.normalized:
109 return index.month / 11.0 - 0.5
110 else:
111 return index.month.map(float)
112
113
114 class WeekOfYear(TimeFeature):
115 """
116 Week of year encoded as value between [-0.5, 0.5]
117 """
118
119 def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
120 if self.normalized:
121 return index.weekofyear / 51.0 - 0.5
122 else:
123 return index.weekofyear.map(float)
124
125
126 def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:
127 """
128 Returns a list of time features that will be appropriate for the given frequency string.
129
130 Parameters
131 ----------
132
133 freq_str
134 Frequency string of the form [multiple][granularity] such as "12H", "5min", "1D" etc.
135
136 """
137
138 features_by_offsets = {
139 offsets.YearOffset: [],
140 offsets.MonthOffset: [MonthOfYear],
141 offsets.Week: [DayOfMonth, WeekOfYear],
142 offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],
143 offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],
144 offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],
145 offsets.Minute: [
146 MinuteOfHour,
147 HourOfDay,
148 DayOfWeek,
149 DayOfMonth,
150 DayOfYear,
151 ],
152 }
153
154 offset = to_offset(freq_str)
155
156 for offset_type, feature_classes in features_by_offsets.items():
157 if isinstance(offset, offset_type):
158 return [cls() for cls in feature_classes]
159
160 supported_freq_msg = f"""
161 Unsupported frequency {freq_str}
162
163 The following frequencies are supported:
164
165 Y - yearly
166 alias: A
167 M - monthly
168 W - weekly
169 D - daily
170 B - business days
171 H - hourly
172 T - minutely
173 alias: min
174 """
175 raise RuntimeError(supported_freq_msg)
176
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/gluonts/time_feature/_base.py b/src/gluonts/time_feature/_base.py
--- a/src/gluonts/time_feature/_base.py
+++ b/src/gluonts/time_feature/_base.py
@@ -136,8 +136,8 @@
"""
features_by_offsets = {
- offsets.YearOffset: [],
- offsets.MonthOffset: [MonthOfYear],
+ offsets.YearEnd: [],
+ offsets.MonthEnd: [MonthOfYear],
offsets.Week: [DayOfMonth, WeekOfYear],
offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],
offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],
| {"golden_diff": "diff --git a/src/gluonts/time_feature/_base.py b/src/gluonts/time_feature/_base.py\n--- a/src/gluonts/time_feature/_base.py\n+++ b/src/gluonts/time_feature/_base.py\n@@ -136,8 +136,8 @@\n \"\"\"\n \n features_by_offsets = {\n- offsets.YearOffset: [],\n- offsets.MonthOffset: [MonthOfYear],\n+ offsets.YearEnd: [],\n+ offsets.MonthEnd: [MonthOfYear],\n offsets.Week: [DayOfMonth, WeekOfYear],\n offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],\n offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],\n", "issue": "Update pandas dependency\nAs documented in #967, pandas will be fixing the breaking change that led us to fix the dependency to `<1.1`, see pandas-dev/pandas#37267\r\n\r\nOnce that is resolved, we could remove the constraint.\r\n\r\n*Edit:* we should also make sure to find a solution to #965 first\n", "before_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\nfrom typing import List\n\n# Third-party imports\nimport numpy as np\nimport pandas as pd\nfrom pandas.tseries import offsets\nfrom pandas.tseries.frequencies import to_offset\n\n# First-party imports\nfrom gluonts.core.component import validated\n\n\nclass TimeFeature:\n \"\"\"\n Base class for features that only depend on time.\n \"\"\"\n\n @validated()\n def __init__(self, normalized: bool = True):\n self.normalized = normalized\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n pass\n\n def __repr__(self):\n return self.__class__.__name__ + \"()\"\n\n\nclass MinuteOfHour(TimeFeature):\n \"\"\"\n Minute of hour encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.minute / 59.0 - 0.5\n else:\n return index.minute.map(float)\n\n\nclass HourOfDay(TimeFeature):\n \"\"\"\n Hour of day encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.hour / 23.0 - 0.5\n else:\n return index.hour.map(float)\n\n\nclass DayOfWeek(TimeFeature):\n \"\"\"\n Hour of day encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.dayofweek / 6.0 - 0.5\n else:\n return index.dayofweek.map(float)\n\n\nclass DayOfMonth(TimeFeature):\n \"\"\"\n Day of month encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.day / 30.0 - 0.5\n else:\n return index.day.map(float)\n\n\nclass DayOfYear(TimeFeature):\n \"\"\"\n Day of year encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.dayofyear / 364.0 - 0.5\n else:\n return index.dayofyear.map(float)\n\n\nclass MonthOfYear(TimeFeature):\n \"\"\"\n Month of year encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.month / 11.0 - 0.5\n else:\n return index.month.map(float)\n\n\nclass WeekOfYear(TimeFeature):\n \"\"\"\n Week of year encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.weekofyear / 51.0 - 0.5\n else:\n return index.weekofyear.map(float)\n\n\ndef time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:\n \"\"\"\n Returns a list of time features that will be appropriate for the given frequency string.\n\n Parameters\n ----------\n\n freq_str\n Frequency string of the form [multiple][granularity] such as \"12H\", \"5min\", \"1D\" etc.\n\n \"\"\"\n\n features_by_offsets = {\n offsets.YearOffset: [],\n offsets.MonthOffset: [MonthOfYear],\n offsets.Week: [DayOfMonth, WeekOfYear],\n offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],\n offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],\n offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],\n offsets.Minute: [\n MinuteOfHour,\n HourOfDay,\n DayOfWeek,\n DayOfMonth,\n DayOfYear,\n ],\n }\n\n offset = to_offset(freq_str)\n\n for offset_type, feature_classes in features_by_offsets.items():\n if isinstance(offset, offset_type):\n return [cls() for cls in feature_classes]\n\n supported_freq_msg = f\"\"\"\n Unsupported frequency {freq_str}\n\n The following frequencies are supported:\n\n Y - yearly\n alias: A\n M - monthly\n W - weekly\n D - daily\n B - business days\n H - hourly\n T - minutely\n alias: min\n \"\"\"\n raise RuntimeError(supported_freq_msg)\n", "path": "src/gluonts/time_feature/_base.py"}], "after_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\nfrom typing import List\n\n# Third-party imports\nimport numpy as np\nimport pandas as pd\nfrom pandas.tseries import offsets\nfrom pandas.tseries.frequencies import to_offset\n\n# First-party imports\nfrom gluonts.core.component import validated\n\n\nclass TimeFeature:\n \"\"\"\n Base class for features that only depend on time.\n \"\"\"\n\n @validated()\n def __init__(self, normalized: bool = True):\n self.normalized = normalized\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n pass\n\n def __repr__(self):\n return self.__class__.__name__ + \"()\"\n\n\nclass MinuteOfHour(TimeFeature):\n \"\"\"\n Minute of hour encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.minute / 59.0 - 0.5\n else:\n return index.minute.map(float)\n\n\nclass HourOfDay(TimeFeature):\n \"\"\"\n Hour of day encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.hour / 23.0 - 0.5\n else:\n return index.hour.map(float)\n\n\nclass DayOfWeek(TimeFeature):\n \"\"\"\n Hour of day encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.dayofweek / 6.0 - 0.5\n else:\n return index.dayofweek.map(float)\n\n\nclass DayOfMonth(TimeFeature):\n \"\"\"\n Day of month encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.day / 30.0 - 0.5\n else:\n return index.day.map(float)\n\n\nclass DayOfYear(TimeFeature):\n \"\"\"\n Day of year encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.dayofyear / 364.0 - 0.5\n else:\n return index.dayofyear.map(float)\n\n\nclass MonthOfYear(TimeFeature):\n \"\"\"\n Month of year encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.month / 11.0 - 0.5\n else:\n return index.month.map(float)\n\n\nclass WeekOfYear(TimeFeature):\n \"\"\"\n Week of year encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.weekofyear / 51.0 - 0.5\n else:\n return index.weekofyear.map(float)\n\n\ndef time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:\n \"\"\"\n Returns a list of time features that will be appropriate for the given frequency string.\n\n Parameters\n ----------\n\n freq_str\n Frequency string of the form [multiple][granularity] such as \"12H\", \"5min\", \"1D\" etc.\n\n \"\"\"\n\n features_by_offsets = {\n offsets.YearEnd: [],\n offsets.MonthEnd: [MonthOfYear],\n offsets.Week: [DayOfMonth, WeekOfYear],\n offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],\n offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],\n offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],\n offsets.Minute: [\n MinuteOfHour,\n HourOfDay,\n DayOfWeek,\n DayOfMonth,\n DayOfYear,\n ],\n }\n\n offset = to_offset(freq_str)\n\n for offset_type, feature_classes in features_by_offsets.items():\n if isinstance(offset, offset_type):\n return [cls() for cls in feature_classes]\n\n supported_freq_msg = f\"\"\"\n Unsupported frequency {freq_str}\n\n The following frequencies are supported:\n\n Y - yearly\n alias: A\n M - monthly\n W - weekly\n D - daily\n B - business days\n H - hourly\n T - minutely\n alias: min\n \"\"\"\n raise RuntimeError(supported_freq_msg)\n", "path": "src/gluonts/time_feature/_base.py"}]} |
gh_patches_debug_1597 | rasdani/github-patches | git_diff | holoviz__panel-956 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fileinput widget always returns `None` for name of uploaded file
As of panel 0.7.0 (with bokeh 1.4.0), the Fileinput widget does not set the name of the uploaded file.
E.g. in the following code
```python
import panel as pn
inp = pn.widgets.FileInput(name='Upload')
btn = pn.widgets.Button(name='Upload', button_type='primary')
r= pn.Column(pn.Row(inp, btn),)
def on_click_parse(event):
#print(inp.get_param_values())
print(inp.filename)
btn.on_click(on_click_parse)
r.servable()
```
it always prints `None` to the terminal.
I can already see where the issue is and will open a PR.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `panel/widgets/input.py`
Content:
```
1 """
2 The input widgets generally allow entering arbitrary information into
3 a text field or similar.
4 """
5 from __future__ import absolute_import, division, unicode_literals
6
7 import ast
8
9 from base64 import b64decode
10 from datetime import datetime
11 from six import string_types
12
13 import param
14
15 from bokeh.models.widgets import (
16 CheckboxGroup as _BkCheckboxGroup, ColorPicker as _BkColorPicker,
17 DatePicker as _BkDatePicker, Div as _BkDiv, TextInput as _BkTextInput,
18 PasswordInput as _BkPasswordInput, Spinner as _BkSpinner,
19 FileInput as _BkFileInput, TextAreaInput as _BkTextAreaInput)
20
21 from ..util import as_unicode
22 from .base import Widget
23
24
25 class TextInput(Widget):
26
27 value = param.String(default='', allow_None=True)
28
29 placeholder = param.String(default='')
30
31 _widget_type = _BkTextInput
32
33 class PasswordInput(Widget):
34
35 value = param.String(default='', allow_None=True)
36
37 placeholder = param.String(default='')
38
39 _widget_type = _BkPasswordInput
40
41 class TextAreaInput(Widget):
42
43 value = param.String(default='', allow_None=True)
44
45 placeholder = param.String(default='')
46
47 max_length = param.Integer(default=5000)
48
49 _widget_type = _BkTextAreaInput
50
51 class FileInput(Widget):
52
53 accept = param.String(default=None)
54
55 filename = param.String(default=None)
56
57 mime_type = param.String(default=None)
58
59 value = param.Parameter(default=None)
60
61 _widget_type = _BkFileInput
62
63 _rename = {'name': None, 'filename': None}
64
65 def _process_param_change(self, msg):
66 msg = super(FileInput, self)._process_param_change(msg)
67 if 'value' in msg:
68 msg.pop('value')
69 if 'mime_type' in msg:
70 msg.pop('mime_type')
71 return msg
72
73 def _filter_properties(self, properties):
74 properties = super(FileInput, self)._filter_properties(properties)
75 return properties + ['value', 'mime_type']
76
77 def _process_property_change(self, msg):
78 msg = super(FileInput, self)._process_property_change(msg)
79 if 'value' in msg:
80 msg['value'] = b64decode(msg['value'])
81 return msg
82
83 def save(self, filename):
84 """
85 Saves the uploaded FileInput data to a file or BytesIO object.
86
87 Arguments
88 ---------
89 filename (str): File path or file-like object
90 """
91 if isinstance(filename, string_types):
92 with open(filename, 'wb') as f:
93 f.write(self.value)
94 else:
95 filename.write(self.value)
96
97
98 class StaticText(Widget):
99
100 style = param.Dict(default=None, doc="""
101 Dictionary of CSS property:value pairs to apply to this Div.""")
102
103 value = param.Parameter(default=None)
104
105 _widget_type = _BkDiv
106
107 _format = '<b>{title}</b>: {value}'
108
109 _rename = {'name': 'title', 'value': 'text'}
110
111 def _process_param_change(self, msg):
112 msg = super(StaticText, self)._process_property_change(msg)
113 msg.pop('title', None)
114 if 'value' in msg:
115 text = as_unicode(msg.pop('value'))
116 if self.name:
117 text = self._format.format(title=self.name, value=text)
118 msg['text'] = text
119 return msg
120
121
122 class DatePicker(Widget):
123
124 value = param.Date(default=None)
125
126 start = param.Date(default=None)
127
128 end = param.Date(default=None)
129
130 _widget_type = _BkDatePicker
131
132 _rename = {'start': 'min_date', 'end': 'max_date', 'name': 'title'}
133
134 def _process_property_change(self, msg):
135 msg = super(DatePicker, self)._process_property_change(msg)
136 if 'value' in msg:
137 if isinstance(msg['value'], string_types):
138 msg['value'] = datetime.strptime(msg['value'][4:], '%b %d %Y')
139 return msg
140
141
142 class ColorPicker(Widget):
143
144 value = param.Color(default=None, doc="""
145 The selected color""")
146
147 _widget_type = _BkColorPicker
148
149 _rename = {'value': 'color', 'name': 'title'}
150
151
152 class Spinner(Widget):
153
154 start = param.Number(default=None, doc="""
155 Optional minimum allowable value""")
156
157 end = param.Number(default=None, doc="""
158 Optional maximum allowable value""")
159
160 value = param.Number(default=0, doc="""
161 The initial value of the spinner""")
162
163 step = param.Number(default=1, doc="""
164 The step added or subtracted to the current value""")
165
166 _widget_type = _BkSpinner
167
168 _rename = {'name': 'title', 'start': 'low', 'end': 'high'}
169
170
171 class LiteralInput(Widget):
172 """
173 LiteralInput allows declaring Python literals using a text
174 input widget. Optionally a type may be declared.
175 """
176
177 type = param.ClassSelector(default=None, class_=(type, tuple),
178 is_instance=True)
179
180 value = param.Parameter(default=None)
181
182 _widget_type = _BkTextInput
183
184 def __init__(self, **params):
185 super(LiteralInput, self).__init__(**params)
186 self._state = ''
187 self._validate(None)
188 self.param.watch(self._validate, 'value')
189
190 def _validate(self, event):
191 if self.type is None: return
192 new = self.value
193 if not isinstance(new, self.type):
194 if event:
195 self.value = event.old
196 types = repr(self.type) if isinstance(self.type, tuple) else self.type.__name__
197 raise ValueError('LiteralInput expected %s type but value %s '
198 'is of type %s.' %
199 (types, new, type(new).__name__))
200
201 def _process_property_change(self, msg):
202 msg = super(LiteralInput, self)._process_property_change(msg)
203 new_state = ''
204 if 'value' in msg:
205 value = msg.pop('value')
206 try:
207 value = ast.literal_eval(value)
208 except:
209 new_state = ' (invalid)'
210 value = self.value
211 else:
212 if self.type and not isinstance(value, self.type):
213 new_state = ' (wrong type)'
214 value = self.value
215 msg['value'] = value
216 msg['name'] = msg.get('title', self.name).replace(self._state, '') + new_state
217 self._state = new_state
218 self.param.trigger('name')
219 return msg
220
221 def _process_param_change(self, msg):
222 msg = super(LiteralInput, self)._process_param_change(msg)
223 msg.pop('type', None)
224 if 'value' in msg:
225 msg['value'] = '' if msg['value'] is None else as_unicode(msg['value'])
226 msg['title'] = self.name
227 return msg
228
229
230 class DatetimeInput(LiteralInput):
231 """
232 DatetimeInput allows declaring Python literals using a text
233 input widget. Optionally a type may be declared.
234 """
235
236 format = param.String(default='%Y-%m-%d %H:%M:%S', doc="""
237 Datetime format used for parsing and formatting the datetime.""")
238
239 value = param.Date(default=None)
240
241 start = param.Date(default=None)
242
243 end = param.Date(default=None)
244
245 type = datetime
246
247 def __init__(self, **params):
248 super(DatetimeInput, self).__init__(**params)
249 self.param.watch(self._validate, 'value')
250 self._validate(None)
251
252 def _validate(self, event):
253 new = self.value
254 if new is not None and ((self.start is not None and self.start > new) or
255 (self.end is not None and self.end < new)):
256 value = datetime.strftime(new, self.format)
257 start = datetime.strftime(self.start, self.format)
258 end = datetime.strftime(self.end, self.format)
259 if event:
260 self.value = event.old
261 raise ValueError('DatetimeInput value must be between {start} and {end}, '
262 'supplied value is {value}'.format(start=start, end=end,
263 value=value))
264
265 def _process_property_change(self, msg):
266 msg = Widget._process_property_change(self, msg)
267 new_state = ''
268 if 'value' in msg:
269 value = msg.pop('value')
270 try:
271 value = datetime.strptime(value, self.format)
272 except:
273 new_state = ' (invalid)'
274 value = self.value
275 else:
276 if value is not None and ((self.start is not None and self.start > value) or
277 (self.end is not None and self.end < value)):
278 new_state = ' (out of bounds)'
279 value = self.value
280 msg['value'] = value
281 msg['name'] = msg.get('title', self.name).replace(self._state, '') + new_state
282 self._state = new_state
283 return msg
284
285 def _process_param_change(self, msg):
286 msg = {k: v for k, v in msg.items() if k not in ('type', 'format', 'start', 'end')}
287 if 'value' in msg:
288 value = msg['value']
289 if value is None:
290 value = ''
291 else:
292 value = datetime.strftime(msg['value'], self.format)
293 msg['value'] = value
294 msg['title'] = self.name
295 return msg
296
297
298 class Checkbox(Widget):
299
300 value = param.Boolean(default=False)
301
302 _supports_embed = True
303
304 _widget_type = _BkCheckboxGroup
305
306 def _process_property_change(self, msg):
307 msg = super(Checkbox, self)._process_property_change(msg)
308 if 'active' in msg:
309 msg['value'] = 0 in msg.pop('active')
310 return msg
311
312 def _process_param_change(self, msg):
313 msg = super(Checkbox, self)._process_param_change(msg)
314 if 'value' in msg:
315 msg['active'] = [0] if msg.pop('value', None) else []
316 if 'title' in msg:
317 msg['labels'] = [msg.pop('title')]
318 return msg
319
320 def _get_embed_state(self, root, max_opts=3):
321 return (self, self._models[root.ref['id']][0], [False, True],
322 lambda x: 0 in x.active, 'active', 'cb_obj.active.indexOf(0) >= 0')
323
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/panel/widgets/input.py b/panel/widgets/input.py
--- a/panel/widgets/input.py
+++ b/panel/widgets/input.py
@@ -72,7 +72,7 @@
def _filter_properties(self, properties):
properties = super(FileInput, self)._filter_properties(properties)
- return properties + ['value', 'mime_type']
+ return properties + ['value', 'mime_type', 'filename']
def _process_property_change(self, msg):
msg = super(FileInput, self)._process_property_change(msg)
| {"golden_diff": "diff --git a/panel/widgets/input.py b/panel/widgets/input.py\n--- a/panel/widgets/input.py\n+++ b/panel/widgets/input.py\n@@ -72,7 +72,7 @@\n \n def _filter_properties(self, properties):\n properties = super(FileInput, self)._filter_properties(properties)\n- return properties + ['value', 'mime_type']\n+ return properties + ['value', 'mime_type', 'filename']\n \n def _process_property_change(self, msg):\n msg = super(FileInput, self)._process_property_change(msg)\n", "issue": "Fileinput widget always returns `None` for name of uploaded file\nAs of panel 0.7.0 (with bokeh 1.4.0), the Fileinput widget does not set the name of the uploaded file.\r\n\r\nE.g. in the following code\r\n```python\r\nimport panel as pn\r\n\r\ninp = pn.widgets.FileInput(name='Upload')\r\nbtn = pn.widgets.Button(name='Upload', button_type='primary')\r\nr= pn.Column(pn.Row(inp, btn),)\r\n\r\ndef on_click_parse(event):\r\n #print(inp.get_param_values())\r\n print(inp.filename)\r\nbtn.on_click(on_click_parse)\r\n\r\nr.servable()\r\n```\r\nit always prints `None` to the terminal.\r\n\r\nI can already see where the issue is and will open a PR.\n", "before_files": [{"content": "\"\"\"\nThe input widgets generally allow entering arbitrary information into\na text field or similar.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport ast\n\nfrom base64 import b64decode\nfrom datetime import datetime\nfrom six import string_types\n\nimport param\n\nfrom bokeh.models.widgets import (\n CheckboxGroup as _BkCheckboxGroup, ColorPicker as _BkColorPicker,\n DatePicker as _BkDatePicker, Div as _BkDiv, TextInput as _BkTextInput,\n PasswordInput as _BkPasswordInput, Spinner as _BkSpinner, \n FileInput as _BkFileInput, TextAreaInput as _BkTextAreaInput)\n\nfrom ..util import as_unicode\nfrom .base import Widget\n\n\nclass TextInput(Widget):\n\n value = param.String(default='', allow_None=True)\n\n placeholder = param.String(default='')\n\n _widget_type = _BkTextInput\n\nclass PasswordInput(Widget):\n\n value = param.String(default='', allow_None=True)\n\n placeholder = param.String(default='')\n\n _widget_type = _BkPasswordInput\n\nclass TextAreaInput(Widget):\n\n value = param.String(default='', allow_None=True)\n\n placeholder = param.String(default='')\n\n max_length = param.Integer(default=5000)\n \n _widget_type = _BkTextAreaInput\n\nclass FileInput(Widget):\n\n accept = param.String(default=None)\n\n filename = param.String(default=None)\n\n mime_type = param.String(default=None)\n\n value = param.Parameter(default=None)\n\n _widget_type = _BkFileInput\n\n _rename = {'name': None, 'filename': None}\n\n def _process_param_change(self, msg):\n msg = super(FileInput, self)._process_param_change(msg)\n if 'value' in msg:\n msg.pop('value')\n if 'mime_type' in msg:\n msg.pop('mime_type')\n return msg\n\n def _filter_properties(self, properties):\n properties = super(FileInput, self)._filter_properties(properties)\n return properties + ['value', 'mime_type']\n\n def _process_property_change(self, msg):\n msg = super(FileInput, self)._process_property_change(msg)\n if 'value' in msg:\n msg['value'] = b64decode(msg['value'])\n return msg\n\n def save(self, filename):\n \"\"\"\n Saves the uploaded FileInput data to a file or BytesIO object.\n\n Arguments\n ---------\n filename (str): File path or file-like object\n \"\"\"\n if isinstance(filename, string_types):\n with open(filename, 'wb') as f:\n f.write(self.value)\n else:\n filename.write(self.value)\n\n\nclass StaticText(Widget):\n\n style = param.Dict(default=None, doc=\"\"\"\n Dictionary of CSS property:value pairs to apply to this Div.\"\"\")\n\n value = param.Parameter(default=None)\n\n _widget_type = _BkDiv\n\n _format = '<b>{title}</b>: {value}'\n\n _rename = {'name': 'title', 'value': 'text'}\n\n def _process_param_change(self, msg):\n msg = super(StaticText, self)._process_property_change(msg)\n msg.pop('title', None)\n if 'value' in msg:\n text = as_unicode(msg.pop('value'))\n if self.name:\n text = self._format.format(title=self.name, value=text)\n msg['text'] = text\n return msg\n\n\nclass DatePicker(Widget):\n\n value = param.Date(default=None)\n\n start = param.Date(default=None)\n\n end = param.Date(default=None)\n\n _widget_type = _BkDatePicker\n\n _rename = {'start': 'min_date', 'end': 'max_date', 'name': 'title'}\n\n def _process_property_change(self, msg):\n msg = super(DatePicker, self)._process_property_change(msg)\n if 'value' in msg:\n if isinstance(msg['value'], string_types):\n msg['value'] = datetime.strptime(msg['value'][4:], '%b %d %Y')\n return msg\n\n\nclass ColorPicker(Widget):\n\n value = param.Color(default=None, doc=\"\"\"\n The selected color\"\"\")\n\n _widget_type = _BkColorPicker\n\n _rename = {'value': 'color', 'name': 'title'}\n\n\nclass Spinner(Widget):\n\n start = param.Number(default=None, doc=\"\"\"\n Optional minimum allowable value\"\"\")\n\n end = param.Number(default=None, doc=\"\"\"\n Optional maximum allowable value\"\"\")\n\n value = param.Number(default=0, doc=\"\"\"\n The initial value of the spinner\"\"\")\n\n step = param.Number(default=1, doc=\"\"\"\n The step added or subtracted to the current value\"\"\")\n\n _widget_type = _BkSpinner\n\n _rename = {'name': 'title', 'start': 'low', 'end': 'high'}\n\n\nclass LiteralInput(Widget):\n \"\"\"\n LiteralInput allows declaring Python literals using a text\n input widget. Optionally a type may be declared.\n \"\"\"\n\n type = param.ClassSelector(default=None, class_=(type, tuple),\n is_instance=True)\n\n value = param.Parameter(default=None)\n\n _widget_type = _BkTextInput\n\n def __init__(self, **params):\n super(LiteralInput, self).__init__(**params)\n self._state = ''\n self._validate(None)\n self.param.watch(self._validate, 'value')\n\n def _validate(self, event):\n if self.type is None: return\n new = self.value\n if not isinstance(new, self.type):\n if event:\n self.value = event.old\n types = repr(self.type) if isinstance(self.type, tuple) else self.type.__name__\n raise ValueError('LiteralInput expected %s type but value %s '\n 'is of type %s.' %\n (types, new, type(new).__name__))\n\n def _process_property_change(self, msg):\n msg = super(LiteralInput, self)._process_property_change(msg)\n new_state = ''\n if 'value' in msg:\n value = msg.pop('value')\n try:\n value = ast.literal_eval(value)\n except:\n new_state = ' (invalid)'\n value = self.value\n else:\n if self.type and not isinstance(value, self.type):\n new_state = ' (wrong type)'\n value = self.value\n msg['value'] = value\n msg['name'] = msg.get('title', self.name).replace(self._state, '') + new_state\n self._state = new_state\n self.param.trigger('name')\n return msg\n\n def _process_param_change(self, msg):\n msg = super(LiteralInput, self)._process_param_change(msg)\n msg.pop('type', None)\n if 'value' in msg:\n msg['value'] = '' if msg['value'] is None else as_unicode(msg['value'])\n msg['title'] = self.name\n return msg\n\n\nclass DatetimeInput(LiteralInput):\n \"\"\"\n DatetimeInput allows declaring Python literals using a text\n input widget. Optionally a type may be declared.\n \"\"\"\n\n format = param.String(default='%Y-%m-%d %H:%M:%S', doc=\"\"\"\n Datetime format used for parsing and formatting the datetime.\"\"\")\n\n value = param.Date(default=None)\n\n start = param.Date(default=None)\n\n end = param.Date(default=None)\n\n type = datetime\n\n def __init__(self, **params):\n super(DatetimeInput, self).__init__(**params)\n self.param.watch(self._validate, 'value')\n self._validate(None)\n\n def _validate(self, event):\n new = self.value\n if new is not None and ((self.start is not None and self.start > new) or\n (self.end is not None and self.end < new)):\n value = datetime.strftime(new, self.format)\n start = datetime.strftime(self.start, self.format)\n end = datetime.strftime(self.end, self.format)\n if event:\n self.value = event.old\n raise ValueError('DatetimeInput value must be between {start} and {end}, '\n 'supplied value is {value}'.format(start=start, end=end,\n value=value))\n\n def _process_property_change(self, msg):\n msg = Widget._process_property_change(self, msg)\n new_state = ''\n if 'value' in msg:\n value = msg.pop('value')\n try:\n value = datetime.strptime(value, self.format)\n except:\n new_state = ' (invalid)'\n value = self.value\n else:\n if value is not None and ((self.start is not None and self.start > value) or\n (self.end is not None and self.end < value)):\n new_state = ' (out of bounds)'\n value = self.value\n msg['value'] = value\n msg['name'] = msg.get('title', self.name).replace(self._state, '') + new_state\n self._state = new_state\n return msg\n\n def _process_param_change(self, msg):\n msg = {k: v for k, v in msg.items() if k not in ('type', 'format', 'start', 'end')}\n if 'value' in msg:\n value = msg['value']\n if value is None:\n value = ''\n else:\n value = datetime.strftime(msg['value'], self.format)\n msg['value'] = value\n msg['title'] = self.name\n return msg\n\n\nclass Checkbox(Widget):\n\n value = param.Boolean(default=False)\n\n _supports_embed = True\n\n _widget_type = _BkCheckboxGroup\n\n def _process_property_change(self, msg):\n msg = super(Checkbox, self)._process_property_change(msg)\n if 'active' in msg:\n msg['value'] = 0 in msg.pop('active')\n return msg\n\n def _process_param_change(self, msg):\n msg = super(Checkbox, self)._process_param_change(msg)\n if 'value' in msg:\n msg['active'] = [0] if msg.pop('value', None) else []\n if 'title' in msg:\n msg['labels'] = [msg.pop('title')]\n return msg\n\n def _get_embed_state(self, root, max_opts=3):\n return (self, self._models[root.ref['id']][0], [False, True],\n lambda x: 0 in x.active, 'active', 'cb_obj.active.indexOf(0) >= 0')\n", "path": "panel/widgets/input.py"}], "after_files": [{"content": "\"\"\"\nThe input widgets generally allow entering arbitrary information into\na text field or similar.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport ast\n\nfrom base64 import b64decode\nfrom datetime import datetime\nfrom six import string_types\n\nimport param\n\nfrom bokeh.models.widgets import (\n CheckboxGroup as _BkCheckboxGroup, ColorPicker as _BkColorPicker,\n DatePicker as _BkDatePicker, Div as _BkDiv, TextInput as _BkTextInput,\n PasswordInput as _BkPasswordInput, Spinner as _BkSpinner, \n FileInput as _BkFileInput, TextAreaInput as _BkTextAreaInput)\n\nfrom ..util import as_unicode\nfrom .base import Widget\n\n\nclass TextInput(Widget):\n\n value = param.String(default='', allow_None=True)\n\n placeholder = param.String(default='')\n\n _widget_type = _BkTextInput\n\nclass PasswordInput(Widget):\n\n value = param.String(default='', allow_None=True)\n\n placeholder = param.String(default='')\n\n _widget_type = _BkPasswordInput\n\nclass TextAreaInput(Widget):\n\n value = param.String(default='', allow_None=True)\n\n placeholder = param.String(default='')\n\n max_length = param.Integer(default=5000)\n \n _widget_type = _BkTextAreaInput\n\nclass FileInput(Widget):\n\n accept = param.String(default=None)\n\n filename = param.String(default=None)\n\n mime_type = param.String(default=None)\n\n value = param.Parameter(default=None)\n\n _widget_type = _BkFileInput\n\n _rename = {'name': None, 'filename': None}\n\n def _process_param_change(self, msg):\n msg = super(FileInput, self)._process_param_change(msg)\n if 'value' in msg:\n msg.pop('value')\n if 'mime_type' in msg:\n msg.pop('mime_type')\n return msg\n\n def _filter_properties(self, properties):\n properties = super(FileInput, self)._filter_properties(properties)\n return properties + ['value', 'mime_type', 'filename']\n\n def _process_property_change(self, msg):\n msg = super(FileInput, self)._process_property_change(msg)\n if 'value' in msg:\n msg['value'] = b64decode(msg['value'])\n return msg\n\n def save(self, filename):\n \"\"\"\n Saves the uploaded FileInput data to a file or BytesIO object.\n\n Arguments\n ---------\n filename (str): File path or file-like object\n \"\"\"\n if isinstance(filename, string_types):\n with open(filename, 'wb') as f:\n f.write(self.value)\n else:\n filename.write(self.value)\n\n\nclass StaticText(Widget):\n\n style = param.Dict(default=None, doc=\"\"\"\n Dictionary of CSS property:value pairs to apply to this Div.\"\"\")\n\n value = param.Parameter(default=None)\n\n _widget_type = _BkDiv\n\n _format = '<b>{title}</b>: {value}'\n\n _rename = {'name': 'title', 'value': 'text'}\n\n def _process_param_change(self, msg):\n msg = super(StaticText, self)._process_property_change(msg)\n msg.pop('title', None)\n if 'value' in msg:\n text = as_unicode(msg.pop('value'))\n if self.name:\n text = self._format.format(title=self.name, value=text)\n msg['text'] = text\n return msg\n\n\nclass DatePicker(Widget):\n\n value = param.Date(default=None)\n\n start = param.Date(default=None)\n\n end = param.Date(default=None)\n\n _widget_type = _BkDatePicker\n\n _rename = {'start': 'min_date', 'end': 'max_date', 'name': 'title'}\n\n def _process_property_change(self, msg):\n msg = super(DatePicker, self)._process_property_change(msg)\n if 'value' in msg:\n if isinstance(msg['value'], string_types):\n msg['value'] = datetime.strptime(msg['value'][4:], '%b %d %Y')\n return msg\n\n\nclass ColorPicker(Widget):\n\n value = param.Color(default=None, doc=\"\"\"\n The selected color\"\"\")\n\n _widget_type = _BkColorPicker\n\n _rename = {'value': 'color', 'name': 'title'}\n\n\nclass Spinner(Widget):\n\n start = param.Number(default=None, doc=\"\"\"\n Optional minimum allowable value\"\"\")\n\n end = param.Number(default=None, doc=\"\"\"\n Optional maximum allowable value\"\"\")\n\n value = param.Number(default=0, doc=\"\"\"\n The initial value of the spinner\"\"\")\n\n step = param.Number(default=1, doc=\"\"\"\n The step added or subtracted to the current value\"\"\")\n\n _widget_type = _BkSpinner\n\n _rename = {'name': 'title', 'start': 'low', 'end': 'high'}\n\n\nclass LiteralInput(Widget):\n \"\"\"\n LiteralInput allows declaring Python literals using a text\n input widget. Optionally a type may be declared.\n \"\"\"\n\n type = param.ClassSelector(default=None, class_=(type, tuple),\n is_instance=True)\n\n value = param.Parameter(default=None)\n\n _widget_type = _BkTextInput\n\n def __init__(self, **params):\n super(LiteralInput, self).__init__(**params)\n self._state = ''\n self._validate(None)\n self.param.watch(self._validate, 'value')\n\n def _validate(self, event):\n if self.type is None: return\n new = self.value\n if not isinstance(new, self.type):\n if event:\n self.value = event.old\n types = repr(self.type) if isinstance(self.type, tuple) else self.type.__name__\n raise ValueError('LiteralInput expected %s type but value %s '\n 'is of type %s.' %\n (types, new, type(new).__name__))\n\n def _process_property_change(self, msg):\n msg = super(LiteralInput, self)._process_property_change(msg)\n new_state = ''\n if 'value' in msg:\n value = msg.pop('value')\n try:\n value = ast.literal_eval(value)\n except:\n new_state = ' (invalid)'\n value = self.value\n else:\n if self.type and not isinstance(value, self.type):\n new_state = ' (wrong type)'\n value = self.value\n msg['value'] = value\n msg['name'] = msg.get('title', self.name).replace(self._state, '') + new_state\n self._state = new_state\n self.param.trigger('name')\n return msg\n\n def _process_param_change(self, msg):\n msg = super(LiteralInput, self)._process_param_change(msg)\n msg.pop('type', None)\n if 'value' in msg:\n msg['value'] = '' if msg['value'] is None else as_unicode(msg['value'])\n msg['title'] = self.name\n return msg\n\n\nclass DatetimeInput(LiteralInput):\n \"\"\"\n DatetimeInput allows declaring Python literals using a text\n input widget. Optionally a type may be declared.\n \"\"\"\n\n format = param.String(default='%Y-%m-%d %H:%M:%S', doc=\"\"\"\n Datetime format used for parsing and formatting the datetime.\"\"\")\n\n value = param.Date(default=None)\n\n start = param.Date(default=None)\n\n end = param.Date(default=None)\n\n type = datetime\n\n def __init__(self, **params):\n super(DatetimeInput, self).__init__(**params)\n self.param.watch(self._validate, 'value')\n self._validate(None)\n\n def _validate(self, event):\n new = self.value\n if new is not None and ((self.start is not None and self.start > new) or\n (self.end is not None and self.end < new)):\n value = datetime.strftime(new, self.format)\n start = datetime.strftime(self.start, self.format)\n end = datetime.strftime(self.end, self.format)\n if event:\n self.value = event.old\n raise ValueError('DatetimeInput value must be between {start} and {end}, '\n 'supplied value is {value}'.format(start=start, end=end,\n value=value))\n\n def _process_property_change(self, msg):\n msg = Widget._process_property_change(self, msg)\n new_state = ''\n if 'value' in msg:\n value = msg.pop('value')\n try:\n value = datetime.strptime(value, self.format)\n except:\n new_state = ' (invalid)'\n value = self.value\n else:\n if value is not None and ((self.start is not None and self.start > value) or\n (self.end is not None and self.end < value)):\n new_state = ' (out of bounds)'\n value = self.value\n msg['value'] = value\n msg['name'] = msg.get('title', self.name).replace(self._state, '') + new_state\n self._state = new_state\n return msg\n\n def _process_param_change(self, msg):\n msg = {k: v for k, v in msg.items() if k not in ('type', 'format', 'start', 'end')}\n if 'value' in msg:\n value = msg['value']\n if value is None:\n value = ''\n else:\n value = datetime.strftime(msg['value'], self.format)\n msg['value'] = value\n msg['title'] = self.name\n return msg\n\n\nclass Checkbox(Widget):\n\n value = param.Boolean(default=False)\n\n _supports_embed = True\n\n _widget_type = _BkCheckboxGroup\n\n def _process_property_change(self, msg):\n msg = super(Checkbox, self)._process_property_change(msg)\n if 'active' in msg:\n msg['value'] = 0 in msg.pop('active')\n return msg\n\n def _process_param_change(self, msg):\n msg = super(Checkbox, self)._process_param_change(msg)\n if 'value' in msg:\n msg['active'] = [0] if msg.pop('value', None) else []\n if 'title' in msg:\n msg['labels'] = [msg.pop('title')]\n return msg\n\n def _get_embed_state(self, root, max_opts=3):\n return (self, self._models[root.ref['id']][0], [False, True],\n lambda x: 0 in x.active, 'active', 'cb_obj.active.indexOf(0) >= 0')\n", "path": "panel/widgets/input.py"}]} |
gh_patches_debug_1598 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-3796 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CA-PE production parser down
## Description
This is an automatic error report generated for Canada Prince Edward Island (CA-PE).
Issues:
- No recent data found for `production` parser
## Suggestions
- Try running the parser locally using the command `poetry run test_parser CA-PE production`
- <a href="https://storage.googleapis.com/electricitymap-parser-logs/CA-PE.html">Explore the runtime logs</a>
You can see an overview of all parser issues [here](https://github.com/tmrowco/electricitymap-contrib/wiki/Parser-issues).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/CA_PE.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import json
4
5 # The arrow library is used to handle datetimes consistently with other parsers
6 import arrow
7
8 # The request library is used to fetch content through HTTP
9 import requests
10
11
12 timezone = 'Canada/Atlantic'
13
14
15 def _find_pei_key(pei_list, sought_key):
16 matching_item = [item for item in pei_list
17 if 'header' in item['data']
18 and item['data']['header'].startswith(sought_key)]
19
20 if not matching_item:
21 return None
22
23 return matching_item[0]['data']['actualValue']
24
25
26 def _get_pei_info(requests_obj):
27 url = 'https://wdf.princeedwardisland.ca/workflow'
28 request = {'featureName': 'WindEnergy', 'queryName': 'WindEnergy'}
29 headers = {'Content-Type': 'application/json'}
30 response = requests_obj.post(url, data=json.dumps(request), headers=headers)
31
32 raw_data = response.json().get('data', [])
33
34 datetime_item = [item['data']['text'] for item in raw_data
35 if 'text' in item['data']]
36 if not datetime_item:
37 # unable to get a timestamp, return empty
38 return None
39 datetime_text = datetime_item[0][len('Last updated '):]
40 data_timestamp = arrow.get(datetime_text, 'MMMM D, YYYY HH:mm A').replace(tzinfo='Canada/Atlantic')
41
42 # see https://ruk.ca/content/new-api-endpoint-pei-wind for more info
43 data = {
44 'pei_load': _find_pei_key(raw_data, 'Total On-Island Load'),
45 'pei_wind_gen': _find_pei_key(raw_data, 'Total On-Island Wind Generation'),
46 'pei_fossil_gen': _find_pei_key(raw_data, 'Total On-Island Fossil Fuel Generation'),
47 'pei_wind_used': _find_pei_key(raw_data, 'Wind Power Used On Island'),
48 'pei_wind_exported': _find_pei_key(raw_data, 'Wind Power Exported Off Island'),
49 'datetime': data_timestamp.datetime
50 }
51
52 # the following keys are always required downstream, if we don't have them, no sense returning
53 if data['pei_wind_gen'] is None or data['pei_fossil_gen'] is None:
54 return None
55
56 return data
57
58
59 def fetch_production(zone_key='CA-PE', session=None, target_datetime=None, logger=None) -> dict:
60 """Requests the last known production mix (in MW) of a given country."""
61 if target_datetime:
62 raise NotImplementedError('This parser is not yet able to parse past dates')
63
64 requests_obj = session or requests.session()
65 pei_info = _get_pei_info(requests_obj)
66
67 if pei_info is None:
68 return None
69
70 data = {
71 'datetime': pei_info['datetime'],
72 'zoneKey': zone_key,
73 'production': {
74 'wind': pei_info['pei_wind_gen'],
75
76 # These are oil-fueled ("heavy fuel oil" and "diesel") generators
77 # used as peakers and back-up
78 'oil': pei_info['pei_fossil_gen'],
79
80 # specify some sources that definitely aren't present on PEI as zero,
81 # this allows the analyzer to better estimate CO2eq
82 'coal': 0,
83 'hydro': 0,
84 'nuclear': 0,
85 'geothermal': 0
86 },
87 'storage': {},
88 'source': 'princeedwardisland.ca'
89 }
90
91 return data
92
93
94 def fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None) -> dict:
95 """Requests the last known power exchange (in MW) between two regions."""
96 if target_datetime:
97 raise NotImplementedError('This parser is not yet able to parse past dates')
98
99 sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))
100
101 if sorted_zone_keys != 'CA-NB->CA-PE':
102 raise NotImplementedError('This exchange pair is not implemented')
103
104 requests_obj = session or requests.session()
105 pei_info = _get_pei_info(requests_obj)
106
107 if pei_info is None or pei_info['pei_load'] is None:
108 return None
109
110 # PEI imports most of its electricity. Everything not generated on island
111 # is imported from New Brunswick.
112 # In case of wind, some is paper-"exported" even if there is a net import,
113 # and 'pei_wind_used'/'data5' indicates their accounting of part of the load
114 # served by non-exported wind.
115 # https://www.princeedwardisland.ca/en/feature/pei-wind-energy says:
116 # "Wind Power Exported Off-Island is that portion of wind generation that is supplying
117 # contracts elsewhere. The actual electricity from this portion of wind generation
118 # may stay within PEI but is satisfying a contractual arrangement in another jurisdiction."
119 # We are ignoring these paper exports, as they are an accounting/legal detail
120 # that doesn't actually reflect what happens on the wires.
121 # (New Brunswick being the only interconnection with PEI, "exporting" wind power to NB
122 # then "importing" a balance of NB electricity likely doesn't actually happen.)
123 imported_from_nb = (pei_info['pei_load'] - pei_info['pei_fossil_gen'] - pei_info['pei_wind_gen'])
124
125 # In expected result, "net" represents an export.
126 # We have sorted_zone_keys 'CA-NB->CA-PE', so it's export *from* NB,
127 # and import *to* PEI.
128 data = {
129 'datetime': pei_info['datetime'],
130 'sortedZoneKeys': sorted_zone_keys,
131 'netFlow': imported_from_nb,
132 'source': 'princeedwardisland.ca'
133 }
134
135 return data
136
137
138 if __name__ == '__main__':
139 """Main method, never used by the Electricity Map backend, but handy for testing."""
140
141 print('fetch_production() ->')
142 print(fetch_production())
143
144 print('fetch_exchange("CA-PE", "CA-NB") ->')
145 print(fetch_exchange("CA-PE", "CA-NB"))
146
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsers/CA_PE.py b/parsers/CA_PE.py
--- a/parsers/CA_PE.py
+++ b/parsers/CA_PE.py
@@ -24,7 +24,7 @@
def _get_pei_info(requests_obj):
- url = 'https://wdf.princeedwardisland.ca/workflow'
+ url = 'https://wdf.princeedwardisland.ca/api/workflow'
request = {'featureName': 'WindEnergy', 'queryName': 'WindEnergy'}
headers = {'Content-Type': 'application/json'}
response = requests_obj.post(url, data=json.dumps(request), headers=headers)
| {"golden_diff": "diff --git a/parsers/CA_PE.py b/parsers/CA_PE.py\n--- a/parsers/CA_PE.py\n+++ b/parsers/CA_PE.py\n@@ -24,7 +24,7 @@\n \n \n def _get_pei_info(requests_obj):\n- url = 'https://wdf.princeedwardisland.ca/workflow'\n+ url = 'https://wdf.princeedwardisland.ca/api/workflow'\n request = {'featureName': 'WindEnergy', 'queryName': 'WindEnergy'}\n headers = {'Content-Type': 'application/json'}\n response = requests_obj.post(url, data=json.dumps(request), headers=headers)\n", "issue": "CA-PE production parser down\n## Description\n\nThis is an automatic error report generated for Canada Prince Edward Island (CA-PE).\n\nIssues:\n- No recent data found for `production` parser\n\n## Suggestions\n- Try running the parser locally using the command `poetry run test_parser CA-PE production`\n- <a href=\"https://storage.googleapis.com/electricitymap-parser-logs/CA-PE.html\">Explore the runtime logs</a>\n\nYou can see an overview of all parser issues [here](https://github.com/tmrowco/electricitymap-contrib/wiki/Parser-issues).\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport json\n\n# The arrow library is used to handle datetimes consistently with other parsers\nimport arrow\n\n# The request library is used to fetch content through HTTP\nimport requests\n\n\ntimezone = 'Canada/Atlantic'\n\n\ndef _find_pei_key(pei_list, sought_key):\n matching_item = [item for item in pei_list\n if 'header' in item['data']\n and item['data']['header'].startswith(sought_key)]\n\n if not matching_item:\n return None\n\n return matching_item[0]['data']['actualValue']\n\n\ndef _get_pei_info(requests_obj):\n url = 'https://wdf.princeedwardisland.ca/workflow'\n request = {'featureName': 'WindEnergy', 'queryName': 'WindEnergy'}\n headers = {'Content-Type': 'application/json'}\n response = requests_obj.post(url, data=json.dumps(request), headers=headers)\n\n raw_data = response.json().get('data', [])\n\n datetime_item = [item['data']['text'] for item in raw_data\n if 'text' in item['data']]\n if not datetime_item:\n # unable to get a timestamp, return empty\n return None\n datetime_text = datetime_item[0][len('Last updated '):]\n data_timestamp = arrow.get(datetime_text, 'MMMM D, YYYY HH:mm A').replace(tzinfo='Canada/Atlantic')\n\n # see https://ruk.ca/content/new-api-endpoint-pei-wind for more info\n data = {\n 'pei_load': _find_pei_key(raw_data, 'Total On-Island Load'),\n 'pei_wind_gen': _find_pei_key(raw_data, 'Total On-Island Wind Generation'),\n 'pei_fossil_gen': _find_pei_key(raw_data, 'Total On-Island Fossil Fuel Generation'),\n 'pei_wind_used': _find_pei_key(raw_data, 'Wind Power Used On Island'),\n 'pei_wind_exported': _find_pei_key(raw_data, 'Wind Power Exported Off Island'),\n 'datetime': data_timestamp.datetime\n }\n\n # the following keys are always required downstream, if we don't have them, no sense returning\n if data['pei_wind_gen'] is None or data['pei_fossil_gen'] is None:\n return None\n\n return data\n\n\ndef fetch_production(zone_key='CA-PE', session=None, target_datetime=None, logger=None) -> dict:\n \"\"\"Requests the last known production mix (in MW) of a given country.\"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n requests_obj = session or requests.session()\n pei_info = _get_pei_info(requests_obj)\n\n if pei_info is None:\n return None\n\n data = {\n 'datetime': pei_info['datetime'],\n 'zoneKey': zone_key,\n 'production': {\n 'wind': pei_info['pei_wind_gen'],\n\n # These are oil-fueled (\"heavy fuel oil\" and \"diesel\") generators\n # used as peakers and back-up\n 'oil': pei_info['pei_fossil_gen'],\n\n # specify some sources that definitely aren't present on PEI as zero,\n # this allows the analyzer to better estimate CO2eq\n 'coal': 0,\n 'hydro': 0,\n 'nuclear': 0,\n 'geothermal': 0\n },\n 'storage': {},\n 'source': 'princeedwardisland.ca'\n }\n\n return data\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None) -> dict:\n \"\"\"Requests the last known power exchange (in MW) between two regions.\"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))\n\n if sorted_zone_keys != 'CA-NB->CA-PE':\n raise NotImplementedError('This exchange pair is not implemented')\n\n requests_obj = session or requests.session()\n pei_info = _get_pei_info(requests_obj)\n\n if pei_info is None or pei_info['pei_load'] is None:\n return None\n\n # PEI imports most of its electricity. Everything not generated on island\n # is imported from New Brunswick.\n # In case of wind, some is paper-\"exported\" even if there is a net import,\n # and 'pei_wind_used'/'data5' indicates their accounting of part of the load\n # served by non-exported wind.\n # https://www.princeedwardisland.ca/en/feature/pei-wind-energy says:\n # \"Wind Power Exported Off-Island is that portion of wind generation that is supplying\n # contracts elsewhere. The actual electricity from this portion of wind generation\n # may stay within PEI but is satisfying a contractual arrangement in another jurisdiction.\"\n # We are ignoring these paper exports, as they are an accounting/legal detail\n # that doesn't actually reflect what happens on the wires.\n # (New Brunswick being the only interconnection with PEI, \"exporting\" wind power to NB\n # then \"importing\" a balance of NB electricity likely doesn't actually happen.)\n imported_from_nb = (pei_info['pei_load'] - pei_info['pei_fossil_gen'] - pei_info['pei_wind_gen'])\n\n # In expected result, \"net\" represents an export.\n # We have sorted_zone_keys 'CA-NB->CA-PE', so it's export *from* NB,\n # and import *to* PEI.\n data = {\n 'datetime': pei_info['datetime'],\n 'sortedZoneKeys': sorted_zone_keys,\n 'netFlow': imported_from_nb,\n 'source': 'princeedwardisland.ca'\n }\n\n return data\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n\n print('fetch_exchange(\"CA-PE\", \"CA-NB\") ->')\n print(fetch_exchange(\"CA-PE\", \"CA-NB\"))\n", "path": "parsers/CA_PE.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport json\n\n# The arrow library is used to handle datetimes consistently with other parsers\nimport arrow\n\n# The request library is used to fetch content through HTTP\nimport requests\n\n\ntimezone = 'Canada/Atlantic'\n\n\ndef _find_pei_key(pei_list, sought_key):\n matching_item = [item for item in pei_list\n if 'header' in item['data']\n and item['data']['header'].startswith(sought_key)]\n\n if not matching_item:\n return None\n\n return matching_item[0]['data']['actualValue']\n\n\ndef _get_pei_info(requests_obj):\n url = 'https://wdf.princeedwardisland.ca/api/workflow'\n request = {'featureName': 'WindEnergy', 'queryName': 'WindEnergy'}\n headers = {'Content-Type': 'application/json'}\n response = requests_obj.post(url, data=json.dumps(request), headers=headers)\n\n raw_data = response.json().get('data', [])\n\n datetime_item = [item['data']['text'] for item in raw_data\n if 'text' in item['data']]\n if not datetime_item:\n # unable to get a timestamp, return empty\n return None\n datetime_text = datetime_item[0][len('Last updated '):]\n data_timestamp = arrow.get(datetime_text, 'MMMM D, YYYY HH:mm A').replace(tzinfo='Canada/Atlantic')\n\n # see https://ruk.ca/content/new-api-endpoint-pei-wind for more info\n data = {\n 'pei_load': _find_pei_key(raw_data, 'Total On-Island Load'),\n 'pei_wind_gen': _find_pei_key(raw_data, 'Total On-Island Wind Generation'),\n 'pei_fossil_gen': _find_pei_key(raw_data, 'Total On-Island Fossil Fuel Generation'),\n 'pei_wind_used': _find_pei_key(raw_data, 'Wind Power Used On Island'),\n 'pei_wind_exported': _find_pei_key(raw_data, 'Wind Power Exported Off Island'),\n 'datetime': data_timestamp.datetime\n }\n\n # the following keys are always required downstream, if we don't have them, no sense returning\n if data['pei_wind_gen'] is None or data['pei_fossil_gen'] is None:\n return None\n\n return data\n\n\ndef fetch_production(zone_key='CA-PE', session=None, target_datetime=None, logger=None) -> dict:\n \"\"\"Requests the last known production mix (in MW) of a given country.\"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n requests_obj = session or requests.session()\n pei_info = _get_pei_info(requests_obj)\n\n if pei_info is None:\n return None\n\n data = {\n 'datetime': pei_info['datetime'],\n 'zoneKey': zone_key,\n 'production': {\n 'wind': pei_info['pei_wind_gen'],\n\n # These are oil-fueled (\"heavy fuel oil\" and \"diesel\") generators\n # used as peakers and back-up\n 'oil': pei_info['pei_fossil_gen'],\n\n # specify some sources that definitely aren't present on PEI as zero,\n # this allows the analyzer to better estimate CO2eq\n 'coal': 0,\n 'hydro': 0,\n 'nuclear': 0,\n 'geothermal': 0\n },\n 'storage': {},\n 'source': 'princeedwardisland.ca'\n }\n\n return data\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None) -> dict:\n \"\"\"Requests the last known power exchange (in MW) between two regions.\"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))\n\n if sorted_zone_keys != 'CA-NB->CA-PE':\n raise NotImplementedError('This exchange pair is not implemented')\n\n requests_obj = session or requests.session()\n pei_info = _get_pei_info(requests_obj)\n\n if pei_info is None or pei_info['pei_load'] is None:\n return None\n\n # PEI imports most of its electricity. Everything not generated on island\n # is imported from New Brunswick.\n # In case of wind, some is paper-\"exported\" even if there is a net import,\n # and 'pei_wind_used'/'data5' indicates their accounting of part of the load\n # served by non-exported wind.\n # https://www.princeedwardisland.ca/en/feature/pei-wind-energy says:\n # \"Wind Power Exported Off-Island is that portion of wind generation that is supplying\n # contracts elsewhere. The actual electricity from this portion of wind generation\n # may stay within PEI but is satisfying a contractual arrangement in another jurisdiction.\"\n # We are ignoring these paper exports, as they are an accounting/legal detail\n # that doesn't actually reflect what happens on the wires.\n # (New Brunswick being the only interconnection with PEI, \"exporting\" wind power to NB\n # then \"importing\" a balance of NB electricity likely doesn't actually happen.)\n imported_from_nb = (pei_info['pei_load'] - pei_info['pei_fossil_gen'] - pei_info['pei_wind_gen'])\n\n # In expected result, \"net\" represents an export.\n # We have sorted_zone_keys 'CA-NB->CA-PE', so it's export *from* NB,\n # and import *to* PEI.\n data = {\n 'datetime': pei_info['datetime'],\n 'sortedZoneKeys': sorted_zone_keys,\n 'netFlow': imported_from_nb,\n 'source': 'princeedwardisland.ca'\n }\n\n return data\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n\n print('fetch_exchange(\"CA-PE\", \"CA-NB\") ->')\n print(fetch_exchange(\"CA-PE\", \"CA-NB\"))\n", "path": "parsers/CA_PE.py"}]} |
gh_patches_debug_1599 | rasdani/github-patches | git_diff | openshift__openshift-ansible-8921 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
installation of Logging fails on "Gather OpenShift Logging Facts"
#### Description
On a developer setup, using Fedora 25 as the OS and executing `cd ${GOPATH}/src/github.com/openshift/origin/_output/local/bin/linux/amd64 && sudo ./openshift start` for an OpenShift cluster, the Ansible Playbook for Logging fails during the task "Gather OpenShift Logging Facts".
This is the script that is used to install Logging via the playbook:
https://paste.fedoraproject.org/paste/TAxemZhC59HT-WMGxTiVBl5M1UNdIGYhyRLivL9gydE=
```bash
#!/bin/bash
cat > /tmp/metrics.inventory <<EOF
[oo_first_master]
openshift
[oo_first_master:vars]
#openshift_deployment_type=origin
#openshift_release=v3.6
#openshift_image_tag=v3.6.0
#openshift_metrics_image_prefix=registry.ops.openshift.com/openshift3
#openshift_metrics_image_version=3.6.0
openshift_deployment_type=origin
openshift_release=v1.5
openshift_image_tag=v1.5.0
openshift_logging_install_logging=true
# for more options, see https://github.com/openshift/openshift-ansible/blob/master/roles/openshift_logging/defaults/main.yml
EOF
sudo ansible-playbook /mnt/storage/jpkroehling/Projects/ansible/src/github.com/openshift/openshift-ansible/playbooks/byo/openshift-cluster/openshift-logging.yml \
-vv \
-e 'ansible_python_interpreter=/usr/bin/python3' \
-c local \
-i /tmp/metrics.inventory
```
This is the actual message:
https://paste.fedoraproject.org/paste/IZ06Z5xI2bmOxGg8nK4RRl5M1UNdIGYhyRLivL9gydE=
```
TASK [openshift_logging : Gather OpenShift Logging Facts] *********************************************************************************************************************************************************
task path: /mnt/storage/jpkroehling/Projects/ansible/src/github.com/openshift/openshift-ansible/roles/openshift_logging/tasks/install_logging.yaml:2
fatal: [openshift]: FAILED! => {"changed": false, "failed": true, "msg": "There was an exception trying to run the command 'oc get routes -n logging --user=system:admin/192-168-2-111:8443 --config=/tmp/openshift-logging-ansible-QNj1vf/admin.kubeconfig -o json -l component=support, logging-infra=support, provider=openshift' a bytes-like object is required, not 'str'"}
to retry, use: --limit @/mnt/storage/jpkroehling/Projects/ansible/src/github.com/openshift/openshift-ansible/playbooks/byo/openshift-cluster/openshift-logging.retry
PLAY RECAP ********************************************************************************************************************************************************************************************************
localhost : ok=2 changed=0 unreachable=0 failed=0
openshift : ok=19 changed=0 unreachable=0 failed=1
```
##### Version
```bash
$ ansible --version
ansible 2.3.0.0
config file = /etc/ansible/ansible.cfg
configured module search path = Default w/o overrides
python version = 2.7.13 (default, Jan 12 2017, 17:59:37) [GCC 6.3.1 20161221 (Red Hat 6.3.1-1)]
$ git describe
openshift-ansible-3.6.67-1-28-g74e4c9d
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `roles/openshift_logging/library/openshift_logging_facts.py`
Content:
```
1 '''
2 ---
3 module: openshift_logging_facts
4 version_added: ""
5 short_description: Gather facts about the OpenShift logging stack
6 description:
7 - Determine the current facts about the OpenShift logging stack (e.g. cluster size)
8 options:
9 author: Red Hat, Inc
10 '''
11
12 import copy
13 import json
14
15 # pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
16 from subprocess import * # noqa: F402,F403
17
18 # ignore pylint errors related to the module_utils import
19 # pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
20 from ansible.module_utils.basic import * # noqa: F402,F403
21
22 import yaml
23
24 EXAMPLES = """
25 - action: opneshift_logging_facts
26 """
27
28 RETURN = """
29 """
30
31 DEFAULT_OC_OPTIONS = ["-o", "json"]
32
33 # constants used for various labels and selectors
34 COMPONENT_KEY = "component"
35 LOGGING_INFRA_KEY = "logging-infra"
36
37 # selectors for filtering resources
38 DS_FLUENTD_SELECTOR = LOGGING_INFRA_KEY + "=" + "fluentd"
39 LOGGING_SELECTOR = LOGGING_INFRA_KEY + "=" + "support"
40 ROUTE_SELECTOR = "component=support,logging-infra=support,provider=openshift"
41 # pylint: disable=line-too-long
42 COMPONENTS = ["kibana", "curator", "elasticsearch", "fluentd", "kibana_ops", "curator_ops", "elasticsearch_ops", "mux", "eventrouter"]
43
44
45 class OCBaseCommand(object):
46 ''' The base class used to query openshift '''
47
48 def __init__(self, binary, kubeconfig, namespace):
49 ''' the init method of OCBaseCommand class '''
50 self.binary = binary
51 self.kubeconfig = kubeconfig
52 self.user = self.get_system_admin(self.kubeconfig)
53 self.namespace = namespace
54
55 # pylint: disable=no-self-use
56 def get_system_admin(self, kubeconfig):
57 ''' Retrieves the system admin '''
58 with open(kubeconfig, 'r') as kubeconfig_file:
59 config = yaml.load(kubeconfig_file)
60 for user in config["users"]:
61 if user["name"].startswith("system:admin"):
62 return user["name"]
63 raise Exception("Unable to find system:admin in: " + kubeconfig)
64
65 # pylint: disable=too-many-arguments, dangerous-default-value
66 def oc_command(self, sub, kind, namespace=None, name=None, add_options=None):
67 ''' Wrapper method for the "oc" command '''
68 cmd = [self.binary, sub, kind]
69 if name is not None:
70 cmd = cmd + [name]
71 if namespace is not None:
72 cmd = cmd + ["-n", namespace]
73 if add_options is None:
74 add_options = []
75 cmd = cmd + ["--user=" + self.user, "--config=" + self.kubeconfig] + DEFAULT_OC_OPTIONS + add_options
76 try:
77 process = Popen(cmd, stdout=PIPE, stderr=PIPE) # noqa: F405
78 out, err = process.communicate(cmd)
79 if len(err) > 0:
80 if 'not found' in err:
81 return {'items': []}
82 if 'No resources found' in err:
83 return {'items': []}
84 raise Exception(err)
85 except Exception as excp:
86 err = "There was an exception trying to run the command '" + " ".join(cmd) + "' " + str(excp)
87 raise Exception(err)
88
89 return json.loads(out)
90
91
92 class OpenshiftLoggingFacts(OCBaseCommand):
93 ''' The class structure for holding the OpenshiftLogging Facts'''
94 name = "facts"
95
96 def __init__(self, logger, binary, kubeconfig, namespace):
97 ''' The init method for OpenshiftLoggingFacts '''
98 super(OpenshiftLoggingFacts, self).__init__(binary, kubeconfig, namespace)
99 self.logger = logger
100 self.facts = dict()
101
102 def default_keys_for(self, kind):
103 ''' Sets the default key values for kind '''
104 for comp in COMPONENTS:
105 self.add_facts_for(comp, kind)
106
107 def add_facts_for(self, comp, kind, name=None, facts=None):
108 ''' Add facts for the provided kind '''
109 if comp not in self.facts:
110 self.facts[comp] = dict()
111 if kind not in self.facts[comp]:
112 self.facts[comp][kind] = dict()
113 if name:
114 self.facts[comp][kind][name] = facts
115
116 def facts_for_routes(self, namespace):
117 ''' Gathers facts for Routes in logging namespace '''
118 self.default_keys_for("routes")
119 route_list = self.oc_command("get", "routes", namespace=namespace, add_options=["-l", ROUTE_SELECTOR])
120 if len(route_list["items"]) == 0:
121 return None
122 for route in route_list["items"]:
123 name = route["metadata"]["name"]
124 comp = self.comp(name)
125 if comp is not None:
126 self.add_facts_for(comp, "routes", name, dict(host=route["spec"]["host"]))
127 self.facts["agl_namespace"] = namespace
128
129 def facts_for_daemonsets(self, namespace):
130 ''' Gathers facts for Daemonsets in logging namespace '''
131 self.default_keys_for("daemonsets")
132 ds_list = self.oc_command("get", "daemonsets", namespace=namespace,
133 add_options=["-l", LOGGING_INFRA_KEY + "=fluentd"])
134 if len(ds_list["items"]) == 0:
135 return
136 for ds_item in ds_list["items"]:
137 name = ds_item["metadata"]["name"]
138 comp = self.comp(name)
139 spec = ds_item["spec"]["template"]["spec"]
140 result = dict(
141 selector=ds_item["spec"]["selector"],
142 containers=dict(),
143 nodeSelector=spec["nodeSelector"],
144 serviceAccount=spec["serviceAccount"],
145 terminationGracePeriodSeconds=spec["terminationGracePeriodSeconds"]
146 )
147 for container in spec["containers"]:
148 result["containers"][container["name"]] = container
149 self.add_facts_for(comp, "daemonsets", name, result)
150
151 def facts_for_pvcs(self, namespace):
152 ''' Gathers facts for PVCS in logging namespace'''
153 self.default_keys_for("pvcs")
154 pvclist = self.oc_command("get", "pvc", namespace=namespace, add_options=["-l", LOGGING_INFRA_KEY])
155 if len(pvclist["items"]) == 0:
156 return
157 for pvc in pvclist["items"]:
158 name = pvc["metadata"]["name"]
159 comp = self.comp(name)
160 self.add_facts_for(comp, "pvcs", name, dict())
161
162 def facts_for_deploymentconfigs(self, namespace):
163 ''' Gathers facts for DeploymentConfigs in logging namespace '''
164 self.default_keys_for("deploymentconfigs")
165 dclist = self.oc_command("get", "deploymentconfigs", namespace=namespace, add_options=["-l", LOGGING_INFRA_KEY])
166 if len(dclist["items"]) == 0:
167 return
168 dcs = dclist["items"]
169 for dc_item in dcs:
170 name = dc_item["metadata"]["name"]
171 comp = self.comp(name)
172 if comp is not None:
173 spec = dc_item["spec"]["template"]["spec"]
174 facts = dict(
175 name=name,
176 selector=dc_item["spec"]["selector"],
177 replicas=dc_item["spec"]["replicas"],
178 serviceAccount=spec["serviceAccount"],
179 containers=dict(),
180 volumes=dict()
181 )
182 if "nodeSelector" in spec:
183 facts["nodeSelector"] = spec["nodeSelector"]
184 if "supplementalGroups" in spec["securityContext"]:
185 facts["storageGroups"] = spec["securityContext"]["supplementalGroups"]
186 facts["spec"] = spec
187 if "volumes" in spec:
188 for vol in spec["volumes"]:
189 clone = copy.deepcopy(vol)
190 clone.pop("name", None)
191 facts["volumes"][vol["name"]] = clone
192 for container in spec["containers"]:
193 facts["containers"][container["name"]] = container
194 self.add_facts_for(comp, "deploymentconfigs", name, facts)
195
196 def facts_for_services(self, namespace):
197 ''' Gathers facts for services in logging namespace '''
198 self.default_keys_for("services")
199 servicelist = self.oc_command("get", "services", namespace=namespace, add_options=["-l", LOGGING_SELECTOR])
200 if len(servicelist["items"]) == 0:
201 return
202 for service in servicelist["items"]:
203 name = service["metadata"]["name"]
204 comp = self.comp(name)
205 if comp is not None:
206 self.add_facts_for(comp, "services", name, dict())
207
208 # pylint: disable=too-many-arguments
209 def facts_from_configmap(self, comp, kind, name, config_key, yaml_file=None):
210 '''Extracts facts in logging namespace from configmap'''
211 if yaml_file is not None:
212 if config_key.endswith(".yml") or config_key.endswith(".yaml"):
213 config_facts = yaml.load(yaml_file)
214 self.facts[comp][kind][name][config_key] = config_facts
215 self.facts[comp][kind][name][config_key]["raw"] = yaml_file
216
217 def facts_for_configmaps(self, namespace):
218 ''' Gathers facts for configmaps in logging namespace '''
219 self.default_keys_for("configmaps")
220 a_list = self.oc_command("get", "configmaps", namespace=namespace)
221 if len(a_list["items"]) == 0:
222 return
223 for item in a_list["items"]:
224 name = item["metadata"]["name"]
225 comp = self.comp(name)
226 if comp is not None:
227 self.add_facts_for(comp, "configmaps", name, dict(item["data"]))
228 if comp in ["elasticsearch", "elasticsearch_ops"]:
229 for config_key in item["data"]:
230 self.facts_from_configmap(comp, "configmaps", name, config_key, item["data"][config_key])
231
232 def facts_for_oauthclients(self, namespace):
233 ''' Gathers facts for oauthclients used with logging '''
234 self.default_keys_for("oauthclients")
235 a_list = self.oc_command("get", "oauthclients", namespace=namespace, add_options=["-l", LOGGING_SELECTOR])
236 if len(a_list["items"]) == 0:
237 return
238 for item in a_list["items"]:
239 name = item["metadata"]["name"]
240 comp = self.comp(name)
241 if comp is not None:
242 result = dict(
243 redirectURIs=item["redirectURIs"]
244 )
245 self.add_facts_for(comp, "oauthclients", name, result)
246
247 def facts_for_secrets(self, namespace):
248 ''' Gathers facts for secrets in the logging namespace '''
249 self.default_keys_for("secrets")
250 a_list = self.oc_command("get", "secrets", namespace=namespace)
251 if len(a_list["items"]) == 0:
252 return
253 for item in a_list["items"]:
254 name = item["metadata"]["name"]
255 comp = self.comp(name)
256 if comp is not None and item["type"] == "Opaque":
257 result = dict(
258 keys=item["data"].keys()
259 )
260 self.add_facts_for(comp, "secrets", name, result)
261
262 def facts_for_sccs(self):
263 ''' Gathers facts for SCCs used with logging '''
264 self.default_keys_for("sccs")
265 scc = self.oc_command("get", "securitycontextconstraints.v1.security.openshift.io", name="privileged")
266 if len(scc["users"]) == 0:
267 return
268 for item in scc["users"]:
269 comp = self.comp(item)
270 if comp is not None:
271 self.add_facts_for(comp, "sccs", "privileged", dict())
272
273 def facts_for_clusterrolebindings(self, namespace):
274 ''' Gathers ClusterRoleBindings used with logging '''
275 self.default_keys_for("clusterrolebindings")
276 role = self.oc_command("get", "clusterrolebindings", name="cluster-readers")
277 if "subjects" not in role or len(role["subjects"]) == 0:
278 return
279 for item in role["subjects"]:
280 comp = self.comp(item["name"])
281 if comp is not None and namespace == item.get("namespace"):
282 self.add_facts_for(comp, "clusterrolebindings", "cluster-readers", dict())
283
284 # this needs to end up nested under the service account...
285 def facts_for_rolebindings(self, namespace):
286 ''' Gathers facts for RoleBindings used with logging '''
287 self.default_keys_for("rolebindings")
288 role = self.oc_command("get", "rolebindings", namespace=namespace, name="logging-elasticsearch-view-role")
289 if "subjects" not in role or len(role["subjects"]) == 0:
290 return
291 for item in role["subjects"]:
292 comp = self.comp(item["name"])
293 if comp is not None and namespace == item.get("namespace"):
294 self.add_facts_for(comp, "rolebindings", "logging-elasticsearch-view-role", dict())
295
296 # pylint: disable=no-self-use, too-many-return-statements
297 def comp(self, name):
298 ''' Does a comparison to evaluate the logging component '''
299 if name.startswith("logging-curator-ops"):
300 return "curator_ops"
301 elif name.startswith("logging-kibana-ops") or name.startswith("kibana-ops"):
302 return "kibana_ops"
303 elif name.startswith("logging-es-ops") or name.startswith("logging-elasticsearch-ops"):
304 return "elasticsearch_ops"
305 elif name.startswith("logging-curator"):
306 return "curator"
307 elif name.startswith("logging-kibana") or name.startswith("kibana"):
308 return "kibana"
309 elif name.startswith("logging-es") or name.startswith("logging-elasticsearch"):
310 return "elasticsearch"
311 elif name.startswith("logging-fluentd") or name.endswith("aggregated-logging-fluentd"):
312 return "fluentd"
313 elif name.startswith("logging-mux"):
314 return "mux"
315 elif name.startswith("logging-eventrouter"):
316 return "eventrouter"
317 else:
318 return None
319
320 def build_facts(self):
321 ''' Builds the logging facts and returns them '''
322 self.facts_for_routes(self.namespace)
323 self.facts_for_daemonsets(self.namespace)
324 self.facts_for_deploymentconfigs(self.namespace)
325 self.facts_for_services(self.namespace)
326 self.facts_for_configmaps(self.namespace)
327 self.facts_for_sccs()
328 self.facts_for_oauthclients(self.namespace)
329 self.facts_for_clusterrolebindings(self.namespace)
330 self.facts_for_rolebindings(self.namespace)
331 self.facts_for_secrets(self.namespace)
332 self.facts_for_pvcs(self.namespace)
333
334 return self.facts
335
336
337 def main():
338 ''' The main method '''
339 module = AnsibleModule( # noqa: F405
340 argument_spec=dict(
341 admin_kubeconfig={"default": "/etc/origin/master/admin.kubeconfig", "type": "str"},
342 oc_bin={"required": True, "type": "str"},
343 openshift_logging_namespace={"required": True, "type": "str"}
344 ),
345 supports_check_mode=False
346 )
347 try:
348 cmd = OpenshiftLoggingFacts(module, module.params['oc_bin'], module.params['admin_kubeconfig'],
349 module.params['openshift_logging_namespace'])
350 module.exit_json(
351 ansible_facts={"openshift_logging_facts": cmd.build_facts()}
352 )
353 # ignore broad-except error to avoid stack trace to ansible user
354 # pylint: disable=broad-except
355 except Exception as error:
356 module.fail_json(msg=str(error))
357
358
359 if __name__ == '__main__':
360 main()
361
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py
--- a/roles/openshift_logging/library/openshift_logging_facts.py
+++ b/roles/openshift_logging/library/openshift_logging_facts.py
@@ -76,6 +76,7 @@
try:
process = Popen(cmd, stdout=PIPE, stderr=PIPE) # noqa: F405
out, err = process.communicate(cmd)
+ err = err.decode(encoding='utf8', errors='replace')
if len(err) > 0:
if 'not found' in err:
return {'items': []}
| {"golden_diff": "diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py\n--- a/roles/openshift_logging/library/openshift_logging_facts.py\n+++ b/roles/openshift_logging/library/openshift_logging_facts.py\n@@ -76,6 +76,7 @@\n try:\n process = Popen(cmd, stdout=PIPE, stderr=PIPE) # noqa: F405\n out, err = process.communicate(cmd)\n+ err = err.decode(encoding='utf8', errors='replace')\n if len(err) > 0:\n if 'not found' in err:\n return {'items': []}\n", "issue": "installation of Logging fails on \"Gather OpenShift Logging Facts\"\n#### Description\r\n\r\nOn a developer setup, using Fedora 25 as the OS and executing `cd ${GOPATH}/src/github.com/openshift/origin/_output/local/bin/linux/amd64 && sudo ./openshift start` for an OpenShift cluster, the Ansible Playbook for Logging fails during the task \"Gather OpenShift Logging Facts\".\r\n\r\nThis is the script that is used to install Logging via the playbook:\r\n\r\nhttps://paste.fedoraproject.org/paste/TAxemZhC59HT-WMGxTiVBl5M1UNdIGYhyRLivL9gydE=\r\n```bash\r\n#!/bin/bash\r\n\r\ncat > /tmp/metrics.inventory <<EOF\r\n[oo_first_master]\r\nopenshift\r\n\r\n[oo_first_master:vars]\r\n#openshift_deployment_type=origin\r\n#openshift_release=v3.6\r\n#openshift_image_tag=v3.6.0\r\n#openshift_metrics_image_prefix=registry.ops.openshift.com/openshift3\r\n#openshift_metrics_image_version=3.6.0\r\n\r\nopenshift_deployment_type=origin\r\nopenshift_release=v1.5\r\nopenshift_image_tag=v1.5.0\r\nopenshift_logging_install_logging=true\r\n\r\n# for more options, see https://github.com/openshift/openshift-ansible/blob/master/roles/openshift_logging/defaults/main.yml\r\nEOF\r\n\r\nsudo ansible-playbook /mnt/storage/jpkroehling/Projects/ansible/src/github.com/openshift/openshift-ansible/playbooks/byo/openshift-cluster/openshift-logging.yml \\\r\n\t-vv \\\r\n\t-e 'ansible_python_interpreter=/usr/bin/python3' \\\r\n\t-c local \\\r\n\t-i /tmp/metrics.inventory\r\n```\r\n\r\nThis is the actual message:\r\n\r\nhttps://paste.fedoraproject.org/paste/IZ06Z5xI2bmOxGg8nK4RRl5M1UNdIGYhyRLivL9gydE=\r\n```\r\nTASK [openshift_logging : Gather OpenShift Logging Facts] *********************************************************************************************************************************************************\r\ntask path: /mnt/storage/jpkroehling/Projects/ansible/src/github.com/openshift/openshift-ansible/roles/openshift_logging/tasks/install_logging.yaml:2\r\nfatal: [openshift]: FAILED! => {\"changed\": false, \"failed\": true, \"msg\": \"There was an exception trying to run the command 'oc get routes -n logging --user=system:admin/192-168-2-111:8443 --config=/tmp/openshift-logging-ansible-QNj1vf/admin.kubeconfig -o json -l component=support, logging-infra=support, provider=openshift' a bytes-like object is required, not 'str'\"}\r\n\tto retry, use: --limit @/mnt/storage/jpkroehling/Projects/ansible/src/github.com/openshift/openshift-ansible/playbooks/byo/openshift-cluster/openshift-logging.retry\r\n\r\nPLAY RECAP ********************************************************************************************************************************************************************************************************\r\nlocalhost : ok=2 changed=0 unreachable=0 failed=0 \r\nopenshift : ok=19 changed=0 unreachable=0 failed=1 \r\n```\r\n\r\n##### Version\r\n\r\n```bash\r\n$ ansible --version\r\nansible 2.3.0.0\r\n config file = /etc/ansible/ansible.cfg\r\n configured module search path = Default w/o overrides\r\n python version = 2.7.13 (default, Jan 12 2017, 17:59:37) [GCC 6.3.1 20161221 (Red Hat 6.3.1-1)]\r\n$ git describe\r\nopenshift-ansible-3.6.67-1-28-g74e4c9d\r\n```\n", "before_files": [{"content": "'''\n---\nmodule: openshift_logging_facts\nversion_added: \"\"\nshort_description: Gather facts about the OpenShift logging stack\ndescription:\n - Determine the current facts about the OpenShift logging stack (e.g. cluster size)\noptions:\nauthor: Red Hat, Inc\n'''\n\nimport copy\nimport json\n\n# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import\nfrom subprocess import * # noqa: F402,F403\n\n# ignore pylint errors related to the module_utils import\n# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import\nfrom ansible.module_utils.basic import * # noqa: F402,F403\n\nimport yaml\n\nEXAMPLES = \"\"\"\n- action: opneshift_logging_facts\n\"\"\"\n\nRETURN = \"\"\"\n\"\"\"\n\nDEFAULT_OC_OPTIONS = [\"-o\", \"json\"]\n\n# constants used for various labels and selectors\nCOMPONENT_KEY = \"component\"\nLOGGING_INFRA_KEY = \"logging-infra\"\n\n# selectors for filtering resources\nDS_FLUENTD_SELECTOR = LOGGING_INFRA_KEY + \"=\" + \"fluentd\"\nLOGGING_SELECTOR = LOGGING_INFRA_KEY + \"=\" + \"support\"\nROUTE_SELECTOR = \"component=support,logging-infra=support,provider=openshift\"\n# pylint: disable=line-too-long\nCOMPONENTS = [\"kibana\", \"curator\", \"elasticsearch\", \"fluentd\", \"kibana_ops\", \"curator_ops\", \"elasticsearch_ops\", \"mux\", \"eventrouter\"]\n\n\nclass OCBaseCommand(object):\n ''' The base class used to query openshift '''\n\n def __init__(self, binary, kubeconfig, namespace):\n ''' the init method of OCBaseCommand class '''\n self.binary = binary\n self.kubeconfig = kubeconfig\n self.user = self.get_system_admin(self.kubeconfig)\n self.namespace = namespace\n\n # pylint: disable=no-self-use\n def get_system_admin(self, kubeconfig):\n ''' Retrieves the system admin '''\n with open(kubeconfig, 'r') as kubeconfig_file:\n config = yaml.load(kubeconfig_file)\n for user in config[\"users\"]:\n if user[\"name\"].startswith(\"system:admin\"):\n return user[\"name\"]\n raise Exception(\"Unable to find system:admin in: \" + kubeconfig)\n\n # pylint: disable=too-many-arguments, dangerous-default-value\n def oc_command(self, sub, kind, namespace=None, name=None, add_options=None):\n ''' Wrapper method for the \"oc\" command '''\n cmd = [self.binary, sub, kind]\n if name is not None:\n cmd = cmd + [name]\n if namespace is not None:\n cmd = cmd + [\"-n\", namespace]\n if add_options is None:\n add_options = []\n cmd = cmd + [\"--user=\" + self.user, \"--config=\" + self.kubeconfig] + DEFAULT_OC_OPTIONS + add_options\n try:\n process = Popen(cmd, stdout=PIPE, stderr=PIPE) # noqa: F405\n out, err = process.communicate(cmd)\n if len(err) > 0:\n if 'not found' in err:\n return {'items': []}\n if 'No resources found' in err:\n return {'items': []}\n raise Exception(err)\n except Exception as excp:\n err = \"There was an exception trying to run the command '\" + \" \".join(cmd) + \"' \" + str(excp)\n raise Exception(err)\n\n return json.loads(out)\n\n\nclass OpenshiftLoggingFacts(OCBaseCommand):\n ''' The class structure for holding the OpenshiftLogging Facts'''\n name = \"facts\"\n\n def __init__(self, logger, binary, kubeconfig, namespace):\n ''' The init method for OpenshiftLoggingFacts '''\n super(OpenshiftLoggingFacts, self).__init__(binary, kubeconfig, namespace)\n self.logger = logger\n self.facts = dict()\n\n def default_keys_for(self, kind):\n ''' Sets the default key values for kind '''\n for comp in COMPONENTS:\n self.add_facts_for(comp, kind)\n\n def add_facts_for(self, comp, kind, name=None, facts=None):\n ''' Add facts for the provided kind '''\n if comp not in self.facts:\n self.facts[comp] = dict()\n if kind not in self.facts[comp]:\n self.facts[comp][kind] = dict()\n if name:\n self.facts[comp][kind][name] = facts\n\n def facts_for_routes(self, namespace):\n ''' Gathers facts for Routes in logging namespace '''\n self.default_keys_for(\"routes\")\n route_list = self.oc_command(\"get\", \"routes\", namespace=namespace, add_options=[\"-l\", ROUTE_SELECTOR])\n if len(route_list[\"items\"]) == 0:\n return None\n for route in route_list[\"items\"]:\n name = route[\"metadata\"][\"name\"]\n comp = self.comp(name)\n if comp is not None:\n self.add_facts_for(comp, \"routes\", name, dict(host=route[\"spec\"][\"host\"]))\n self.facts[\"agl_namespace\"] = namespace\n\n def facts_for_daemonsets(self, namespace):\n ''' Gathers facts for Daemonsets in logging namespace '''\n self.default_keys_for(\"daemonsets\")\n ds_list = self.oc_command(\"get\", \"daemonsets\", namespace=namespace,\n add_options=[\"-l\", LOGGING_INFRA_KEY + \"=fluentd\"])\n if len(ds_list[\"items\"]) == 0:\n return\n for ds_item in ds_list[\"items\"]:\n name = ds_item[\"metadata\"][\"name\"]\n comp = self.comp(name)\n spec = ds_item[\"spec\"][\"template\"][\"spec\"]\n result = dict(\n selector=ds_item[\"spec\"][\"selector\"],\n containers=dict(),\n nodeSelector=spec[\"nodeSelector\"],\n serviceAccount=spec[\"serviceAccount\"],\n terminationGracePeriodSeconds=spec[\"terminationGracePeriodSeconds\"]\n )\n for container in spec[\"containers\"]:\n result[\"containers\"][container[\"name\"]] = container\n self.add_facts_for(comp, \"daemonsets\", name, result)\n\n def facts_for_pvcs(self, namespace):\n ''' Gathers facts for PVCS in logging namespace'''\n self.default_keys_for(\"pvcs\")\n pvclist = self.oc_command(\"get\", \"pvc\", namespace=namespace, add_options=[\"-l\", LOGGING_INFRA_KEY])\n if len(pvclist[\"items\"]) == 0:\n return\n for pvc in pvclist[\"items\"]:\n name = pvc[\"metadata\"][\"name\"]\n comp = self.comp(name)\n self.add_facts_for(comp, \"pvcs\", name, dict())\n\n def facts_for_deploymentconfigs(self, namespace):\n ''' Gathers facts for DeploymentConfigs in logging namespace '''\n self.default_keys_for(\"deploymentconfigs\")\n dclist = self.oc_command(\"get\", \"deploymentconfigs\", namespace=namespace, add_options=[\"-l\", LOGGING_INFRA_KEY])\n if len(dclist[\"items\"]) == 0:\n return\n dcs = dclist[\"items\"]\n for dc_item in dcs:\n name = dc_item[\"metadata\"][\"name\"]\n comp = self.comp(name)\n if comp is not None:\n spec = dc_item[\"spec\"][\"template\"][\"spec\"]\n facts = dict(\n name=name,\n selector=dc_item[\"spec\"][\"selector\"],\n replicas=dc_item[\"spec\"][\"replicas\"],\n serviceAccount=spec[\"serviceAccount\"],\n containers=dict(),\n volumes=dict()\n )\n if \"nodeSelector\" in spec:\n facts[\"nodeSelector\"] = spec[\"nodeSelector\"]\n if \"supplementalGroups\" in spec[\"securityContext\"]:\n facts[\"storageGroups\"] = spec[\"securityContext\"][\"supplementalGroups\"]\n facts[\"spec\"] = spec\n if \"volumes\" in spec:\n for vol in spec[\"volumes\"]:\n clone = copy.deepcopy(vol)\n clone.pop(\"name\", None)\n facts[\"volumes\"][vol[\"name\"]] = clone\n for container in spec[\"containers\"]:\n facts[\"containers\"][container[\"name\"]] = container\n self.add_facts_for(comp, \"deploymentconfigs\", name, facts)\n\n def facts_for_services(self, namespace):\n ''' Gathers facts for services in logging namespace '''\n self.default_keys_for(\"services\")\n servicelist = self.oc_command(\"get\", \"services\", namespace=namespace, add_options=[\"-l\", LOGGING_SELECTOR])\n if len(servicelist[\"items\"]) == 0:\n return\n for service in servicelist[\"items\"]:\n name = service[\"metadata\"][\"name\"]\n comp = self.comp(name)\n if comp is not None:\n self.add_facts_for(comp, \"services\", name, dict())\n\n # pylint: disable=too-many-arguments\n def facts_from_configmap(self, comp, kind, name, config_key, yaml_file=None):\n '''Extracts facts in logging namespace from configmap'''\n if yaml_file is not None:\n if config_key.endswith(\".yml\") or config_key.endswith(\".yaml\"):\n config_facts = yaml.load(yaml_file)\n self.facts[comp][kind][name][config_key] = config_facts\n self.facts[comp][kind][name][config_key][\"raw\"] = yaml_file\n\n def facts_for_configmaps(self, namespace):\n ''' Gathers facts for configmaps in logging namespace '''\n self.default_keys_for(\"configmaps\")\n a_list = self.oc_command(\"get\", \"configmaps\", namespace=namespace)\n if len(a_list[\"items\"]) == 0:\n return\n for item in a_list[\"items\"]:\n name = item[\"metadata\"][\"name\"]\n comp = self.comp(name)\n if comp is not None:\n self.add_facts_for(comp, \"configmaps\", name, dict(item[\"data\"]))\n if comp in [\"elasticsearch\", \"elasticsearch_ops\"]:\n for config_key in item[\"data\"]:\n self.facts_from_configmap(comp, \"configmaps\", name, config_key, item[\"data\"][config_key])\n\n def facts_for_oauthclients(self, namespace):\n ''' Gathers facts for oauthclients used with logging '''\n self.default_keys_for(\"oauthclients\")\n a_list = self.oc_command(\"get\", \"oauthclients\", namespace=namespace, add_options=[\"-l\", LOGGING_SELECTOR])\n if len(a_list[\"items\"]) == 0:\n return\n for item in a_list[\"items\"]:\n name = item[\"metadata\"][\"name\"]\n comp = self.comp(name)\n if comp is not None:\n result = dict(\n redirectURIs=item[\"redirectURIs\"]\n )\n self.add_facts_for(comp, \"oauthclients\", name, result)\n\n def facts_for_secrets(self, namespace):\n ''' Gathers facts for secrets in the logging namespace '''\n self.default_keys_for(\"secrets\")\n a_list = self.oc_command(\"get\", \"secrets\", namespace=namespace)\n if len(a_list[\"items\"]) == 0:\n return\n for item in a_list[\"items\"]:\n name = item[\"metadata\"][\"name\"]\n comp = self.comp(name)\n if comp is not None and item[\"type\"] == \"Opaque\":\n result = dict(\n keys=item[\"data\"].keys()\n )\n self.add_facts_for(comp, \"secrets\", name, result)\n\n def facts_for_sccs(self):\n ''' Gathers facts for SCCs used with logging '''\n self.default_keys_for(\"sccs\")\n scc = self.oc_command(\"get\", \"securitycontextconstraints.v1.security.openshift.io\", name=\"privileged\")\n if len(scc[\"users\"]) == 0:\n return\n for item in scc[\"users\"]:\n comp = self.comp(item)\n if comp is not None:\n self.add_facts_for(comp, \"sccs\", \"privileged\", dict())\n\n def facts_for_clusterrolebindings(self, namespace):\n ''' Gathers ClusterRoleBindings used with logging '''\n self.default_keys_for(\"clusterrolebindings\")\n role = self.oc_command(\"get\", \"clusterrolebindings\", name=\"cluster-readers\")\n if \"subjects\" not in role or len(role[\"subjects\"]) == 0:\n return\n for item in role[\"subjects\"]:\n comp = self.comp(item[\"name\"])\n if comp is not None and namespace == item.get(\"namespace\"):\n self.add_facts_for(comp, \"clusterrolebindings\", \"cluster-readers\", dict())\n\n# this needs to end up nested under the service account...\n def facts_for_rolebindings(self, namespace):\n ''' Gathers facts for RoleBindings used with logging '''\n self.default_keys_for(\"rolebindings\")\n role = self.oc_command(\"get\", \"rolebindings\", namespace=namespace, name=\"logging-elasticsearch-view-role\")\n if \"subjects\" not in role or len(role[\"subjects\"]) == 0:\n return\n for item in role[\"subjects\"]:\n comp = self.comp(item[\"name\"])\n if comp is not None and namespace == item.get(\"namespace\"):\n self.add_facts_for(comp, \"rolebindings\", \"logging-elasticsearch-view-role\", dict())\n\n # pylint: disable=no-self-use, too-many-return-statements\n def comp(self, name):\n ''' Does a comparison to evaluate the logging component '''\n if name.startswith(\"logging-curator-ops\"):\n return \"curator_ops\"\n elif name.startswith(\"logging-kibana-ops\") or name.startswith(\"kibana-ops\"):\n return \"kibana_ops\"\n elif name.startswith(\"logging-es-ops\") or name.startswith(\"logging-elasticsearch-ops\"):\n return \"elasticsearch_ops\"\n elif name.startswith(\"logging-curator\"):\n return \"curator\"\n elif name.startswith(\"logging-kibana\") or name.startswith(\"kibana\"):\n return \"kibana\"\n elif name.startswith(\"logging-es\") or name.startswith(\"logging-elasticsearch\"):\n return \"elasticsearch\"\n elif name.startswith(\"logging-fluentd\") or name.endswith(\"aggregated-logging-fluentd\"):\n return \"fluentd\"\n elif name.startswith(\"logging-mux\"):\n return \"mux\"\n elif name.startswith(\"logging-eventrouter\"):\n return \"eventrouter\"\n else:\n return None\n\n def build_facts(self):\n ''' Builds the logging facts and returns them '''\n self.facts_for_routes(self.namespace)\n self.facts_for_daemonsets(self.namespace)\n self.facts_for_deploymentconfigs(self.namespace)\n self.facts_for_services(self.namespace)\n self.facts_for_configmaps(self.namespace)\n self.facts_for_sccs()\n self.facts_for_oauthclients(self.namespace)\n self.facts_for_clusterrolebindings(self.namespace)\n self.facts_for_rolebindings(self.namespace)\n self.facts_for_secrets(self.namespace)\n self.facts_for_pvcs(self.namespace)\n\n return self.facts\n\n\ndef main():\n ''' The main method '''\n module = AnsibleModule( # noqa: F405\n argument_spec=dict(\n admin_kubeconfig={\"default\": \"/etc/origin/master/admin.kubeconfig\", \"type\": \"str\"},\n oc_bin={\"required\": True, \"type\": \"str\"},\n openshift_logging_namespace={\"required\": True, \"type\": \"str\"}\n ),\n supports_check_mode=False\n )\n try:\n cmd = OpenshiftLoggingFacts(module, module.params['oc_bin'], module.params['admin_kubeconfig'],\n module.params['openshift_logging_namespace'])\n module.exit_json(\n ansible_facts={\"openshift_logging_facts\": cmd.build_facts()}\n )\n # ignore broad-except error to avoid stack trace to ansible user\n # pylint: disable=broad-except\n except Exception as error:\n module.fail_json(msg=str(error))\n\n\nif __name__ == '__main__':\n main()\n", "path": "roles/openshift_logging/library/openshift_logging_facts.py"}], "after_files": [{"content": "'''\n---\nmodule: openshift_logging_facts\nversion_added: \"\"\nshort_description: Gather facts about the OpenShift logging stack\ndescription:\n - Determine the current facts about the OpenShift logging stack (e.g. cluster size)\noptions:\nauthor: Red Hat, Inc\n'''\n\nimport copy\nimport json\n\n# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import\nfrom subprocess import * # noqa: F402,F403\n\n# ignore pylint errors related to the module_utils import\n# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import\nfrom ansible.module_utils.basic import * # noqa: F402,F403\n\nimport yaml\n\nEXAMPLES = \"\"\"\n- action: opneshift_logging_facts\n\"\"\"\n\nRETURN = \"\"\"\n\"\"\"\n\nDEFAULT_OC_OPTIONS = [\"-o\", \"json\"]\n\n# constants used for various labels and selectors\nCOMPONENT_KEY = \"component\"\nLOGGING_INFRA_KEY = \"logging-infra\"\n\n# selectors for filtering resources\nDS_FLUENTD_SELECTOR = LOGGING_INFRA_KEY + \"=\" + \"fluentd\"\nLOGGING_SELECTOR = LOGGING_INFRA_KEY + \"=\" + \"support\"\nROUTE_SELECTOR = \"component=support,logging-infra=support,provider=openshift\"\n# pylint: disable=line-too-long\nCOMPONENTS = [\"kibana\", \"curator\", \"elasticsearch\", \"fluentd\", \"kibana_ops\", \"curator_ops\", \"elasticsearch_ops\", \"mux\", \"eventrouter\"]\n\n\nclass OCBaseCommand(object):\n ''' The base class used to query openshift '''\n\n def __init__(self, binary, kubeconfig, namespace):\n ''' the init method of OCBaseCommand class '''\n self.binary = binary\n self.kubeconfig = kubeconfig\n self.user = self.get_system_admin(self.kubeconfig)\n self.namespace = namespace\n\n # pylint: disable=no-self-use\n def get_system_admin(self, kubeconfig):\n ''' Retrieves the system admin '''\n with open(kubeconfig, 'r') as kubeconfig_file:\n config = yaml.load(kubeconfig_file)\n for user in config[\"users\"]:\n if user[\"name\"].startswith(\"system:admin\"):\n return user[\"name\"]\n raise Exception(\"Unable to find system:admin in: \" + kubeconfig)\n\n # pylint: disable=too-many-arguments, dangerous-default-value\n def oc_command(self, sub, kind, namespace=None, name=None, add_options=None):\n ''' Wrapper method for the \"oc\" command '''\n cmd = [self.binary, sub, kind]\n if name is not None:\n cmd = cmd + [name]\n if namespace is not None:\n cmd = cmd + [\"-n\", namespace]\n if add_options is None:\n add_options = []\n cmd = cmd + [\"--user=\" + self.user, \"--config=\" + self.kubeconfig] + DEFAULT_OC_OPTIONS + add_options\n try:\n process = Popen(cmd, stdout=PIPE, stderr=PIPE) # noqa: F405\n out, err = process.communicate(cmd)\n err = err.decode(encoding='utf8', errors='replace')\n if len(err) > 0:\n if 'not found' in err:\n return {'items': []}\n if 'No resources found' in err:\n return {'items': []}\n raise Exception(err)\n except Exception as excp:\n err = \"There was an exception trying to run the command '\" + \" \".join(cmd) + \"' \" + str(excp)\n raise Exception(err)\n\n return json.loads(out)\n\n\nclass OpenshiftLoggingFacts(OCBaseCommand):\n ''' The class structure for holding the OpenshiftLogging Facts'''\n name = \"facts\"\n\n def __init__(self, logger, binary, kubeconfig, namespace):\n ''' The init method for OpenshiftLoggingFacts '''\n super(OpenshiftLoggingFacts, self).__init__(binary, kubeconfig, namespace)\n self.logger = logger\n self.facts = dict()\n\n def default_keys_for(self, kind):\n ''' Sets the default key values for kind '''\n for comp in COMPONENTS:\n self.add_facts_for(comp, kind)\n\n def add_facts_for(self, comp, kind, name=None, facts=None):\n ''' Add facts for the provided kind '''\n if comp not in self.facts:\n self.facts[comp] = dict()\n if kind not in self.facts[comp]:\n self.facts[comp][kind] = dict()\n if name:\n self.facts[comp][kind][name] = facts\n\n def facts_for_routes(self, namespace):\n ''' Gathers facts for Routes in logging namespace '''\n self.default_keys_for(\"routes\")\n route_list = self.oc_command(\"get\", \"routes\", namespace=namespace, add_options=[\"-l\", ROUTE_SELECTOR])\n if len(route_list[\"items\"]) == 0:\n return None\n for route in route_list[\"items\"]:\n name = route[\"metadata\"][\"name\"]\n comp = self.comp(name)\n if comp is not None:\n self.add_facts_for(comp, \"routes\", name, dict(host=route[\"spec\"][\"host\"]))\n self.facts[\"agl_namespace\"] = namespace\n\n def facts_for_daemonsets(self, namespace):\n ''' Gathers facts for Daemonsets in logging namespace '''\n self.default_keys_for(\"daemonsets\")\n ds_list = self.oc_command(\"get\", \"daemonsets\", namespace=namespace,\n add_options=[\"-l\", LOGGING_INFRA_KEY + \"=fluentd\"])\n if len(ds_list[\"items\"]) == 0:\n return\n for ds_item in ds_list[\"items\"]:\n name = ds_item[\"metadata\"][\"name\"]\n comp = self.comp(name)\n spec = ds_item[\"spec\"][\"template\"][\"spec\"]\n result = dict(\n selector=ds_item[\"spec\"][\"selector\"],\n containers=dict(),\n nodeSelector=spec[\"nodeSelector\"],\n serviceAccount=spec[\"serviceAccount\"],\n terminationGracePeriodSeconds=spec[\"terminationGracePeriodSeconds\"]\n )\n for container in spec[\"containers\"]:\n result[\"containers\"][container[\"name\"]] = container\n self.add_facts_for(comp, \"daemonsets\", name, result)\n\n def facts_for_pvcs(self, namespace):\n ''' Gathers facts for PVCS in logging namespace'''\n self.default_keys_for(\"pvcs\")\n pvclist = self.oc_command(\"get\", \"pvc\", namespace=namespace, add_options=[\"-l\", LOGGING_INFRA_KEY])\n if len(pvclist[\"items\"]) == 0:\n return\n for pvc in pvclist[\"items\"]:\n name = pvc[\"metadata\"][\"name\"]\n comp = self.comp(name)\n self.add_facts_for(comp, \"pvcs\", name, dict())\n\n def facts_for_deploymentconfigs(self, namespace):\n ''' Gathers facts for DeploymentConfigs in logging namespace '''\n self.default_keys_for(\"deploymentconfigs\")\n dclist = self.oc_command(\"get\", \"deploymentconfigs\", namespace=namespace, add_options=[\"-l\", LOGGING_INFRA_KEY])\n if len(dclist[\"items\"]) == 0:\n return\n dcs = dclist[\"items\"]\n for dc_item in dcs:\n name = dc_item[\"metadata\"][\"name\"]\n comp = self.comp(name)\n if comp is not None:\n spec = dc_item[\"spec\"][\"template\"][\"spec\"]\n facts = dict(\n name=name,\n selector=dc_item[\"spec\"][\"selector\"],\n replicas=dc_item[\"spec\"][\"replicas\"],\n serviceAccount=spec[\"serviceAccount\"],\n containers=dict(),\n volumes=dict()\n )\n if \"nodeSelector\" in spec:\n facts[\"nodeSelector\"] = spec[\"nodeSelector\"]\n if \"supplementalGroups\" in spec[\"securityContext\"]:\n facts[\"storageGroups\"] = spec[\"securityContext\"][\"supplementalGroups\"]\n facts[\"spec\"] = spec\n if \"volumes\" in spec:\n for vol in spec[\"volumes\"]:\n clone = copy.deepcopy(vol)\n clone.pop(\"name\", None)\n facts[\"volumes\"][vol[\"name\"]] = clone\n for container in spec[\"containers\"]:\n facts[\"containers\"][container[\"name\"]] = container\n self.add_facts_for(comp, \"deploymentconfigs\", name, facts)\n\n def facts_for_services(self, namespace):\n ''' Gathers facts for services in logging namespace '''\n self.default_keys_for(\"services\")\n servicelist = self.oc_command(\"get\", \"services\", namespace=namespace, add_options=[\"-l\", LOGGING_SELECTOR])\n if len(servicelist[\"items\"]) == 0:\n return\n for service in servicelist[\"items\"]:\n name = service[\"metadata\"][\"name\"]\n comp = self.comp(name)\n if comp is not None:\n self.add_facts_for(comp, \"services\", name, dict())\n\n # pylint: disable=too-many-arguments\n def facts_from_configmap(self, comp, kind, name, config_key, yaml_file=None):\n '''Extracts facts in logging namespace from configmap'''\n if yaml_file is not None:\n if config_key.endswith(\".yml\") or config_key.endswith(\".yaml\"):\n config_facts = yaml.load(yaml_file)\n self.facts[comp][kind][name][config_key] = config_facts\n self.facts[comp][kind][name][config_key][\"raw\"] = yaml_file\n\n def facts_for_configmaps(self, namespace):\n ''' Gathers facts for configmaps in logging namespace '''\n self.default_keys_for(\"configmaps\")\n a_list = self.oc_command(\"get\", \"configmaps\", namespace=namespace)\n if len(a_list[\"items\"]) == 0:\n return\n for item in a_list[\"items\"]:\n name = item[\"metadata\"][\"name\"]\n comp = self.comp(name)\n if comp is not None:\n self.add_facts_for(comp, \"configmaps\", name, dict(item[\"data\"]))\n if comp in [\"elasticsearch\", \"elasticsearch_ops\"]:\n for config_key in item[\"data\"]:\n self.facts_from_configmap(comp, \"configmaps\", name, config_key, item[\"data\"][config_key])\n\n def facts_for_oauthclients(self, namespace):\n ''' Gathers facts for oauthclients used with logging '''\n self.default_keys_for(\"oauthclients\")\n a_list = self.oc_command(\"get\", \"oauthclients\", namespace=namespace, add_options=[\"-l\", LOGGING_SELECTOR])\n if len(a_list[\"items\"]) == 0:\n return\n for item in a_list[\"items\"]:\n name = item[\"metadata\"][\"name\"]\n comp = self.comp(name)\n if comp is not None:\n result = dict(\n redirectURIs=item[\"redirectURIs\"]\n )\n self.add_facts_for(comp, \"oauthclients\", name, result)\n\n def facts_for_secrets(self, namespace):\n ''' Gathers facts for secrets in the logging namespace '''\n self.default_keys_for(\"secrets\")\n a_list = self.oc_command(\"get\", \"secrets\", namespace=namespace)\n if len(a_list[\"items\"]) == 0:\n return\n for item in a_list[\"items\"]:\n name = item[\"metadata\"][\"name\"]\n comp = self.comp(name)\n if comp is not None and item[\"type\"] == \"Opaque\":\n result = dict(\n keys=item[\"data\"].keys()\n )\n self.add_facts_for(comp, \"secrets\", name, result)\n\n def facts_for_sccs(self):\n ''' Gathers facts for SCCs used with logging '''\n self.default_keys_for(\"sccs\")\n scc = self.oc_command(\"get\", \"securitycontextconstraints.v1.security.openshift.io\", name=\"privileged\")\n if len(scc[\"users\"]) == 0:\n return\n for item in scc[\"users\"]:\n comp = self.comp(item)\n if comp is not None:\n self.add_facts_for(comp, \"sccs\", \"privileged\", dict())\n\n def facts_for_clusterrolebindings(self, namespace):\n ''' Gathers ClusterRoleBindings used with logging '''\n self.default_keys_for(\"clusterrolebindings\")\n role = self.oc_command(\"get\", \"clusterrolebindings\", name=\"cluster-readers\")\n if \"subjects\" not in role or len(role[\"subjects\"]) == 0:\n return\n for item in role[\"subjects\"]:\n comp = self.comp(item[\"name\"])\n if comp is not None and namespace == item.get(\"namespace\"):\n self.add_facts_for(comp, \"clusterrolebindings\", \"cluster-readers\", dict())\n\n# this needs to end up nested under the service account...\n def facts_for_rolebindings(self, namespace):\n ''' Gathers facts for RoleBindings used with logging '''\n self.default_keys_for(\"rolebindings\")\n role = self.oc_command(\"get\", \"rolebindings\", namespace=namespace, name=\"logging-elasticsearch-view-role\")\n if \"subjects\" not in role or len(role[\"subjects\"]) == 0:\n return\n for item in role[\"subjects\"]:\n comp = self.comp(item[\"name\"])\n if comp is not None and namespace == item.get(\"namespace\"):\n self.add_facts_for(comp, \"rolebindings\", \"logging-elasticsearch-view-role\", dict())\n\n # pylint: disable=no-self-use, too-many-return-statements\n def comp(self, name):\n ''' Does a comparison to evaluate the logging component '''\n if name.startswith(\"logging-curator-ops\"):\n return \"curator_ops\"\n elif name.startswith(\"logging-kibana-ops\") or name.startswith(\"kibana-ops\"):\n return \"kibana_ops\"\n elif name.startswith(\"logging-es-ops\") or name.startswith(\"logging-elasticsearch-ops\"):\n return \"elasticsearch_ops\"\n elif name.startswith(\"logging-curator\"):\n return \"curator\"\n elif name.startswith(\"logging-kibana\") or name.startswith(\"kibana\"):\n return \"kibana\"\n elif name.startswith(\"logging-es\") or name.startswith(\"logging-elasticsearch\"):\n return \"elasticsearch\"\n elif name.startswith(\"logging-fluentd\") or name.endswith(\"aggregated-logging-fluentd\"):\n return \"fluentd\"\n elif name.startswith(\"logging-mux\"):\n return \"mux\"\n elif name.startswith(\"logging-eventrouter\"):\n return \"eventrouter\"\n else:\n return None\n\n def build_facts(self):\n ''' Builds the logging facts and returns them '''\n self.facts_for_routes(self.namespace)\n self.facts_for_daemonsets(self.namespace)\n self.facts_for_deploymentconfigs(self.namespace)\n self.facts_for_services(self.namespace)\n self.facts_for_configmaps(self.namespace)\n self.facts_for_sccs()\n self.facts_for_oauthclients(self.namespace)\n self.facts_for_clusterrolebindings(self.namespace)\n self.facts_for_rolebindings(self.namespace)\n self.facts_for_secrets(self.namespace)\n self.facts_for_pvcs(self.namespace)\n\n return self.facts\n\n\ndef main():\n ''' The main method '''\n module = AnsibleModule( # noqa: F405\n argument_spec=dict(\n admin_kubeconfig={\"default\": \"/etc/origin/master/admin.kubeconfig\", \"type\": \"str\"},\n oc_bin={\"required\": True, \"type\": \"str\"},\n openshift_logging_namespace={\"required\": True, \"type\": \"str\"}\n ),\n supports_check_mode=False\n )\n try:\n cmd = OpenshiftLoggingFacts(module, module.params['oc_bin'], module.params['admin_kubeconfig'],\n module.params['openshift_logging_namespace'])\n module.exit_json(\n ansible_facts={\"openshift_logging_facts\": cmd.build_facts()}\n )\n # ignore broad-except error to avoid stack trace to ansible user\n # pylint: disable=broad-except\n except Exception as error:\n module.fail_json(msg=str(error))\n\n\nif __name__ == '__main__':\n main()\n", "path": "roles/openshift_logging/library/openshift_logging_facts.py"}]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.