in_source_id: string
before_files: list
after_files: list
pr_diff: string
issue: string
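
Each record below pairs a reported issue with the affected files before and after the fix, plus the pull-request diff that resolved it. A minimal sketch of how such a record could be typed in Python, assuming only the field names above and the `content`/`path` layout visible in the rows below; the `Record`/`FileEntry` type names and the `changed_paths` helper are illustrative, not part of the dataset:

```python
from typing import List, TypedDict


class FileEntry(TypedDict):
    # One source file as stored in before_files/after_files: the full file
    # text plus its repository-relative path, matching the
    # {"content": ..., "path": ...} objects in the rows below.
    content: str
    path: str


class Record(TypedDict):
    in_source_id: str               # e.g. "aws__aws-cli-4397"
    before_files: List[FileEntry]   # files as they were before the fix
    after_files: List[FileEntry]    # the same files after the fix
    pr_diff: str                    # unified diff of the resolving pull request
    issue: str                      # text of the reported issue


def changed_paths(record: Record) -> List[str]:
    # Hypothetical helper: paths whose content differs between the two sides.
    before = {f["path"]: f["content"] for f in record["before_files"]}
    after = {f["path"]: f["content"] for f in record["after_files"]}
    return [path for path, content in after.items() if before.get(path) != content]
```

For the first record below, `changed_paths` would return `['awscli/shorthand.py']`, since that is the only file whose content differs between `before_files` and `after_files`.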
aws__aws-cli-4397
[ { "content": "# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"Module for parsing shorthand syntax.\n\nThis module parses any CLI options that use a \"shorthand\"\nsyntax::\n\n --foo A=b,C=d\n |------|\n |\n Shorthand syntax\n\n\nThis module provides two main classes to do this.\nFirst, there's a ``ShorthandParser`` class. This class works\non a purely syntactic level. It looks only at the string value\nprovided to it in order to figure out how the string should be parsed.\n\nHowever, because there was a pre-existing shorthand parser, we need\nto remain backwards compatible with the previous parser. One of the\nthings the previous parser did was use the associated JSON model to\ncontrol how the expression was parsed.\n\nIn order to accommodate this a post processing class is provided that\ntakes the parsed values from the ``ShorthandParser`` as well as the\ncorresponding JSON model for the CLI argument and makes any adjustments\nnecessary to maintain backwards compatibility. This is done in the\n``BackCompatVisitor`` class.\n\n\"\"\"\nimport re\nimport string\n\n\n_EOF = object()\n\n\nclass _NamedRegex(object):\n def __init__(self, name, regex_str):\n self.name = name\n self.regex = re.compile(regex_str, re.UNICODE)\n\n def match(self, value):\n return self.regex.match(value)\n\n\nclass ShorthandParseError(Exception):\n def __init__(self, value, expected, actual, index):\n self.value = value\n self.expected = expected\n self.actual = actual\n self.index = index\n msg = self._construct_msg()\n super(ShorthandParseError, self).__init__(msg)\n\n def _construct_msg(self):\n consumed, remaining, num_spaces = self.value, '', self.index\n if '\\n' in self.value[:self.index]:\n # If there's newlines in the consumed expression, we want\n # to make sure we're only counting the spaces\n # from the last newline:\n # foo=bar,\\n\n # bar==baz\n # ^\n last_newline = self.value[:self.index].rindex('\\n')\n num_spaces = self.index - last_newline - 1\n if '\\n' in self.value[self.index:]:\n # If there's newline in the remaining, divide value\n # into consumed and remainig\n # foo==bar,\\n\n # ^\n # bar=baz\n next_newline = self.index + self.value[self.index:].index('\\n')\n consumed = self.value[:next_newline]\n remaining = self.value[next_newline:]\n msg = (\n \"Expected: '%s', received: '%s' for input:\\n\"\n \"%s\\n\"\n \"%s\"\n \"%s\"\n ) % (self.expected, self.actual, consumed,\n ' ' * num_spaces + '^', remaining)\n return msg\n\n\nclass ShorthandParser(object):\n \"\"\"Parses shorthand syntax in the CLI.\n\n Note that this parser does not rely on any JSON models to control\n how to parse the shorthand syntax.\n\n \"\"\"\n\n _SINGLE_QUOTED = _NamedRegex('singled quoted', r'\\'(?:\\\\\\\\|\\\\\\'|[^\\'])*\\'')\n _DOUBLE_QUOTED = _NamedRegex('double quoted', r'\"(?:\\\\\\\\|\\\\\"|[^\"])*\"')\n _START_WORD = u'\\!\\#-&\\(-\\+\\--\\<\\>-Z\\\\\\\\-z\\u007c-\\uffff'\n _FIRST_FOLLOW_CHARS = u'\\s\\!\\#-&\\(-\\+\\--\\\\\\\\\\^-\\|~-\\uffff'\n _SECOND_FOLLOW_CHARS = 
u'\\s\\!\\#-&\\(-\\+\\--\\<\\>-\\uffff'\n _ESCAPED_COMMA = '(\\\\\\\\,)'\n _FIRST_VALUE = _NamedRegex(\n 'first',\n u'({escaped_comma}|[{start_word}])'\n u'({escaped_comma}|[{follow_chars}])*'.format(\n escaped_comma=_ESCAPED_COMMA,\n start_word=_START_WORD,\n follow_chars=_FIRST_FOLLOW_CHARS,\n ))\n _SECOND_VALUE = _NamedRegex(\n 'second',\n u'({escaped_comma}|[{start_word}])'\n u'({escaped_comma}|[{follow_chars}])*'.format(\n escaped_comma=_ESCAPED_COMMA,\n start_word=_START_WORD,\n follow_chars=_SECOND_FOLLOW_CHARS,\n ))\n\n def __init__(self):\n self._tokens = []\n\n def parse(self, value):\n \"\"\"Parse shorthand syntax.\n\n For example::\n\n parser = ShorthandParser()\n parser.parse('a=b') # {'a': 'b'}\n parser.parse('a=b,c') # {'a': ['b', 'c']}\n\n :tpye value: str\n :param value: Any value that needs to be parsed.\n\n :return: Parsed value, which will be a dictionary.\n \"\"\"\n self._input_value = value\n self._index = 0\n return self._parameter()\n\n def _parameter(self):\n # parameter = keyval *(\",\" keyval)\n params = {}\n params.update(self._keyval())\n while self._index < len(self._input_value):\n self._expect(',', consume_whitespace=True)\n params.update(self._keyval())\n return params\n\n def _keyval(self):\n # keyval = key \"=\" [values]\n key = self._key()\n self._expect('=', consume_whitespace=True)\n values = self._values()\n return {key: values}\n\n def _key(self):\n # key = 1*(alpha / %x30-39 / %x5f / %x2e / %x23) ; [a-zA-Z0-9\\-_.#/]\n valid_chars = string.ascii_letters + string.digits + '-_.#/'\n start = self._index\n while not self._at_eof():\n if self._current() not in valid_chars:\n break\n self._index += 1\n return self._input_value[start:self._index]\n\n def _values(self):\n # values = csv-list / explicit-list / hash-literal\n if self._at_eof():\n return ''\n elif self._current() == '[':\n return self._explicit_list()\n elif self._current() == '{':\n return self._hash_literal()\n else:\n return self._csv_value()\n\n def _csv_value(self):\n # Supports either:\n # foo=bar -> 'bar'\n # ^\n # foo=bar,baz -> ['bar', 'baz']\n # ^\n first_value = self._first_value()\n self._consume_whitespace()\n if self._at_eof() or self._input_value[self._index] != ',':\n return first_value\n self._expect(',', consume_whitespace=True)\n csv_list = [first_value]\n # Try to parse remaining list values.\n # It's possible we don't parse anything:\n # a=b,c=d\n # ^-here\n # In the case above, we'll hit the ShorthandParser,\n # backtrack to the comma, and return a single scalar\n # value 'b'.\n while True:\n try:\n current = self._second_value()\n self._consume_whitespace()\n if self._at_eof():\n csv_list.append(current)\n break\n self._expect(',', consume_whitespace=True)\n csv_list.append(current)\n except ShorthandParseError:\n # Backtrack to the previous comma.\n # This can happen when we reach this case:\n # foo=a,b,c=d,e=f\n # ^-start\n # foo=a,b,c=d,e=f\n # ^-error, \"expected ',' received '='\n # foo=a,b,c=d,e=f\n # ^-backtrack to here.\n if self._at_eof():\n raise\n self._backtrack_to(',')\n break\n if len(csv_list) == 1:\n # Then this was a foo=bar case, so we expect\n # this to parse to a scalar value 'bar', i.e\n # {\"foo\": \"bar\"} instead of {\"bar\": [\"bar\"]}\n return first_value\n return csv_list\n\n def _value(self):\n result = self._FIRST_VALUE.match(self._input_value[self._index:])\n if result is not None:\n consumed = self._consume_matched_regex(result)\n return consumed.replace('\\\\,', ',').rstrip()\n return ''\n\n def _explicit_list(self):\n # explicit-list = 
\"[\" [value *(\",' value)] \"]\"\n self._expect('[', consume_whitespace=True)\n values = []\n while self._current() != ']':\n val = self._explicit_values()\n values.append(val)\n self._consume_whitespace()\n if self._current() != ']':\n self._expect(',')\n self._consume_whitespace()\n self._expect(']')\n return values\n\n def _explicit_values(self):\n # values = csv-list / explicit-list / hash-literal\n if self._current() == '[':\n return self._explicit_list()\n elif self._current() == '{':\n return self._hash_literal()\n else:\n return self._first_value()\n\n def _hash_literal(self):\n self._expect('{', consume_whitespace=True)\n keyvals = {}\n while self._current() != '}':\n key = self._key()\n self._expect('=', consume_whitespace=True)\n v = self._explicit_values()\n self._consume_whitespace()\n if self._current() != '}':\n self._expect(',')\n self._consume_whitespace()\n keyvals[key] = v\n self._expect('}')\n return keyvals\n\n def _first_value(self):\n # first-value = value / single-quoted-val / double-quoted-val\n if self._current() == \"'\":\n return self._single_quoted_value()\n elif self._current() == '\"':\n return self._double_quoted_value()\n return self._value()\n\n def _single_quoted_value(self):\n # single-quoted-value = %x27 *(val-escaped-single) %x27\n # val-escaped-single = %x20-26 / %x28-7F / escaped-escape /\n # (escape single-quote)\n return self._consume_quoted(self._SINGLE_QUOTED, escaped_char=\"'\")\n\n def _consume_quoted(self, regex, escaped_char=None):\n value = self._must_consume_regex(regex)[1:-1]\n if escaped_char is not None:\n value = value.replace(\"\\\\%s\" % escaped_char, escaped_char)\n value = value.replace(\"\\\\\\\\\", \"\\\\\")\n return value\n\n def _double_quoted_value(self):\n return self._consume_quoted(self._DOUBLE_QUOTED, escaped_char='\"')\n\n def _second_value(self):\n if self._current() == \"'\":\n return self._single_quoted_value()\n elif self._current() == '\"':\n return self._double_quoted_value()\n else:\n consumed = self._must_consume_regex(self._SECOND_VALUE)\n return consumed.replace('\\\\,', ',').rstrip()\n\n def _expect(self, char, consume_whitespace=False):\n if consume_whitespace:\n self._consume_whitespace()\n if self._index >= len(self._input_value):\n raise ShorthandParseError(self._input_value, char,\n 'EOF', self._index)\n actual = self._input_value[self._index]\n if actual != char:\n raise ShorthandParseError(self._input_value, char,\n actual, self._index)\n self._index += 1\n if consume_whitespace:\n self._consume_whitespace()\n\n def _must_consume_regex(self, regex):\n result = regex.match(self._input_value[self._index:])\n if result is not None:\n return self._consume_matched_regex(result)\n raise ShorthandParseError(self._input_value, '<%s>' % regex.name,\n '<none>', self._index)\n\n def _consume_matched_regex(self, result):\n start, end = result.span()\n v = self._input_value[self._index+start:self._index+end]\n self._index += (end - start)\n return v\n\n def _current(self):\n # If the index is at the end of the input value,\n # then _EOF will be returned.\n if self._index < len(self._input_value):\n return self._input_value[self._index]\n return _EOF\n\n def _at_eof(self):\n return self._index >= len(self._input_value)\n\n def _backtrack_to(self, char):\n while self._index >= 0 and self._input_value[self._index] != char:\n self._index -= 1\n\n def _consume_whitespace(self):\n while self._current() != _EOF and self._current() in string.whitespace:\n self._index += 1\n\n\nclass ModelVisitor(object):\n def visit(self, 
params, model):\n self._visit({}, model, '', params)\n\n def _visit(self, parent, shape, name, value):\n method = getattr(self, '_visit_%s' % shape.type_name,\n self._visit_scalar)\n method(parent, shape, name, value)\n\n def _visit_structure(self, parent, shape, name, value):\n if not isinstance(value, dict):\n return\n for member_name, member_shape in shape.members.items():\n self._visit(value, member_shape, member_name,\n value.get(member_name))\n\n def _visit_list(self, parent, shape, name, value):\n if not isinstance(value, list):\n return\n for i, element in enumerate(value):\n self._visit(value, shape.member, i, element)\n\n def _visit_map(self, parent, shape, name, value):\n if not isinstance(value, dict):\n return\n value_shape = shape.value\n for k, v in value.items():\n self._visit(value, value_shape, k, v)\n\n def _visit_scalar(self, parent, shape, name, value):\n pass\n\n\nclass BackCompatVisitor(ModelVisitor):\n def _visit_list(self, parent, shape, name, value):\n if not isinstance(value, list):\n # Convert a -> [a] because they specified\n # \"foo=bar\", but \"bar\" should really be [\"bar\"].\n if value is not None:\n parent[name] = [value]\n else:\n return super(BackCompatVisitor, self)._visit_list(\n parent, shape, name, value)\n\n def _visit_scalar(self, parent, shape, name, value):\n if value is None:\n return\n type_name = shape.type_name\n if type_name in ['integer', 'long']:\n parent[name] = int(value)\n elif type_name in ['double', 'float']:\n parent[name] = float(value)\n elif type_name == 'boolean':\n # We want to make sure we only set a value\n # only if \"true\"/\"false\" is specified.\n if value.lower() == 'true':\n parent[name] = True\n elif value.lower() == 'false':\n parent[name] = False\n", "path": "awscli/shorthand.py" } ]
[ { "content": "# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"Module for parsing shorthand syntax.\n\nThis module parses any CLI options that use a \"shorthand\"\nsyntax::\n\n --foo A=b,C=d\n |------|\n |\n Shorthand syntax\n\n\nThis module provides two main classes to do this.\nFirst, there's a ``ShorthandParser`` class. This class works\non a purely syntactic level. It looks only at the string value\nprovided to it in order to figure out how the string should be parsed.\n\nHowever, because there was a pre-existing shorthand parser, we need\nto remain backwards compatible with the previous parser. One of the\nthings the previous parser did was use the associated JSON model to\ncontrol how the expression was parsed.\n\nIn order to accommodate this a post processing class is provided that\ntakes the parsed values from the ``ShorthandParser`` as well as the\ncorresponding JSON model for the CLI argument and makes any adjustments\nnecessary to maintain backwards compatibility. This is done in the\n``BackCompatVisitor`` class.\n\n\"\"\"\nimport re\nimport string\n\n\n_EOF = object()\n\n\nclass _NamedRegex(object):\n def __init__(self, name, regex_str):\n self.name = name\n self.regex = re.compile(regex_str, re.UNICODE)\n\n def match(self, value):\n return self.regex.match(value)\n\n\nclass ShorthandParseError(Exception):\n def __init__(self, value, expected, actual, index):\n self.value = value\n self.expected = expected\n self.actual = actual\n self.index = index\n msg = self._construct_msg()\n super(ShorthandParseError, self).__init__(msg)\n\n def _construct_msg(self):\n consumed, remaining, num_spaces = self.value, '', self.index\n if '\\n' in self.value[:self.index]:\n # If there's newlines in the consumed expression, we want\n # to make sure we're only counting the spaces\n # from the last newline:\n # foo=bar,\\n\n # bar==baz\n # ^\n last_newline = self.value[:self.index].rindex('\\n')\n num_spaces = self.index - last_newline - 1\n if '\\n' in self.value[self.index:]:\n # If there's newline in the remaining, divide value\n # into consumed and remainig\n # foo==bar,\\n\n # ^\n # bar=baz\n next_newline = self.index + self.value[self.index:].index('\\n')\n consumed = self.value[:next_newline]\n remaining = self.value[next_newline:]\n msg = (\n \"Expected: '%s', received: '%s' for input:\\n\"\n \"%s\\n\"\n \"%s\"\n \"%s\"\n ) % (self.expected, self.actual, consumed,\n ' ' * num_spaces + '^', remaining)\n return msg\n\n\nclass ShorthandParser(object):\n \"\"\"Parses shorthand syntax in the CLI.\n\n Note that this parser does not rely on any JSON models to control\n how to parse the shorthand syntax.\n\n \"\"\"\n\n _SINGLE_QUOTED = _NamedRegex('singled quoted', r'\\'(?:\\\\\\\\|\\\\\\'|[^\\'])*\\'')\n _DOUBLE_QUOTED = _NamedRegex('double quoted', r'\"(?:\\\\\\\\|\\\\\"|[^\"])*\"')\n _START_WORD = u'\\!\\#-&\\(-\\+\\--\\<\\>-Z\\\\\\\\-z\\u007c-\\uffff'\n _FIRST_FOLLOW_CHARS = u'\\s\\!\\#-&\\(-\\+\\--\\\\\\\\\\^-\\|~-\\uffff'\n _SECOND_FOLLOW_CHARS = 
u'\\s\\!\\#-&\\(-\\+\\--\\<\\>-\\uffff'\n _ESCAPED_COMMA = '(\\\\\\\\,)'\n _FIRST_VALUE = _NamedRegex(\n 'first',\n u'({escaped_comma}|[{start_word}])'\n u'({escaped_comma}|[{follow_chars}])*'.format(\n escaped_comma=_ESCAPED_COMMA,\n start_word=_START_WORD,\n follow_chars=_FIRST_FOLLOW_CHARS,\n ))\n _SECOND_VALUE = _NamedRegex(\n 'second',\n u'({escaped_comma}|[{start_word}])'\n u'({escaped_comma}|[{follow_chars}])*'.format(\n escaped_comma=_ESCAPED_COMMA,\n start_word=_START_WORD,\n follow_chars=_SECOND_FOLLOW_CHARS,\n ))\n\n def __init__(self):\n self._tokens = []\n\n def parse(self, value):\n \"\"\"Parse shorthand syntax.\n\n For example::\n\n parser = ShorthandParser()\n parser.parse('a=b') # {'a': 'b'}\n parser.parse('a=b,c') # {'a': ['b', 'c']}\n\n :tpye value: str\n :param value: Any value that needs to be parsed.\n\n :return: Parsed value, which will be a dictionary.\n \"\"\"\n self._input_value = value\n self._index = 0\n return self._parameter()\n\n def _parameter(self):\n # parameter = keyval *(\",\" keyval)\n params = {}\n params.update(self._keyval())\n while self._index < len(self._input_value):\n self._expect(',', consume_whitespace=True)\n params.update(self._keyval())\n return params\n\n def _keyval(self):\n # keyval = key \"=\" [values]\n key = self._key()\n self._expect('=', consume_whitespace=True)\n values = self._values()\n return {key: values}\n\n def _key(self):\n # key = 1*(alpha / %x30-39 / %x5f / %x2e / %x23) ; [a-zA-Z0-9\\-_.#/]\n valid_chars = string.ascii_letters + string.digits + '-_.#/:'\n start = self._index\n while not self._at_eof():\n if self._current() not in valid_chars:\n break\n self._index += 1\n return self._input_value[start:self._index]\n\n def _values(self):\n # values = csv-list / explicit-list / hash-literal\n if self._at_eof():\n return ''\n elif self._current() == '[':\n return self._explicit_list()\n elif self._current() == '{':\n return self._hash_literal()\n else:\n return self._csv_value()\n\n def _csv_value(self):\n # Supports either:\n # foo=bar -> 'bar'\n # ^\n # foo=bar,baz -> ['bar', 'baz']\n # ^\n first_value = self._first_value()\n self._consume_whitespace()\n if self._at_eof() or self._input_value[self._index] != ',':\n return first_value\n self._expect(',', consume_whitespace=True)\n csv_list = [first_value]\n # Try to parse remaining list values.\n # It's possible we don't parse anything:\n # a=b,c=d\n # ^-here\n # In the case above, we'll hit the ShorthandParser,\n # backtrack to the comma, and return a single scalar\n # value 'b'.\n while True:\n try:\n current = self._second_value()\n self._consume_whitespace()\n if self._at_eof():\n csv_list.append(current)\n break\n self._expect(',', consume_whitespace=True)\n csv_list.append(current)\n except ShorthandParseError:\n # Backtrack to the previous comma.\n # This can happen when we reach this case:\n # foo=a,b,c=d,e=f\n # ^-start\n # foo=a,b,c=d,e=f\n # ^-error, \"expected ',' received '='\n # foo=a,b,c=d,e=f\n # ^-backtrack to here.\n if self._at_eof():\n raise\n self._backtrack_to(',')\n break\n if len(csv_list) == 1:\n # Then this was a foo=bar case, so we expect\n # this to parse to a scalar value 'bar', i.e\n # {\"foo\": \"bar\"} instead of {\"bar\": [\"bar\"]}\n return first_value\n return csv_list\n\n def _value(self):\n result = self._FIRST_VALUE.match(self._input_value[self._index:])\n if result is not None:\n consumed = self._consume_matched_regex(result)\n return consumed.replace('\\\\,', ',').rstrip()\n return ''\n\n def _explicit_list(self):\n # explicit-list = 
\"[\" [value *(\",' value)] \"]\"\n self._expect('[', consume_whitespace=True)\n values = []\n while self._current() != ']':\n val = self._explicit_values()\n values.append(val)\n self._consume_whitespace()\n if self._current() != ']':\n self._expect(',')\n self._consume_whitespace()\n self._expect(']')\n return values\n\n def _explicit_values(self):\n # values = csv-list / explicit-list / hash-literal\n if self._current() == '[':\n return self._explicit_list()\n elif self._current() == '{':\n return self._hash_literal()\n else:\n return self._first_value()\n\n def _hash_literal(self):\n self._expect('{', consume_whitespace=True)\n keyvals = {}\n while self._current() != '}':\n key = self._key()\n self._expect('=', consume_whitespace=True)\n v = self._explicit_values()\n self._consume_whitespace()\n if self._current() != '}':\n self._expect(',')\n self._consume_whitespace()\n keyvals[key] = v\n self._expect('}')\n return keyvals\n\n def _first_value(self):\n # first-value = value / single-quoted-val / double-quoted-val\n if self._current() == \"'\":\n return self._single_quoted_value()\n elif self._current() == '\"':\n return self._double_quoted_value()\n return self._value()\n\n def _single_quoted_value(self):\n # single-quoted-value = %x27 *(val-escaped-single) %x27\n # val-escaped-single = %x20-26 / %x28-7F / escaped-escape /\n # (escape single-quote)\n return self._consume_quoted(self._SINGLE_QUOTED, escaped_char=\"'\")\n\n def _consume_quoted(self, regex, escaped_char=None):\n value = self._must_consume_regex(regex)[1:-1]\n if escaped_char is not None:\n value = value.replace(\"\\\\%s\" % escaped_char, escaped_char)\n value = value.replace(\"\\\\\\\\\", \"\\\\\")\n return value\n\n def _double_quoted_value(self):\n return self._consume_quoted(self._DOUBLE_QUOTED, escaped_char='\"')\n\n def _second_value(self):\n if self._current() == \"'\":\n return self._single_quoted_value()\n elif self._current() == '\"':\n return self._double_quoted_value()\n else:\n consumed = self._must_consume_regex(self._SECOND_VALUE)\n return consumed.replace('\\\\,', ',').rstrip()\n\n def _expect(self, char, consume_whitespace=False):\n if consume_whitespace:\n self._consume_whitespace()\n if self._index >= len(self._input_value):\n raise ShorthandParseError(self._input_value, char,\n 'EOF', self._index)\n actual = self._input_value[self._index]\n if actual != char:\n raise ShorthandParseError(self._input_value, char,\n actual, self._index)\n self._index += 1\n if consume_whitespace:\n self._consume_whitespace()\n\n def _must_consume_regex(self, regex):\n result = regex.match(self._input_value[self._index:])\n if result is not None:\n return self._consume_matched_regex(result)\n raise ShorthandParseError(self._input_value, '<%s>' % regex.name,\n '<none>', self._index)\n\n def _consume_matched_regex(self, result):\n start, end = result.span()\n v = self._input_value[self._index+start:self._index+end]\n self._index += (end - start)\n return v\n\n def _current(self):\n # If the index is at the end of the input value,\n # then _EOF will be returned.\n if self._index < len(self._input_value):\n return self._input_value[self._index]\n return _EOF\n\n def _at_eof(self):\n return self._index >= len(self._input_value)\n\n def _backtrack_to(self, char):\n while self._index >= 0 and self._input_value[self._index] != char:\n self._index -= 1\n\n def _consume_whitespace(self):\n while self._current() != _EOF and self._current() in string.whitespace:\n self._index += 1\n\n\nclass ModelVisitor(object):\n def visit(self, 
params, model):\n self._visit({}, model, '', params)\n\n def _visit(self, parent, shape, name, value):\n method = getattr(self, '_visit_%s' % shape.type_name,\n self._visit_scalar)\n method(parent, shape, name, value)\n\n def _visit_structure(self, parent, shape, name, value):\n if not isinstance(value, dict):\n return\n for member_name, member_shape in shape.members.items():\n self._visit(value, member_shape, member_name,\n value.get(member_name))\n\n def _visit_list(self, parent, shape, name, value):\n if not isinstance(value, list):\n return\n for i, element in enumerate(value):\n self._visit(value, shape.member, i, element)\n\n def _visit_map(self, parent, shape, name, value):\n if not isinstance(value, dict):\n return\n value_shape = shape.value\n for k, v in value.items():\n self._visit(value, value_shape, k, v)\n\n def _visit_scalar(self, parent, shape, name, value):\n pass\n\n\nclass BackCompatVisitor(ModelVisitor):\n def _visit_list(self, parent, shape, name, value):\n if not isinstance(value, list):\n # Convert a -> [a] because they specified\n # \"foo=bar\", but \"bar\" should really be [\"bar\"].\n if value is not None:\n parent[name] = [value]\n else:\n return super(BackCompatVisitor, self)._visit_list(\n parent, shape, name, value)\n\n def _visit_scalar(self, parent, shape, name, value):\n if value is None:\n return\n type_name = shape.type_name\n if type_name in ['integer', 'long']:\n parent[name] = int(value)\n elif type_name in ['double', 'float']:\n parent[name] = float(value)\n elif type_name == 'boolean':\n # We want to make sure we only set a value\n # only if \"true\"/\"false\" is specified.\n if value.lower() == 'true':\n parent[name] = True\n elif value.lower() == 'false':\n parent[name] = False\n", "path": "awscli/shorthand.py" } ]
diff --git a/.changes/next-release/enhancement-Shorthand-33683.json b/.changes/next-release/enhancement-Shorthand-33683.json
new file mode 100644
index 000000000000..c9cb10e8e3ab
--- /dev/null
+++ b/.changes/next-release/enhancement-Shorthand-33683.json
@@ -0,0 +1,5 @@
+{
+  "category": "Shorthand",
+  "type": "enhancement",
+  "description": "Support colon char in shorthand syntax key names (#4348)"
+}
diff --git a/awscli/shorthand.py b/awscli/shorthand.py
index cb8dd3f71ed0..0bd1d98e9404 100644
--- a/awscli/shorthand.py
+++ b/awscli/shorthand.py
@@ -163,7 +163,7 @@ def _keyval(self):
 
     def _key(self):
         # key = 1*(alpha / %x30-39 / %x5f / %x2e / %x23) ; [a-zA-Z0-9\-_.#/]
-        valid_chars = string.ascii_letters + string.digits + '-_.#/'
+        valid_chars = string.ascii_letters + string.digits + '-_.#/:'
         start = self._index
         while not self._at_eof():
             if self._current() not in valid_chars:
diff --git a/tests/unit/test_shorthand.py b/tests/unit/test_shorthand.py
index 209946ee8cf3..e9e97cd3cb3e 100644
--- a/tests/unit/test_shorthand.py
+++ b/tests/unit/test_shorthand.py
@@ -62,6 +62,10 @@ def test_parse():
     # Forward slashes are allowed in keys.
     yield (_can_parse, 'some/thing=value', {'some/thing': 'value'})
 
+    # Colon chars are allowed in keys:
+    yield (_can_parse, 'aws:service:region:124:foo/bar=baz',
+           {'aws:service:region:124:foo/bar': 'baz'})
+
     # Explicit lists.
     yield (_can_parse, 'foo=[]', {'foo': []})
     yield (_can_parse, 'foo=[a]', {'foo': ['a']})
codecommit: tag-resource does not permit `:` character in tag key

The following command incorrectly returns an error even though colon characters (`:`) in tag keys are legal:

```
aws codecommit tag-resource --resource-arn <arn> --tags namespace:KeyName=value
```

Expected behavior: a new tag is applied to the resource with the name `namespace:KeyName` and value `value`.

Actual behavior:

```
Error parsing parameter '--tags': Expected: '=', received: ':' for input:
namespace:KeyName=value
```
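
A minimal sketch of the behaviour the one-character patch above enables, assuming the patched `awscli` package is importable; the sample input mirrors the unit test added in the diff:

```python
from awscli.shorthand import ShorthandParser

parser = ShorthandParser()

# With the patched _key() (':' added to valid_chars), a colon-separated key
# name is consumed as a single key, matching the unit test added in the diff:
print(parser.parse('aws:service:region:124:foo/bar=baz'))
# -> {'aws:service:region:124:foo/bar': 'baz'}

# Before the patch, _key() stopped at the first ':' and _keyval() then
# expected '=', so the same call raised ShorthandParseError with the
# "Expected: '=', received: ':'" message quoted in the issue above.
```

Because the change is made in `_key()` itself, every option that uses shorthand syntax gains colon support in key names, not just `codecommit tag-resource`.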
localstack__localstack-3366
[ { "content": "import os\nimport re\nimport sys\nimport glob\nimport json\nimport time\nimport logging\nimport threading\nimport subprocess\nimport six\nimport base64\nfrom multiprocessing import Process, Queue\ntry:\n from shlex import quote as cmd_quote\nexcept ImportError:\n from pipes import quote as cmd_quote # for Python 2.7\nfrom localstack import config\nfrom localstack.utils import bootstrap\nfrom localstack.utils.aws import aws_stack\nfrom localstack.utils.common import (\n CaptureOutput, FuncThread, TMP_FILES, short_uid, save_file, rm_rf, in_docker,\n to_str, to_bytes, run, cp_r, json_safe, get_free_tcp_port)\nfrom localstack.services.install import INSTALL_PATH_LOCALSTACK_FAT_JAR\nfrom localstack.utils.aws.dead_letter_queue import lambda_error_to_dead_letter_queue, sqs_error_to_dead_letter_queue\nfrom localstack.utils.cloudwatch.cloudwatch_util import store_cloudwatch_logs, cloudwatched\n\n# constants\nLAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR\nLAMBDA_EXECUTOR_CLASS = 'cloud.localstack.LambdaExecutor'\nEVENT_FILE_PATTERN = '%s/lambda.event.*.json' % config.TMP_FOLDER\n\nLAMBDA_RUNTIME_PYTHON27 = 'python2.7'\nLAMBDA_RUNTIME_PYTHON36 = 'python3.6'\nLAMBDA_RUNTIME_PYTHON37 = 'python3.7'\nLAMBDA_RUNTIME_PYTHON38 = 'python3.8'\nLAMBDA_RUNTIME_NODEJS = 'nodejs'\nLAMBDA_RUNTIME_NODEJS43 = 'nodejs4.3'\nLAMBDA_RUNTIME_NODEJS610 = 'nodejs6.10'\nLAMBDA_RUNTIME_NODEJS810 = 'nodejs8.10'\nLAMBDA_RUNTIME_NODEJS10X = 'nodejs10.x'\nLAMBDA_RUNTIME_NODEJS12X = 'nodejs12.x'\nLAMBDA_RUNTIME_JAVA8 = 'java8'\nLAMBDA_RUNTIME_JAVA11 = 'java11'\nLAMBDA_RUNTIME_DOTNETCORE2 = 'dotnetcore2.0'\nLAMBDA_RUNTIME_DOTNETCORE21 = 'dotnetcore2.1'\nLAMBDA_RUNTIME_DOTNETCORE31 = 'dotnetcore3.1'\nLAMBDA_RUNTIME_GOLANG = 'go1.x'\nLAMBDA_RUNTIME_RUBY = 'ruby'\nLAMBDA_RUNTIME_RUBY25 = 'ruby2.5'\nLAMBDA_RUNTIME_PROVIDED = 'provided'\n\nLAMBDA_SERVER_UNIQUE_PORTS = 500\nLAMBDA_SERVER_PORT_OFFSET = 5000\n\nLAMBDA_API_UNIQUE_PORTS = 500\nLAMBDA_API_PORT_OFFSET = 9000\n\n# logger\nLOG = logging.getLogger(__name__)\n\n# maximum time a pre-allocated container can sit idle before getting killed\nMAX_CONTAINER_IDLE_TIME_MS = 600 * 1000\n\n# SQS event source name\nEVENT_SOURCE_SQS = 'aws:sqs'\n\n# IP address of main Docker container (lazily initialized)\nDOCKER_MAIN_CONTAINER_IP = None\n\n# whether to use our custom Java executor, or the default from lambci\n# TODO: deprecated, should be removed in the future\nUSE_CUSTOM_JAVA_EXECUTOR = False\n\n\ndef get_from_event(event, key):\n try:\n return event['Records'][0][key]\n except KeyError:\n return None\n\n\ndef is_java_lambda(lambda_details):\n runtime = getattr(lambda_details, 'runtime', lambda_details)\n return runtime in [LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_JAVA11]\n\n\ndef is_nodejs_runtime(lambda_details):\n runtime = getattr(lambda_details, 'runtime', lambda_details)\n return runtime.startswith('nodejs')\n\n\ndef _store_logs(func_details, log_output, invocation_time=None, container_id=None):\n log_group_name = '/aws/lambda/%s' % func_details.name()\n container_id = container_id or short_uid()\n invocation_time = invocation_time or int(time.time() * 1000)\n invocation_time_secs = int(invocation_time / 1000)\n time_str = time.strftime('%Y/%m/%d', time.gmtime(invocation_time_secs))\n log_stream_name = '%s/[LATEST]%s' % (time_str, container_id)\n return store_cloudwatch_logs(log_group_name, log_stream_name, log_output, invocation_time)\n\n\ndef get_main_endpoint_from_container():\n global DOCKER_MAIN_CONTAINER_IP\n if DOCKER_MAIN_CONTAINER_IP is None:\n 
DOCKER_MAIN_CONTAINER_IP = False\n try:\n if in_docker():\n DOCKER_MAIN_CONTAINER_IP = bootstrap.get_main_container_ip()\n LOG.info('Determined main container target IP: %s' % DOCKER_MAIN_CONTAINER_IP)\n except Exception as e:\n container_name = bootstrap.get_main_container_name()\n LOG.info('Unable to get IP address of main Docker container \"%s\": %s' %\n (container_name, e))\n # return main container IP, or fall back to Docker host (bridge IP, or host DNS address)\n return DOCKER_MAIN_CONTAINER_IP or config.DOCKER_HOST_FROM_CONTAINER\n\n\nclass LambdaExecutor(object):\n \"\"\" Base class for Lambda executors. Subclasses must overwrite the _execute method \"\"\"\n def __init__(self):\n # keeps track of each function arn and the last time it was invoked\n self.function_invoke_times = {}\n\n def _prepare_environment(self, func_details):\n # setup environment pre-defined variables for docker environment\n result = func_details.envvars.copy()\n\n # injecting aws credentials into docker environment if not provided\n aws_stack.inject_test_credentials_into_env(result)\n\n return result\n\n def execute(self, func_arn, func_details, event, context=None, version=None,\n asynchronous=False, callback=None):\n def do_execute(*args):\n\n @cloudwatched('lambda')\n def _run(func_arn=None):\n # set the invocation time in milliseconds\n invocation_time = int(time.time() * 1000)\n # start the execution\n raised_error = None\n result = None\n dlq_sent = None\n try:\n result = self._execute(func_arn, func_details, event, context, version)\n except Exception as e:\n raised_error = e\n if asynchronous:\n if get_from_event(event, 'eventSource') == EVENT_SOURCE_SQS:\n sqs_queue_arn = get_from_event(event, 'eventSourceARN')\n if sqs_queue_arn:\n # event source is SQS, send event back to dead letter queue\n dlq_sent = sqs_error_to_dead_letter_queue(sqs_queue_arn, event, e)\n else:\n # event source is not SQS, send back to lambda dead letter queue\n lambda_error_to_dead_letter_queue(func_details, event, e)\n raise e\n finally:\n self.function_invoke_times[func_arn] = invocation_time\n callback and callback(result, func_arn, event, error=raised_error, dlq_sent=dlq_sent)\n # return final result\n return result\n\n return _run(func_arn=func_arn)\n\n # Inform users about asynchronous mode of the lambda execution.\n if asynchronous:\n LOG.debug('Lambda executed in Event (asynchronous) mode, no response will be returned to caller')\n FuncThread(do_execute).start()\n return None, 'Lambda executed asynchronously.'\n\n return do_execute()\n\n def _execute(self, func_arn, func_details, event, context=None, version=None):\n \"\"\" This method must be overwritten by subclasses. \"\"\"\n raise Exception('Not implemented.')\n\n def startup(self):\n pass\n\n def cleanup(self, arn=None):\n pass\n\n def run_lambda_executor(self, cmd, event=None, func_details=None, env_vars={}):\n kwargs = {'stdin': True, 'inherit_env': True, 'asynchronous': True}\n\n is_provided = func_details.runtime.startswith(LAMBDA_RUNTIME_PROVIDED)\n if func_details and is_provided and env_vars.get('DOCKER_LAMBDA_USE_STDIN') == '1':\n # Note: certain \"provided\" runtimes (e.g., Rust programs) can block when we pass in\n # the event payload via stdin, hence we rewrite the command to \"echo ... 
| ...\" below\n env_vars = {\n 'PATH': env_vars.get('PATH') or os.environ.get('PATH', ''),\n 'AWS_LAMBDA_EVENT_BODY': to_str(event),\n 'DOCKER_LAMBDA_USE_STDIN': '1'\n }\n event = None\n cmd = re.sub(r'(.*)(%s\\s+(run|start))' % self._docker_cmd(), r'\\1echo $AWS_LAMBDA_EVENT_BODY | \\2', cmd)\n\n process = run(cmd, env_vars=env_vars, stderr=subprocess.PIPE, outfile=subprocess.PIPE, **kwargs)\n result, log_output = process.communicate(input=event)\n try:\n result = to_str(result).strip()\n except Exception:\n pass\n log_output = to_str(log_output).strip()\n return_code = process.returncode\n # Note: The user's code may have been logging to stderr, in which case the logs\n # will be part of the \"result\" variable here. Hence, make sure that we extract\n # only the *last* line of \"result\" and consider anything above that as log output.\n if isinstance(result, six.string_types) and '\\n' in result:\n additional_logs, _, result = result.rpartition('\\n')\n log_output += '\\n%s' % additional_logs\n\n log_formatted = log_output.strip().replace('\\n', '\\n> ')\n func_arn = func_details and func_details.arn()\n LOG.debug('Lambda %s result / log output:\\n%s\\n> %s' % (func_arn, result.strip(), log_formatted))\n\n # store log output - TODO get live logs from `process` above?\n _store_logs(func_details, log_output)\n\n if return_code != 0:\n raise Exception('Lambda process returned error status code: %s. Result: %s. Output:\\n%s' %\n (return_code, result, log_output))\n\n return result\n\n\nclass ContainerInfo:\n \"\"\" Contains basic information about a docker container. \"\"\"\n def __init__(self, name, entry_point):\n self.name = name\n self.entry_point = entry_point\n\n\nclass LambdaExecutorContainers(LambdaExecutor):\n \"\"\" Abstract executor class for executing Lambda functions in Docker containers \"\"\"\n\n def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):\n raise Exception('Not implemented')\n\n def _docker_cmd(self):\n \"\"\" Return the string to be used for running Docker commands. \"\"\"\n return config.DOCKER_CMD\n\n def prepare_event(self, environment, event_body):\n \"\"\" Return the event as a stdin string. 
\"\"\"\n # amend the environment variables for execution\n environment['AWS_LAMBDA_EVENT_BODY'] = event_body\n return None\n\n def _execute(self, func_arn, func_details, event, context=None, version=None):\n lambda_cwd = func_details.cwd\n runtime = func_details.runtime\n handler = func_details.handler\n environment = self._prepare_environment(func_details)\n\n # configure USE_SSL in environment\n if config.USE_SSL:\n environment['USE_SSL'] = '1'\n\n # prepare event body\n if not event:\n LOG.warning('Empty event body specified for invocation of Lambda \"%s\"' % func_arn)\n event = {}\n event_body = json.dumps(json_safe(event))\n stdin = self.prepare_event(environment, event_body)\n\n main_endpoint = get_main_endpoint_from_container()\n\n environment['LOCALSTACK_HOSTNAME'] = main_endpoint\n environment['EDGE_PORT'] = str(config.EDGE_PORT)\n environment['_HANDLER'] = handler\n if os.environ.get('HTTP_PROXY'):\n environment['HTTP_PROXY'] = os.environ['HTTP_PROXY']\n if func_details.timeout:\n environment['AWS_LAMBDA_FUNCTION_TIMEOUT'] = str(func_details.timeout)\n if context:\n environment['AWS_LAMBDA_FUNCTION_NAME'] = context.function_name\n environment['AWS_LAMBDA_FUNCTION_VERSION'] = context.function_version\n environment['AWS_LAMBDA_FUNCTION_INVOKED_ARN'] = context.invoked_function_arn\n environment['AWS_LAMBDA_COGNITO_IDENTITY'] = json.dumps(context.cognito_identity or {})\n if context.client_context is not None:\n environment['AWS_LAMBDA_CLIENT_CONTEXT'] = json.dumps(to_str(\n base64.b64decode(to_bytes(context.client_context))))\n\n # custom command to execute in the container\n command = ''\n events_file = ''\n\n if USE_CUSTOM_JAVA_EXECUTOR and is_java_lambda(runtime):\n # if running a Java Lambda with our custom executor, set up classpath arguments\n java_opts = Util.get_java_opts()\n stdin = None\n # copy executor jar into temp directory\n target_file = os.path.join(lambda_cwd, os.path.basename(LAMBDA_EXECUTOR_JAR))\n if not os.path.exists(target_file):\n cp_r(LAMBDA_EXECUTOR_JAR, target_file)\n # TODO cleanup once we have custom Java Docker image\n taskdir = '/var/task'\n events_file = '_lambda.events.%s.json' % short_uid()\n save_file(os.path.join(lambda_cwd, events_file), event_body)\n classpath = Util.get_java_classpath(target_file)\n command = (\"bash -c 'cd %s; java %s -cp \\\"%s\\\" \\\"%s\\\" \\\"%s\\\" \\\"%s\\\"'\" %\n (taskdir, java_opts, classpath, LAMBDA_EXECUTOR_CLASS, handler, events_file))\n\n # accept any self-signed certificates for outgoing calls from the Lambda\n if is_nodejs_runtime(runtime):\n environment['NODE_TLS_REJECT_UNAUTHORIZED'] = '0'\n\n # determine the command to be executed (implemented by subclasses)\n cmd = self.prepare_execution(func_arn, environment, runtime, command, handler, lambda_cwd)\n\n # lambci writes the Lambda result to stdout and logs to stderr, fetch it from there!\n LOG.info('Running lambda cmd: %s' % cmd)\n result = self.run_lambda_executor(cmd, stdin, env_vars=environment, func_details=func_details)\n\n # clean up events file\n events_file and os.path.exists(events_file) and rm_rf(events_file)\n\n return result\n\n\nclass LambdaExecutorReuseContainers(LambdaExecutorContainers):\n \"\"\" Executor class for executing Lambda functions in re-usable Docker containers \"\"\"\n def __init__(self):\n super(LambdaExecutorReuseContainers, self).__init__()\n # locking thread for creation/destruction of docker containers.\n self.docker_container_lock = threading.RLock()\n\n # On each invocation we try to construct a port unlikely to conflict\n 
# with a previously invoked lambda function. This is a problem with at\n # least the lambci/lambda:go1.x container, which execs a go program that\n # attempts to bind to the same default port.\n self.next_port = 0\n self.max_port = LAMBDA_SERVER_UNIQUE_PORTS\n self.port_offset = LAMBDA_SERVER_PORT_OFFSET\n\n def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):\n # check whether the Lambda has been invoked before\n has_been_invoked_before = func_arn in self.function_invoke_times\n\n # Choose a port for this invocation\n with self.docker_container_lock:\n env_vars['_LAMBDA_SERVER_PORT'] = str(self.next_port + self.port_offset)\n self.next_port = (self.next_port + 1) % self.max_port\n\n # create/verify the docker container is running.\n LOG.debug('Priming docker container with runtime \"%s\" and arn \"%s\".', runtime, func_arn)\n container_info = self.prime_docker_container(runtime, func_arn, env_vars.items(), lambda_cwd)\n\n # Note: currently \"docker exec\" does not support --env-file, i.e., environment variables can only be\n # passed directly on the command line, using \"-e\" below. TODO: Update this code once --env-file is\n # available for docker exec, to better support very large Lambda events (very long environment values)\n exec_env_vars = ' '.join(['-e {}=\"${}\"'.format(k, k) for (k, v) in env_vars.items()])\n\n if not command:\n command = '%s %s' % (container_info.entry_point, handler)\n\n # determine files to be copied into the container\n copy_command = ''\n docker_cmd = self._docker_cmd()\n if not has_been_invoked_before and config.LAMBDA_REMOTE_DOCKER:\n # if this is the first invocation: copy the entire folder into the container\n copy_command = '%s cp \"%s/.\" \"%s:/var/task\";' % (docker_cmd, lambda_cwd, container_info.name)\n\n cmd = (\n '%s'\n ' %s exec'\n ' %s' # env variables\n ' %s' # container name\n ' %s' # run cmd\n ) % (copy_command, docker_cmd, exec_env_vars, container_info.name, command)\n LOG.debug('Command for docker-reuse Lambda executor: %s' % cmd)\n\n return cmd\n\n def startup(self):\n self.cleanup()\n # start a process to remove idle containers\n if config.LAMBDA_REMOVE_CONTAINERS:\n self.start_idle_container_destroyer_interval()\n\n def cleanup(self, arn=None):\n if arn:\n self.function_invoke_times.pop(arn, None)\n return self.destroy_docker_container(arn)\n self.function_invoke_times = {}\n return self.destroy_existing_docker_containers()\n\n def prime_docker_container(self, runtime, func_arn, env_vars, lambda_cwd):\n \"\"\"\n Prepares a persistent docker container for a specific function.\n :param runtime: Lamda runtime environment. 
python2.7, nodejs6.10, etc.\n :param func_arn: The ARN of the lambda function.\n :param env_vars: The environment variables for the lambda.\n :param lambda_cwd: The local directory containing the code for the lambda function.\n :return: ContainerInfo class containing the container name and default entry point.\n \"\"\"\n with self.docker_container_lock:\n # Get the container name and id.\n container_name = self.get_container_name(func_arn)\n docker_cmd = self._docker_cmd()\n\n status = self.get_docker_container_status(func_arn)\n LOG.debug('Priming docker container (status \"%s\"): %s' % (status, container_name))\n\n docker_image = Util.docker_image_for_runtime(runtime)\n rm_flag = Util.get_docker_remove_flag()\n\n # Container is not running or doesn't exist.\n if status < 1:\n # Make sure the container does not exist in any form/state.\n self.destroy_docker_container(func_arn)\n\n env_vars_str = ' '.join(['-e {}={}'.format(k, cmd_quote(v)) for (k, v) in env_vars])\n\n network = config.LAMBDA_DOCKER_NETWORK\n network_str = '--network=\"%s\"' % network if network else ''\n\n dns = config.LAMBDA_DOCKER_DNS\n dns_str = '--dns=\"%s\"' % dns if dns else ''\n\n mount_volume = not config.LAMBDA_REMOTE_DOCKER\n lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)\n if (':' in lambda_cwd and '\\\\' in lambda_cwd):\n lambda_cwd_on_host = Util.format_windows_path(lambda_cwd_on_host)\n mount_volume_str = '-v \"%s\":/var/task' % lambda_cwd_on_host if mount_volume else ''\n\n # Create and start the container\n LOG.debug('Creating container: %s' % container_name)\n cmd = (\n '%s create'\n ' %s' # --rm flag\n ' --name \"%s\"'\n ' --entrypoint /bin/bash' # Load bash when it starts.\n ' %s'\n ' --interactive' # Keeps the container running bash.\n ' -e AWS_LAMBDA_EVENT_BODY=\"$AWS_LAMBDA_EVENT_BODY\"'\n ' -e HOSTNAME=\"$HOSTNAME\"'\n ' -e LOCALSTACK_HOSTNAME=\"$LOCALSTACK_HOSTNAME\"'\n ' -e EDGE_PORT=\"$EDGE_PORT\"'\n ' %s' # env_vars\n ' %s' # network\n ' %s' # dns\n ' %s'\n ) % (docker_cmd, rm_flag, container_name, mount_volume_str,\n env_vars_str, network_str, dns_str, docker_image)\n LOG.debug(cmd)\n run(cmd)\n\n if not mount_volume:\n LOG.debug('Copying files to container \"%s\" from \"%s\".' 
% (container_name, lambda_cwd))\n cmd = (\n '%s cp'\n ' \"%s/.\" \"%s:/var/task\"'\n ) % (docker_cmd, lambda_cwd, container_name)\n LOG.debug(cmd)\n run(cmd)\n\n LOG.debug('Starting container: %s' % container_name)\n cmd = '%s start %s' % (docker_cmd, container_name)\n LOG.debug(cmd)\n run(cmd)\n # give the container some time to start up\n time.sleep(1)\n\n # Get the entry point for the image.\n LOG.debug('Getting the entrypoint for image: %s' % (docker_image))\n cmd = (\n '%s image inspect'\n ' --format=\"{{ .ContainerConfig.Entrypoint }}\"'\n ' %s'\n ) % (docker_cmd, docker_image)\n\n LOG.debug(cmd)\n run_result = run(cmd)\n\n entry_point = run_result.strip('[]\\n\\r ')\n\n container_network = self.get_docker_container_network(func_arn)\n\n LOG.debug('Using entrypoint \"%s\" for container \"%s\" on network \"%s\".'\n % (entry_point, container_name, container_network))\n\n return ContainerInfo(container_name, entry_point)\n\n def destroy_docker_container(self, func_arn):\n \"\"\"\n Stops and/or removes a docker container for a specific lambda function ARN.\n :param func_arn: The ARN of the lambda function.\n :return: None\n \"\"\"\n with self.docker_container_lock:\n status = self.get_docker_container_status(func_arn)\n docker_cmd = self._docker_cmd()\n\n # Get the container name and id.\n container_name = self.get_container_name(func_arn)\n\n if status == 1:\n LOG.debug('Stopping container: %s' % container_name)\n cmd = '%s stop -t0 %s' % (docker_cmd, container_name)\n\n LOG.debug(cmd)\n run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)\n\n status = self.get_docker_container_status(func_arn)\n\n if status == -1:\n LOG.debug('Removing container: %s' % container_name)\n cmd = '%s rm %s' % (docker_cmd, container_name)\n\n LOG.debug(cmd)\n run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)\n\n def get_all_container_names(self):\n \"\"\"\n Returns a list of container names for lambda containers.\n :return: A String[] localstack docker container names for each function.\n \"\"\"\n with self.docker_container_lock:\n LOG.debug('Getting all lambda containers names.')\n cmd = '%s ps -a --filter=\"name=localstack_lambda_*\" --format \"{{.Names}}\"' % self._docker_cmd()\n LOG.debug(cmd)\n cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE).strip()\n\n if len(cmd_result) > 0:\n container_names = cmd_result.split('\\n')\n else:\n container_names = []\n\n return container_names\n\n def destroy_existing_docker_containers(self):\n \"\"\"\n Stops and/or removes all lambda docker containers for localstack.\n :return: None\n \"\"\"\n with self.docker_container_lock:\n container_names = self.get_all_container_names()\n\n LOG.debug('Removing %d containers.' 
% len(container_names))\n for container_name in container_names:\n cmd = '%s rm -f %s' % (self._docker_cmd(), container_name)\n LOG.debug(cmd)\n run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)\n\n def get_docker_container_status(self, func_arn):\n \"\"\"\n Determine the status of a docker container.\n :param func_arn: The ARN of the lambda function.\n :return: 1 If the container is running,\n -1 if the container exists but is not running\n 0 if the container does not exist.\n \"\"\"\n with self.docker_container_lock:\n # Get the container name and id.\n container_name = self.get_container_name(func_arn)\n\n # Check if the container is already running\n # Note: filtering by *exact* name using regex filter '^...$' seems unstable on some\n # systems. Therefore, we use a combination of filter and grep to get the results.\n cmd = (\"docker ps -a --filter name='%s' \"\n '--format \"{{ .Status }} - {{ .Names }}\" '\n '| grep -w \"%s\" | cat') % (container_name, container_name)\n LOG.debug('Getting status for container \"%s\": %s' % (container_name, cmd))\n cmd_result = run(cmd)\n\n # If the container doesn't exist. Create and start it.\n container_status = cmd_result.strip()\n\n if len(container_status) == 0:\n return 0\n\n if container_status.lower().startswith('up '):\n return 1\n\n return -1\n\n def get_docker_container_network(self, func_arn):\n \"\"\"\n Determine the network of a docker container.\n :param func_arn: The ARN of the lambda function.\n :return: name of the container network\n \"\"\"\n with self.docker_container_lock:\n status = self.get_docker_container_status(func_arn)\n # container does not exist\n if status == 0:\n return ''\n\n # Get the container name.\n container_name = self.get_container_name(func_arn)\n docker_cmd = self._docker_cmd()\n\n # Get the container network\n LOG.debug('Getting container network: %s' % container_name)\n cmd = (\n '%s inspect %s'\n ' --format \"{{ .HostConfig.NetworkMode }}\"'\n ) % (docker_cmd, container_name)\n\n LOG.debug(cmd)\n cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)\n\n container_network = cmd_result.strip()\n\n return container_network\n\n def idle_container_destroyer(self):\n \"\"\"\n Iterates though all the lambda containers and destroys any container that has\n been inactive for longer than MAX_CONTAINER_IDLE_TIME_MS.\n :return: None\n \"\"\"\n LOG.info('Checking if there are idle containers.')\n current_time = int(time.time() * 1000)\n for func_arn, last_run_time in dict(self.function_invoke_times).items():\n duration = current_time - last_run_time\n\n # not enough idle time has passed\n if duration < MAX_CONTAINER_IDLE_TIME_MS:\n continue\n\n # container has been idle, destroy it.\n self.destroy_docker_container(func_arn)\n\n def start_idle_container_destroyer_interval(self):\n \"\"\"\n Starts a repeating timer that triggers start_idle_container_destroyer_interval every 60 seconds.\n Thus checking for idle containers and destroying them.\n :return: None\n \"\"\"\n self.idle_container_destroyer()\n threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()\n\n def get_container_name(self, func_arn):\n \"\"\"\n Given a function ARN, returns a valid docker container name.\n :param func_arn: The ARN of the lambda function.\n :return: A docker compatible name for the arn.\n \"\"\"\n return 'localstack_lambda_' + re.sub(r'[^a-zA-Z0-9_.-]', '_', func_arn)\n\n\nclass LambdaExecutorSeparateContainers(LambdaExecutorContainers):\n def 
__init__(self):\n super(LambdaExecutorSeparateContainers, self).__init__()\n self.max_port = LAMBDA_API_UNIQUE_PORTS\n self.port_offset = LAMBDA_API_PORT_OFFSET\n\n def prepare_event(self, environment, event_body):\n # Tell Lambci to use STDIN for the event\n environment['DOCKER_LAMBDA_USE_STDIN'] = '1'\n return event_body.encode()\n\n def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):\n entrypoint = ''\n if command:\n entrypoint = ' --entrypoint \"\"'\n else:\n command = '\"%s\"' % handler\n\n # add Docker Lambda env vars\n network = config.LAMBDA_DOCKER_NETWORK\n network_str = '--network=\"%s\"' % network if network else ''\n if network == 'host':\n port = get_free_tcp_port()\n env_vars['DOCKER_LAMBDA_API_PORT'] = port\n env_vars['DOCKER_LAMBDA_RUNTIME_PORT'] = port\n\n dns = config.LAMBDA_DOCKER_DNS\n dns_str = '--dns=\"%s\"' % dns if dns else ''\n\n env_vars_string = ' '.join(['-e {}=\"${}\"'.format(k, k) for (k, v) in env_vars.items()])\n debug_docker_java_port = '-p {p}:{p}'.format(p=Util.debug_java_port) if Util.debug_java_port else ''\n docker_cmd = self._docker_cmd()\n docker_image = Util.docker_image_for_runtime(runtime)\n rm_flag = Util.get_docker_remove_flag()\n\n if config.LAMBDA_REMOTE_DOCKER:\n cmd = (\n 'CONTAINER_ID=\"$(%s create -i'\n ' %s' # entrypoint\n ' %s' # debug_docker_java_port\n ' %s' # env\n ' %s' # network\n ' %s' # dns\n ' %s' # --rm flag\n ' %s %s' # image and command\n ')\";'\n '%s cp \"%s/.\" \"$CONTAINER_ID:/var/task\"; '\n '%s start -ai \"$CONTAINER_ID\";'\n ) % (docker_cmd, entrypoint, debug_docker_java_port,\n env_vars_string, network_str, dns_str, rm_flag,\n docker_image, command,\n docker_cmd, lambda_cwd,\n docker_cmd)\n else:\n lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)\n cmd = (\n '%s run -i'\n ' %s -v \"%s\":/var/task'\n ' %s'\n ' %s' # network\n ' %s' # dns\n ' %s' # --rm flag\n ' %s %s'\n ) % (docker_cmd, entrypoint, lambda_cwd_on_host, env_vars_string,\n network_str, dns_str, rm_flag, docker_image, command)\n return cmd\n\n\nclass LambdaExecutorLocal(LambdaExecutor):\n def _execute(self, func_arn, func_details, event, context=None, version=None):\n lambda_cwd = func_details.cwd\n environment = self._prepare_environment(func_details)\n\n # execute the Lambda function in a forked sub-process, sync result via queue\n queue = Queue()\n\n lambda_function = func_details.function(version)\n\n def do_execute():\n # now we're executing in the child process, safe to change CWD and ENV\n path_before = sys.path\n try:\n if lambda_cwd:\n os.chdir(lambda_cwd)\n sys.path = [lambda_cwd] + sys.path\n if environment:\n os.environ.update(environment)\n result = lambda_function(event, context)\n queue.put(result)\n finally:\n sys.path = path_before\n\n process = Process(target=do_execute)\n with CaptureOutput() as c:\n process.run()\n result = queue.get()\n\n # Make sure to keep the log line below, to ensure the log stream gets created\n log_output = 'START: Lambda %s started via \"local\" executor ...' 
% func_arn\n # TODO: Interweaving stdout/stderr currently not supported\n for stream in (c.stdout(), c.stderr()):\n if stream:\n log_output += ('\\n' if log_output else '') + stream\n\n # store logs to CloudWatch\n _store_logs(func_details, log_output)\n\n return result\n\n def execute_java_lambda(self, event, context, main_file, func_details=None):\n handler = func_details.handler\n opts = config.LAMBDA_JAVA_OPTS if config.LAMBDA_JAVA_OPTS else ''\n event_file = EVENT_FILE_PATTERN.replace('*', short_uid())\n save_file(event_file, json.dumps(json_safe(event)))\n TMP_FILES.append(event_file)\n class_name = handler.split('::')[0]\n classpath = '%s:%s:%s' % (main_file, Util.get_java_classpath(main_file), LAMBDA_EXECUTOR_JAR)\n cmd = 'java %s -cp %s %s %s %s' % (opts, classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)\n LOG.warning(cmd)\n result = self.run_lambda_executor(cmd, func_details=func_details)\n return result\n\n\nclass Util:\n debug_java_port = False\n\n @classmethod\n def get_java_opts(cls):\n opts = config.LAMBDA_JAVA_OPTS or ''\n # Replace _debug_port_ with a random free port\n if '_debug_port_' in opts:\n if not cls.debug_java_port:\n cls.debug_java_port = get_free_tcp_port()\n opts = opts.replace('_debug_port_', ('%s' % cls.debug_java_port))\n else:\n # Parse the debug port from opts\n m = re.match('.*address=(.+:)?(\\\\d+).*', opts)\n if m is not None:\n cls.debug_java_port = m.groups()[1]\n\n return opts\n\n @classmethod\n def get_host_path_for_path_in_docker(cls, path):\n return re.sub(r'^%s/(.*)$' % config.TMP_FOLDER,\n r'%s/\\1' % config.HOST_TMP_FOLDER, path)\n\n @classmethod\n def format_windows_path(cls, path):\n temp = path.replace(':', '').replace('\\\\', '/')\n if len(temp) >= 1 and temp[:1] != '/':\n temp = '/' + temp\n temp = '%s%s' % (config.WINDOWS_DOCKER_MOUNT_PREFIX, temp)\n return temp\n\n @classmethod\n def docker_image_for_runtime(cls, runtime):\n docker_tag = runtime\n docker_image = config.LAMBDA_CONTAINER_REGISTRY\n # TODO: remove prefix once execution issues are fixed with dotnetcore/python lambdas\n # See https://github.com/lambci/docker-lambda/pull/218\n lambdas_to_add_prefix = ['dotnetcore2.0', 'dotnetcore2.1', 'python2.7', 'python3.6', 'python3.7']\n if docker_image == 'lambci/lambda' and any(img in docker_tag for img in lambdas_to_add_prefix):\n docker_tag = '20191117-%s' % docker_tag\n return '\"%s:%s\"' % (docker_image, docker_tag)\n\n @classmethod\n def get_docker_remove_flag(cls):\n return '--rm' if config.LAMBDA_REMOVE_CONTAINERS else ''\n\n @classmethod\n def get_java_classpath(cls, archive):\n \"\"\"\n Return the Java classpath, using the parent folder of the\n given archive as the base folder.\n\n The result contains any *.jar files in the base folder, as\n well as any JAR files in the \"lib/*\" subfolder living\n alongside the supplied java archive (.jar or .zip).\n\n :param archive: an absolute path to a .jar or .zip Java archive\n :return: the Java classpath, relative to the base dir of \"archive\"\n \"\"\"\n entries = ['.']\n base_dir = os.path.dirname(archive)\n for pattern in ['%s/*.jar', '%s/lib/*.jar', '%s/java/lib/*.jar', '%s/*.zip']:\n for entry in glob.glob(pattern % base_dir):\n if os.path.realpath(archive) != os.path.realpath(entry):\n entries.append(os.path.relpath(entry, base_dir))\n # make sure to append the localstack-utils.jar at the end of the classpath\n # https://github.com/localstack/localstack/issues/1160\n entries.append(os.path.relpath(archive, base_dir))\n entries.append('*.jar')\n 
entries.append('java/lib/*.jar')\n result = ':'.join(entries)\n return result\n\n\n# --------------\n# GLOBAL STATE\n# --------------\n\nEXECUTOR_LOCAL = LambdaExecutorLocal()\nEXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()\nEXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()\nDEFAULT_EXECUTOR = EXECUTOR_CONTAINERS_SEPARATE\n# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable\nAVAILABLE_EXECUTORS = {\n 'local': EXECUTOR_LOCAL,\n 'docker': EXECUTOR_CONTAINERS_SEPARATE,\n 'docker-reuse': EXECUTOR_CONTAINERS_REUSE\n}\n", "path": "localstack/services/awslambda/lambda_executors.py" } ]
[ { "content": "import os\nimport re\nimport sys\nimport glob\nimport json\nimport time\nimport logging\nimport threading\nimport subprocess\nimport six\nimport base64\nfrom multiprocessing import Process, Queue\ntry:\n from shlex import quote as cmd_quote\nexcept ImportError:\n from pipes import quote as cmd_quote # for Python 2.7\nfrom localstack import config\nfrom localstack.utils import bootstrap\nfrom localstack.utils.aws import aws_stack\nfrom localstack.utils.common import (\n CaptureOutput, FuncThread, TMP_FILES, short_uid, save_file, rm_rf, in_docker,\n to_str, to_bytes, run, cp_r, json_safe, get_free_tcp_port)\nfrom localstack.services.install import INSTALL_PATH_LOCALSTACK_FAT_JAR\nfrom localstack.utils.aws.dead_letter_queue import lambda_error_to_dead_letter_queue, sqs_error_to_dead_letter_queue\nfrom localstack.utils.cloudwatch.cloudwatch_util import store_cloudwatch_logs, cloudwatched\n\n# constants\nLAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR\nLAMBDA_EXECUTOR_CLASS = 'cloud.localstack.LambdaExecutor'\nEVENT_FILE_PATTERN = '%s/lambda.event.*.json' % config.TMP_FOLDER\n\nLAMBDA_RUNTIME_PYTHON27 = 'python2.7'\nLAMBDA_RUNTIME_PYTHON36 = 'python3.6'\nLAMBDA_RUNTIME_PYTHON37 = 'python3.7'\nLAMBDA_RUNTIME_PYTHON38 = 'python3.8'\nLAMBDA_RUNTIME_NODEJS = 'nodejs'\nLAMBDA_RUNTIME_NODEJS43 = 'nodejs4.3'\nLAMBDA_RUNTIME_NODEJS610 = 'nodejs6.10'\nLAMBDA_RUNTIME_NODEJS810 = 'nodejs8.10'\nLAMBDA_RUNTIME_NODEJS10X = 'nodejs10.x'\nLAMBDA_RUNTIME_NODEJS12X = 'nodejs12.x'\nLAMBDA_RUNTIME_JAVA8 = 'java8'\nLAMBDA_RUNTIME_JAVA11 = 'java11'\nLAMBDA_RUNTIME_DOTNETCORE2 = 'dotnetcore2.0'\nLAMBDA_RUNTIME_DOTNETCORE21 = 'dotnetcore2.1'\nLAMBDA_RUNTIME_DOTNETCORE31 = 'dotnetcore3.1'\nLAMBDA_RUNTIME_GOLANG = 'go1.x'\nLAMBDA_RUNTIME_RUBY = 'ruby'\nLAMBDA_RUNTIME_RUBY25 = 'ruby2.5'\nLAMBDA_RUNTIME_PROVIDED = 'provided'\n\nLAMBDA_SERVER_UNIQUE_PORTS = 500\nLAMBDA_SERVER_PORT_OFFSET = 5000\n\nLAMBDA_API_UNIQUE_PORTS = 500\nLAMBDA_API_PORT_OFFSET = 9000\n\n# logger\nLOG = logging.getLogger(__name__)\n\n# maximum time a pre-allocated container can sit idle before getting killed\nMAX_CONTAINER_IDLE_TIME_MS = 600 * 1000\n\n# SQS event source name\nEVENT_SOURCE_SQS = 'aws:sqs'\n\n# IP address of main Docker container (lazily initialized)\nDOCKER_MAIN_CONTAINER_IP = None\n\n# whether to use our custom Java executor, or the default from lambci\n# TODO: deprecated, should be removed in the future\nUSE_CUSTOM_JAVA_EXECUTOR = False\n\n\ndef get_from_event(event, key):\n try:\n return event['Records'][0][key]\n except KeyError:\n return None\n\n\ndef is_java_lambda(lambda_details):\n runtime = getattr(lambda_details, 'runtime', lambda_details)\n return runtime in [LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_JAVA11]\n\n\ndef is_nodejs_runtime(lambda_details):\n runtime = getattr(lambda_details, 'runtime', lambda_details)\n return runtime.startswith('nodejs')\n\n\ndef _store_logs(func_details, log_output, invocation_time=None, container_id=None):\n log_group_name = '/aws/lambda/%s' % func_details.name()\n container_id = container_id or short_uid()\n invocation_time = invocation_time or int(time.time() * 1000)\n invocation_time_secs = int(invocation_time / 1000)\n time_str = time.strftime('%Y/%m/%d', time.gmtime(invocation_time_secs))\n log_stream_name = '%s/[LATEST]%s' % (time_str, container_id)\n return store_cloudwatch_logs(log_group_name, log_stream_name, log_output, invocation_time)\n\n\ndef get_main_endpoint_from_container():\n global DOCKER_MAIN_CONTAINER_IP\n if DOCKER_MAIN_CONTAINER_IP is None:\n 
DOCKER_MAIN_CONTAINER_IP = False\n try:\n if in_docker():\n DOCKER_MAIN_CONTAINER_IP = bootstrap.get_main_container_ip()\n LOG.info('Determined main container target IP: %s' % DOCKER_MAIN_CONTAINER_IP)\n except Exception as e:\n container_name = bootstrap.get_main_container_name()\n LOG.info('Unable to get IP address of main Docker container \"%s\": %s' %\n (container_name, e))\n # return main container IP, or fall back to Docker host (bridge IP, or host DNS address)\n return DOCKER_MAIN_CONTAINER_IP or config.DOCKER_HOST_FROM_CONTAINER\n\n\nclass LambdaExecutor(object):\n \"\"\" Base class for Lambda executors. Subclasses must overwrite the _execute method \"\"\"\n def __init__(self):\n # keeps track of each function arn and the last time it was invoked\n self.function_invoke_times = {}\n\n def _prepare_environment(self, func_details):\n # setup environment pre-defined variables for docker environment\n result = func_details.envvars.copy()\n\n # injecting aws credentials into docker environment if not provided\n aws_stack.inject_test_credentials_into_env(result)\n\n return result\n\n def execute(self, func_arn, func_details, event, context=None, version=None,\n asynchronous=False, callback=None):\n def do_execute(*args):\n\n @cloudwatched('lambda')\n def _run(func_arn=None):\n # set the invocation time in milliseconds\n invocation_time = int(time.time() * 1000)\n # start the execution\n raised_error = None\n result = None\n dlq_sent = None\n try:\n result = self._execute(func_arn, func_details, event, context, version)\n except Exception as e:\n raised_error = e\n if asynchronous:\n if get_from_event(event, 'eventSource') == EVENT_SOURCE_SQS:\n sqs_queue_arn = get_from_event(event, 'eventSourceARN')\n if sqs_queue_arn:\n # event source is SQS, send event back to dead letter queue\n dlq_sent = sqs_error_to_dead_letter_queue(sqs_queue_arn, event, e)\n else:\n # event source is not SQS, send back to lambda dead letter queue\n lambda_error_to_dead_letter_queue(func_details, event, e)\n raise e\n finally:\n self.function_invoke_times[func_arn] = invocation_time\n callback and callback(result, func_arn, event, error=raised_error, dlq_sent=dlq_sent)\n # return final result\n return result\n\n return _run(func_arn=func_arn)\n\n # Inform users about asynchronous mode of the lambda execution.\n if asynchronous:\n LOG.debug('Lambda executed in Event (asynchronous) mode, no response will be returned to caller')\n FuncThread(do_execute).start()\n return None, 'Lambda executed asynchronously.'\n\n return do_execute()\n\n def _execute(self, func_arn, func_details, event, context=None, version=None):\n \"\"\" This method must be overwritten by subclasses. \"\"\"\n raise Exception('Not implemented.')\n\n def startup(self):\n pass\n\n def cleanup(self, arn=None):\n pass\n\n def run_lambda_executor(self, cmd, event=None, func_details=None, env_vars={}):\n kwargs = {'stdin': True, 'inherit_env': True, 'asynchronous': True}\n\n is_provided = func_details.runtime.startswith(LAMBDA_RUNTIME_PROVIDED)\n if func_details and is_provided and env_vars.get('DOCKER_LAMBDA_USE_STDIN') == '1':\n # Note: certain \"provided\" runtimes (e.g., Rust programs) can block when we pass in\n # the event payload via stdin, hence we rewrite the command to \"echo ... 
| ...\" below\n env_vars = {\n 'PATH': env_vars.get('PATH') or os.environ.get('PATH', ''),\n 'AWS_LAMBDA_EVENT_BODY': to_str(event),\n 'DOCKER_LAMBDA_USE_STDIN': '1'\n }\n event = None\n cmd = re.sub(r'(.*)(%s\\s+(run|start))' % self._docker_cmd(), r'\\1echo $AWS_LAMBDA_EVENT_BODY | \\2', cmd)\n\n process = run(cmd, env_vars=env_vars, stderr=subprocess.PIPE, outfile=subprocess.PIPE, **kwargs)\n result, log_output = process.communicate(input=event)\n try:\n result = to_str(result).strip()\n except Exception:\n pass\n log_output = to_str(log_output).strip()\n return_code = process.returncode\n # Note: The user's code may have been logging to stderr, in which case the logs\n # will be part of the \"result\" variable here. Hence, make sure that we extract\n # only the *last* line of \"result\" and consider anything above that as log output.\n if isinstance(result, six.string_types) and '\\n' in result:\n additional_logs, _, result = result.rpartition('\\n')\n log_output += '\\n%s' % additional_logs\n\n log_formatted = log_output.strip().replace('\\n', '\\n> ')\n func_arn = func_details and func_details.arn()\n LOG.debug('Lambda %s result / log output:\\n%s\\n> %s' % (func_arn, result.strip(), log_formatted))\n\n # store log output - TODO get live logs from `process` above?\n _store_logs(func_details, log_output)\n\n if return_code != 0:\n raise Exception('Lambda process returned error status code: %s. Result: %s. Output:\\n%s' %\n (return_code, result, log_output))\n\n return result\n\n\nclass ContainerInfo:\n \"\"\" Contains basic information about a docker container. \"\"\"\n def __init__(self, name, entry_point):\n self.name = name\n self.entry_point = entry_point\n\n\nclass LambdaExecutorContainers(LambdaExecutor):\n \"\"\" Abstract executor class for executing Lambda functions in Docker containers \"\"\"\n\n def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):\n raise Exception('Not implemented')\n\n def _docker_cmd(self):\n \"\"\" Return the string to be used for running Docker commands. \"\"\"\n return config.DOCKER_CMD\n\n def prepare_event(self, environment, event_body):\n \"\"\" Return the event as a stdin string. 
\"\"\"\n # amend the environment variables for execution\n environment['AWS_LAMBDA_EVENT_BODY'] = event_body\n return None\n\n def _execute(self, func_arn, func_details, event, context=None, version=None):\n lambda_cwd = func_details.cwd\n runtime = func_details.runtime\n handler = func_details.handler\n environment = self._prepare_environment(func_details)\n\n # configure USE_SSL in environment\n if config.USE_SSL:\n environment['USE_SSL'] = '1'\n\n # prepare event body\n if not event:\n LOG.warning('Empty event body specified for invocation of Lambda \"%s\"' % func_arn)\n event = {}\n event_body = json.dumps(json_safe(event))\n stdin = self.prepare_event(environment, event_body)\n\n main_endpoint = get_main_endpoint_from_container()\n\n environment['LOCALSTACK_HOSTNAME'] = main_endpoint\n environment['EDGE_PORT'] = str(config.EDGE_PORT)\n environment['_HANDLER'] = handler\n if os.environ.get('HTTP_PROXY'):\n environment['HTTP_PROXY'] = os.environ['HTTP_PROXY']\n if func_details.timeout:\n environment['AWS_LAMBDA_FUNCTION_TIMEOUT'] = str(func_details.timeout)\n if context:\n environment['AWS_LAMBDA_FUNCTION_NAME'] = context.function_name\n environment['AWS_LAMBDA_FUNCTION_VERSION'] = context.function_version\n environment['AWS_LAMBDA_FUNCTION_INVOKED_ARN'] = context.invoked_function_arn\n environment['AWS_LAMBDA_COGNITO_IDENTITY'] = json.dumps(context.cognito_identity or {})\n if context.client_context is not None:\n environment['AWS_LAMBDA_CLIENT_CONTEXT'] = json.dumps(to_str(\n base64.b64decode(to_bytes(context.client_context))))\n\n # custom command to execute in the container\n command = ''\n events_file = ''\n\n if USE_CUSTOM_JAVA_EXECUTOR and is_java_lambda(runtime):\n # if running a Java Lambda with our custom executor, set up classpath arguments\n java_opts = Util.get_java_opts()\n stdin = None\n # copy executor jar into temp directory\n target_file = os.path.join(lambda_cwd, os.path.basename(LAMBDA_EXECUTOR_JAR))\n if not os.path.exists(target_file):\n cp_r(LAMBDA_EXECUTOR_JAR, target_file)\n # TODO cleanup once we have custom Java Docker image\n taskdir = '/var/task'\n events_file = '_lambda.events.%s.json' % short_uid()\n save_file(os.path.join(lambda_cwd, events_file), event_body)\n classpath = Util.get_java_classpath(target_file)\n command = (\"bash -c 'cd %s; java %s -cp \\\"%s\\\" \\\"%s\\\" \\\"%s\\\" \\\"%s\\\"'\" %\n (taskdir, java_opts, classpath, LAMBDA_EXECUTOR_CLASS, handler, events_file))\n\n # accept any self-signed certificates for outgoing calls from the Lambda\n if is_nodejs_runtime(runtime):\n environment['NODE_TLS_REJECT_UNAUTHORIZED'] = '0'\n\n # determine the command to be executed (implemented by subclasses)\n cmd = self.prepare_execution(func_arn, environment, runtime, command, handler, lambda_cwd)\n\n # lambci writes the Lambda result to stdout and logs to stderr, fetch it from there!\n LOG.info('Running lambda cmd: %s' % cmd)\n result = self.run_lambda_executor(cmd, stdin, env_vars=environment, func_details=func_details)\n\n # clean up events file\n events_file and os.path.exists(events_file) and rm_rf(events_file)\n\n return result\n\n\nclass LambdaExecutorReuseContainers(LambdaExecutorContainers):\n \"\"\" Executor class for executing Lambda functions in re-usable Docker containers \"\"\"\n def __init__(self):\n super(LambdaExecutorReuseContainers, self).__init__()\n # locking thread for creation/destruction of docker containers.\n self.docker_container_lock = threading.RLock()\n\n # On each invocation we try to construct a port unlikely to conflict\n 
# with a previously invoked lambda function. This is a problem with at\n # least the lambci/lambda:go1.x container, which execs a go program that\n # attempts to bind to the same default port.\n self.next_port = 0\n self.max_port = LAMBDA_SERVER_UNIQUE_PORTS\n self.port_offset = LAMBDA_SERVER_PORT_OFFSET\n\n def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):\n # check whether the Lambda has been invoked before\n has_been_invoked_before = func_arn in self.function_invoke_times\n\n # Choose a port for this invocation\n with self.docker_container_lock:\n env_vars['_LAMBDA_SERVER_PORT'] = str(self.next_port + self.port_offset)\n self.next_port = (self.next_port + 1) % self.max_port\n\n # create/verify the docker container is running.\n LOG.debug('Priming docker container with runtime \"%s\" and arn \"%s\".', runtime, func_arn)\n container_info = self.prime_docker_container(runtime, func_arn, env_vars.items(), lambda_cwd)\n\n # Note: currently \"docker exec\" does not support --env-file, i.e., environment variables can only be\n # passed directly on the command line, using \"-e\" below. TODO: Update this code once --env-file is\n # available for docker exec, to better support very large Lambda events (very long environment values)\n exec_env_vars = ' '.join(['-e {}=\"${}\"'.format(k, k) for (k, v) in env_vars.items()])\n\n if not command:\n command = '%s %s' % (container_info.entry_point, handler)\n\n # determine files to be copied into the container\n copy_command = ''\n docker_cmd = self._docker_cmd()\n if not has_been_invoked_before and config.LAMBDA_REMOTE_DOCKER:\n # if this is the first invocation: copy the entire folder into the container\n copy_command = '%s cp \"%s/.\" \"%s:/var/task\";' % (docker_cmd, lambda_cwd, container_info.name)\n\n cmd = (\n '%s'\n ' %s exec'\n ' %s' # env variables\n ' %s' # container name\n ' %s' # run cmd\n ) % (copy_command, docker_cmd, exec_env_vars, container_info.name, command)\n LOG.debug('Command for docker-reuse Lambda executor: %s' % cmd)\n\n return cmd\n\n def startup(self):\n self.cleanup()\n # start a process to remove idle containers\n if config.LAMBDA_REMOVE_CONTAINERS:\n self.start_idle_container_destroyer_interval()\n\n def cleanup(self, arn=None):\n if arn:\n self.function_invoke_times.pop(arn, None)\n return self.destroy_docker_container(arn)\n self.function_invoke_times = {}\n return self.destroy_existing_docker_containers()\n\n def prime_docker_container(self, runtime, func_arn, env_vars, lambda_cwd):\n \"\"\"\n Prepares a persistent docker container for a specific function.\n :param runtime: Lamda runtime environment. 
python2.7, nodejs6.10, etc.\n :param func_arn: The ARN of the lambda function.\n :param env_vars: The environment variables for the lambda.\n :param lambda_cwd: The local directory containing the code for the lambda function.\n :return: ContainerInfo class containing the container name and default entry point.\n \"\"\"\n with self.docker_container_lock:\n # Get the container name and id.\n container_name = self.get_container_name(func_arn)\n docker_cmd = self._docker_cmd()\n\n status = self.get_docker_container_status(func_arn)\n LOG.debug('Priming docker container (status \"%s\"): %s' % (status, container_name))\n\n docker_image = Util.docker_image_for_runtime(runtime)\n rm_flag = Util.get_docker_remove_flag()\n\n # Container is not running or doesn't exist.\n if status < 1:\n # Make sure the container does not exist in any form/state.\n self.destroy_docker_container(func_arn)\n\n env_vars_str = ' '.join(['-e {}={}'.format(k, cmd_quote(v)) for (k, v) in env_vars])\n\n network = config.LAMBDA_DOCKER_NETWORK\n network_str = '--network=\"%s\"' % network if network else ''\n\n dns = config.LAMBDA_DOCKER_DNS\n dns_str = '--dns=\"%s\"' % dns if dns else ''\n\n mount_volume = not config.LAMBDA_REMOTE_DOCKER\n lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)\n if (':' in lambda_cwd and '\\\\' in lambda_cwd):\n lambda_cwd_on_host = Util.format_windows_path(lambda_cwd_on_host)\n mount_volume_str = '-v \"%s\":/var/task' % lambda_cwd_on_host if mount_volume else ''\n\n # Create and start the container\n LOG.debug('Creating container: %s' % container_name)\n cmd = (\n '%s create'\n ' %s' # --rm flag\n ' --name \"%s\"'\n ' --entrypoint /bin/bash' # Load bash when it starts.\n ' %s'\n ' --interactive' # Keeps the container running bash.\n ' -e AWS_LAMBDA_EVENT_BODY=\"$AWS_LAMBDA_EVENT_BODY\"'\n ' -e HOSTNAME=\"$HOSTNAME\"'\n ' -e LOCALSTACK_HOSTNAME=\"$LOCALSTACK_HOSTNAME\"'\n ' -e EDGE_PORT=\"$EDGE_PORT\"'\n ' %s' # env_vars\n ' %s' # network\n ' %s' # dns\n ' %s'\n ) % (docker_cmd, rm_flag, container_name, mount_volume_str,\n env_vars_str, network_str, dns_str, docker_image)\n LOG.debug(cmd)\n run(cmd)\n\n if not mount_volume:\n LOG.debug('Copying files to container \"%s\" from \"%s\".' 
% (container_name, lambda_cwd))\n cmd = (\n '%s cp'\n ' \"%s/.\" \"%s:/var/task\"'\n ) % (docker_cmd, lambda_cwd, container_name)\n LOG.debug(cmd)\n run(cmd)\n\n LOG.debug('Starting container: %s' % container_name)\n cmd = '%s start %s' % (docker_cmd, container_name)\n LOG.debug(cmd)\n run(cmd)\n # give the container some time to start up\n time.sleep(1)\n\n # Get the entry point for the image.\n LOG.debug('Getting the entrypoint for image: %s' % (docker_image))\n cmd = (\n '%s image inspect'\n ' --format=\"{{ .Config.Entrypoint }}\"'\n ' %s'\n ) % (docker_cmd, docker_image)\n\n LOG.debug(cmd)\n run_result = run(cmd)\n\n entry_point = run_result.strip('[]\\n\\r ')\n\n container_network = self.get_docker_container_network(func_arn)\n\n LOG.debug('Using entrypoint \"%s\" for container \"%s\" on network \"%s\".'\n % (entry_point, container_name, container_network))\n\n return ContainerInfo(container_name, entry_point)\n\n def destroy_docker_container(self, func_arn):\n \"\"\"\n Stops and/or removes a docker container for a specific lambda function ARN.\n :param func_arn: The ARN of the lambda function.\n :return: None\n \"\"\"\n with self.docker_container_lock:\n status = self.get_docker_container_status(func_arn)\n docker_cmd = self._docker_cmd()\n\n # Get the container name and id.\n container_name = self.get_container_name(func_arn)\n\n if status == 1:\n LOG.debug('Stopping container: %s' % container_name)\n cmd = '%s stop -t0 %s' % (docker_cmd, container_name)\n\n LOG.debug(cmd)\n run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)\n\n status = self.get_docker_container_status(func_arn)\n\n if status == -1:\n LOG.debug('Removing container: %s' % container_name)\n cmd = '%s rm %s' % (docker_cmd, container_name)\n\n LOG.debug(cmd)\n run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)\n\n def get_all_container_names(self):\n \"\"\"\n Returns a list of container names for lambda containers.\n :return: A String[] localstack docker container names for each function.\n \"\"\"\n with self.docker_container_lock:\n LOG.debug('Getting all lambda containers names.')\n cmd = '%s ps -a --filter=\"name=localstack_lambda_*\" --format \"{{.Names}}\"' % self._docker_cmd()\n LOG.debug(cmd)\n cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE).strip()\n\n if len(cmd_result) > 0:\n container_names = cmd_result.split('\\n')\n else:\n container_names = []\n\n return container_names\n\n def destroy_existing_docker_containers(self):\n \"\"\"\n Stops and/or removes all lambda docker containers for localstack.\n :return: None\n \"\"\"\n with self.docker_container_lock:\n container_names = self.get_all_container_names()\n\n LOG.debug('Removing %d containers.' 
% len(container_names))\n for container_name in container_names:\n cmd = '%s rm -f %s' % (self._docker_cmd(), container_name)\n LOG.debug(cmd)\n run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)\n\n def get_docker_container_status(self, func_arn):\n \"\"\"\n Determine the status of a docker container.\n :param func_arn: The ARN of the lambda function.\n :return: 1 If the container is running,\n -1 if the container exists but is not running\n 0 if the container does not exist.\n \"\"\"\n with self.docker_container_lock:\n # Get the container name and id.\n container_name = self.get_container_name(func_arn)\n\n # Check if the container is already running\n # Note: filtering by *exact* name using regex filter '^...$' seems unstable on some\n # systems. Therefore, we use a combination of filter and grep to get the results.\n cmd = (\"docker ps -a --filter name='%s' \"\n '--format \"{{ .Status }} - {{ .Names }}\" '\n '| grep -w \"%s\" | cat') % (container_name, container_name)\n LOG.debug('Getting status for container \"%s\": %s' % (container_name, cmd))\n cmd_result = run(cmd)\n\n # If the container doesn't exist. Create and start it.\n container_status = cmd_result.strip()\n\n if len(container_status) == 0:\n return 0\n\n if container_status.lower().startswith('up '):\n return 1\n\n return -1\n\n def get_docker_container_network(self, func_arn):\n \"\"\"\n Determine the network of a docker container.\n :param func_arn: The ARN of the lambda function.\n :return: name of the container network\n \"\"\"\n with self.docker_container_lock:\n status = self.get_docker_container_status(func_arn)\n # container does not exist\n if status == 0:\n return ''\n\n # Get the container name.\n container_name = self.get_container_name(func_arn)\n docker_cmd = self._docker_cmd()\n\n # Get the container network\n LOG.debug('Getting container network: %s' % container_name)\n cmd = (\n '%s inspect %s'\n ' --format \"{{ .HostConfig.NetworkMode }}\"'\n ) % (docker_cmd, container_name)\n\n LOG.debug(cmd)\n cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)\n\n container_network = cmd_result.strip()\n\n return container_network\n\n def idle_container_destroyer(self):\n \"\"\"\n Iterates though all the lambda containers and destroys any container that has\n been inactive for longer than MAX_CONTAINER_IDLE_TIME_MS.\n :return: None\n \"\"\"\n LOG.info('Checking if there are idle containers.')\n current_time = int(time.time() * 1000)\n for func_arn, last_run_time in dict(self.function_invoke_times).items():\n duration = current_time - last_run_time\n\n # not enough idle time has passed\n if duration < MAX_CONTAINER_IDLE_TIME_MS:\n continue\n\n # container has been idle, destroy it.\n self.destroy_docker_container(func_arn)\n\n def start_idle_container_destroyer_interval(self):\n \"\"\"\n Starts a repeating timer that triggers start_idle_container_destroyer_interval every 60 seconds.\n Thus checking for idle containers and destroying them.\n :return: None\n \"\"\"\n self.idle_container_destroyer()\n threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()\n\n def get_container_name(self, func_arn):\n \"\"\"\n Given a function ARN, returns a valid docker container name.\n :param func_arn: The ARN of the lambda function.\n :return: A docker compatible name for the arn.\n \"\"\"\n return 'localstack_lambda_' + re.sub(r'[^a-zA-Z0-9_.-]', '_', func_arn)\n\n\nclass LambdaExecutorSeparateContainers(LambdaExecutorContainers):\n def 
__init__(self):\n super(LambdaExecutorSeparateContainers, self).__init__()\n self.max_port = LAMBDA_API_UNIQUE_PORTS\n self.port_offset = LAMBDA_API_PORT_OFFSET\n\n def prepare_event(self, environment, event_body):\n # Tell Lambci to use STDIN for the event\n environment['DOCKER_LAMBDA_USE_STDIN'] = '1'\n return event_body.encode()\n\n def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):\n entrypoint = ''\n if command:\n entrypoint = ' --entrypoint \"\"'\n else:\n command = '\"%s\"' % handler\n\n # add Docker Lambda env vars\n network = config.LAMBDA_DOCKER_NETWORK\n network_str = '--network=\"%s\"' % network if network else ''\n if network == 'host':\n port = get_free_tcp_port()\n env_vars['DOCKER_LAMBDA_API_PORT'] = port\n env_vars['DOCKER_LAMBDA_RUNTIME_PORT'] = port\n\n dns = config.LAMBDA_DOCKER_DNS\n dns_str = '--dns=\"%s\"' % dns if dns else ''\n\n env_vars_string = ' '.join(['-e {}=\"${}\"'.format(k, k) for (k, v) in env_vars.items()])\n debug_docker_java_port = '-p {p}:{p}'.format(p=Util.debug_java_port) if Util.debug_java_port else ''\n docker_cmd = self._docker_cmd()\n docker_image = Util.docker_image_for_runtime(runtime)\n rm_flag = Util.get_docker_remove_flag()\n\n if config.LAMBDA_REMOTE_DOCKER:\n cmd = (\n 'CONTAINER_ID=\"$(%s create -i'\n ' %s' # entrypoint\n ' %s' # debug_docker_java_port\n ' %s' # env\n ' %s' # network\n ' %s' # dns\n ' %s' # --rm flag\n ' %s %s' # image and command\n ')\";'\n '%s cp \"%s/.\" \"$CONTAINER_ID:/var/task\"; '\n '%s start -ai \"$CONTAINER_ID\";'\n ) % (docker_cmd, entrypoint, debug_docker_java_port,\n env_vars_string, network_str, dns_str, rm_flag,\n docker_image, command,\n docker_cmd, lambda_cwd,\n docker_cmd)\n else:\n lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)\n cmd = (\n '%s run -i'\n ' %s -v \"%s\":/var/task'\n ' %s'\n ' %s' # network\n ' %s' # dns\n ' %s' # --rm flag\n ' %s %s'\n ) % (docker_cmd, entrypoint, lambda_cwd_on_host, env_vars_string,\n network_str, dns_str, rm_flag, docker_image, command)\n return cmd\n\n\nclass LambdaExecutorLocal(LambdaExecutor):\n def _execute(self, func_arn, func_details, event, context=None, version=None):\n lambda_cwd = func_details.cwd\n environment = self._prepare_environment(func_details)\n\n # execute the Lambda function in a forked sub-process, sync result via queue\n queue = Queue()\n\n lambda_function = func_details.function(version)\n\n def do_execute():\n # now we're executing in the child process, safe to change CWD and ENV\n path_before = sys.path\n try:\n if lambda_cwd:\n os.chdir(lambda_cwd)\n sys.path = [lambda_cwd] + sys.path\n if environment:\n os.environ.update(environment)\n result = lambda_function(event, context)\n queue.put(result)\n finally:\n sys.path = path_before\n\n process = Process(target=do_execute)\n with CaptureOutput() as c:\n process.run()\n result = queue.get()\n\n # Make sure to keep the log line below, to ensure the log stream gets created\n log_output = 'START: Lambda %s started via \"local\" executor ...' 
% func_arn\n # TODO: Interweaving stdout/stderr currently not supported\n for stream in (c.stdout(), c.stderr()):\n if stream:\n log_output += ('\\n' if log_output else '') + stream\n\n # store logs to CloudWatch\n _store_logs(func_details, log_output)\n\n return result\n\n def execute_java_lambda(self, event, context, main_file, func_details=None):\n handler = func_details.handler\n opts = config.LAMBDA_JAVA_OPTS if config.LAMBDA_JAVA_OPTS else ''\n event_file = EVENT_FILE_PATTERN.replace('*', short_uid())\n save_file(event_file, json.dumps(json_safe(event)))\n TMP_FILES.append(event_file)\n class_name = handler.split('::')[0]\n classpath = '%s:%s:%s' % (main_file, Util.get_java_classpath(main_file), LAMBDA_EXECUTOR_JAR)\n cmd = 'java %s -cp %s %s %s %s' % (opts, classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)\n LOG.warning(cmd)\n result = self.run_lambda_executor(cmd, func_details=func_details)\n return result\n\n\nclass Util:\n debug_java_port = False\n\n @classmethod\n def get_java_opts(cls):\n opts = config.LAMBDA_JAVA_OPTS or ''\n # Replace _debug_port_ with a random free port\n if '_debug_port_' in opts:\n if not cls.debug_java_port:\n cls.debug_java_port = get_free_tcp_port()\n opts = opts.replace('_debug_port_', ('%s' % cls.debug_java_port))\n else:\n # Parse the debug port from opts\n m = re.match('.*address=(.+:)?(\\\\d+).*', opts)\n if m is not None:\n cls.debug_java_port = m.groups()[1]\n\n return opts\n\n @classmethod\n def get_host_path_for_path_in_docker(cls, path):\n return re.sub(r'^%s/(.*)$' % config.TMP_FOLDER,\n r'%s/\\1' % config.HOST_TMP_FOLDER, path)\n\n @classmethod\n def format_windows_path(cls, path):\n temp = path.replace(':', '').replace('\\\\', '/')\n if len(temp) >= 1 and temp[:1] != '/':\n temp = '/' + temp\n temp = '%s%s' % (config.WINDOWS_DOCKER_MOUNT_PREFIX, temp)\n return temp\n\n @classmethod\n def docker_image_for_runtime(cls, runtime):\n docker_tag = runtime\n docker_image = config.LAMBDA_CONTAINER_REGISTRY\n # TODO: remove prefix once execution issues are fixed with dotnetcore/python lambdas\n # See https://github.com/lambci/docker-lambda/pull/218\n lambdas_to_add_prefix = ['dotnetcore2.0', 'dotnetcore2.1', 'python2.7', 'python3.6', 'python3.7']\n if docker_image == 'lambci/lambda' and any(img in docker_tag for img in lambdas_to_add_prefix):\n docker_tag = '20191117-%s' % docker_tag\n return '\"%s:%s\"' % (docker_image, docker_tag)\n\n @classmethod\n def get_docker_remove_flag(cls):\n return '--rm' if config.LAMBDA_REMOVE_CONTAINERS else ''\n\n @classmethod\n def get_java_classpath(cls, archive):\n \"\"\"\n Return the Java classpath, using the parent folder of the\n given archive as the base folder.\n\n The result contains any *.jar files in the base folder, as\n well as any JAR files in the \"lib/*\" subfolder living\n alongside the supplied java archive (.jar or .zip).\n\n :param archive: an absolute path to a .jar or .zip Java archive\n :return: the Java classpath, relative to the base dir of \"archive\"\n \"\"\"\n entries = ['.']\n base_dir = os.path.dirname(archive)\n for pattern in ['%s/*.jar', '%s/lib/*.jar', '%s/java/lib/*.jar', '%s/*.zip']:\n for entry in glob.glob(pattern % base_dir):\n if os.path.realpath(archive) != os.path.realpath(entry):\n entries.append(os.path.relpath(entry, base_dir))\n # make sure to append the localstack-utils.jar at the end of the classpath\n # https://github.com/localstack/localstack/issues/1160\n entries.append(os.path.relpath(archive, base_dir))\n entries.append('*.jar')\n 
entries.append('java/lib/*.jar')\n result = ':'.join(entries)\n return result\n\n\n# --------------\n# GLOBAL STATE\n# --------------\n\nEXECUTOR_LOCAL = LambdaExecutorLocal()\nEXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()\nEXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()\nDEFAULT_EXECUTOR = EXECUTOR_CONTAINERS_SEPARATE\n# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable\nAVAILABLE_EXECUTORS = {\n 'local': EXECUTOR_LOCAL,\n 'docker': EXECUTOR_CONTAINERS_SEPARATE,\n 'docker-reuse': EXECUTOR_CONTAINERS_REUSE\n}\n", "path": "localstack/services/awslambda/lambda_executors.py" } ]
diff --git a/localstack/services/awslambda/lambda_executors.py b/localstack/services/awslambda/lambda_executors.py index 5c41462270c7c..5a010fd746480 100644 --- a/localstack/services/awslambda/lambda_executors.py +++ b/localstack/services/awslambda/lambda_executors.py @@ -469,7 +469,7 @@ def prime_docker_container(self, runtime, func_arn, env_vars, lambda_cwd): LOG.debug('Getting the entrypoint for image: %s' % (docker_image)) cmd = ( '%s image inspect' - ' --format="{{ .ContainerConfig.Entrypoint }}"' + ' --format="{{ .Config.Entrypoint }}"' ' %s' ) % (docker_cmd, docker_image)
Usage of `docker inspect ..` is fragile; the result depends on how, and by what tool, the docker image was built

[x] bug report
[ ] feature request

# Detailed description

`lambda_executor.py` currently retrieves the container entrypoint from the docker image via `docker inspect --format="{{ .ContainerConfig.Entrypoint }}" ..`. This is fragile, and the value may be missing depending on how the image in question was built. There is a `Config` block _and_ a `ContainerConfig` block that are mostly the same, but they sometimes differ depending on which tool (and which version of that tool) built the image. For example, we are seeing the entrypoint missing on images built with Docker for Mac 2.5.0.1 but not on earlier versions, and others using `podman` have noticed the same fragility in other projects: https://github.com/containers/podman/issues/2017

## Expected behavior

The entrypoint value is picked up from any validly built container image.

## Actual behavior

The entrypoint is sometimes an empty string, which, for a `provided` lambda executor, ends in a script error when trying to execute the handler name.

The simple fix is to change `--format="{{ .ContainerConfig.Entrypoint }}"` to `--format="{{ .Config.Entrypoint }}"`, which seems like the more canonical way of getting that value.
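For illustration only (not part of the original report), here is a minimal Python sketch of the fallback lookup the issue points at: prefer the canonical `.Config.Entrypoint` and only consult the legacy `.ContainerConfig` block when the former is empty. The helper name `get_image_entrypoint` and the use of `subprocess` are assumptions made for this sketch; error handling for missing images, or for engines that no longer expose `ContainerConfig` at all, is deliberately omitted.

```python
import json
import subprocess


def get_image_entrypoint(image, docker_cmd='docker'):
    """Hypothetical helper: return the entrypoint list of a local image,
    preferring the canonical `.Config` block and falling back to the
    legacy `.ContainerConfig` block when the former is empty."""
    def inspect(template):
        # `{{json ...}}` makes the output parseable regardless of list length
        out = subprocess.check_output(
            [docker_cmd, 'image', 'inspect', '--format', template, image])
        return json.loads(out.decode().strip()) or []

    entrypoint = inspect('{{json .Config.Entrypoint}}')
    if not entrypoint:
        # Some builders only populate the legacy block; on newer engines
        # this block may be absent and the call would fail.
        entrypoint = inspect('{{json .ContainerConfig.Entrypoint}}')
    return entrypoint


if __name__ == '__main__':
    print(get_image_entrypoint('lambci/lambda:provided'))
```

Reading `.Config` first matches the one-line change in the diff above; the legacy fallback is only a belt-and-braces measure for images produced by older builders.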
ckan__ckan-4249
[ { "content": "# encoding: utf-8\n\nfrom paste.deploy.converters import asbool\nfrom six import text_type\n\nimport ckan.model as model\nfrom ckan.common import g, request, config, session\nfrom ckan.lib.helpers import redirect_to as redirect\nimport ckan.plugins as p\n\nimport logging\nlog = logging.getLogger(__name__)\n\nAPIKEY_HEADER_NAME_KEY = u'apikey_header_name'\nAPIKEY_HEADER_NAME_DEFAULT = u'X-CKAN-API-Key'\n\n\ndef check_session_cookie(response):\n u'''\n The cookies for auth (auth_tkt) and session (ckan) are separate. This\n checks whether a user is logged in, and determines the validity of the\n session cookie, removing it if necessary.\n '''\n for cookie in request.cookies:\n # Remove the ckan session cookie if logged out.\n if cookie == u'ckan' and not getattr(g, u'user', None):\n # Check session for valid data (including flash messages)\n is_valid_cookie_data = False\n for key, value in session.items():\n if not key.startswith(u'_') and value:\n is_valid_cookie_data = True\n break\n if not is_valid_cookie_data:\n if session.id:\n log.debug(u'No valid session data - deleting session')\n log.debug(u'Session: %r', session.items())\n session.delete()\n else:\n log.debug(u'No session id - deleting session cookie')\n response.delete_cookie(cookie)\n # Remove auth_tkt repoze.who cookie if user not logged in.\n elif cookie == u'auth_tkt' and not session.id:\n response.delete_cookie(cookie)\n\n return response\n\n\ndef set_cors_headers_for_response(response):\n u'''\n Set up Access Control Allow headers if either origin_allow_all is True, or\n the request Origin is in the origin_whitelist.\n '''\n if config.get(u'ckan.cors.origin_allow_all') \\\n and request.headers.get(u'Origin'):\n\n cors_origin_allowed = None\n if asbool(config.get(u'ckan.cors.origin_allow_all')):\n cors_origin_allowed = b'*'\n elif config.get(u'ckan.cors.origin_whitelist') and \\\n request.headers.get(u'Origin') \\\n in config[u'ckan.cors.origin_whitelist'].split(u' '):\n # set var to the origin to allow it.\n cors_origin_allowed = request.headers.get(u'Origin')\n\n if cors_origin_allowed is not None:\n response.headers[b'Access-Control-Allow-Origin'] = \\\n cors_origin_allowed\n response.headers[b'Access-Control-Allow-Methods'] = \\\n b'POST, PUT, GET, DELETE, OPTIONS'\n response.headers[b'Access-Control-Allow-Headers'] = \\\n b'X-CKAN-API-KEY, Authorization, Content-Type'\n\n return response\n\n\ndef identify_user():\n u'''Try to identify the user\n If the user is identified then:\n g.user = user name (unicode)\n g.userobj = user object\n g.author = user name\n otherwise:\n g.user = None\n g.userobj = None\n g.author = user's IP address (unicode)\n\n Note: Remember, when running under Pylons, `g` is the Pylons `c` object\n '''\n # see if it was proxied first\n g.remote_addr = request.environ.get(u'HTTP_X_FORWARDED_FOR', u'')\n if not g.remote_addr:\n g.remote_addr = request.environ.get(u'REMOTE_ADDR',\n u'Unknown IP Address')\n\n # Authentication plugins get a chance to run here break as soon as a user\n # is identified.\n authenticators = p.PluginImplementations(p.IAuthenticator)\n if authenticators:\n for item in authenticators:\n item.identify()\n if g.user:\n break\n\n # We haven't identified the user so try the default methods\n if not getattr(g, u'user', None):\n _identify_user_default()\n\n # If we have a user but not the userobj let's get the userobj. 
This means\n # that IAuthenticator extensions do not need to access the user model\n # directly.\n if g.user and not getattr(g, u'userobj', None):\n g.userobj = model.User.by_name(g.user)\n\n # general settings\n if g.user:\n g.author = g.user\n else:\n g.author = g.remote_addr\n g.author = text_type(g.author)\n\n\ndef _identify_user_default():\n u'''\n Identifies the user using two methods:\n a) If they logged into the web interface then repoze.who will\n set REMOTE_USER.\n b) For API calls they may set a header with an API key.\n '''\n\n # environ['REMOTE_USER'] is set by repoze.who if it authenticates a\n # user's cookie. But repoze.who doesn't check the user (still) exists\n # in our database - we need to do that here. (Another way would be\n # with an userid_checker, but that would mean another db access.\n # See: http://docs.repoze.org/who/1.0/narr.html#module-repoze.who\\\n # .plugins.sql )\n g.user = request.environ.get(u'REMOTE_USER', u'')\n if g.user:\n g.user = g.user.decode(u'utf8')\n g.userobj = model.User.by_name(g.user)\n\n if g.userobj is None or not g.userobj.is_active():\n\n # This occurs when a user that was still logged in is deleted, or\n # when you are logged in, clean db and then restart (or when you\n # change your username). There is no user object, so even though\n # repoze thinks you are logged in and your cookie has\n # ckan_display_name, we need to force user to logout and login\n # again to get the User object.\n\n ev = request.environ\n if u'repoze.who.plugins' in ev:\n pth = getattr(ev[u'repoze.who.plugins'][u'friendlyform'],\n u'logout_handler_path')\n redirect(pth)\n else:\n g.userobj = _get_user_for_apikey()\n if g.userobj is not None:\n g.user = g.userobj.name\n\n\ndef _get_user_for_apikey():\n apikey_header_name = config.get(APIKEY_HEADER_NAME_KEY,\n APIKEY_HEADER_NAME_DEFAULT)\n apikey = request.headers.get(apikey_header_name, u'')\n if not apikey:\n apikey = request.environ.get(apikey_header_name, u'')\n if not apikey:\n # For misunderstanding old documentation (now fixed).\n apikey = request.environ.get(u'HTTP_AUTHORIZATION', u'')\n if not apikey:\n apikey = request.environ.get(u'Authorization', u'')\n # Forget HTTP Auth credentials (they have spaces).\n if u' ' in apikey:\n apikey = u''\n if not apikey:\n return None\n apikey = apikey.decode(u'utf8', u'ignore')\n log.debug(u'Received API Key: %s' % apikey)\n query = model.Session.query(model.User)\n user = query.filter_by(apikey=apikey).first()\n return user\n", "path": "ckan/views/__init__.py" } ]
[ { "content": "# encoding: utf-8\n\nfrom paste.deploy.converters import asbool\nfrom six import text_type\n\nimport ckan.model as model\nfrom ckan.common import g, request, config, session\nfrom ckan.lib.helpers import redirect_to as redirect\nimport ckan.plugins as p\n\nimport logging\nlog = logging.getLogger(__name__)\n\nAPIKEY_HEADER_NAME_KEY = u'apikey_header_name'\nAPIKEY_HEADER_NAME_DEFAULT = u'X-CKAN-API-Key'\n\n\ndef check_session_cookie(response):\n u'''\n The cookies for auth (auth_tkt) and session (ckan) are separate. This\n checks whether a user is logged in, and determines the validity of the\n session cookie, removing it if necessary.\n '''\n for cookie in request.cookies:\n # Remove the ckan session cookie if logged out.\n if cookie == u'ckan' and not getattr(g, u'user', None):\n # Check session for valid data (including flash messages)\n is_valid_cookie_data = False\n for key, value in session.items():\n if not key.startswith(u'_') and value:\n is_valid_cookie_data = True\n break\n if not is_valid_cookie_data:\n if session.id:\n log.debug(u'No valid session data - deleting session')\n log.debug(u'Session: %r', session.items())\n session.delete()\n else:\n log.debug(u'No session id - deleting session cookie')\n response.delete_cookie(cookie)\n # Remove auth_tkt repoze.who cookie if user not logged in.\n elif cookie == u'auth_tkt' and not session.id:\n response.delete_cookie(cookie)\n\n return response\n\n\ndef set_cors_headers_for_response(response):\n u'''\n Set up Access Control Allow headers if either origin_allow_all is True, or\n the request Origin is in the origin_whitelist.\n '''\n if config.get(u'ckan.cors.origin_allow_all') \\\n and request.headers.get(u'Origin'):\n\n cors_origin_allowed = None\n if asbool(config.get(u'ckan.cors.origin_allow_all')):\n cors_origin_allowed = b'*'\n elif config.get(u'ckan.cors.origin_whitelist') and \\\n request.headers.get(u'Origin') \\\n in config[u'ckan.cors.origin_whitelist'].split(u' '):\n # set var to the origin to allow it.\n cors_origin_allowed = request.headers.get(u'Origin')\n\n if cors_origin_allowed is not None:\n response.headers[b'Access-Control-Allow-Origin'] = \\\n cors_origin_allowed\n response.headers[b'Access-Control-Allow-Methods'] = \\\n b'POST, PUT, GET, DELETE, OPTIONS'\n response.headers[b'Access-Control-Allow-Headers'] = \\\n b'X-CKAN-API-KEY, Authorization, Content-Type'\n\n return response\n\n\ndef identify_user():\n u'''Try to identify the user\n If the user is identified then:\n g.user = user name (unicode)\n g.userobj = user object\n g.author = user name\n otherwise:\n g.user = None\n g.userobj = None\n g.author = user's IP address (unicode)\n\n Note: Remember, when running under Pylons, `g` is the Pylons `c` object\n '''\n # see if it was proxied first\n g.remote_addr = request.environ.get(u'HTTP_X_FORWARDED_FOR', u'')\n if not g.remote_addr:\n g.remote_addr = request.environ.get(u'REMOTE_ADDR',\n u'Unknown IP Address')\n\n # Authentication plugins get a chance to run here break as soon as a user\n # is identified.\n authenticators = p.PluginImplementations(p.IAuthenticator)\n if authenticators:\n for item in authenticators:\n item.identify()\n try:\n if g.user:\n break\n except AttributeError:\n continue\n\n # We haven't identified the user so try the default methods\n if not getattr(g, u'user', None):\n _identify_user_default()\n\n # If we have a user but not the userobj let's get the userobj. 
This means\n # that IAuthenticator extensions do not need to access the user model\n # directly.\n if g.user and not getattr(g, u'userobj', None):\n g.userobj = model.User.by_name(g.user)\n\n # general settings\n if g.user:\n g.author = g.user\n else:\n g.author = g.remote_addr\n g.author = text_type(g.author)\n\n\ndef _identify_user_default():\n u'''\n Identifies the user using two methods:\n a) If they logged into the web interface then repoze.who will\n set REMOTE_USER.\n b) For API calls they may set a header with an API key.\n '''\n\n # environ['REMOTE_USER'] is set by repoze.who if it authenticates a\n # user's cookie. But repoze.who doesn't check the user (still) exists\n # in our database - we need to do that here. (Another way would be\n # with an userid_checker, but that would mean another db access.\n # See: http://docs.repoze.org/who/1.0/narr.html#module-repoze.who\\\n # .plugins.sql )\n g.user = request.environ.get(u'REMOTE_USER', u'')\n if g.user:\n g.user = g.user.decode(u'utf8')\n g.userobj = model.User.by_name(g.user)\n\n if g.userobj is None or not g.userobj.is_active():\n\n # This occurs when a user that was still logged in is deleted, or\n # when you are logged in, clean db and then restart (or when you\n # change your username). There is no user object, so even though\n # repoze thinks you are logged in and your cookie has\n # ckan_display_name, we need to force user to logout and login\n # again to get the User object.\n\n ev = request.environ\n if u'repoze.who.plugins' in ev:\n pth = getattr(ev[u'repoze.who.plugins'][u'friendlyform'],\n u'logout_handler_path')\n redirect(pth)\n else:\n g.userobj = _get_user_for_apikey()\n if g.userobj is not None:\n g.user = g.userobj.name\n\n\ndef _get_user_for_apikey():\n apikey_header_name = config.get(APIKEY_HEADER_NAME_KEY,\n APIKEY_HEADER_NAME_DEFAULT)\n apikey = request.headers.get(apikey_header_name, u'')\n if not apikey:\n apikey = request.environ.get(apikey_header_name, u'')\n if not apikey:\n # For misunderstanding old documentation (now fixed).\n apikey = request.environ.get(u'HTTP_AUTHORIZATION', u'')\n if not apikey:\n apikey = request.environ.get(u'Authorization', u'')\n # Forget HTTP Auth credentials (they have spaces).\n if u' ' in apikey:\n apikey = u''\n if not apikey:\n return None\n apikey = apikey.decode(u'utf8', u'ignore')\n log.debug(u'Received API Key: %s' % apikey)\n query = model.Session.query(model.User)\n user = query.filter_by(apikey=apikey).first()\n return user\n", "path": "ckan/views/__init__.py" } ]
diff --git a/ckan/views/__init__.py b/ckan/views/__init__.py index 164231b8bd3..15e95a86487 100644 --- a/ckan/views/__init__.py +++ b/ckan/views/__init__.py @@ -98,8 +98,11 @@ def identify_user(): if authenticators: for item in authenticators: item.identify() - if g.user: - break + try: + if g.user: + break + except AttributeError: + continue # We haven't identified the user so try the default methods if not getattr(g, u'user', None):
'_Globals' has no attribute 'user': exception when using an IAuthenticator on CKAN 2.8.0

I'm putting together a new deployment based on the new CKAN v2.8.0 release. I'm using ckanext-ldap as an authenticator, though it looks like this bug would apply to any authenticator plugin. This exact setup worked fine on CKAN v2.7.3.

### CKAN Version if known (or site URL)

CKAN v2.8.0
ckanext-ldap @ `ckan-upgrade-2.8.0a`

### Please describe the expected behaviour

If the IAuthenticator plugin cannot authenticate the user, it does not set `g.user`, and CKAN should run the default authenticator.

### Please describe the actual behaviour

If the IAuthenticator plugin cannot authenticate the user, it does not set `g.user`, and CKAN tries to look up `g.user` and crashes with this traceback:

```
Traceback (most recent call last):
  File "/usr/lib/ckan/venv/local/lib/python2.7/site-packages/flask/app.py", line 1982, in wsgi_app
    response = self.full_dispatch_request()
  File "/usr/lib/ckan/venv/local/lib/python2.7/site-packages/flask/app.py", line 1614, in full_dispatch_request
    rv = self.handle_user_exception(e)
  File "/usr/lib/ckan/venv/local/lib/python2.7/site-packages/flask/app.py", line 1517, in handle_user_exception
    reraise(exc_type, exc_value, tb)
  File "/usr/lib/ckan/venv/local/lib/python2.7/site-packages/flask/app.py", line 1610, in full_dispatch_request
    rv = self.preprocess_request()
  File "/usr/lib/ckan/venv/local/lib/python2.7/site-packages/flask/app.py", line 1831, in preprocess_request
    rv = func()
  File "/usr/lib/ckan/venv/src/ckan/ckan/config/middleware/flask_app.py", line 281, in ckan_before_request
    identify_user()
  File "/usr/lib/ckan/venv/src/ckan/ckan/views/__init__.py", line 101, in identify_user
    if g.user:
  File "/usr/lib/ckan/venv/local/lib/python2.7/site-packages/werkzeug/local.py", line 347, in __getattr__
    return getattr(self._get_current_object(), name)
  File "/usr/lib/ckan/venv/local/lib/python2.7/site-packages/werkzeug/local.py", line 347, in __getattr__
    return getattr(self._get_current_object(), name)
  File "/usr/lib/ckan/venv/src/ckan/ckan/config/middleware/flask_app.py", line 334, in __getattr__
    return getattr(app_globals.app_globals, name)
AttributeError: '_Globals' object has no attribute 'user'
```

### What steps can be taken to reproduce the issue?

* Install CKAN v2.8.0 as per the documented instructions
* Install a plugin that implements IAuthenticator (in this case the ckanext-ldap plugin on its 2.8.0 branch) which may not be able to authenticate the user and therefore may not set `g.user`
* Run CKAN normally
* Attempt to load any page

What is odd is that this section of code at `identify_user` in `ckan/views/__init__.py` has not changed between v2.7.3 and v2.8.0, and the way the authenticator plugin handles/sets `g.user` has not changed either. I'm guessing this is caused by a change in the way the `_Globals` object behaves when it cannot find an attribute.
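As an illustration (not part of the original report), here is a minimal, self-contained sketch of the defensive pattern the patch above applies, for the case where attribute access on the request-global object raises `AttributeError` when no plugin has set `g.user`. The `_Globals` and `LdapLikeAuthenticator` classes below are simplified stand-ins invented for this sketch, not CKAN's real classes.

```python
class _Globals(object):
    """Simplified stand-in for CKAN's request-global proxy: reading an
    attribute that was never set raises AttributeError (as observed in
    2.8.0) instead of returning None."""


class LdapLikeAuthenticator(object):
    """Hypothetical IAuthenticator-style plugin that fails to identify
    the user and therefore never sets g.user."""
    def identify(self):
        pass  # no g.user assignment on failure


g = _Globals()


def identify_user(authenticators):
    for item in authenticators:
        item.identify()
        try:
            if g.user:          # may raise AttributeError on 2.8.0
                break
        except AttributeError:  # no plugin set g.user; try the next one
            continue
    # the default identification can now run safely
    return getattr(g, 'user', None)


print(identify_user([LdapLikeAuthenticator()]))  # -> None
```

Using `getattr(g, 'user', None)` for the read, as the surrounding `identify_user` code already does a few lines later, would be an equivalent alternative to the try/except shown in the patch.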
aio-libs__aiohttp-4040
[ { "content": "import asyncio\nimport codecs\nimport io\nimport re\nimport sys\nimport traceback\nimport warnings\nfrom hashlib import md5, sha1, sha256\nfrom http.cookies import CookieError, Morsel, SimpleCookie\nfrom types import MappingProxyType, TracebackType\nfrom typing import ( # noqa\n TYPE_CHECKING,\n Any,\n Dict,\n Iterable,\n List,\n Mapping,\n Optional,\n Tuple,\n Type,\n Union,\n cast,\n)\n\nimport attr\nfrom multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy\nfrom yarl import URL\n\nfrom . import hdrs, helpers, http, multipart, payload\nfrom .abc import AbstractStreamWriter\nfrom .client_exceptions import (\n ClientConnectionError,\n ClientOSError,\n ClientResponseError,\n ContentTypeError,\n InvalidURL,\n ServerFingerprintMismatch,\n)\nfrom .formdata import FormData\nfrom .helpers import ( # noqa\n PY_36,\n BaseTimerContext,\n BasicAuth,\n HeadersMixin,\n TimerNoop,\n is_expected_content_type,\n noop,\n reify,\n set_result,\n)\nfrom .http import SERVER_SOFTWARE, HttpVersion10, HttpVersion11, StreamWriter\nfrom .log import client_logger\nfrom .streams import StreamReader # noqa\nfrom .typedefs import (\n DEFAULT_JSON_DECODER,\n JSONDecoder,\n LooseCookies,\n LooseHeaders,\n RawHeaders,\n)\n\ntry:\n import ssl\n from ssl import SSLContext\nexcept ImportError: # pragma: no cover\n ssl = None # type: ignore\n SSLContext = object # type: ignore\n\ntry:\n import cchardet as chardet\nexcept ImportError: # pragma: no cover\n import chardet\n\n\n__all__ = ('ClientRequest', 'ClientResponse', 'RequestInfo', 'Fingerprint')\n\n\nif TYPE_CHECKING: # pragma: no cover\n from .client import ClientSession # noqa\n from .connector import Connection # noqa\n from .tracing import Trace # noqa\n\n\[email protected](frozen=True, slots=True)\nclass ContentDisposition:\n type = attr.ib(type=str) # type: Optional[str]\n parameters = attr.ib(type=MappingProxyType) # type: MappingProxyType[str, str] # noqa\n filename = attr.ib(type=str) # type: Optional[str]\n\n\[email protected](frozen=True, slots=True)\nclass RequestInfo:\n url = attr.ib(type=URL)\n method = attr.ib(type=str)\n headers = attr.ib(type=CIMultiDictProxy) # type: CIMultiDictProxy[str]\n real_url = attr.ib(type=URL)\n\n @real_url.default\n def real_url_default(self) -> URL:\n return self.url\n\n\nclass Fingerprint:\n HASHFUNC_BY_DIGESTLEN = {\n 16: md5,\n 20: sha1,\n 32: sha256,\n }\n\n def __init__(self, fingerprint: bytes) -> None:\n digestlen = len(fingerprint)\n hashfunc = self.HASHFUNC_BY_DIGESTLEN.get(digestlen)\n if not hashfunc:\n raise ValueError('fingerprint has invalid length')\n elif hashfunc is md5 or hashfunc is sha1:\n raise ValueError('md5 and sha1 are insecure and '\n 'not supported. 
Use sha256.')\n self._hashfunc = hashfunc\n self._fingerprint = fingerprint\n\n @property\n def fingerprint(self) -> bytes:\n return self._fingerprint\n\n def check(self, transport: asyncio.Transport) -> None:\n if not transport.get_extra_info('sslcontext'):\n return\n sslobj = transport.get_extra_info('ssl_object')\n cert = sslobj.getpeercert(binary_form=True)\n got = self._hashfunc(cert).digest()\n if got != self._fingerprint:\n host, port, *_ = transport.get_extra_info('peername')\n raise ServerFingerprintMismatch(self._fingerprint,\n got, host, port)\n\n\nif ssl is not None:\n SSL_ALLOWED_TYPES = (ssl.SSLContext, bool, Fingerprint, type(None))\nelse: # pragma: no cover\n SSL_ALLOWED_TYPES = type(None)\n\n\[email protected](slots=True, frozen=True)\nclass ConnectionKey:\n # the key should contain an information about used proxy / TLS\n # to prevent reusing wrong connections from a pool\n host = attr.ib(type=str)\n port = attr.ib(type=int) # type: Optional[int]\n is_ssl = attr.ib(type=bool)\n ssl = attr.ib() # type: Union[SSLContext, None, bool, Fingerprint]\n proxy = attr.ib() # type: Optional[URL]\n proxy_auth = attr.ib() # type: Optional[BasicAuth]\n proxy_headers_hash = attr.ib(type=int) # type: Optional[int] # noqa # hash(CIMultiDict)\n\n\nclass ClientRequest:\n GET_METHODS = {\n hdrs.METH_GET,\n hdrs.METH_HEAD,\n hdrs.METH_OPTIONS,\n hdrs.METH_TRACE,\n }\n POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT}\n ALL_METHODS = GET_METHODS.union(POST_METHODS).union({hdrs.METH_DELETE})\n\n DEFAULT_HEADERS = {\n hdrs.ACCEPT: '*/*',\n hdrs.ACCEPT_ENCODING: 'gzip, deflate',\n }\n\n body = b''\n auth = None\n response = None\n response_class = None\n\n _writer = None # async task for streaming data\n _continue = None # waiter future for '100 Continue' response\n\n # N.B.\n # Adding __del__ method with self._writer closing doesn't make sense\n # because _writer is instance method, thus it keeps a reference to self.\n # Until writer has finished finalizer will not be called.\n\n def __init__(self, method: str, url: URL, *,\n params: Optional[Mapping[str, str]]=None,\n headers: Optional[LooseHeaders]=None,\n skip_auto_headers: Iterable[str]=frozenset(),\n data: Any=None,\n cookies: Optional[LooseCookies]=None,\n auth: Optional[BasicAuth]=None,\n version: http.HttpVersion=http.HttpVersion11,\n compress: Optional[str]=None,\n chunked: Optional[bool]=None,\n expect100: bool=False,\n loop: asyncio.AbstractEventLoop,\n response_class: Optional[Type['ClientResponse']]=None,\n proxy: Optional[URL]=None,\n proxy_auth: Optional[BasicAuth]=None,\n timer: Optional[BaseTimerContext]=None,\n session: Optional['ClientSession']=None,\n ssl: Union[SSLContext, bool, Fingerprint, None]=None,\n proxy_headers: Optional[LooseHeaders]=None,\n traces: Optional[List['Trace']]=None):\n\n assert isinstance(url, URL), url\n assert isinstance(proxy, (URL, type(None))), proxy\n # FIXME: session is None in tests only, need to fix tests\n # assert session is not None\n self._session = cast('ClientSession', session)\n if params:\n q = MultiDict(url.query)\n url2 = url.with_query(params)\n q.extend(url2.query)\n url = url.with_query(q)\n self.original_url = url\n self.url = url.with_fragment(None)\n self.method = method.upper()\n self.chunked = chunked\n self.compress = compress\n self.loop = loop\n self.length = None\n if response_class is None:\n real_response_class = ClientResponse\n else:\n real_response_class = response_class\n self.response_class = real_response_class # type: Type[ClientResponse]\n 
self._timer = timer if timer is not None else TimerNoop()\n self._ssl = ssl\n\n if loop.get_debug():\n self._source_traceback = traceback.extract_stack(sys._getframe(1))\n\n self.update_version(version)\n self.update_host(url)\n self.update_headers(headers)\n self.update_auto_headers(skip_auto_headers)\n self.update_cookies(cookies)\n self.update_content_encoding(data)\n self.update_auth(auth)\n self.update_proxy(proxy, proxy_auth, proxy_headers)\n\n self.update_body_from_data(data)\n if data or self.method not in self.GET_METHODS:\n self.update_transfer_encoding()\n self.update_expect_continue(expect100)\n if traces is None:\n traces = []\n self._traces = traces\n\n def is_ssl(self) -> bool:\n return self.url.scheme in ('https', 'wss')\n\n @property\n def ssl(self) -> Union['SSLContext', None, bool, Fingerprint]:\n return self._ssl\n\n @property\n def connection_key(self) -> ConnectionKey:\n proxy_headers = self.proxy_headers\n if proxy_headers:\n h = hash(tuple((k, v) for k, v in proxy_headers.items())) # type: Optional[int] # noqa\n else:\n h = None\n return ConnectionKey(self.host, self.port, self.is_ssl(),\n self.ssl,\n self.proxy, self.proxy_auth, h)\n\n @property\n def host(self) -> str:\n ret = self.url.host\n assert ret is not None\n return ret\n\n @property\n def port(self) -> Optional[int]:\n return self.url.port\n\n @property\n def request_info(self) -> RequestInfo:\n headers = CIMultiDictProxy(self.headers) # type: CIMultiDictProxy[str]\n return RequestInfo(self.url, self.method,\n headers, self.original_url)\n\n def update_host(self, url: URL) -> None:\n \"\"\"Update destination host, port and connection type (ssl).\"\"\"\n # get host/port\n if not url.host:\n raise InvalidURL(url)\n\n # basic auth info\n username, password = url.user, url.password\n if username:\n self.auth = helpers.BasicAuth(username, password or '')\n\n def update_version(self, version: Union[http.HttpVersion, str]) -> None:\n \"\"\"Convert request version to two elements tuple.\n\n parser HTTP version '1.1' => (1, 1)\n \"\"\"\n if isinstance(version, str):\n v = [l.strip() for l in version.split('.', 1)]\n try:\n version = http.HttpVersion(int(v[0]), int(v[1]))\n except ValueError:\n raise ValueError(\n 'Can not parse http version number: {}'\n .format(version)) from None\n self.version = version\n\n def update_headers(self, headers: Optional[LooseHeaders]) -> None:\n \"\"\"Update request headers.\"\"\"\n self.headers = CIMultiDict() # type: CIMultiDict[str]\n\n # add host\n netloc = cast(str, self.url.raw_host)\n if helpers.is_ipv6_address(netloc):\n netloc = '[{}]'.format(netloc)\n if not self.url.is_default_port():\n netloc += ':' + str(self.url.port)\n self.headers[hdrs.HOST] = netloc\n\n if headers:\n if isinstance(headers, (dict, MultiDictProxy, MultiDict)):\n headers = headers.items() # type: ignore\n\n for key, value in headers:\n # A special case for Host header\n if key.lower() == 'host':\n self.headers[key] = value\n else:\n self.headers.add(key, value)\n\n def update_auto_headers(self, skip_auto_headers: Iterable[str]) -> None:\n self.skip_auto_headers = CIMultiDict(\n (hdr, None) for hdr in sorted(skip_auto_headers))\n used_headers = self.headers.copy()\n used_headers.extend(self.skip_auto_headers) # type: ignore\n\n for hdr, val in self.DEFAULT_HEADERS.items():\n if hdr not in used_headers:\n self.headers.add(hdr, val)\n\n if hdrs.USER_AGENT not in used_headers:\n self.headers[hdrs.USER_AGENT] = SERVER_SOFTWARE\n\n def update_cookies(self, cookies: Optional[LooseCookies]) -> None:\n 
\"\"\"Update request cookies header.\"\"\"\n if not cookies:\n return\n\n c = SimpleCookie()\n if hdrs.COOKIE in self.headers:\n c.load(self.headers.get(hdrs.COOKIE, ''))\n del self.headers[hdrs.COOKIE]\n\n if isinstance(cookies, Mapping):\n iter_cookies = cookies.items()\n else:\n iter_cookies = cookies # type: ignore\n for name, value in iter_cookies:\n if isinstance(value, Morsel):\n # Preserve coded_value\n mrsl_val = value.get(value.key, Morsel())\n mrsl_val.set(value.key, value.value, value.coded_value) # type: ignore # noqa\n c[name] = mrsl_val\n else:\n c[name] = value # type: ignore\n\n self.headers[hdrs.COOKIE] = c.output(header='', sep=';').strip()\n\n def update_content_encoding(self, data: Any) -> None:\n \"\"\"Set request content encoding.\"\"\"\n if not data:\n return\n\n enc = self.headers.get(hdrs.CONTENT_ENCODING, '').lower()\n if enc:\n if self.compress:\n raise ValueError(\n 'compress can not be set '\n 'if Content-Encoding header is set')\n elif self.compress:\n if not isinstance(self.compress, str):\n self.compress = 'deflate'\n self.headers[hdrs.CONTENT_ENCODING] = self.compress\n self.chunked = True # enable chunked, no need to deal with length\n\n def update_transfer_encoding(self) -> None:\n \"\"\"Analyze transfer-encoding header.\"\"\"\n te = self.headers.get(hdrs.TRANSFER_ENCODING, '').lower()\n\n if 'chunked' in te:\n if self.chunked:\n raise ValueError(\n 'chunked can not be set '\n 'if \"Transfer-Encoding: chunked\" header is set')\n\n elif self.chunked:\n if hdrs.CONTENT_LENGTH in self.headers:\n raise ValueError(\n 'chunked can not be set '\n 'if Content-Length header is set')\n\n self.headers[hdrs.TRANSFER_ENCODING] = 'chunked'\n else:\n if hdrs.CONTENT_LENGTH not in self.headers:\n self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))\n\n def update_auth(self, auth: Optional[BasicAuth]) -> None:\n \"\"\"Set basic auth.\"\"\"\n if auth is None:\n auth = self.auth\n if auth is None:\n return\n\n if not isinstance(auth, helpers.BasicAuth):\n raise TypeError('BasicAuth() tuple is required instead')\n\n self.headers[hdrs.AUTHORIZATION] = auth.encode()\n\n def update_body_from_data(self, body: Any) -> None:\n if not body:\n return\n\n # FormData\n if isinstance(body, FormData):\n body = body()\n\n try:\n body = payload.PAYLOAD_REGISTRY.get(body, disposition=None)\n except payload.LookupError:\n body = FormData(body)()\n\n self.body = body\n\n # enable chunked encoding if needed\n if not self.chunked:\n if hdrs.CONTENT_LENGTH not in self.headers:\n size = body.size\n if size is None:\n self.chunked = True\n else:\n if hdrs.CONTENT_LENGTH not in self.headers:\n self.headers[hdrs.CONTENT_LENGTH] = str(size)\n\n # copy payload headers\n assert body.headers\n for (key, value) in body.headers.items():\n if key in self.headers:\n continue\n if key in self.skip_auto_headers:\n continue\n self.headers[key] = value\n\n def update_expect_continue(self, expect: bool=False) -> None:\n if expect:\n self.headers[hdrs.EXPECT] = '100-continue'\n elif self.headers.get(hdrs.EXPECT, '').lower() == '100-continue':\n expect = True\n\n if expect:\n self._continue = self.loop.create_future()\n\n def update_proxy(self, proxy: Optional[URL],\n proxy_auth: Optional[BasicAuth],\n proxy_headers: Optional[LooseHeaders]) -> None:\n if proxy and not proxy.scheme == 'http':\n raise ValueError(\"Only http proxies are supported\")\n if proxy_auth and not isinstance(proxy_auth, helpers.BasicAuth):\n raise ValueError(\"proxy_auth must be None or BasicAuth() tuple\")\n self.proxy = proxy\n 
self.proxy_auth = proxy_auth\n self.proxy_headers = proxy_headers\n\n def keep_alive(self) -> bool:\n if self.version < HttpVersion10:\n # keep alive not supported at all\n return False\n if self.version == HttpVersion10:\n if self.headers.get(hdrs.CONNECTION) == 'keep-alive':\n return True\n else: # no headers means we close for Http 1.0\n return False\n elif self.headers.get(hdrs.CONNECTION) == 'close':\n return False\n\n return True\n\n async def write_bytes(self, writer: AbstractStreamWriter,\n conn: 'Connection') -> None:\n \"\"\"Support coroutines that yields bytes objects.\"\"\"\n # 100 response\n if self._continue is not None:\n await writer.drain()\n await self._continue\n\n protocol = conn.protocol\n assert protocol is not None\n try:\n if isinstance(self.body, payload.Payload):\n await self.body.write(writer)\n else:\n if isinstance(self.body, (bytes, bytearray)):\n self.body = (self.body,) # type: ignore\n\n for chunk in self.body:\n await writer.write(chunk) # type: ignore\n\n await writer.write_eof()\n except OSError as exc:\n new_exc = ClientOSError(\n exc.errno,\n 'Can not write request body for %s' % self.url)\n new_exc.__context__ = exc\n new_exc.__cause__ = exc\n protocol.set_exception(new_exc)\n except asyncio.CancelledError as exc:\n if not conn.closed:\n protocol.set_exception(exc)\n except Exception as exc:\n protocol.set_exception(exc)\n finally:\n self._writer = None\n\n async def send(self, conn: 'Connection') -> 'ClientResponse':\n # Specify request target:\n # - CONNECT request must send authority form URI\n # - not CONNECT proxy must send absolute form URI\n # - most common is origin form URI\n if self.method == hdrs.METH_CONNECT:\n connect_host = self.url.raw_host\n assert connect_host is not None\n if helpers.is_ipv6_address(connect_host):\n connect_host = '[{}]'.format(connect_host)\n path = '{}:{}'.format(connect_host, self.url.port)\n elif self.proxy and not self.is_ssl():\n path = str(self.url)\n else:\n path = self.url.raw_path\n if self.url.raw_query_string:\n path += '?' 
+ self.url.raw_query_string\n\n protocol = conn.protocol\n assert protocol is not None\n writer = StreamWriter(\n protocol, self.loop,\n on_chunk_sent=self._on_chunk_request_sent\n )\n\n if self.compress:\n writer.enable_compression(self.compress)\n\n if self.chunked is not None:\n writer.enable_chunking()\n\n # set default content-type\n if (self.method in self.POST_METHODS and\n hdrs.CONTENT_TYPE not in self.skip_auto_headers and\n hdrs.CONTENT_TYPE not in self.headers):\n self.headers[hdrs.CONTENT_TYPE] = 'application/octet-stream'\n\n # set the connection header\n connection = self.headers.get(hdrs.CONNECTION)\n if not connection:\n if self.keep_alive():\n if self.version == HttpVersion10:\n connection = 'keep-alive'\n else:\n if self.version == HttpVersion11:\n connection = 'close'\n\n if connection is not None:\n self.headers[hdrs.CONNECTION] = connection\n\n # status + headers\n status_line = '{0} {1} HTTP/{2[0]}.{2[1]}'.format(\n self.method, path, self.version)\n await writer.write_headers(status_line, self.headers)\n\n self._writer = self.loop.create_task(self.write_bytes(writer, conn))\n\n response_class = self.response_class\n assert response_class is not None\n self.response = response_class(\n self.method, self.original_url,\n writer=self._writer, continue100=self._continue, timer=self._timer,\n request_info=self.request_info,\n traces=self._traces,\n loop=self.loop,\n session=self._session\n )\n return self.response\n\n async def close(self) -> None:\n if self._writer is not None:\n try:\n await self._writer\n finally:\n self._writer = None\n\n def terminate(self) -> None:\n if self._writer is not None:\n if not self.loop.is_closed():\n self._writer.cancel()\n self._writer = None\n\n async def _on_chunk_request_sent(self, chunk: bytes) -> None:\n for trace in self._traces:\n await trace.send_request_chunk_sent(chunk)\n\n\nclass ClientResponse(HeadersMixin):\n\n # from the Status-Line of the response\n version = None # HTTP-Version\n status = None # type: int # Status-Code\n reason = None # Reason-Phrase\n\n content = None # type: StreamReader # Payload stream\n _headers = None # type: CIMultiDictProxy[str] # Response headers\n _raw_headers = None # type: RawHeaders # Response raw headers\n\n _connection = None # current connection\n _source_traceback = None\n # setted up by ClientRequest after ClientResponse object creation\n # post-init stage allows to not change ctor signature\n _closed = True # to allow __del__ for non-initialized properly response\n _released = False\n\n def __init__(self, method: str, url: URL, *,\n writer: 'asyncio.Task[None]',\n continue100: Optional['asyncio.Future[bool]'],\n timer: BaseTimerContext,\n request_info: RequestInfo,\n traces: List['Trace'],\n loop: asyncio.AbstractEventLoop,\n session: 'ClientSession') -> None:\n assert isinstance(url, URL)\n super().__init__()\n\n self.method = method\n self.cookies = SimpleCookie()\n\n self._real_url = url\n self._url = url.with_fragment(None)\n self._body = None # type: Optional[bytes]\n self._writer = writer # type: Optional[asyncio.Task[None]]\n self._continue = continue100 # None by default\n self._closed = True\n self._history = () # type: Tuple[ClientResponse, ...]\n self._request_info = request_info\n self._timer = timer if timer is not None else TimerNoop()\n self._cache = {} # type: Dict[str, Any]\n self._traces = traces\n self._loop = loop\n # store a reference to session #1985\n self._session = session # type: Optional[ClientSession]\n if loop.get_debug():\n self._source_traceback = 
traceback.extract_stack(sys._getframe(1))\n\n @reify\n def url(self) -> URL:\n return self._url\n\n @reify\n def real_url(self) -> URL:\n return self._real_url\n\n @reify\n def host(self) -> str:\n assert self._url.host is not None\n return self._url.host\n\n @reify\n def headers(self) -> 'CIMultiDictProxy[str]':\n return self._headers\n\n @reify\n def raw_headers(self) -> RawHeaders:\n return self._raw_headers\n\n @reify\n def request_info(self) -> RequestInfo:\n return self._request_info\n\n @reify\n def content_disposition(self) -> Optional[ContentDisposition]:\n raw = self._headers.get(hdrs.CONTENT_DISPOSITION)\n if raw is None:\n return None\n disposition_type, params_dct = multipart.parse_content_disposition(raw)\n params = MappingProxyType(params_dct)\n filename = multipart.content_disposition_filename(params)\n return ContentDisposition(disposition_type, params, filename)\n\n def __del__(self, _warnings: Any=warnings) -> None:\n if self._closed:\n return\n\n if self._connection is not None:\n self._connection.release()\n self._cleanup_writer()\n\n if self._loop.get_debug():\n if PY_36:\n kwargs = {'source': self}\n else:\n kwargs = {}\n _warnings.warn(\"Unclosed response {!r}\".format(self),\n ResourceWarning,\n **kwargs)\n context = {'client_response': self,\n 'message': 'Unclosed response'}\n if self._source_traceback:\n context['source_traceback'] = self._source_traceback\n self._loop.call_exception_handler(context)\n\n def __repr__(self) -> str:\n out = io.StringIO()\n ascii_encodable_url = str(self.url)\n if self.reason:\n ascii_encodable_reason = self.reason.encode('ascii',\n 'backslashreplace') \\\n .decode('ascii')\n else:\n ascii_encodable_reason = self.reason\n print('<ClientResponse({}) [{} {}]>'.format(\n ascii_encodable_url, self.status, ascii_encodable_reason),\n file=out)\n print(self.headers, file=out)\n return out.getvalue()\n\n @property\n def connection(self) -> Optional['Connection']:\n return self._connection\n\n @reify\n def history(self) -> Tuple['ClientResponse', ...]:\n \"\"\"A sequence of responses, if redirects occurred.\"\"\"\n return self._history\n\n @reify\n def links(self) -> 'MultiDictProxy[MultiDictProxy[Union[str, URL]]]':\n links_str = \", \".join(self.headers.getall(\"link\", []))\n\n if not links_str:\n return MultiDictProxy(MultiDict())\n\n links = MultiDict() # type: MultiDict[MultiDictProxy[Union[str, URL]]]\n\n for val in re.split(r\",(?=\\s*<)\", links_str):\n match = re.match(r\"\\s*<(.*)>(.*)\", val)\n if match is None: # pragma: no cover\n # the check exists to suppress mypy error\n continue\n url, params_str = match.groups()\n params = params_str.split(\";\")[1:]\n\n link = MultiDict() # type: MultiDict[Union[str, URL]]\n\n for param in params:\n match = re.match(\n r\"^\\s*(\\S*)\\s*=\\s*(['\\\"]?)(.*?)(\\2)\\s*$\",\n param, re.M\n )\n if match is None: # pragma: no cover\n # the check exists to suppress mypy error\n continue\n key, _, value, _ = match.groups()\n\n link.add(key, value)\n\n key = link.get(\"rel\", url) # type: ignore\n\n link.add(\"url\", self.url.join(URL(url)))\n\n links.add(key, MultiDictProxy(link))\n\n return MultiDictProxy(links)\n\n async def start(self, connection: 'Connection') -> 'ClientResponse':\n \"\"\"Start response processing.\"\"\"\n self._closed = False\n self._protocol = connection.protocol\n self._connection = connection\n\n with self._timer:\n while True:\n # read response\n try:\n message, payload = await self._protocol.read() # type: ignore # noqa\n except http.HttpProcessingError as exc:\n raise 
ClientResponseError(\n self.request_info, self.history,\n status=exc.code,\n message=exc.message, headers=exc.headers) from exc\n\n if (message.code < 100 or\n message.code > 199 or message.code == 101):\n break\n\n if self._continue is not None:\n set_result(self._continue, True)\n self._continue = None\n\n # payload eof handler\n payload.on_eof(self._response_eof)\n\n # response status\n self.version = message.version\n self.status = message.code\n self.reason = message.reason\n\n # headers\n self._headers = message.headers # type is CIMultiDictProxy\n self._raw_headers = message.raw_headers # type is Tuple[bytes, bytes]\n\n # payload\n self.content = payload\n\n # cookies\n for hdr in self.headers.getall(hdrs.SET_COOKIE, ()):\n try:\n self.cookies.load(hdr)\n except CookieError as exc:\n client_logger.warning(\n 'Can not load response cookies: %s', exc)\n return self\n\n def _response_eof(self) -> None:\n if self._closed:\n return\n\n if self._connection is not None:\n # websocket, protocol could be None because\n # connection could be detached\n if (self._connection.protocol is not None and\n self._connection.protocol.upgraded):\n return\n\n self._connection.release()\n self._connection = None\n\n self._closed = True\n self._cleanup_writer()\n\n @property\n def closed(self) -> bool:\n return self._closed\n\n def close(self) -> None:\n if not self._released:\n self._notify_content()\n if self._closed:\n return\n\n self._closed = True\n if self._loop is None or self._loop.is_closed():\n return\n\n if self._connection is not None:\n self._connection.close()\n self._connection = None\n self._cleanup_writer()\n\n def release(self) -> Any:\n if not self._released:\n self._notify_content()\n if self._closed:\n return noop()\n\n self._closed = True\n if self._connection is not None:\n self._connection.release()\n self._connection = None\n\n self._cleanup_writer()\n return noop()\n\n def raise_for_status(self) -> None:\n if 400 <= self.status:\n # reason should always be not None for a started response\n assert self.reason is not None\n self.release()\n raise ClientResponseError(\n self.request_info,\n self.history,\n status=self.status,\n message=self.reason,\n headers=self.headers)\n\n def _cleanup_writer(self) -> None:\n if self._writer is not None:\n self._writer.cancel()\n self._writer = None\n self._session = None\n\n def _notify_content(self) -> None:\n content = self.content\n if content and content.exception() is None:\n content.set_exception(\n ClientConnectionError('Connection closed'))\n self._released = True\n\n async def wait_for_close(self) -> None:\n if self._writer is not None:\n try:\n await self._writer\n finally:\n self._writer = None\n self.release()\n\n async def read(self) -> bytes:\n \"\"\"Read response payload.\"\"\"\n if self._body is None:\n try:\n self._body = await self.content.read()\n for trace in self._traces:\n await trace.send_response_chunk_received(self._body)\n except BaseException:\n self.close()\n raise\n elif self._released:\n raise ClientConnectionError('Connection closed')\n\n return self._body\n\n def get_encoding(self) -> str:\n ctype = self.headers.get(hdrs.CONTENT_TYPE, '').lower()\n mimetype = helpers.parse_mimetype(ctype)\n\n encoding = mimetype.parameters.get('charset')\n if encoding:\n try:\n codecs.lookup(encoding)\n except LookupError:\n encoding = None\n if not encoding:\n if mimetype.type == 'application' and mimetype.subtype == 'json':\n # RFC 7159 states that the default encoding is UTF-8.\n encoding = 'utf-8'\n else:\n encoding = 
chardet.detect(self._body)['encoding']\n if not encoding:\n encoding = 'utf-8'\n\n return encoding\n\n async def text(self,\n encoding: Optional[str]=None, errors: str='strict') -> str:\n \"\"\"Read response payload and decode.\"\"\"\n if self._body is None:\n await self.read()\n\n if encoding is None:\n encoding = self.get_encoding()\n\n return self._body.decode(encoding, errors=errors) # type: ignore\n\n async def json(self, *, encoding: str=None,\n loads: JSONDecoder=DEFAULT_JSON_DECODER,\n content_type: Optional[str]='application/json') -> Any:\n \"\"\"Read and decodes JSON response.\"\"\"\n if self._body is None:\n await self.read()\n\n if content_type:\n ctype = self.headers.get(hdrs.CONTENT_TYPE, '').lower()\n if not is_expected_content_type(ctype, content_type):\n raise ContentTypeError(\n self.request_info,\n self.history,\n message=('Attempt to decode JSON with '\n 'unexpected mimetype: %s' % ctype),\n headers=self.headers)\n\n if encoding is None:\n encoding = self.get_encoding()\n\n return loads(self._body.decode(encoding)) # type: ignore\n\n async def __aenter__(self) -> 'ClientResponse':\n return self\n\n async def __aexit__(self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType]) -> None:\n # similar to _RequestContextManager, we do not need to check\n # for exceptions, response object can closes connection\n # is state is broken\n self.release()\n", "path": "aiohttp/client_reqrep.py" } ]
[ { "content": "import asyncio\nimport codecs\nimport io\nimport re\nimport sys\nimport traceback\nimport warnings\nfrom hashlib import md5, sha1, sha256\nfrom http.cookies import CookieError, Morsel, SimpleCookie\nfrom types import MappingProxyType, TracebackType\nfrom typing import ( # noqa\n TYPE_CHECKING,\n Any,\n Dict,\n Iterable,\n List,\n Mapping,\n Optional,\n Tuple,\n Type,\n Union,\n cast,\n)\n\nimport attr\nfrom multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy\nfrom yarl import URL\n\nfrom . import hdrs, helpers, http, multipart, payload\nfrom .abc import AbstractStreamWriter\nfrom .client_exceptions import (\n ClientConnectionError,\n ClientOSError,\n ClientResponseError,\n ContentTypeError,\n InvalidURL,\n ServerFingerprintMismatch,\n)\nfrom .formdata import FormData\nfrom .helpers import ( # noqa\n PY_36,\n BaseTimerContext,\n BasicAuth,\n HeadersMixin,\n TimerNoop,\n is_expected_content_type,\n noop,\n reify,\n set_result,\n)\nfrom .http import SERVER_SOFTWARE, HttpVersion10, HttpVersion11, StreamWriter\nfrom .log import client_logger\nfrom .streams import StreamReader # noqa\nfrom .typedefs import (\n DEFAULT_JSON_DECODER,\n JSONDecoder,\n LooseCookies,\n LooseHeaders,\n RawHeaders,\n)\n\ntry:\n import ssl\n from ssl import SSLContext\nexcept ImportError: # pragma: no cover\n ssl = None # type: ignore\n SSLContext = object # type: ignore\n\ntry:\n import cchardet as chardet\nexcept ImportError: # pragma: no cover\n import chardet\n\n\n__all__ = ('ClientRequest', 'ClientResponse', 'RequestInfo', 'Fingerprint')\n\n\nif TYPE_CHECKING: # pragma: no cover\n from .client import ClientSession # noqa\n from .connector import Connection # noqa\n from .tracing import Trace # noqa\n\n\[email protected](frozen=True, slots=True)\nclass ContentDisposition:\n type = attr.ib(type=str) # type: Optional[str]\n parameters = attr.ib(type=MappingProxyType) # type: MappingProxyType[str, str] # noqa\n filename = attr.ib(type=str) # type: Optional[str]\n\n\[email protected](frozen=True, slots=True)\nclass RequestInfo:\n url = attr.ib(type=URL)\n method = attr.ib(type=str)\n headers = attr.ib(type=CIMultiDictProxy) # type: CIMultiDictProxy[str]\n real_url = attr.ib(type=URL)\n\n @real_url.default\n def real_url_default(self) -> URL:\n return self.url\n\n\nclass Fingerprint:\n HASHFUNC_BY_DIGESTLEN = {\n 16: md5,\n 20: sha1,\n 32: sha256,\n }\n\n def __init__(self, fingerprint: bytes) -> None:\n digestlen = len(fingerprint)\n hashfunc = self.HASHFUNC_BY_DIGESTLEN.get(digestlen)\n if not hashfunc:\n raise ValueError('fingerprint has invalid length')\n elif hashfunc is md5 or hashfunc is sha1:\n raise ValueError('md5 and sha1 are insecure and '\n 'not supported. 
Use sha256.')\n self._hashfunc = hashfunc\n self._fingerprint = fingerprint\n\n @property\n def fingerprint(self) -> bytes:\n return self._fingerprint\n\n def check(self, transport: asyncio.Transport) -> None:\n if not transport.get_extra_info('sslcontext'):\n return\n sslobj = transport.get_extra_info('ssl_object')\n cert = sslobj.getpeercert(binary_form=True)\n got = self._hashfunc(cert).digest()\n if got != self._fingerprint:\n host, port, *_ = transport.get_extra_info('peername')\n raise ServerFingerprintMismatch(self._fingerprint,\n got, host, port)\n\n\nif ssl is not None:\n SSL_ALLOWED_TYPES = (ssl.SSLContext, bool, Fingerprint, type(None))\nelse: # pragma: no cover\n SSL_ALLOWED_TYPES = type(None)\n\n\[email protected](slots=True, frozen=True)\nclass ConnectionKey:\n # the key should contain an information about used proxy / TLS\n # to prevent reusing wrong connections from a pool\n host = attr.ib(type=str)\n port = attr.ib(type=int) # type: Optional[int]\n is_ssl = attr.ib(type=bool)\n ssl = attr.ib() # type: Union[SSLContext, None, bool, Fingerprint]\n proxy = attr.ib() # type: Optional[URL]\n proxy_auth = attr.ib() # type: Optional[BasicAuth]\n proxy_headers_hash = attr.ib(type=int) # type: Optional[int] # noqa # hash(CIMultiDict)\n\n\nclass ClientRequest:\n GET_METHODS = {\n hdrs.METH_GET,\n hdrs.METH_HEAD,\n hdrs.METH_OPTIONS,\n hdrs.METH_TRACE,\n }\n POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT}\n ALL_METHODS = GET_METHODS.union(POST_METHODS).union({hdrs.METH_DELETE})\n\n DEFAULT_HEADERS = {\n hdrs.ACCEPT: '*/*',\n hdrs.ACCEPT_ENCODING: 'gzip, deflate',\n }\n\n body = b''\n auth = None\n response = None\n response_class = None\n\n _writer = None # async task for streaming data\n _continue = None # waiter future for '100 Continue' response\n\n # N.B.\n # Adding __del__ method with self._writer closing doesn't make sense\n # because _writer is instance method, thus it keeps a reference to self.\n # Until writer has finished finalizer will not be called.\n\n def __init__(self, method: str, url: URL, *,\n params: Optional[Mapping[str, str]]=None,\n headers: Optional[LooseHeaders]=None,\n skip_auto_headers: Iterable[str]=frozenset(),\n data: Any=None,\n cookies: Optional[LooseCookies]=None,\n auth: Optional[BasicAuth]=None,\n version: http.HttpVersion=http.HttpVersion11,\n compress: Optional[str]=None,\n chunked: Optional[bool]=None,\n expect100: bool=False,\n loop: asyncio.AbstractEventLoop,\n response_class: Optional[Type['ClientResponse']]=None,\n proxy: Optional[URL]=None,\n proxy_auth: Optional[BasicAuth]=None,\n timer: Optional[BaseTimerContext]=None,\n session: Optional['ClientSession']=None,\n ssl: Union[SSLContext, bool, Fingerprint, None]=None,\n proxy_headers: Optional[LooseHeaders]=None,\n traces: Optional[List['Trace']]=None):\n\n assert isinstance(url, URL), url\n assert isinstance(proxy, (URL, type(None))), proxy\n # FIXME: session is None in tests only, need to fix tests\n # assert session is not None\n self._session = cast('ClientSession', session)\n if params:\n q = MultiDict(url.query)\n url2 = url.with_query(params)\n q.extend(url2.query)\n url = url.with_query(q)\n self.original_url = url\n self.url = url.with_fragment(None)\n self.method = method.upper()\n self.chunked = chunked\n self.compress = compress\n self.loop = loop\n self.length = None\n if response_class is None:\n real_response_class = ClientResponse\n else:\n real_response_class = response_class\n self.response_class = real_response_class # type: Type[ClientResponse]\n 
self._timer = timer if timer is not None else TimerNoop()\n self._ssl = ssl\n\n if loop.get_debug():\n self._source_traceback = traceback.extract_stack(sys._getframe(1))\n\n self.update_version(version)\n self.update_host(url)\n self.update_headers(headers)\n self.update_auto_headers(skip_auto_headers)\n self.update_cookies(cookies)\n self.update_content_encoding(data)\n self.update_auth(auth)\n self.update_proxy(proxy, proxy_auth, proxy_headers)\n\n self.update_body_from_data(data)\n if data or self.method not in self.GET_METHODS:\n self.update_transfer_encoding()\n self.update_expect_continue(expect100)\n if traces is None:\n traces = []\n self._traces = traces\n\n def is_ssl(self) -> bool:\n return self.url.scheme in ('https', 'wss')\n\n @property\n def ssl(self) -> Union['SSLContext', None, bool, Fingerprint]:\n return self._ssl\n\n @property\n def connection_key(self) -> ConnectionKey:\n proxy_headers = self.proxy_headers\n if proxy_headers:\n h = hash(tuple((k, v) for k, v in proxy_headers.items())) # type: Optional[int] # noqa\n else:\n h = None\n return ConnectionKey(self.host, self.port, self.is_ssl(),\n self.ssl,\n self.proxy, self.proxy_auth, h)\n\n @property\n def host(self) -> str:\n ret = self.url.host\n assert ret is not None\n return ret\n\n @property\n def port(self) -> Optional[int]:\n return self.url.port\n\n @property\n def request_info(self) -> RequestInfo:\n headers = CIMultiDictProxy(self.headers) # type: CIMultiDictProxy[str]\n return RequestInfo(self.url, self.method,\n headers, self.original_url)\n\n def update_host(self, url: URL) -> None:\n \"\"\"Update destination host, port and connection type (ssl).\"\"\"\n # get host/port\n if not url.host:\n raise InvalidURL(url)\n\n # basic auth info\n username, password = url.user, url.password\n if username:\n self.auth = helpers.BasicAuth(username, password or '')\n\n def update_version(self, version: Union[http.HttpVersion, str]) -> None:\n \"\"\"Convert request version to two elements tuple.\n\n parser HTTP version '1.1' => (1, 1)\n \"\"\"\n if isinstance(version, str):\n v = [l.strip() for l in version.split('.', 1)]\n try:\n version = http.HttpVersion(int(v[0]), int(v[1]))\n except ValueError:\n raise ValueError(\n 'Can not parse http version number: {}'\n .format(version)) from None\n self.version = version\n\n def update_headers(self, headers: Optional[LooseHeaders]) -> None:\n \"\"\"Update request headers.\"\"\"\n self.headers = CIMultiDict() # type: CIMultiDict[str]\n\n # add host\n netloc = cast(str, self.url.raw_host)\n if helpers.is_ipv6_address(netloc):\n netloc = '[{}]'.format(netloc)\n if self.url.port is not None and not self.url.is_default_port():\n netloc += ':' + str(self.url.port)\n self.headers[hdrs.HOST] = netloc\n\n if headers:\n if isinstance(headers, (dict, MultiDictProxy, MultiDict)):\n headers = headers.items() # type: ignore\n\n for key, value in headers:\n # A special case for Host header\n if key.lower() == 'host':\n self.headers[key] = value\n else:\n self.headers.add(key, value)\n\n def update_auto_headers(self, skip_auto_headers: Iterable[str]) -> None:\n self.skip_auto_headers = CIMultiDict(\n (hdr, None) for hdr in sorted(skip_auto_headers))\n used_headers = self.headers.copy()\n used_headers.extend(self.skip_auto_headers) # type: ignore\n\n for hdr, val in self.DEFAULT_HEADERS.items():\n if hdr not in used_headers:\n self.headers.add(hdr, val)\n\n if hdrs.USER_AGENT not in used_headers:\n self.headers[hdrs.USER_AGENT] = SERVER_SOFTWARE\n\n def update_cookies(self, cookies: 
Optional[LooseCookies]) -> None:\n \"\"\"Update request cookies header.\"\"\"\n if not cookies:\n return\n\n c = SimpleCookie()\n if hdrs.COOKIE in self.headers:\n c.load(self.headers.get(hdrs.COOKIE, ''))\n del self.headers[hdrs.COOKIE]\n\n if isinstance(cookies, Mapping):\n iter_cookies = cookies.items()\n else:\n iter_cookies = cookies # type: ignore\n for name, value in iter_cookies:\n if isinstance(value, Morsel):\n # Preserve coded_value\n mrsl_val = value.get(value.key, Morsel())\n mrsl_val.set(value.key, value.value, value.coded_value) # type: ignore # noqa\n c[name] = mrsl_val\n else:\n c[name] = value # type: ignore\n\n self.headers[hdrs.COOKIE] = c.output(header='', sep=';').strip()\n\n def update_content_encoding(self, data: Any) -> None:\n \"\"\"Set request content encoding.\"\"\"\n if not data:\n return\n\n enc = self.headers.get(hdrs.CONTENT_ENCODING, '').lower()\n if enc:\n if self.compress:\n raise ValueError(\n 'compress can not be set '\n 'if Content-Encoding header is set')\n elif self.compress:\n if not isinstance(self.compress, str):\n self.compress = 'deflate'\n self.headers[hdrs.CONTENT_ENCODING] = self.compress\n self.chunked = True # enable chunked, no need to deal with length\n\n def update_transfer_encoding(self) -> None:\n \"\"\"Analyze transfer-encoding header.\"\"\"\n te = self.headers.get(hdrs.TRANSFER_ENCODING, '').lower()\n\n if 'chunked' in te:\n if self.chunked:\n raise ValueError(\n 'chunked can not be set '\n 'if \"Transfer-Encoding: chunked\" header is set')\n\n elif self.chunked:\n if hdrs.CONTENT_LENGTH in self.headers:\n raise ValueError(\n 'chunked can not be set '\n 'if Content-Length header is set')\n\n self.headers[hdrs.TRANSFER_ENCODING] = 'chunked'\n else:\n if hdrs.CONTENT_LENGTH not in self.headers:\n self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))\n\n def update_auth(self, auth: Optional[BasicAuth]) -> None:\n \"\"\"Set basic auth.\"\"\"\n if auth is None:\n auth = self.auth\n if auth is None:\n return\n\n if not isinstance(auth, helpers.BasicAuth):\n raise TypeError('BasicAuth() tuple is required instead')\n\n self.headers[hdrs.AUTHORIZATION] = auth.encode()\n\n def update_body_from_data(self, body: Any) -> None:\n if not body:\n return\n\n # FormData\n if isinstance(body, FormData):\n body = body()\n\n try:\n body = payload.PAYLOAD_REGISTRY.get(body, disposition=None)\n except payload.LookupError:\n body = FormData(body)()\n\n self.body = body\n\n # enable chunked encoding if needed\n if not self.chunked:\n if hdrs.CONTENT_LENGTH not in self.headers:\n size = body.size\n if size is None:\n self.chunked = True\n else:\n if hdrs.CONTENT_LENGTH not in self.headers:\n self.headers[hdrs.CONTENT_LENGTH] = str(size)\n\n # copy payload headers\n assert body.headers\n for (key, value) in body.headers.items():\n if key in self.headers:\n continue\n if key in self.skip_auto_headers:\n continue\n self.headers[key] = value\n\n def update_expect_continue(self, expect: bool=False) -> None:\n if expect:\n self.headers[hdrs.EXPECT] = '100-continue'\n elif self.headers.get(hdrs.EXPECT, '').lower() == '100-continue':\n expect = True\n\n if expect:\n self._continue = self.loop.create_future()\n\n def update_proxy(self, proxy: Optional[URL],\n proxy_auth: Optional[BasicAuth],\n proxy_headers: Optional[LooseHeaders]) -> None:\n if proxy and not proxy.scheme == 'http':\n raise ValueError(\"Only http proxies are supported\")\n if proxy_auth and not isinstance(proxy_auth, helpers.BasicAuth):\n raise ValueError(\"proxy_auth must be None or BasicAuth() 
tuple\")\n self.proxy = proxy\n self.proxy_auth = proxy_auth\n self.proxy_headers = proxy_headers\n\n def keep_alive(self) -> bool:\n if self.version < HttpVersion10:\n # keep alive not supported at all\n return False\n if self.version == HttpVersion10:\n if self.headers.get(hdrs.CONNECTION) == 'keep-alive':\n return True\n else: # no headers means we close for Http 1.0\n return False\n elif self.headers.get(hdrs.CONNECTION) == 'close':\n return False\n\n return True\n\n async def write_bytes(self, writer: AbstractStreamWriter,\n conn: 'Connection') -> None:\n \"\"\"Support coroutines that yields bytes objects.\"\"\"\n # 100 response\n if self._continue is not None:\n await writer.drain()\n await self._continue\n\n protocol = conn.protocol\n assert protocol is not None\n try:\n if isinstance(self.body, payload.Payload):\n await self.body.write(writer)\n else:\n if isinstance(self.body, (bytes, bytearray)):\n self.body = (self.body,) # type: ignore\n\n for chunk in self.body:\n await writer.write(chunk) # type: ignore\n\n await writer.write_eof()\n except OSError as exc:\n new_exc = ClientOSError(\n exc.errno,\n 'Can not write request body for %s' % self.url)\n new_exc.__context__ = exc\n new_exc.__cause__ = exc\n protocol.set_exception(new_exc)\n except asyncio.CancelledError as exc:\n if not conn.closed:\n protocol.set_exception(exc)\n except Exception as exc:\n protocol.set_exception(exc)\n finally:\n self._writer = None\n\n async def send(self, conn: 'Connection') -> 'ClientResponse':\n # Specify request target:\n # - CONNECT request must send authority form URI\n # - not CONNECT proxy must send absolute form URI\n # - most common is origin form URI\n if self.method == hdrs.METH_CONNECT:\n connect_host = self.url.raw_host\n assert connect_host is not None\n if helpers.is_ipv6_address(connect_host):\n connect_host = '[{}]'.format(connect_host)\n path = '{}:{}'.format(connect_host, self.url.port)\n elif self.proxy and not self.is_ssl():\n path = str(self.url)\n else:\n path = self.url.raw_path\n if self.url.raw_query_string:\n path += '?' 
+ self.url.raw_query_string\n\n protocol = conn.protocol\n assert protocol is not None\n writer = StreamWriter(\n protocol, self.loop,\n on_chunk_sent=self._on_chunk_request_sent\n )\n\n if self.compress:\n writer.enable_compression(self.compress)\n\n if self.chunked is not None:\n writer.enable_chunking()\n\n # set default content-type\n if (self.method in self.POST_METHODS and\n hdrs.CONTENT_TYPE not in self.skip_auto_headers and\n hdrs.CONTENT_TYPE not in self.headers):\n self.headers[hdrs.CONTENT_TYPE] = 'application/octet-stream'\n\n # set the connection header\n connection = self.headers.get(hdrs.CONNECTION)\n if not connection:\n if self.keep_alive():\n if self.version == HttpVersion10:\n connection = 'keep-alive'\n else:\n if self.version == HttpVersion11:\n connection = 'close'\n\n if connection is not None:\n self.headers[hdrs.CONNECTION] = connection\n\n # status + headers\n status_line = '{0} {1} HTTP/{2[0]}.{2[1]}'.format(\n self.method, path, self.version)\n await writer.write_headers(status_line, self.headers)\n\n self._writer = self.loop.create_task(self.write_bytes(writer, conn))\n\n response_class = self.response_class\n assert response_class is not None\n self.response = response_class(\n self.method, self.original_url,\n writer=self._writer, continue100=self._continue, timer=self._timer,\n request_info=self.request_info,\n traces=self._traces,\n loop=self.loop,\n session=self._session\n )\n return self.response\n\n async def close(self) -> None:\n if self._writer is not None:\n try:\n await self._writer\n finally:\n self._writer = None\n\n def terminate(self) -> None:\n if self._writer is not None:\n if not self.loop.is_closed():\n self._writer.cancel()\n self._writer = None\n\n async def _on_chunk_request_sent(self, chunk: bytes) -> None:\n for trace in self._traces:\n await trace.send_request_chunk_sent(chunk)\n\n\nclass ClientResponse(HeadersMixin):\n\n # from the Status-Line of the response\n version = None # HTTP-Version\n status = None # type: int # Status-Code\n reason = None # Reason-Phrase\n\n content = None # type: StreamReader # Payload stream\n _headers = None # type: CIMultiDictProxy[str] # Response headers\n _raw_headers = None # type: RawHeaders # Response raw headers\n\n _connection = None # current connection\n _source_traceback = None\n # setted up by ClientRequest after ClientResponse object creation\n # post-init stage allows to not change ctor signature\n _closed = True # to allow __del__ for non-initialized properly response\n _released = False\n\n def __init__(self, method: str, url: URL, *,\n writer: 'asyncio.Task[None]',\n continue100: Optional['asyncio.Future[bool]'],\n timer: BaseTimerContext,\n request_info: RequestInfo,\n traces: List['Trace'],\n loop: asyncio.AbstractEventLoop,\n session: 'ClientSession') -> None:\n assert isinstance(url, URL)\n super().__init__()\n\n self.method = method\n self.cookies = SimpleCookie()\n\n self._real_url = url\n self._url = url.with_fragment(None)\n self._body = None # type: Optional[bytes]\n self._writer = writer # type: Optional[asyncio.Task[None]]\n self._continue = continue100 # None by default\n self._closed = True\n self._history = () # type: Tuple[ClientResponse, ...]\n self._request_info = request_info\n self._timer = timer if timer is not None else TimerNoop()\n self._cache = {} # type: Dict[str, Any]\n self._traces = traces\n self._loop = loop\n # store a reference to session #1985\n self._session = session # type: Optional[ClientSession]\n if loop.get_debug():\n self._source_traceback = 
traceback.extract_stack(sys._getframe(1))\n\n @reify\n def url(self) -> URL:\n return self._url\n\n @reify\n def real_url(self) -> URL:\n return self._real_url\n\n @reify\n def host(self) -> str:\n assert self._url.host is not None\n return self._url.host\n\n @reify\n def headers(self) -> 'CIMultiDictProxy[str]':\n return self._headers\n\n @reify\n def raw_headers(self) -> RawHeaders:\n return self._raw_headers\n\n @reify\n def request_info(self) -> RequestInfo:\n return self._request_info\n\n @reify\n def content_disposition(self) -> Optional[ContentDisposition]:\n raw = self._headers.get(hdrs.CONTENT_DISPOSITION)\n if raw is None:\n return None\n disposition_type, params_dct = multipart.parse_content_disposition(raw)\n params = MappingProxyType(params_dct)\n filename = multipart.content_disposition_filename(params)\n return ContentDisposition(disposition_type, params, filename)\n\n def __del__(self, _warnings: Any=warnings) -> None:\n if self._closed:\n return\n\n if self._connection is not None:\n self._connection.release()\n self._cleanup_writer()\n\n if self._loop.get_debug():\n if PY_36:\n kwargs = {'source': self}\n else:\n kwargs = {}\n _warnings.warn(\"Unclosed response {!r}\".format(self),\n ResourceWarning,\n **kwargs)\n context = {'client_response': self,\n 'message': 'Unclosed response'}\n if self._source_traceback:\n context['source_traceback'] = self._source_traceback\n self._loop.call_exception_handler(context)\n\n def __repr__(self) -> str:\n out = io.StringIO()\n ascii_encodable_url = str(self.url)\n if self.reason:\n ascii_encodable_reason = self.reason.encode('ascii',\n 'backslashreplace') \\\n .decode('ascii')\n else:\n ascii_encodable_reason = self.reason\n print('<ClientResponse({}) [{} {}]>'.format(\n ascii_encodable_url, self.status, ascii_encodable_reason),\n file=out)\n print(self.headers, file=out)\n return out.getvalue()\n\n @property\n def connection(self) -> Optional['Connection']:\n return self._connection\n\n @reify\n def history(self) -> Tuple['ClientResponse', ...]:\n \"\"\"A sequence of responses, if redirects occurred.\"\"\"\n return self._history\n\n @reify\n def links(self) -> 'MultiDictProxy[MultiDictProxy[Union[str, URL]]]':\n links_str = \", \".join(self.headers.getall(\"link\", []))\n\n if not links_str:\n return MultiDictProxy(MultiDict())\n\n links = MultiDict() # type: MultiDict[MultiDictProxy[Union[str, URL]]]\n\n for val in re.split(r\",(?=\\s*<)\", links_str):\n match = re.match(r\"\\s*<(.*)>(.*)\", val)\n if match is None: # pragma: no cover\n # the check exists to suppress mypy error\n continue\n url, params_str = match.groups()\n params = params_str.split(\";\")[1:]\n\n link = MultiDict() # type: MultiDict[Union[str, URL]]\n\n for param in params:\n match = re.match(\n r\"^\\s*(\\S*)\\s*=\\s*(['\\\"]?)(.*?)(\\2)\\s*$\",\n param, re.M\n )\n if match is None: # pragma: no cover\n # the check exists to suppress mypy error\n continue\n key, _, value, _ = match.groups()\n\n link.add(key, value)\n\n key = link.get(\"rel\", url) # type: ignore\n\n link.add(\"url\", self.url.join(URL(url)))\n\n links.add(key, MultiDictProxy(link))\n\n return MultiDictProxy(links)\n\n async def start(self, connection: 'Connection') -> 'ClientResponse':\n \"\"\"Start response processing.\"\"\"\n self._closed = False\n self._protocol = connection.protocol\n self._connection = connection\n\n with self._timer:\n while True:\n # read response\n try:\n message, payload = await self._protocol.read() # type: ignore # noqa\n except http.HttpProcessingError as exc:\n raise 
ClientResponseError(\n self.request_info, self.history,\n status=exc.code,\n message=exc.message, headers=exc.headers) from exc\n\n if (message.code < 100 or\n message.code > 199 or message.code == 101):\n break\n\n if self._continue is not None:\n set_result(self._continue, True)\n self._continue = None\n\n # payload eof handler\n payload.on_eof(self._response_eof)\n\n # response status\n self.version = message.version\n self.status = message.code\n self.reason = message.reason\n\n # headers\n self._headers = message.headers # type is CIMultiDictProxy\n self._raw_headers = message.raw_headers # type is Tuple[bytes, bytes]\n\n # payload\n self.content = payload\n\n # cookies\n for hdr in self.headers.getall(hdrs.SET_COOKIE, ()):\n try:\n self.cookies.load(hdr)\n except CookieError as exc:\n client_logger.warning(\n 'Can not load response cookies: %s', exc)\n return self\n\n def _response_eof(self) -> None:\n if self._closed:\n return\n\n if self._connection is not None:\n # websocket, protocol could be None because\n # connection could be detached\n if (self._connection.protocol is not None and\n self._connection.protocol.upgraded):\n return\n\n self._connection.release()\n self._connection = None\n\n self._closed = True\n self._cleanup_writer()\n\n @property\n def closed(self) -> bool:\n return self._closed\n\n def close(self) -> None:\n if not self._released:\n self._notify_content()\n if self._closed:\n return\n\n self._closed = True\n if self._loop is None or self._loop.is_closed():\n return\n\n if self._connection is not None:\n self._connection.close()\n self._connection = None\n self._cleanup_writer()\n\n def release(self) -> Any:\n if not self._released:\n self._notify_content()\n if self._closed:\n return noop()\n\n self._closed = True\n if self._connection is not None:\n self._connection.release()\n self._connection = None\n\n self._cleanup_writer()\n return noop()\n\n def raise_for_status(self) -> None:\n if 400 <= self.status:\n # reason should always be not None for a started response\n assert self.reason is not None\n self.release()\n raise ClientResponseError(\n self.request_info,\n self.history,\n status=self.status,\n message=self.reason,\n headers=self.headers)\n\n def _cleanup_writer(self) -> None:\n if self._writer is not None:\n self._writer.cancel()\n self._writer = None\n self._session = None\n\n def _notify_content(self) -> None:\n content = self.content\n if content and content.exception() is None:\n content.set_exception(\n ClientConnectionError('Connection closed'))\n self._released = True\n\n async def wait_for_close(self) -> None:\n if self._writer is not None:\n try:\n await self._writer\n finally:\n self._writer = None\n self.release()\n\n async def read(self) -> bytes:\n \"\"\"Read response payload.\"\"\"\n if self._body is None:\n try:\n self._body = await self.content.read()\n for trace in self._traces:\n await trace.send_response_chunk_received(self._body)\n except BaseException:\n self.close()\n raise\n elif self._released:\n raise ClientConnectionError('Connection closed')\n\n return self._body\n\n def get_encoding(self) -> str:\n ctype = self.headers.get(hdrs.CONTENT_TYPE, '').lower()\n mimetype = helpers.parse_mimetype(ctype)\n\n encoding = mimetype.parameters.get('charset')\n if encoding:\n try:\n codecs.lookup(encoding)\n except LookupError:\n encoding = None\n if not encoding:\n if mimetype.type == 'application' and mimetype.subtype == 'json':\n # RFC 7159 states that the default encoding is UTF-8.\n encoding = 'utf-8'\n else:\n encoding = 
chardet.detect(self._body)['encoding']\n if not encoding:\n encoding = 'utf-8'\n\n return encoding\n\n async def text(self,\n encoding: Optional[str]=None, errors: str='strict') -> str:\n \"\"\"Read response payload and decode.\"\"\"\n if self._body is None:\n await self.read()\n\n if encoding is None:\n encoding = self.get_encoding()\n\n return self._body.decode(encoding, errors=errors) # type: ignore\n\n async def json(self, *, encoding: str=None,\n loads: JSONDecoder=DEFAULT_JSON_DECODER,\n content_type: Optional[str]='application/json') -> Any:\n \"\"\"Read and decodes JSON response.\"\"\"\n if self._body is None:\n await self.read()\n\n if content_type:\n ctype = self.headers.get(hdrs.CONTENT_TYPE, '').lower()\n if not is_expected_content_type(ctype, content_type):\n raise ContentTypeError(\n self.request_info,\n self.history,\n message=('Attempt to decode JSON with '\n 'unexpected mimetype: %s' % ctype),\n headers=self.headers)\n\n if encoding is None:\n encoding = self.get_encoding()\n\n return loads(self._body.decode(encoding)) # type: ignore\n\n async def __aenter__(self) -> 'ClientResponse':\n return self\n\n async def __aexit__(self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType]) -> None:\n # similar to _RequestContextManager, we do not need to check\n # for exceptions, response object can closes connection\n # is state is broken\n self.release()\n", "path": "aiohttp/client_reqrep.py" } ]
diff --git a/CHANGES/4039.bugfix b/CHANGES/4039.bugfix
new file mode 100644
index 00000000000..27c4cdc9682
--- /dev/null
+++ b/CHANGES/4039.bugfix
@@ -0,0 +1 @@
+For URLs like "unix://localhost/..." set Host HTTP header to "localhost" instead of "localhost:None".
diff --git a/aiohttp/client_reqrep.py b/aiohttp/client_reqrep.py
index 05183e139de..8ff444d8531 100644
--- a/aiohttp/client_reqrep.py
+++ b/aiohttp/client_reqrep.py
@@ -315,7 +315,7 @@ def update_headers(self, headers: Optional[LooseHeaders]) -> None:
         netloc = cast(str, self.url.raw_host)
         if helpers.is_ipv6_address(netloc):
             netloc = '[{}]'.format(netloc)
-        if not self.url.is_default_port():
+        if self.url.port is not None and not self.url.is_default_port():
             netloc += ':' + str(self.url.port)
         self.headers[hdrs.HOST] = netloc
 
diff --git a/tests/test_client_request.py b/tests/test_client_request.py
index 7dd32173f4b..7b5882de29f 100644
--- a/tests/test_client_request.py
+++ b/tests/test_client_request.py
@@ -199,6 +199,11 @@ def test_host_port_nondefault_wss(make_request) -> None:
     assert req.is_ssl()
 
 
+def test_host_port_none_port(make_request) -> None:
+    req = make_request('get', 'unix://localhost/path')
+    assert req.headers['Host'] == 'localhost'
+
+
 def test_host_port_err(make_request) -> None:
     with pytest.raises(ValueError):
         make_request('get', 'http://python.org:123e/')
Client-sent Host header can include port as "None"

## Long story short

<!-- Please describe your problem and why the fix is important. -->

When the client forms the Host header, it is possible for it to include the port as "None". This came up for me when using `aiodocker` to try to connect to the Docker API container attach websocket endpoint, which used a URL of the form "unix://localhost/..." and let to a "Host" header of "localhost:None", triggering a 400 error from docker with a message like:

parse ws://localhost:None/v1.35/containers/CONTAINER_ID/attach/ws?stdin=1&stdout=0&stderr=0&stream=1: invalid port ":None" after host

## Expected behaviour

<!-- What is the behaviour you expect? -->

At least, not to send "None" as a port number for the Host header. According to [RFC 7230 Section 5.4](https://tools.ietf.org/html/rfc7230#section-5.4):

> If the authority component is missing or undefined for the target URI, then a client MUST send a Host header field with an empty field-value.

So perhaps it should be possible for the `aiohttp` client to get and recognize such a URI and send a blank Host header field. At the moment though, I think, it doesn't seem possible to send such an "authority"-less URL to `ws_connect` nor does there currently exist a conditional path for the Host header construction to make a blank Host header field: [client_reqrep.py lines 314-320](https://github.com/aio-libs/aiohttp/blob/21b062199ff8da1a8d48b262f3d75fb616cc275f/aiohttp/client_reqrep.py#L314-L320)

## Actual behaviour

<!-- What's actually happening? -->

The Host header includes the string "None" as the port when making requests whose URL registers as not `is_default_port()` but has no port defined, e.g. `unix://localhost/path/to/endpoint`.

## Steps to reproduce

<!-- Please describe steps to reproduce the issue. If you have a script that does that please include it here within markdown code markup -->

This occurred for me while using the `aiodocker` package to attach to `stdin` of a running container. A sort of silly example server/client that displays the behavior is as follows:

```python
from aiohttp import web
from asyncio import sleep, create_task
import aiohttp

SOCK_PATH = '/tmp/example.sock'

async def hello(request):
    print('Host: '+request.headers['Host'])
    return web.Response()

async def make_request():
    await sleep(1)  # Let the server become available.
    conn = aiohttp.UnixConnector(path=SOCK_PATH)
    async with aiohttp.ClientSession(connector=conn) as session:
        async with session.get('unix://localhost/'):
            pass  # Produces a Host of "localhost:None"
        async with session.get('http://localhost/'):
            pass  # Produces a Host of "localhost"

async def schedule_request(_):
    create_task(make_request())

app = web.Application()
app.add_routes([web.get('/', hello)])
app.on_startup.append(schedule_request)
web.run_app(app, path=SOCK_PATH)
```

Output:

```
======== Running on http://unix:/tmp/example.sock: ========
(Press CTRL+C to quit)
Host: localhost:None
Host: localhost
```

## Your environment

<!-- Describe the environment you have that lead to your issue.
     This includes aiohttp version, OS, proxy server and other bits that are related to your case.

     IMPORTANT: aiohttp is both server framework and client library.
     For getting rid of confusing please put 'server', 'client' or 'both' word here.
-->

* Debian 9
* Python 3.7.4
* aiohttp 3.5.4
* aiodocker 0.14.0
* Docker 19.03.2-ce

BTW the specific thing that I think make this appear where it didn't before was a security update to Go that make URL parsing more strict: https://github.com/golang/go/issues?q=milestone%3AGo1.12.8
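To make the behaviour in the record above concrete, here is a minimal sketch (not part of the original issue or patch) of the netloc/Host construction, assuming `yarl` is installed. For a `unix://` URL, `URL.port` is `None` while `is_default_port()` is `False`, which is exactly the combination the patched guard handles; the function name `build_netloc` is only illustrative.

```python
# Minimal sketch of the Host/netloc construction, assuming yarl is available.
# The guard mirrors the patched condition; the old condition ("not is_default_port()")
# appended the port even when it was None, producing "localhost:None" for unix:// URLs.
from yarl import URL


def build_netloc(url: URL) -> str:
    netloc = url.raw_host or ''
    # Only append the port when it is actually known and not the scheme default.
    if url.port is not None and not url.is_default_port():
        netloc += ':' + str(url.port)
    return netloc


print(build_netloc(URL('unix://localhost/path')))   # localhost  (was localhost:None)
print(build_netloc(URL('http://localhost:8080/')))  # localhost:8080
print(build_netloc(URL('http://localhost/')))       # localhost
```

Checking `port is not None` rather than special-casing the `unix` scheme keeps the fix generic: any scheme without a registered default port gets a bare hostname, which matches RFC 7230's intent for URIs with no usable authority port.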
aws-cloudformation__cfn-lint-2466
[ { "content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport os\nimport logging\nfrom datetime import datetime\nimport importlib\nimport traceback\nfrom typing import Any, Dict, List, Optional, Tuple, Union\nfrom cfnlint.exceptions import DuplicateRuleError\nimport cfnlint.helpers\nimport cfnlint.rules.custom\nfrom cfnlint.decode.node import TemplateAttributeError\nfrom cfnlint.template import Template\n\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef matching(match_type: Any):\n \"\"\"Does Logging for match functions\"\"\"\n\n def decorator(match_function):\n \"\"\"The Actual Decorator\"\"\"\n\n def wrapper(self, filename, cfn, *args, **kwargs):\n \"\"\"Wrapper\"\"\"\n matches = []\n\n if not getattr(self, match_type):\n return []\n\n if match_type == 'match_resource_properties':\n if args[1] not in self.resource_property_types:\n return []\n elif match_type == 'match_resource_sub_properties':\n if args[1] not in self.resource_sub_property_types:\n return []\n\n start = datetime.now()\n LOGGER.debug('Starting match function for rule %s at %s', self.id, start)\n # pylint: disable=E1102\n results = match_function(self, filename, cfn, *args, **kwargs)\n LOGGER.debug(\n 'Complete match function for rule %s at %s. Ran in %s',\n self.id,\n datetime.now(),\n datetime.now() - start,\n )\n LOGGER.debug('Results from rule %s are %s: ', self.id, results)\n\n if results:\n for result in results:\n error_rule = self\n if hasattr(result, 'rule'):\n error_rule = result.rule\n linenumbers: Union[Tuple[int, int, int, int], None] = None\n if hasattr(result, 'location'):\n linenumbers = result.location\n else:\n linenumbers = cfn.get_location_yaml(cfn.template, result.path)\n if linenumbers:\n matches.append(\n Match(\n linenumbers[0] + 1,\n linenumbers[1] + 1,\n linenumbers[2] + 1,\n linenumbers[3] + 1,\n filename,\n error_rule,\n result.message,\n result,\n )\n )\n else:\n matches.append(\n Match(\n 1, 1, 1, 1, filename, error_rule, result.message, result\n )\n )\n\n return matches\n\n return wrapper\n\n return decorator\n\n\nclass CloudFormationLintRule:\n \"\"\"CloudFormation linter rules\"\"\"\n\n id: str = ''\n shortdesc: str = ''\n description: str = ''\n source_url: str = ''\n tags: List[str] = []\n experimental: bool = False\n child_rules: Dict[str, Any] = {}\n\n logger = logging.getLogger(__name__)\n\n def __init__(self):\n self.resource_property_types = []\n self.resource_sub_property_types = []\n self.config = {} # `-X E3012:strict=false`... 
Show more\n self.config_definition = {}\n\n def __repr__(self):\n return f'{self.id}: {self.shortdesc}'\n\n @property\n def severity(self):\n \"\"\"Severity level\"\"\"\n levels = {\n 'I': 'informational',\n 'E': 'error',\n 'W': 'warning',\n }\n return levels.get(self.id[0].upper(), 'unknown')\n\n def verbose(self):\n \"\"\"Verbose output\"\"\"\n return f'{self.id}: {self.shortdesc}\\n{self.description}'\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n\n def is_enabled(\n self,\n include_experimental=False,\n ignore_rules=None,\n include_rules=None,\n mandatory_rules=None,\n ):\n \"\"\"Is the rule enabled based on the configuration\"\"\"\n ignore_rules = ignore_rules or []\n include_rules = include_rules or []\n mandatory_rules = mandatory_rules or []\n\n # Evaluate experimental rules\n if self.experimental and not include_experimental:\n return False\n\n # Evaluate includes first:\n include_filter = False\n for include_rule in include_rules:\n if self.id.startswith(include_rule):\n include_filter = True\n if not include_filter:\n return False\n\n # Enable mandatory rules without checking for if they are ignored\n for mandatory_rule in mandatory_rules:\n if self.id.startswith(mandatory_rule):\n return True\n\n # Allowing ignoring of rules based on prefix to ignore checks\n for ignore_rule in ignore_rules:\n if self.id.startswith(ignore_rule) and ignore_rule:\n return False\n\n return True\n\n def configure(self, configs=None):\n \"\"\"Set the configuration\"\"\"\n\n # set defaults\n if isinstance(self.config_definition, dict):\n for config_name, config_values in self.config_definition.items():\n self.config[config_name] = config_values['default']\n\n if isinstance(configs, dict):\n for key, value in configs.items():\n if key in self.config_definition:\n if self.config_definition[key]['type'] == 'boolean':\n self.config[key] = cfnlint.helpers.bool_compare(value, True)\n elif self.config_definition[key]['type'] == 'string':\n self.config[key] = str(value)\n elif self.config_definition[key]['type'] == 'integer':\n self.config[key] = int(value)\n elif self.config_definition[key]['type'] == 'list':\n self.config[key] = []\n for l_value in value:\n if self.config_definition[key]['itemtype'] == 'boolean':\n self.config[key].append(\n cfnlint.helpers.bool_compare(l_value, True)\n )\n elif self.config_definition[key]['itemtype'] == 'string':\n self.config[key].append(str(l_value))\n elif self.config_definition[key]['itemtype'] == 'integer':\n self.config[key].append(int(l_value))\n\n match = None\n match_resource_properties = None\n match_resource_sub_properties = None\n\n @matching('match')\n # pylint: disable=W0613\n def matchall(self, filename, cfn):\n \"\"\"Match the entire file\"\"\"\n return self.match(cfn) # pylint: disable=E1102\n\n @matching('match_resource_properties')\n # pylint: disable=W0613\n def matchall_resource_properties(\n self, filename, cfn, resource_properties, property_type, path\n ):\n \"\"\"Check for resource properties type\"\"\"\n return self.match_resource_properties( # pylint: disable=E1102\n resource_properties, property_type, path, cfn\n )\n\n @matching('match_resource_sub_properties')\n # pylint: disable=W0613\n def matchall_resource_sub_properties(\n self, filename, cfn, resource_properties, property_type, path\n ):\n \"\"\"Check for resource properties type\"\"\"\n return self.match_resource_sub_properties( # pylint: disable=E1102\n resource_properties, property_type, path, cfn\n )\n\n\n# pylint: disable=too-many-instance-attributes\nclass 
RulesCollection:\n \"\"\"Collection of rules\"\"\"\n\n def __init__(\n self,\n ignore_rules=None,\n include_rules=None,\n configure_rules=None,\n include_experimental=False,\n mandatory_rules=None,\n ):\n self.rules: Dict[str, CloudFormationLintRule] = {}\n self.all_rules: Dict[str, CloudFormationLintRule] = {}\n self.used_rules = set()\n\n self.configure(\n ignore_rules=ignore_rules,\n include_rules=include_rules,\n configure_rules=configure_rules,\n include_experimental=include_experimental,\n mandatory_rules=mandatory_rules,\n )\n\n def configure(\n self,\n ignore_rules=None,\n include_rules=None,\n configure_rules=None,\n include_experimental=False,\n mandatory_rules=None,\n ):\n self.rules: Dict[str, CloudFormationLintRule] = {}\n # Whether \"experimental\" rules should be added\n self.include_experimental = include_experimental\n\n # Make Ignore Rules not required\n self.ignore_rules = ignore_rules or []\n self.include_rules = include_rules or []\n self.mandatory_rules = mandatory_rules or []\n self.configure_rules = configure_rules or {}\n # by default include 'W' and 'E'\n # 'I' has to be included manually for backwards compabitility\n # Have to add W, E here because integrations don't use config\n for default_rule in ['W', 'E']:\n if default_rule not in self.include_rules:\n self.include_rules.extend([default_rule])\n\n for rule in self.all_rules.values():\n self.__register(rule)\n\n def __register(self, rule: CloudFormationLintRule):\n \"\"\"Register and configure the rule\"\"\"\n if self.is_rule_enabled(rule):\n self.used_rules.add(rule.id)\n self.rules[rule.id] = rule\n rule.configure(self.configure_rules.get(rule.id, None))\n\n def register(self, rule: CloudFormationLintRule):\n \"\"\"Register rules\"\"\"\n # Some rules are inheritited to limit code re-use.\n # These rules have no rule ID so we filter this out\n if rule.id != '':\n if rule.id in self.all_rules:\n raise DuplicateRuleError(rule_id=rule.id)\n self.all_rules[rule.id] = rule\n self.__register(rule)\n\n def __iter__(self):\n return iter(self.rules.values())\n\n def __len__(self):\n return len(self.rules.keys())\n\n def extend(self, more):\n \"\"\"Extend rules\"\"\"\n for rule in more:\n self.register(rule)\n\n def __repr__(self):\n return '\\n'.join(\n [rule.verbose() for rule in sorted(self.rules, key=lambda x: x.id)]\n )\n\n def is_rule_enabled(self, rule: CloudFormationLintRule):\n \"\"\"Checks if an individual rule is valid\"\"\"\n return rule.is_enabled(\n self.include_experimental,\n self.ignore_rules,\n self.include_rules,\n self.mandatory_rules,\n )\n\n # pylint: disable=inconsistent-return-statements\n def run_check(self, check, filename, rule_id, *args):\n \"\"\"Run a check\"\"\"\n try:\n return check(*args)\n except TemplateAttributeError as err:\n LOGGER.debug(str(err))\n return []\n except Exception as err: # pylint: disable=W0703\n if self.is_rule_enabled(RuleError()):\n # In debug mode, print the error include complete stack trace\n if LOGGER.getEffectiveLevel() == logging.DEBUG:\n error_message = traceback.format_exc()\n else:\n error_message = str(err)\n message = 'Unknown exception while processing rule {}: {}'\n return [\n Match(\n 1,\n 1,\n 1,\n 1,\n filename,\n RuleError(),\n message.format(rule_id, error_message),\n )\n ]\n\n def resource_property(\n self, filename, cfn, path, properties, resource_type, property_type\n ):\n \"\"\"Run loops in resource checks for embedded properties\"\"\"\n matches = []\n property_spec = cfnlint.helpers.RESOURCE_SPECS['us-east-1'].get('PropertyTypes')\n if 
property_type == 'Tag':\n property_spec_name = 'Tag'\n else:\n property_spec_name = f'{resource_type}.{property_type}'\n\n if property_spec_name in property_spec:\n for rule in self.rules.values():\n if isinstance(properties, dict):\n if len(properties) == 1:\n for k, _ in properties.items():\n if k != 'Fn::If':\n matches.extend(\n self.run_check(\n rule.matchall_resource_sub_properties,\n filename,\n rule.id,\n filename,\n cfn,\n properties,\n property_spec_name,\n path,\n )\n )\n else:\n matches.extend(\n self.run_check(\n rule.matchall_resource_sub_properties,\n filename,\n rule.id,\n filename,\n cfn,\n properties,\n property_spec_name,\n path,\n )\n )\n else:\n matches.extend(\n self.run_check(\n rule.matchall_resource_sub_properties,\n filename,\n rule.id,\n filename,\n cfn,\n properties,\n property_spec_name,\n path,\n )\n )\n\n resource_spec_properties = property_spec.get(property_spec_name, {}).get(\n 'Properties'\n )\n if not resource_spec_properties:\n if property_spec.get(property_spec_name, {}).get('Type') == 'List':\n if isinstance(properties, list):\n property_type = property_spec.get(property_spec_name, {}).get(\n 'ItemType'\n )\n for index, item in enumerate(properties):\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n path[:] + [index],\n item,\n resource_type,\n property_type,\n )\n )\n return matches\n if isinstance(properties, dict):\n for resource_property, resource_property_value in properties.items():\n property_path = path[:] + [resource_property]\n resource_spec_property = resource_spec_properties.get(\n resource_property, {}\n )\n if resource_property not in resource_spec_properties:\n if resource_property == 'Fn::If':\n if isinstance(resource_property_value, list):\n if len(resource_property_value) == 3:\n for index, c_value in enumerate(\n resource_property_value[1:]\n ):\n if isinstance(c_value, list):\n for s_i, c_l_value in enumerate(c_value):\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n property_path[:]\n + [index + 1]\n + [s_i],\n c_l_value,\n resource_type,\n property_type,\n )\n )\n else:\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n property_path[:] + [index + 1],\n c_value,\n resource_type,\n property_type,\n )\n )\n continue\n if resource_spec_property.get(\n 'Type'\n ) == 'List' and not resource_spec_properties.get(\n 'PrimitiveItemType'\n ):\n if isinstance(resource_property_value, (list)):\n for index, value in enumerate(resource_property_value):\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n property_path[:] + [index],\n value,\n resource_type,\n resource_spec_property.get('ItemType'),\n )\n )\n elif resource_spec_property.get('Type'):\n if isinstance(resource_property_value, (dict)):\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n property_path,\n resource_property_value,\n resource_type,\n resource_spec_property.get('Type'),\n )\n )\n\n return matches\n\n def run_resource(self, filename, cfn, resource_type, resource_properties, path):\n \"\"\"Run loops in resource checks for embedded properties\"\"\"\n matches = []\n resource_spec = cfnlint.helpers.RESOURCE_SPECS['us-east-1'].get('ResourceTypes')\n if resource_properties is not None and resource_type in resource_spec:\n resource_spec_properties = resource_spec.get(resource_type, {}).get(\n 'Properties'\n )\n items_safe = resource_properties.items_safe(path, type_t=(dict))\n for resource_properties_safe, path_safe in items_safe:\n for (\n resource_property,\n resource_property_value,\n ) in 
resource_properties_safe.items():\n resource_spec_property = resource_spec_properties.get(\n resource_property, {}\n )\n if resource_spec_property.get(\n 'Type'\n ) == 'List' and not resource_spec_properties.get(\n 'PrimitiveItemType'\n ):\n if isinstance(resource_property_value, (list)):\n for index, value in enumerate(resource_property_value):\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n path_safe[:] + [resource_property, index],\n value,\n resource_type,\n resource_spec_property.get('ItemType'),\n )\n )\n elif resource_spec_property.get('Type'):\n if isinstance(resource_property_value, (dict)):\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n path_safe[:] + [resource_property],\n resource_property_value,\n resource_type,\n resource_spec_property.get('Type'),\n )\n )\n\n return matches\n\n def run(self, filename: Optional[str], cfn: Template):\n \"\"\"Run rules\"\"\"\n matches = []\n for rule in self.rules.values():\n rule.initialize(cfn)\n\n for rule in self.rules.values():\n for key in rule.child_rules.keys():\n rule.child_rules[key] = self.rules.get(key)\n\n for rule in self.rules.values():\n matches.extend(\n self.run_check(rule.matchall, filename, rule.id, filename, cfn)\n )\n\n for resource_name, resource_attributes in cfn.get_resources().items():\n resource_type = resource_attributes.get('Type')\n resource_properties = resource_attributes.get('Properties')\n if isinstance(resource_type, str) and isinstance(resource_properties, dict):\n path = ['Resources', resource_name, 'Properties']\n for rule in self.rules.values():\n matches.extend(\n self.run_check(\n rule.matchall_resource_properties,\n filename,\n rule.id,\n filename,\n cfn,\n resource_properties,\n resource_type,\n path,\n )\n )\n\n matches.extend(\n self.run_resource(\n filename, cfn, resource_type, resource_properties, path\n )\n )\n\n return matches\n\n def create_from_module(self, modpath):\n \"\"\"Create rules from a module import path\"\"\"\n mod = importlib.import_module(modpath)\n self.extend(cfnlint.helpers.create_rules(mod))\n\n def create_from_directory(self, rulesdir):\n \"\"\"Create rules from directory\"\"\"\n result = []\n if rulesdir != '':\n result = cfnlint.helpers.load_plugins(os.path.expanduser(rulesdir))\n self.extend(result)\n\n def create_from_custom_rules_file(self, custom_rules_file):\n \"\"\"Create rules from custom rules file\"\"\"\n custom_rules = []\n if custom_rules_file:\n with open(custom_rules_file, encoding='utf-8') as customRules:\n line_number = 1\n for line in customRules:\n LOGGER.debug('Processing Custom Rule Line %d', line_number)\n custom_rule = cfnlint.rules.custom.make_rule(line, line_number)\n if custom_rule:\n custom_rules.append(custom_rule)\n line_number += 1\n\n self.extend(custom_rules)\n\n\nclass RuleMatch:\n \"\"\"Rules Error\"\"\"\n\n def __init__(self, path, message, **kwargs):\n \"\"\"Init\"\"\"\n self.path = path\n self.path_string = '/'.join(map(str, path))\n self.message = message\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n def __eq__(self, item):\n \"\"\"Override unique\"\"\"\n return (self.path, self.message) == (item.path, item.message)\n\n def __hash__(self):\n \"\"\"Hash for comparisons\"\"\"\n return hash((self.path, self.message))\n\n\nclass Match: # pylint: disable=R0902\n \"\"\"Match Classes\"\"\"\n\n def __init__(\n self,\n linenumber,\n columnnumber,\n linenumberend,\n columnnumberend,\n filename,\n rule,\n message=None,\n rulematch_obj=None,\n ):\n \"\"\"Init\"\"\"\n self.linenumber = linenumber\n 
'''Starting line number of the region this match spans'''\n self.columnnumber = columnnumber\n '''Starting line number of the region this match spans'''\n self.linenumberend = linenumberend\n '''Ending line number of the region this match spans'''\n self.columnnumberend = columnnumberend\n '''Ending column number of the region this match spans'''\n self.filename = filename\n '''Name of the filename associated with this match, or None if there is no such file'''\n self.rule = rule\n '''The rule of this match'''\n self.message = message # or rule.shortdesc\n '''The message of this match'''\n if rulematch_obj:\n for k, v in vars(rulematch_obj).items():\n if not hasattr(self, k):\n setattr(self, k, v)\n\n def __repr__(self):\n \"\"\"Represent\"\"\"\n file_str = self.filename + ':' if self.filename else ''\n return f'[{self.rule}] ({self.message}) matched {file_str}{self.linenumber}'\n\n def __eq__(self, item):\n \"\"\"Override equal to compare matches\"\"\"\n return (self.linenumber, self.columnnumber, self.rule.id, self.message) == (\n item.linenumber,\n item.columnnumber,\n item.rule.id,\n item.message,\n )\n\n\nclass ParseError(CloudFormationLintRule):\n \"\"\"Parse Lint Rule\"\"\"\n\n id = 'E0000'\n shortdesc = 'Parsing error found when parsing the template'\n description = 'Checks for JSON/YAML formatting errors in your template'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'\n tags = ['base']\n\n\nclass TransformError(CloudFormationLintRule):\n \"\"\"Transform Lint Rule\"\"\"\n\n id = 'E0001'\n shortdesc = 'Error found when transforming the template'\n description = 'Errors found when performing transformation on the template'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'\n tags = ['base', 'transform']\n\n\nclass RuleError(CloudFormationLintRule):\n \"\"\"Rule processing Error\"\"\"\n\n id = 'E0002'\n shortdesc = 'Error processing rule on the template'\n description = 'Errors found when processing a rule on the template'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'\n tags = ['base', 'rule']\n", "path": "src/cfnlint/rules/__init__.py" } ]
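A minimal sketch of how the rule API in the file above (CloudFormationLintRule, RuleMatch, RulesCollection) is meant to be used, assuming cfn-lint is installed; the rule id E9001 and the Metadata check are invented for illustration, everything else follows the source shown.

```python
# Minimal sketch of a custom rule built on the classes defined above; the rule
# id E9001 and the Metadata check are invented for illustration.
from cfnlint.rules import CloudFormationLintRule, RuleMatch, RulesCollection


class ExampleMetadataRule(CloudFormationLintRule):
    """Report resources that carry no Metadata block (illustrative only)"""

    id = 'E9001'  # hypothetical id; the 'E' prefix keeps it enabled by default
    shortdesc = 'Resources should define Metadata'
    description = 'Example rule that reports resources without a Metadata key'
    tags = ['example']

    def match(self, cfn):
        matches = []
        for name, resource in cfn.get_resources().items():
            if 'Metadata' not in resource:
                matches.append(
                    RuleMatch(['Resources', name], 'Resource has no Metadata block')
                )
        return matches


rules = RulesCollection()
rules.register(ExampleMetadataRule())  # a duplicate id would raise DuplicateRuleError
```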
[ { "content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport os\nimport logging\nfrom datetime import datetime\nimport importlib\nimport traceback\nfrom typing import Any, Dict, List, Optional, Tuple, Union\nfrom cfnlint.exceptions import DuplicateRuleError\nimport cfnlint.helpers\nimport cfnlint.rules.custom\nfrom cfnlint.decode.node import TemplateAttributeError\nfrom cfnlint.template import Template\n\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef matching(match_type: Any):\n \"\"\"Does Logging for match functions\"\"\"\n\n def decorator(match_function):\n \"\"\"The Actual Decorator\"\"\"\n\n def wrapper(self, filename, cfn, *args, **kwargs):\n \"\"\"Wrapper\"\"\"\n matches = []\n\n if not getattr(self, match_type):\n return []\n\n if match_type == 'match_resource_properties':\n if args[1] not in self.resource_property_types:\n return []\n elif match_type == 'match_resource_sub_properties':\n if args[1] not in self.resource_sub_property_types:\n return []\n\n start = datetime.now()\n LOGGER.debug('Starting match function for rule %s at %s', self.id, start)\n # pylint: disable=E1102\n results = match_function(self, filename, cfn, *args, **kwargs)\n LOGGER.debug(\n 'Complete match function for rule %s at %s. Ran in %s',\n self.id,\n datetime.now(),\n datetime.now() - start,\n )\n LOGGER.debug('Results from rule %s are %s: ', self.id, results)\n\n if results:\n for result in results:\n error_rule = self\n if hasattr(result, 'rule'):\n error_rule = result.rule\n linenumbers: Union[Tuple[int, int, int, int], None] = None\n if hasattr(result, 'location'):\n linenumbers = result.location\n else:\n linenumbers = cfn.get_location_yaml(cfn.template, result.path)\n if linenumbers:\n matches.append(\n Match(\n linenumbers[0] + 1,\n linenumbers[1] + 1,\n linenumbers[2] + 1,\n linenumbers[3] + 1,\n filename,\n error_rule,\n result.message,\n result,\n )\n )\n else:\n matches.append(\n Match(\n 1, 1, 1, 1, filename, error_rule, result.message, result\n )\n )\n\n return matches\n\n return wrapper\n\n return decorator\n\n\nclass CloudFormationLintRule:\n \"\"\"CloudFormation linter rules\"\"\"\n\n id: str = ''\n shortdesc: str = ''\n description: str = ''\n source_url: str = ''\n tags: List[str] = []\n experimental: bool = False\n child_rules: Dict[str, Any] = {}\n\n logger = logging.getLogger(__name__)\n\n def __init__(self):\n self.resource_property_types = []\n self.resource_sub_property_types = []\n self.config = {} # `-X E3012:strict=false`... 
Show more\n self.config_definition = {}\n\n def __repr__(self):\n return f'{self.id}: {self.shortdesc}'\n\n @property\n def severity(self):\n \"\"\"Severity level\"\"\"\n levels = {\n 'I': 'informational',\n 'E': 'error',\n 'W': 'warning',\n }\n return levels.get(self.id[0].upper(), 'unknown')\n\n def verbose(self):\n \"\"\"Verbose output\"\"\"\n return f'{self.id}: {self.shortdesc}\\n{self.description}'\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n\n def is_enabled(\n self,\n include_experimental=False,\n ignore_rules=None,\n include_rules=None,\n mandatory_rules=None,\n ):\n \"\"\"Is the rule enabled based on the configuration\"\"\"\n ignore_rules = ignore_rules or []\n include_rules = include_rules or []\n mandatory_rules = mandatory_rules or []\n\n # Evaluate experimental rules\n if self.experimental and not include_experimental:\n return False\n\n # Evaluate includes first:\n include_filter = False\n for include_rule in include_rules:\n if self.id.startswith(include_rule):\n include_filter = True\n if not include_filter:\n return False\n\n # Enable mandatory rules without checking for if they are ignored\n for mandatory_rule in mandatory_rules:\n if self.id.startswith(mandatory_rule):\n return True\n\n # Allowing ignoring of rules based on prefix to ignore checks\n for ignore_rule in ignore_rules:\n if self.id.startswith(ignore_rule) and ignore_rule:\n return False\n\n return True\n\n def configure(self, configs=None):\n \"\"\"Set the configuration\"\"\"\n\n # set defaults\n if isinstance(self.config_definition, dict):\n for config_name, config_values in self.config_definition.items():\n self.config[config_name] = config_values['default']\n\n if isinstance(configs, dict):\n for key, value in configs.items():\n if key in self.config_definition:\n if self.config_definition[key]['type'] == 'boolean':\n self.config[key] = cfnlint.helpers.bool_compare(value, True)\n elif self.config_definition[key]['type'] == 'string':\n self.config[key] = str(value)\n elif self.config_definition[key]['type'] == 'integer':\n self.config[key] = int(value)\n elif self.config_definition[key]['type'] == 'list':\n self.config[key] = []\n for l_value in value:\n if self.config_definition[key]['itemtype'] == 'boolean':\n self.config[key].append(\n cfnlint.helpers.bool_compare(l_value, True)\n )\n elif self.config_definition[key]['itemtype'] == 'string':\n self.config[key].append(str(l_value))\n elif self.config_definition[key]['itemtype'] == 'integer':\n self.config[key].append(int(l_value))\n\n match = None\n match_resource_properties = None\n match_resource_sub_properties = None\n\n @matching('match')\n # pylint: disable=W0613\n def matchall(self, filename, cfn):\n \"\"\"Match the entire file\"\"\"\n return self.match(cfn) # pylint: disable=E1102\n\n @matching('match_resource_properties')\n # pylint: disable=W0613\n def matchall_resource_properties(\n self, filename, cfn, resource_properties, property_type, path\n ):\n \"\"\"Check for resource properties type\"\"\"\n return self.match_resource_properties( # pylint: disable=E1102\n resource_properties, property_type, path, cfn\n )\n\n @matching('match_resource_sub_properties')\n # pylint: disable=W0613\n def matchall_resource_sub_properties(\n self, filename, cfn, resource_properties, property_type, path\n ):\n \"\"\"Check for resource properties type\"\"\"\n return self.match_resource_sub_properties( # pylint: disable=E1102\n resource_properties, property_type, path, cfn\n )\n\n\n# pylint: disable=too-many-instance-attributes\nclass 
RulesCollection:\n \"\"\"Collection of rules\"\"\"\n\n def __init__(\n self,\n ignore_rules=None,\n include_rules=None,\n configure_rules=None,\n include_experimental=False,\n mandatory_rules=None,\n ):\n self.rules: Dict[str, CloudFormationLintRule] = {}\n self.all_rules: Dict[str, CloudFormationLintRule] = {}\n self.used_rules = set()\n\n self.configure(\n ignore_rules=ignore_rules,\n include_rules=include_rules,\n configure_rules=configure_rules,\n include_experimental=include_experimental,\n mandatory_rules=mandatory_rules,\n )\n\n def configure(\n self,\n ignore_rules=None,\n include_rules=None,\n configure_rules=None,\n include_experimental=False,\n mandatory_rules=None,\n ):\n self.rules: Dict[str, CloudFormationLintRule] = {}\n # Whether \"experimental\" rules should be added\n self.include_experimental = include_experimental\n\n # Make Ignore Rules not required\n self.ignore_rules = ignore_rules or []\n self.include_rules = include_rules or []\n self.mandatory_rules = mandatory_rules or []\n self.configure_rules = configure_rules or {}\n # by default include 'W' and 'E'\n # 'I' has to be included manually for backwards compabitility\n # Have to add W, E here because integrations don't use config\n for default_rule in ['W', 'E']:\n if default_rule not in self.include_rules:\n self.include_rules.extend([default_rule])\n\n for rule in self.all_rules.values():\n self.__register(rule)\n\n def __register(self, rule: CloudFormationLintRule):\n \"\"\"Register and configure the rule\"\"\"\n if self.is_rule_enabled(rule):\n self.used_rules.add(rule.id)\n self.rules[rule.id] = rule\n rule.configure(self.configure_rules.get(rule.id, None))\n\n def register(self, rule: CloudFormationLintRule):\n \"\"\"Register rules\"\"\"\n # Some rules are inheritited to limit code re-use.\n # These rules have no rule ID so we filter this out\n if rule.id != '':\n if rule.id in self.all_rules:\n raise DuplicateRuleError(rule_id=rule.id)\n self.all_rules[rule.id] = rule\n self.__register(rule)\n\n def __iter__(self):\n return iter(self.rules.values())\n\n def __len__(self):\n return len(self.rules.keys())\n\n def extend(self, more):\n \"\"\"Extend rules\"\"\"\n for rule in more:\n self.register(rule)\n\n def __repr__(self):\n return '\\n'.join([self.rules[id].verbose() for id in sorted(self.rules)])\n\n def is_rule_enabled(self, rule: CloudFormationLintRule):\n \"\"\"Checks if an individual rule is valid\"\"\"\n return rule.is_enabled(\n self.include_experimental,\n self.ignore_rules,\n self.include_rules,\n self.mandatory_rules,\n )\n\n # pylint: disable=inconsistent-return-statements\n def run_check(self, check, filename, rule_id, *args):\n \"\"\"Run a check\"\"\"\n try:\n return check(*args)\n except TemplateAttributeError as err:\n LOGGER.debug(str(err))\n return []\n except Exception as err: # pylint: disable=W0703\n if self.is_rule_enabled(RuleError()):\n # In debug mode, print the error include complete stack trace\n if LOGGER.getEffectiveLevel() == logging.DEBUG:\n error_message = traceback.format_exc()\n else:\n error_message = str(err)\n message = 'Unknown exception while processing rule {}: {}'\n return [\n Match(\n 1,\n 1,\n 1,\n 1,\n filename,\n RuleError(),\n message.format(rule_id, error_message),\n )\n ]\n\n def resource_property(\n self, filename, cfn, path, properties, resource_type, property_type\n ):\n \"\"\"Run loops in resource checks for embedded properties\"\"\"\n matches = []\n property_spec = cfnlint.helpers.RESOURCE_SPECS['us-east-1'].get('PropertyTypes')\n if property_type == 
'Tag':\n property_spec_name = 'Tag'\n else:\n property_spec_name = f'{resource_type}.{property_type}'\n\n if property_spec_name in property_spec:\n for rule in self.rules.values():\n if isinstance(properties, dict):\n if len(properties) == 1:\n for k, _ in properties.items():\n if k != 'Fn::If':\n matches.extend(\n self.run_check(\n rule.matchall_resource_sub_properties,\n filename,\n rule.id,\n filename,\n cfn,\n properties,\n property_spec_name,\n path,\n )\n )\n else:\n matches.extend(\n self.run_check(\n rule.matchall_resource_sub_properties,\n filename,\n rule.id,\n filename,\n cfn,\n properties,\n property_spec_name,\n path,\n )\n )\n else:\n matches.extend(\n self.run_check(\n rule.matchall_resource_sub_properties,\n filename,\n rule.id,\n filename,\n cfn,\n properties,\n property_spec_name,\n path,\n )\n )\n\n resource_spec_properties = property_spec.get(property_spec_name, {}).get(\n 'Properties'\n )\n if not resource_spec_properties:\n if property_spec.get(property_spec_name, {}).get('Type') == 'List':\n if isinstance(properties, list):\n property_type = property_spec.get(property_spec_name, {}).get(\n 'ItemType'\n )\n for index, item in enumerate(properties):\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n path[:] + [index],\n item,\n resource_type,\n property_type,\n )\n )\n return matches\n if isinstance(properties, dict):\n for resource_property, resource_property_value in properties.items():\n property_path = path[:] + [resource_property]\n resource_spec_property = resource_spec_properties.get(\n resource_property, {}\n )\n if resource_property not in resource_spec_properties:\n if resource_property == 'Fn::If':\n if isinstance(resource_property_value, list):\n if len(resource_property_value) == 3:\n for index, c_value in enumerate(\n resource_property_value[1:]\n ):\n if isinstance(c_value, list):\n for s_i, c_l_value in enumerate(c_value):\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n property_path[:]\n + [index + 1]\n + [s_i],\n c_l_value,\n resource_type,\n property_type,\n )\n )\n else:\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n property_path[:] + [index + 1],\n c_value,\n resource_type,\n property_type,\n )\n )\n continue\n if resource_spec_property.get(\n 'Type'\n ) == 'List' and not resource_spec_properties.get(\n 'PrimitiveItemType'\n ):\n if isinstance(resource_property_value, (list)):\n for index, value in enumerate(resource_property_value):\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n property_path[:] + [index],\n value,\n resource_type,\n resource_spec_property.get('ItemType'),\n )\n )\n elif resource_spec_property.get('Type'):\n if isinstance(resource_property_value, (dict)):\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n property_path,\n resource_property_value,\n resource_type,\n resource_spec_property.get('Type'),\n )\n )\n\n return matches\n\n def run_resource(self, filename, cfn, resource_type, resource_properties, path):\n \"\"\"Run loops in resource checks for embedded properties\"\"\"\n matches = []\n resource_spec = cfnlint.helpers.RESOURCE_SPECS['us-east-1'].get('ResourceTypes')\n if resource_properties is not None and resource_type in resource_spec:\n resource_spec_properties = resource_spec.get(resource_type, {}).get(\n 'Properties'\n )\n items_safe = resource_properties.items_safe(path, type_t=(dict))\n for resource_properties_safe, path_safe in items_safe:\n for (\n resource_property,\n resource_property_value,\n ) in resource_properties_safe.items():\n 
resource_spec_property = resource_spec_properties.get(\n resource_property, {}\n )\n if resource_spec_property.get(\n 'Type'\n ) == 'List' and not resource_spec_properties.get(\n 'PrimitiveItemType'\n ):\n if isinstance(resource_property_value, (list)):\n for index, value in enumerate(resource_property_value):\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n path_safe[:] + [resource_property, index],\n value,\n resource_type,\n resource_spec_property.get('ItemType'),\n )\n )\n elif resource_spec_property.get('Type'):\n if isinstance(resource_property_value, (dict)):\n matches.extend(\n self.resource_property(\n filename,\n cfn,\n path_safe[:] + [resource_property],\n resource_property_value,\n resource_type,\n resource_spec_property.get('Type'),\n )\n )\n\n return matches\n\n def run(self, filename: Optional[str], cfn: Template):\n \"\"\"Run rules\"\"\"\n matches = []\n for rule in self.rules.values():\n rule.initialize(cfn)\n\n for rule in self.rules.values():\n for key in rule.child_rules.keys():\n rule.child_rules[key] = self.rules.get(key)\n\n for rule in self.rules.values():\n matches.extend(\n self.run_check(rule.matchall, filename, rule.id, filename, cfn)\n )\n\n for resource_name, resource_attributes in cfn.get_resources().items():\n resource_type = resource_attributes.get('Type')\n resource_properties = resource_attributes.get('Properties')\n if isinstance(resource_type, str) and isinstance(resource_properties, dict):\n path = ['Resources', resource_name, 'Properties']\n for rule in self.rules.values():\n matches.extend(\n self.run_check(\n rule.matchall_resource_properties,\n filename,\n rule.id,\n filename,\n cfn,\n resource_properties,\n resource_type,\n path,\n )\n )\n\n matches.extend(\n self.run_resource(\n filename, cfn, resource_type, resource_properties, path\n )\n )\n\n return matches\n\n def create_from_module(self, modpath):\n \"\"\"Create rules from a module import path\"\"\"\n mod = importlib.import_module(modpath)\n self.extend(cfnlint.helpers.create_rules(mod))\n\n def create_from_directory(self, rulesdir):\n \"\"\"Create rules from directory\"\"\"\n result = []\n if rulesdir != '':\n result = cfnlint.helpers.load_plugins(os.path.expanduser(rulesdir))\n self.extend(result)\n\n def create_from_custom_rules_file(self, custom_rules_file):\n \"\"\"Create rules from custom rules file\"\"\"\n custom_rules = []\n if custom_rules_file:\n with open(custom_rules_file, encoding='utf-8') as customRules:\n line_number = 1\n for line in customRules:\n LOGGER.debug('Processing Custom Rule Line %d', line_number)\n custom_rule = cfnlint.rules.custom.make_rule(line, line_number)\n if custom_rule:\n custom_rules.append(custom_rule)\n line_number += 1\n\n self.extend(custom_rules)\n\n\nclass RuleMatch:\n \"\"\"Rules Error\"\"\"\n\n def __init__(self, path, message, **kwargs):\n \"\"\"Init\"\"\"\n self.path = path\n self.path_string = '/'.join(map(str, path))\n self.message = message\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n def __eq__(self, item):\n \"\"\"Override unique\"\"\"\n return (self.path, self.message) == (item.path, item.message)\n\n def __hash__(self):\n \"\"\"Hash for comparisons\"\"\"\n return hash((self.path, self.message))\n\n\nclass Match: # pylint: disable=R0902\n \"\"\"Match Classes\"\"\"\n\n def __init__(\n self,\n linenumber,\n columnnumber,\n linenumberend,\n columnnumberend,\n filename,\n rule,\n message=None,\n rulematch_obj=None,\n ):\n \"\"\"Init\"\"\"\n self.linenumber = linenumber\n '''Starting line number of the region this 
match spans'''\n self.columnnumber = columnnumber\n '''Starting line number of the region this match spans'''\n self.linenumberend = linenumberend\n '''Ending line number of the region this match spans'''\n self.columnnumberend = columnnumberend\n '''Ending column number of the region this match spans'''\n self.filename = filename\n '''Name of the filename associated with this match, or None if there is no such file'''\n self.rule = rule\n '''The rule of this match'''\n self.message = message # or rule.shortdesc\n '''The message of this match'''\n if rulematch_obj:\n for k, v in vars(rulematch_obj).items():\n if not hasattr(self, k):\n setattr(self, k, v)\n\n def __repr__(self):\n \"\"\"Represent\"\"\"\n file_str = self.filename + ':' if self.filename else ''\n return f'[{self.rule}] ({self.message}) matched {file_str}{self.linenumber}'\n\n def __eq__(self, item):\n \"\"\"Override equal to compare matches\"\"\"\n return (self.linenumber, self.columnnumber, self.rule.id, self.message) == (\n item.linenumber,\n item.columnnumber,\n item.rule.id,\n item.message,\n )\n\n\nclass ParseError(CloudFormationLintRule):\n \"\"\"Parse Lint Rule\"\"\"\n\n id = 'E0000'\n shortdesc = 'Parsing error found when parsing the template'\n description = 'Checks for JSON/YAML formatting errors in your template'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'\n tags = ['base']\n\n\nclass TransformError(CloudFormationLintRule):\n \"\"\"Transform Lint Rule\"\"\"\n\n id = 'E0001'\n shortdesc = 'Error found when transforming the template'\n description = 'Errors found when performing transformation on the template'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'\n tags = ['base', 'transform']\n\n\nclass RuleError(CloudFormationLintRule):\n \"\"\"Rule processing Error\"\"\"\n\n id = 'E0002'\n shortdesc = 'Error processing rule on the template'\n description = 'Errors found when processing a rule on the template'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'\n tags = ['base', 'rule']\n", "path": "src/cfnlint/rules/__init__.py" } ]
diff --git a/src/cfnlint/rules/__init__.py b/src/cfnlint/rules/__init__.py
index 34e8618524..5353bdb956 100644
--- a/src/cfnlint/rules/__init__.py
+++ b/src/cfnlint/rules/__init__.py
@@ -302,9 +302,7 @@ def extend(self, more):
             self.register(rule)
 
     def __repr__(self):
-        return '\n'.join(
-            [rule.verbose() for rule in sorted(self.rules, key=lambda x: x.id)]
-        )
+        return '\n'.join([self.rules[id].verbose() for id in sorted(self.rules)])
 
     def is_rule_enabled(self, rule: CloudFormationLintRule):
         """Checks if an individual rule is valid"""
diff --git a/test/unit/module/test_rules_collections.py b/test/unit/module/test_rules_collections.py
index fb680bcf30..8ce0ff5ed4 100644
--- a/test/unit/module/test_rules_collections.py
+++ b/test/unit/module/test_rules_collections.py
@@ -2,6 +2,7 @@
 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 SPDX-License-Identifier: MIT-0
 """
+import re
 from cfnlint.exceptions import DuplicateRuleError
 from test.testlib.testcase import BaseTestCase
 from cfnlint.template import Template
@@ -231,6 +232,26 @@ class rule1_e0000(CloudFormationLintRule):
         rules = RulesCollection()
         self.assertRaises(DuplicateRuleError, rules.extend, rules_to_add)
 
+    def test_repr(self):
+        class rule0_e0000(CloudFormationLintRule):
+            """Error Rule"""
+            id = 'E0000'
+            shortdesc = 'Rule A'
+            description = 'First rule'
+        class rule1_e0001(CloudFormationLintRule):
+            """Error Rule"""
+            id = 'E0001'
+            shortdesc = 'Rule B'
+            description = 'Second rule'
+        rules = RulesCollection()
+        rules.extend([rule0_e0000(), rule1_e0001()])
+
+        retval = repr(rules)
+        pattern = r"\AE0000: Rule A\nFirst rule\nE0001: Rule B\nSecond rule\Z"
+        match = re.match(pattern, retval)
+        assert match, f"{retval} does not match {pattern}"
+
+
 class TestCreateFromModule(BaseTestCase):
     """Test loading a rules collection from a module"""
 
Cannot list all the rules

### CloudFormation Lint Version

0.70.0

### What operating system are you using?

Windows

### Describe the bug

`cfn-lint --list-rules` throws the error below (username is masked):

```
Traceback (most recent call last):
  File "C:\Users\${username}\AppData\Local\Programs\Python\Python39\lib\runpy.py", line 197, in _run_module_as_main
    return _run_code(code, main_globals, None,
  File "C:\Users\${username}\AppData\Local\Programs\Python\Python39\lib\runpy.py", line 87, in _run_code
    exec(code, run_globals)
  File "C:\Users\${username}\AppData\Local\Programs\Python\Python39\Scripts\cfn-lint.exe\__main__.py", line 7, in <module>
  File "C:\Users\${username}\AppData\Local\Programs\Python\Python39\lib\site-packages\cfnlint\__main__.py", line 38, in main
    (args, filenames, formatter) = cfnlint.core.get_args_filenames(sys.argv[1:])
  File "C:\Users\${username}\AppData\Local\Programs\Python\Python39\lib\site-packages\cfnlint\core.py", line 235, in get_args_filenames
    print(rules)
  File "C:\Users\${username}\AppData\Local\Programs\Python\Python39\lib\site-packages\cfnlint\rules\__init__.py", line 306, in __repr__
    [rule.verbose() for rule in sorted(self.rules, key=lambda x: x.id)]
  File "C:\Users\${username}\AppData\Local\Programs\Python\Python39\lib\site-packages\cfnlint\rules\__init__.py", line 306, in <lambda>
    [rule.verbose() for rule in sorted(self.rules, key=lambda x: x.id)]
AttributeError: 'str' object has no attribute 'id'
```

### Expected behavior

Show a list of all the rules.

### Reproduction template

This is not a bug of linting.
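The traceback reduces to one Python fact: iterating a dict, which is what `sorted(self.rules, ...)` does, yields its string keys rather than the rule objects stored as values. A small sketch of the failing expression and of the corrected form from the patch above; the two built-in rules are used only as convenient stand-ins.

```python
# Reproduces the root cause of the AttributeError and shows the fixed form;
# the two built-in rules below are just convenient stand-ins.
from cfnlint.rules import ParseError, TransformError

rules = {'E0000': ParseError(), 'E0001': TransformError()}  # like RulesCollection.rules

# Broken: sorted() iterates the dict's string keys, so x is 'E0000', not a rule.
try:
    sorted(rules, key=lambda x: x.id)
except AttributeError as err:
    print(err)  # 'str' object has no attribute 'id'

# Fixed (as in the patch above): sort the ids, then look each rule up by id.
print('\n'.join(rules[rule_id].verbose() for rule_id in sorted(rules)))
```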
flask-admin__flask-admin-434
[ { "content": "from wtforms import fields\n\nfrom peewee import (DateTimeField, DateField, TimeField,\n PrimaryKeyField, ForeignKeyField, BaseModel)\n\nfrom wtfpeewee.orm import ModelConverter, model_form\n\nfrom flask.ext.admin import form\nfrom flask.ext.admin._compat import iteritems, itervalues\nfrom flask.ext.admin.model.form import InlineFormAdmin, InlineModelConverterBase\nfrom flask.ext.admin.model.fields import InlineModelFormField, InlineFieldList, AjaxSelectField\n\nfrom .tools import get_primary_key\nfrom .ajax import create_ajax_loader\n\n\nclass InlineModelFormList(InlineFieldList):\n \"\"\"\n Customized inline model form list field.\n \"\"\"\n\n form_field_type = InlineModelFormField\n \"\"\"\n Form field type. Override to use custom field for each inline form\n \"\"\"\n\n def __init__(self, form, model, prop, inline_view, **kwargs):\n self.form = form\n self.model = model\n self.prop = prop\n self.inline_view = inline_view\n\n self._pk = get_primary_key(model)\n super(InlineModelFormList, self).__init__(self.form_field_type(form, self._pk), **kwargs)\n\n def display_row_controls(self, field):\n return field.get_pk() is not None\n\n # *** bryhoyt removed def process() entirely, because I believe it was buggy\n # (but worked because another part of the code had a complimentary bug)\n # and I'm not sure why it was necessary anyway.\n # If we want it back in, we need to fix the following bogus query:\n # self.model.select().where(attr == data).execute() # `data` is not an ID, and only happened to be so because we patched it in in .contribute() below\n #\n # For reference:\n # .process() introduced in https://github.com/mrjoes/flask-admin/commit/2845e4b28cb40b25e2bf544b327f6202dc7e5709\n # Fixed, brokenly I think, in https://github.com/mrjoes/flask-admin/commit/4383eef3ce7eb01878f086928f8773adb9de79f8#diff-f87e7cd76fb9bc48c8681b24f238fb13R30\n\n def populate_obj(self, obj, name):\n pass\n\n def save_related(self, obj):\n model_id = getattr(obj, self._pk)\n\n attr = getattr(self.model, self.prop)\n values = self.model.select().where(attr == model_id).execute()\n\n pk_map = dict((str(getattr(v, self._pk)), v) for v in values)\n\n # Handle request data\n for field in self.entries:\n field_id = field.get_pk()\n\n if field_id in pk_map:\n model = pk_map[field_id]\n\n if self.should_delete(field):\n model.delete_instance(recursive=True)\n continue\n else:\n model = self.model()\n\n field.populate_obj(model, None)\n\n # Force relation\n setattr(model, self.prop, model_id)\n\n self.inline_view.on_model_change(field, model)\n\n model.save()\n\n\nclass CustomModelConverter(ModelConverter):\n def __init__(self, view, additional=None):\n super(CustomModelConverter, self).__init__(additional)\n self.view = view\n\n self.converters[PrimaryKeyField] = self.handle_pk\n self.converters[DateTimeField] = self.handle_datetime\n self.converters[DateField] = self.handle_date\n self.converters[TimeField] = self.handle_time\n\n def handle_foreign_key(self, model, field, **kwargs):\n loader = getattr(self.view, '_form_ajax_refs', {}).get(field.name)\n\n if loader:\n if field.null:\n kwargs['allow_blank'] = True\n\n return field.name, AjaxSelectField(loader, **kwargs)\n\n return super(CustomModelConverter, self).handle_foreign_key(model, field, **kwargs)\n\n def handle_pk(self, model, field, **kwargs):\n kwargs['validators'] = []\n return field.name, fields.HiddenField(**kwargs)\n\n def handle_date(self, model, field, **kwargs):\n kwargs['widget'] = form.DatePickerWidget()\n return field.name, 
fields.DateField(**kwargs)\n\n def handle_datetime(self, model, field, **kwargs):\n kwargs['widget'] = form.DateTimePickerWidget()\n return field.name, fields.DateTimeField(**kwargs)\n\n def handle_time(self, model, field, **kwargs):\n return field.name, form.TimeField(**kwargs)\n\n\ndef get_form(model, converter,\n base_class=form.BaseForm,\n only=None,\n exclude=None,\n field_args=None,\n allow_pk=False,\n extra_fields=None):\n \"\"\"\n Create form from peewee model and contribute extra fields, if necessary\n \"\"\"\n result = model_form(model,\n base_class=base_class,\n only=only,\n exclude=exclude,\n field_args=field_args,\n allow_pk=allow_pk,\n converter=converter)\n\n if extra_fields:\n for name, field in iteritems(extra_fields):\n setattr(result, name, form.recreate_field(field))\n\n return result\n\n\nclass InlineModelConverter(InlineModelConverterBase):\n \"\"\"\n Inline model form helper.\n \"\"\"\n\n inline_field_list_type = InlineModelFormList\n \"\"\"\n Used field list type.\n\n If you want to do some custom rendering of inline field lists,\n you can create your own wtforms field and use it instead\n \"\"\"\n\n def get_info(self, p):\n info = super(InlineModelConverter, self).get_info(p)\n\n if info is None:\n if isinstance(p, BaseModel):\n info = InlineFormAdmin(p)\n else:\n model = getattr(p, 'model', None)\n if model is None:\n raise Exception('Unknown inline model admin: %s' % repr(p))\n\n attrs = dict()\n\n for attr in dir(p):\n if not attr.startswith('_') and attr != model:\n attrs[attr] = getattr(p, attr)\n\n info = InlineFormAdmin(model, **attrs)\n\n # Resolve AJAX FKs\n info._form_ajax_refs = self.process_ajax_refs(info)\n\n return info\n\n def process_ajax_refs(self, info):\n refs = getattr(info, 'form_ajax_refs', None)\n\n result = {}\n\n if refs:\n for name, opts in iteritems(refs):\n new_name = '%s.%s' % (info.model.__name__.lower(), name)\n\n loader = None\n if isinstance(opts, (list, tuple)):\n loader = create_ajax_loader(info.model, new_name, name, opts)\n else:\n loader = opts\n\n result[name] = loader\n self.view._form_ajax_refs[new_name] = loader\n\n return result\n\n def contribute(self, converter, model, form_class, inline_model):\n # Find property from target model to current model\n reverse_field = None\n\n info = self.get_info(inline_model)\n\n for field in info.model._meta.get_fields():\n field_type = type(field)\n\n if field_type == ForeignKeyField:\n if field.rel_model == model:\n reverse_field = field\n break\n else:\n raise Exception('Cannot find reverse relation for model %s' % info.model)\n\n # Remove reverse property from the list\n ignore = [reverse_field.name]\n\n if info.form_excluded_columns:\n exclude = ignore + info.form_excluded_columns\n else:\n exclude = ignore\n\n # Create field\n child_form = info.get_form()\n\n if child_form is None:\n child_form = model_form(info.model,\n base_class=form.BaseForm,\n only=info.form_columns,\n exclude=exclude,\n field_args=info.form_args,\n allow_pk=True,\n converter=converter)\n\n\n prop_name = reverse_field.related_name\n\n label = self.get_label(info, prop_name)\n\n setattr(form_class,\n prop_name,\n self.inline_field_list_type(child_form,\n info.model,\n reverse_field.name,\n info,\n label=label or info.model.__name__))\n\n return form_class\n\n\ndef save_inline(form, model):\n for f in itervalues(form._fields):\n if f.type == 'InlineModelFormList':\n f.save_related(model)\n", "path": "flask_admin/contrib/peewee/form.py" } ]
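For orientation, a hedged sketch of how the pieces above fit together when building a form for a Peewee model; the Post model is invented, and None stands in for the admin view, which the converter above only consults for optional attributes.

```python
# Illustrative wiring of the converter and form factory defined above; the
# Post model is made up, and None stands in for the admin view object.
import peewee

from flask_admin.contrib.peewee.form import CustomModelConverter, get_form


class Post(peewee.Model):
    title = peewee.CharField()
    published = peewee.DateTimeField(null=True)  # will get DateTimePickerWidget


converter = CustomModelConverter(view=None)  # view is only probed via getattr()
PostForm = get_form(Post, converter)         # a wtforms BaseForm subclass
```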
[ { "content": "from wtforms import fields\n\nfrom peewee import (DateTimeField, DateField, TimeField,\n PrimaryKeyField, ForeignKeyField, BaseModel)\n\nfrom wtfpeewee.orm import ModelConverter, model_form\n\nfrom flask.ext.admin import form\nfrom flask.ext.admin._compat import iteritems, itervalues\nfrom flask.ext.admin.model.form import InlineFormAdmin, InlineModelConverterBase\nfrom flask.ext.admin.model.fields import InlineModelFormField, InlineFieldList, AjaxSelectField\n\nfrom .tools import get_primary_key\nfrom .ajax import create_ajax_loader\n\n\nclass InlineModelFormList(InlineFieldList):\n \"\"\"\n Customized inline model form list field.\n \"\"\"\n\n form_field_type = InlineModelFormField\n \"\"\"\n Form field type. Override to use custom field for each inline form\n \"\"\"\n\n def __init__(self, form, model, prop, inline_view, **kwargs):\n self.form = form\n self.model = model\n self.prop = prop\n self.inline_view = inline_view\n\n self._pk = get_primary_key(model)\n\n super(InlineModelFormList, self).__init__(self.form_field_type(form, self._pk, inline_view), **kwargs)\n\n def display_row_controls(self, field):\n return field.get_pk() is not None\n\n def process(self, formdata, data=None):\n if not formdata:\n attr = getattr(self.model, self.prop)\n data = self.model.select().where(attr == data).execute()\n else:\n data = None\n\n return super(InlineModelFormList, self).process(formdata, data)\n\n def populate_obj(self, obj, name):\n pass\n\n def save_related(self, obj):\n model_id = getattr(obj, self._pk)\n\n attr = getattr(self.model, self.prop)\n values = self.model.select().where(attr == model_id).execute()\n\n pk_map = dict((str(getattr(v, self._pk)), v) for v in values)\n\n # Handle request data\n for field in self.entries:\n field_id = field.get_pk()\n\n if field_id in pk_map:\n model = pk_map[field_id]\n\n if self.should_delete(field):\n model.delete_instance(recursive=True)\n continue\n else:\n model = self.model()\n\n field.populate_obj(model, None)\n\n # Force relation\n setattr(model, self.prop, model_id)\n\n self.inline_view.on_model_change(field, model)\n\n model.save()\n\n\nclass CustomModelConverter(ModelConverter):\n def __init__(self, view, additional=None):\n super(CustomModelConverter, self).__init__(additional)\n self.view = view\n\n self.converters[PrimaryKeyField] = self.handle_pk\n self.converters[DateTimeField] = self.handle_datetime\n self.converters[DateField] = self.handle_date\n self.converters[TimeField] = self.handle_time\n\n self.overrides = getattr(self.view, 'form_overrides', None) or {}\n\n def handle_foreign_key(self, model, field, **kwargs):\n loader = getattr(self.view, '_form_ajax_refs', {}).get(field.name)\n\n if loader:\n if field.null:\n kwargs['allow_blank'] = True\n\n return field.name, AjaxSelectField(loader, **kwargs)\n\n return super(CustomModelConverter, self).handle_foreign_key(model, field, **kwargs)\n\n def handle_pk(self, model, field, **kwargs):\n kwargs['validators'] = []\n return field.name, fields.HiddenField(**kwargs)\n\n def handle_date(self, model, field, **kwargs):\n kwargs['widget'] = form.DatePickerWidget()\n return field.name, fields.DateField(**kwargs)\n\n def handle_datetime(self, model, field, **kwargs):\n kwargs['widget'] = form.DateTimePickerWidget()\n return field.name, fields.DateTimeField(**kwargs)\n\n def handle_time(self, model, field, **kwargs):\n return field.name, form.TimeField(**kwargs)\n\n\ndef get_form(model, converter,\n base_class=form.BaseForm,\n only=None,\n exclude=None,\n field_args=None,\n 
allow_pk=False,\n extra_fields=None):\n \"\"\"\n Create form from peewee model and contribute extra fields, if necessary\n \"\"\"\n result = model_form(model,\n base_class=base_class,\n only=only,\n exclude=exclude,\n field_args=field_args,\n allow_pk=allow_pk,\n converter=converter)\n\n if extra_fields:\n for name, field in iteritems(extra_fields):\n setattr(result, name, form.recreate_field(field))\n\n return result\n\n\nclass InlineModelConverter(InlineModelConverterBase):\n \"\"\"\n Inline model form helper.\n \"\"\"\n\n inline_field_list_type = InlineModelFormList\n \"\"\"\n Used field list type.\n\n If you want to do some custom rendering of inline field lists,\n you can create your own wtforms field and use it instead\n \"\"\"\n\n def get_info(self, p):\n info = super(InlineModelConverter, self).get_info(p)\n\n if info is None:\n if isinstance(p, BaseModel):\n info = InlineFormAdmin(p)\n else:\n model = getattr(p, 'model', None)\n if model is None:\n raise Exception('Unknown inline model admin: %s' % repr(p))\n\n attrs = dict()\n\n for attr in dir(p):\n if not attr.startswith('_') and attr != model:\n attrs[attr] = getattr(p, attr)\n\n info = InlineFormAdmin(model, **attrs)\n\n # Resolve AJAX FKs\n info._form_ajax_refs = self.process_ajax_refs(info)\n\n return info\n\n def process_ajax_refs(self, info):\n refs = getattr(info, 'form_ajax_refs', None)\n\n result = {}\n\n if refs:\n for name, opts in iteritems(refs):\n new_name = '%s.%s' % (info.model.__name__.lower(), name)\n\n loader = None\n if isinstance(opts, (list, tuple)):\n loader = create_ajax_loader(info.model, new_name, name, opts)\n else:\n loader = opts\n\n result[name] = loader\n self.view._form_ajax_refs[new_name] = loader\n\n return result\n\n def contribute(self, converter, model, form_class, inline_model):\n # Find property from target model to current model\n reverse_field = None\n\n info = self.get_info(inline_model)\n\n for field in info.model._meta.get_fields():\n field_type = type(field)\n\n if field_type == ForeignKeyField:\n if field.rel_model == model:\n reverse_field = field\n break\n else:\n raise Exception('Cannot find reverse relation for model %s' % info.model)\n\n # Remove reverse property from the list\n ignore = [reverse_field.name]\n\n if info.form_excluded_columns:\n exclude = ignore + info.form_excluded_columns\n else:\n exclude = ignore\n\n # Create field\n child_form = info.get_form()\n\n if child_form is None:\n child_form = model_form(info.model,\n base_class=form.BaseForm,\n only=info.form_columns,\n exclude=exclude,\n field_args=info.form_args,\n allow_pk=True,\n converter=converter)\n\n prop_name = 'fa_%s' % model.__name__\n\n label = self.get_label(info, prop_name)\n\n setattr(form_class,\n prop_name,\n self.inline_field_list_type(child_form,\n info.model,\n reverse_field.name,\n info,\n label=label or info.model.__name__))\n\n setattr(field.rel_model,\n prop_name,\n property(lambda self: self.id))\n\n return form_class\n\n\ndef save_inline(form, model):\n for f in itervalues(form._fields):\n if f.type == 'InlineModelFormList':\n f.save_related(model)\n", "path": "flask_admin/contrib/peewee/form.py" } ]
diff --git a/flask_admin/contrib/peewee/form.py b/flask_admin/contrib/peewee/form.py
index b071c3b53..806a67036 100644
--- a/flask_admin/contrib/peewee/form.py
+++ b/flask_admin/contrib/peewee/form.py
@@ -90,6 +90,8 @@ def __init__(self, view, additional=None):
         self.converters[DateField] = self.handle_date
         self.converters[TimeField] = self.handle_time
 
+        self.overrides = getattr(self.view, 'form_overrides', None) or {}
+
     def handle_foreign_key(self, model, field, **kwargs):
         loader = getattr(self.view, '_form_ajax_refs', {}).get(field.name)
 
form_overrides does not work with Peewee model backend I assume this is the same reason as the others -- that the Peewee backend's not quite as up-to-date? Looking through the code, it appears that form_overrides is not even checked in the Peewee backend, so it shouldn't be too hard for me to fix this. If you confirm that this is a bug and I'm not missing something, I can send through some code.
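For context, `form_overrides` is the flask-admin hook this issue is about: a mapping from field name to the wtforms field class the scaffolded form should use. A hedged sketch of the intended usage on a Peewee-backed view; the PostAdmin view and the body column are invented, and per the issue this mapping was ignored by the Peewee converter until the one-line patch above copied it onto the converter's overrides.

```python
# Sketch of the form_overrides usage the issue is asking for; Post is an
# invented Peewee model with a 'body' text column.
from wtforms import fields

from flask_admin.contrib.peewee import ModelView


class PostAdmin(ModelView):
    # Render 'body' as a textarea instead of the default text input. With the
    # patch above, CustomModelConverter copies this mapping onto its own
    # `overrides`, so the override can take effect for Peewee models too.
    form_overrides = {
        'body': fields.TextAreaField,
    }

# Registered as usual, e.g. admin.add_view(PostAdmin(Post, name='Posts')).
```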
google__turbinia-323
[ { "content": "# -*- coding: utf-8 -*-\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Client objects for Turbinia.\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom datetime import datetime\nfrom datetime import timedelta\nimport json\nimport logging\nimport os\nimport stat\nimport time\n\nfrom turbinia import config\nfrom turbinia.config import logger\nfrom turbinia import task_manager\nfrom turbinia import workers\nfrom turbinia import TurbiniaException\nfrom turbinia.workers.artifact import FileArtifactExtractionTask\nfrom turbinia.workers.analysis.wordpress import WordpressAccessLogAnalysisTask\nfrom turbinia.workers.analysis.jenkins import JenkinsAnalysisTask\nfrom turbinia.workers.grep import GrepTask\nfrom turbinia.workers.hadoop import HadoopAnalysisTask\nfrom turbinia.workers.plaso import PlasoTask\nfrom turbinia.workers.psort import PsortTask\nfrom turbinia.workers.sshd import SSHDAnalysisTask\nfrom turbinia.workers.strings import StringsAsciiTask\nfrom turbinia.workers.strings import StringsUnicodeTask\nfrom turbinia.workers.tomcat import TomcatAnalysisTask\nfrom turbinia.workers.worker_stat import StatTask\n\n# TODO(aarontp): Remove this map after\n# https://github.com/google/turbinia/issues/278 is fixed.\nTASK_MAP = {\n 'fileartifactextractiontask': FileArtifactExtractionTask,\n 'wordpressaccessloganalysistask': WordpressAccessLogAnalysisTask,\n 'jenkinsanalysistask': JenkinsAnalysisTask,\n 'greptask': GrepTask,\n 'hadoopanalysistask': HadoopAnalysisTask,\n 'plasotask': PlasoTask,\n 'psorttask': PsortTask,\n 'sshdanalysistask': SSHDAnalysisTask,\n 'stringsasciitask': StringsAsciiTask,\n 'stringsunicodetask': StringsUnicodeTask,\n 'tomcatanalysistask': TomcatAnalysisTask,\n 'stattask': StatTask,\n}\n\nconfig.LoadConfig()\nif config.TASK_MANAGER.lower() == 'psq':\n import psq\n\n from google.cloud import exceptions\n from google.cloud import datastore\n from google.cloud import pubsub\n\n from turbinia.lib.google_cloud import GoogleCloudFunction\nelif config.TASK_MANAGER.lower() == 'celery':\n from turbinia.state_manager import RedisStateManager\n\nlog = logging.getLogger('turbinia')\nlogger.setup()\n\n\ndef check_directory(directory):\n \"\"\"Checks directory to make sure it exists and is writable.\n\n Args:\n directory (string): Path to directory\n\n Raises:\n TurbiniaException: When directory cannot be created or used.\n \"\"\"\n if os.path.exists(directory) and not os.path.isdir(directory):\n raise TurbiniaException(\n 'File {0:s} exists, but is not a directory'.format(directory))\n\n if not os.path.exists(directory):\n try:\n os.makedirs(directory)\n except OSError:\n raise TurbiniaException(\n 'Can not create Directory {0:s}'.format(directory))\n\n if not os.access(directory, os.W_OK):\n try:\n mode = os.stat(directory)[0]\n os.chmod(directory, mode | stat.S_IWUSR)\n except OSError:\n raise TurbiniaException(\n 'Can not add write permissions to {0:s}'.format(directory))\n\n\nclass TurbiniaClient(object):\n \"\"\"Client class 
for Turbinia.\n\n Attributes:\n task_manager (TaskManager): Turbinia task manager\n \"\"\"\n\n def __init__(self, run_local=False):\n config.LoadConfig()\n if run_local:\n self.task_manager = None\n else:\n self.task_manager = task_manager.get_task_manager()\n self.task_manager.setup(server=False)\n\n def create_task(self, task_name):\n \"\"\"Creates a Turbinia Task by name.\n\n Args:\n task_name(string): Name of the Task we are going to run.\n\n Returns:\n TurbiniaTask: An instantiated Task object.\n\n Raises:\n TurbiniaException: When no Task object matching task_name is found.\n \"\"\"\n task_obj = TASK_MAP.get(task_name.lower())\n log.debug('Looking up Task {0:s} by name'.format(task_name))\n if not task_obj:\n raise TurbiniaException('No Task named {0:s} found'.format(task_name))\n return task_obj()\n\n def list_jobs(self):\n \"\"\"List the available jobs.\"\"\"\n # TODO(aarontp): Refactor this out so that we don't need to depend on\n # the task manager from the client.\n log.info('Available Jobs:')\n for job in self.task_manager.jobs:\n log.info('\\t{0:s}'.format(job.name))\n\n def wait_for_request(\n self, instance, project, region, request_id=None, user=None,\n poll_interval=60):\n \"\"\"Polls and waits for Turbinia Request to complete.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the region to execute in.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n poll_interval (int): Interval of seconds between polling cycles.\n \"\"\"\n while True:\n task_results = self.get_task_data(\n instance, project, region, request_id=request_id, user=user)\n completed_count = 0\n uncompleted_count = 0\n for task in task_results:\n if task.get('successful') is not None:\n completed_count += 1\n else:\n uncompleted_count += 1\n\n if completed_count and completed_count == len(task_results):\n break\n\n log.info(\n '{0:d} Tasks found, {1:d} completed. 
Waiting {2:d} seconds.'.format(\n len(task_results), completed_count, poll_interval))\n time.sleep(poll_interval)\n\n log.info('All {0:d} Tasks completed'.format(len(task_results)))\n\n def get_task_data(\n self, instance, project, region, days=0, task_id=None, request_id=None,\n user=None, function_name='gettasks'):\n \"\"\"Gets task data from Google Cloud Functions.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the region to execute in.\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n function_name (string): The GCF function we want to call\n\n Returns:\n List of Task dict objects.\n \"\"\"\n cloud_function = GoogleCloudFunction(project_id=project, region=region)\n func_args = {'instance': instance, 'kind': 'TurbiniaTask'}\n\n if days:\n start_time = datetime.now() - timedelta(days=days)\n # Format this like '1990-01-01T00:00:00z' so we can cast it directly to a\n # javascript Date() object in the cloud function.\n start_string = start_time.strftime('%Y-%m-%dT%H:%M:%S')\n func_args.update({'start_time': start_string})\n elif task_id:\n func_args.update({'task_id': task_id})\n elif request_id:\n func_args.update({'request_id': request_id})\n\n if user:\n func_args.update({'user': user})\n\n response = cloud_function.ExecuteFunction(function_name, func_args)\n if 'result' not in response:\n log.error('No results found')\n if response.get('error', '{}') != '{}':\n msg = 'Error executing Cloud Function: [{0!s}].'.format(\n response.get('error'))\n log.error(msg)\n log.debug('GCF response: {0!s}'.format(response))\n raise TurbiniaException(\n 'Cloud Function {0:s} returned no results.'.format(function_name))\n\n try:\n results = json.loads(response['result'])\n except (TypeError, ValueError) as e:\n raise TurbiniaException(\n 'Could not deserialize result from GCF: [{0!s}]'.format(e))\n\n return results[0]\n\n def format_task_status(\n self, instance, project, region, days=0, task_id=None, request_id=None,\n user=None, all_fields=False):\n \"\"\"Formats the recent history for Turbinia Tasks.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n all_fields (bool): Include all fields for the task, including task,\n request ids and saved file paths.\n\n Returns:\n String of task status\n \"\"\"\n task_results = self.get_task_data(\n instance, project, region, days, task_id, request_id, user)\n num_results = len(task_results)\n results = []\n if not num_results:\n msg = '\\nNo Tasks found.'\n log.info(msg)\n return msg\n\n results.append('\\nRetrieved {0:d} Task results:'.format(num_results))\n for task in task_results:\n if task.get('successful'):\n success = 'Successful'\n elif task.get('successful') is None:\n success = 'Running'\n else:\n success = 'Failed'\n\n status = task.get('status', 'No task status')\n if all_fields:\n results.append(\n '{0:s} request: {1:s} task: {2:s} {3:s} {4:s} {5:s} {6:s}: 
{7:s}'\n .format(\n task.get('last_update'), task.get('request_id'), task.get('id'),\n task.get('name'), task.get('user'), task.get('worker_name'),\n success, status))\n saved_paths = task.get('saved_paths', [])\n for path in saved_paths:\n results.append('\\t{0:s}'.format(path))\n else:\n results.append(\n '{0:s} {1:s} {2:s}: {3:s}'.format(\n task.get('last_update'), task.get('name'), success, status))\n\n return '\\n'.join(results)\n\n def run_local_task(self, task_name, request):\n \"\"\"Runs a Turbinia Task locally.\n\n Args:\n task_name(string): Name of the Task we are going to run.\n request (TurbiniaRequest): Object containing request and evidence info.\n\n Returns:\n TurbiniaTaskResult: The result returned by the Task Execution.\n \"\"\"\n task = self.create_task(task_name)\n task.request_id = request.request_id\n task.base_output_dir = config.OUTPUT_DIR\n task.run_local = True\n if not request.evidence:\n raise TurbiniaException('TurbiniaRequest does not contain evidence.')\n log.info('Running Task {0:s} locally'.format(task_name))\n result = task.run_wrapper(request.evidence[0])\n return result\n\n def send_request(self, request):\n \"\"\"Sends a TurbiniaRequest message.\n\n Args:\n request: A TurbiniaRequest object.\n \"\"\"\n self.task_manager.server_pubsub.send_request(request)\n\n def close_tasks(\n self, instance, project, region, request_id=None, task_id=None, user=None,\n requester=None):\n \"\"\"Close Turbinia Tasks based on Request ID.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n request_id (string): The Id of the request we want tasks for.\n task_id (string): The Id of the request we want task for.\n user (string): The user of the request we want tasks for.\n requester (string): The user making the request to close tasks.\n\n Returns: String of closed Task IDs.\n \"\"\"\n cloud_function = GoogleCloudFunction(project_id=project, region=region)\n func_args = {\n 'instance': instance,\n 'kind': 'TurbiniaTask',\n 'request_id': request_id,\n 'task_id': task_id,\n 'user': user,\n 'requester': requester\n }\n response = cloud_function.ExecuteFunction('closetasks', func_args)\n return 'Closed Task IDs: %s' % response.get('result')\n\n\nclass TurbiniaCeleryClient(TurbiniaClient):\n \"\"\"Client class for Turbinia (Celery).\n\n Overriding some things specific to Celery operation.\n\n Attributes:\n redis (RedisStateManager): Redis datastore object\n \"\"\"\n\n def __init__(self, *_, **__):\n super(TurbiniaCeleryClient, self).__init__()\n self.redis = RedisStateManager()\n\n def send_request(self, request):\n \"\"\"Sends a TurbiniaRequest message.\n\n Args:\n request: A TurbiniaRequest object.\n \"\"\"\n self.task_manager.kombu.send_request(request)\n\n # pylint: disable=arguments-differ\n def get_task_data(\n self, instance, _, __, days=0, task_id=None, request_id=None,\n function_name=None):\n \"\"\"Gets task data from Redis.\n\n We keep the same function signature, but ignore arguments passed for GCP.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n\n Returns:\n List of Task dict objects.\n \"\"\"\n return self.redis.get_task_data(instance, days, task_id, request_id)\n\n\nclass 
TurbiniaServer(object):\n \"\"\"Turbinia Server class.\n\n Attributes:\n task_manager (TaskManager): An object to manage turbinia tasks.\n \"\"\"\n\n def __init__(self, jobs_blacklist=None, jobs_whitelist=None):\n \"\"\"Initializes Turbinia Server.\n\n Args:\n jobs_blacklist (Optional[list[str]]): Jobs we will exclude from running\n jobs_whitelist (Optional[list[str]]): The only Jobs we will include to run\n \"\"\"\n config.LoadConfig()\n self.task_manager = task_manager.get_task_manager()\n self.task_manager.setup(jobs_blacklist, jobs_whitelist)\n\n def start(self):\n \"\"\"Start Turbinia Server.\"\"\"\n log.info('Running Turbinia Server.')\n self.task_manager.run()\n\n def add_evidence(self, evidence_):\n \"\"\"Add evidence to be processed.\"\"\"\n self.task_manager.add_evidence(evidence_)\n\n\nclass TurbiniaCeleryWorker(TurbiniaClient):\n \"\"\"Turbinia Celery Worker class.\n\n Attributes:\n worker (celery.app): Celery worker app\n \"\"\"\n\n def __init__(self, *_, **__):\n \"\"\"Initialization for Celery worker.\"\"\"\n super(TurbiniaCeleryWorker, self).__init__()\n check_directory(config.MOUNT_DIR_PREFIX)\n check_directory(config.OUTPUT_DIR)\n check_directory(config.TMP_DIR)\n self.worker = self.task_manager.celery.app\n\n def start(self):\n \"\"\"Start Turbinia Celery Worker.\"\"\"\n log.info('Running Turbinia Celery Worker.')\n argv = ['celery', 'worker', '--loglevel=info', '--pool=solo']\n self.worker.start(argv)\n\n\nclass TurbiniaPsqWorker(object):\n \"\"\"Turbinia PSQ Worker class.\n\n Attributes:\n worker (psq.Worker): PSQ Worker object\n psq (psq.Queue): A Task queue object\n\n Raises:\n TurbiniaException: When errors occur\n \"\"\"\n\n def __init__(self, *_, **__):\n \"\"\"Initialization for PSQ Worker.\"\"\"\n config.LoadConfig()\n psq_publisher = pubsub.PublisherClient()\n psq_subscriber = pubsub.SubscriberClient()\n datastore_client = datastore.Client(project=config.TURBINIA_PROJECT)\n try:\n self.psq = psq.Queue(\n psq_publisher, psq_subscriber, config.TURBINIA_PROJECT,\n name=config.PSQ_TOPIC, storage=psq.DatastoreStorage(datastore_client))\n except exceptions.GoogleCloudError as e:\n msg = 'Error creating PSQ Queue: {0:s}'.format(str(e))\n log.error(msg)\n raise TurbiniaException(msg)\n\n check_directory(config.MOUNT_DIR_PREFIX)\n check_directory(config.OUTPUT_DIR)\n check_directory(config.TMP_DIR)\n\n log.info('Starting PSQ listener on queue {0:s}'.format(self.psq.name))\n self.worker = psq.Worker(queue=self.psq)\n\n def start(self):\n \"\"\"Start Turbinia PSQ Worker.\"\"\"\n log.info('Running Turbinia PSQ Worker.')\n self.worker.listen()\n", "path": "turbinia/client.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Client objects for Turbinia.\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom datetime import datetime\nfrom datetime import timedelta\nimport json\nimport logging\nimport os\nimport stat\nimport time\n\nfrom turbinia import config\nfrom turbinia.config import logger\nfrom turbinia import task_manager\nfrom turbinia import workers\nfrom turbinia import TurbiniaException\nfrom turbinia.workers.artifact import FileArtifactExtractionTask\nfrom turbinia.workers.analysis.wordpress import WordpressAccessLogAnalysisTask\nfrom turbinia.workers.analysis.jenkins import JenkinsAnalysisTask\nfrom turbinia.workers.grep import GrepTask\nfrom turbinia.workers.hadoop import HadoopAnalysisTask\nfrom turbinia.workers.plaso import PlasoTask\nfrom turbinia.workers.psort import PsortTask\nfrom turbinia.workers.sshd import SSHDAnalysisTask\nfrom turbinia.workers.strings import StringsAsciiTask\nfrom turbinia.workers.strings import StringsUnicodeTask\nfrom turbinia.workers.tomcat import TomcatAnalysisTask\nfrom turbinia.workers.worker_stat import StatTask\n\n# TODO(aarontp): Remove this map after\n# https://github.com/google/turbinia/issues/278 is fixed.\nTASK_MAP = {\n 'fileartifactextractiontask': FileArtifactExtractionTask,\n 'wordpressaccessloganalysistask': WordpressAccessLogAnalysisTask,\n 'jenkinsanalysistask': JenkinsAnalysisTask,\n 'greptask': GrepTask,\n 'hadoopanalysistask': HadoopAnalysisTask,\n 'plasotask': PlasoTask,\n 'psorttask': PsortTask,\n 'sshdanalysistask': SSHDAnalysisTask,\n 'stringsasciitask': StringsAsciiTask,\n 'stringsunicodetask': StringsUnicodeTask,\n 'tomcatanalysistask': TomcatAnalysisTask,\n 'stattask': StatTask,\n}\n\nconfig.LoadConfig()\nif config.TASK_MANAGER.lower() == 'psq':\n import psq\n\n from google.cloud import exceptions\n from google.cloud import datastore\n from google.cloud import pubsub\n\n from turbinia.lib.google_cloud import GoogleCloudFunction\nelif config.TASK_MANAGER.lower() == 'celery':\n from turbinia.state_manager import RedisStateManager\n\nlog = logging.getLogger('turbinia')\nlogger.setup()\n\n\ndef check_directory(directory):\n \"\"\"Checks directory to make sure it exists and is writable.\n\n Args:\n directory (string): Path to directory\n\n Raises:\n TurbiniaException: When directory cannot be created or used.\n \"\"\"\n if os.path.exists(directory) and not os.path.isdir(directory):\n raise TurbiniaException(\n 'File {0:s} exists, but is not a directory'.format(directory))\n\n if not os.path.exists(directory):\n try:\n os.makedirs(directory)\n except OSError:\n raise TurbiniaException(\n 'Can not create Directory {0:s}'.format(directory))\n\n if not os.access(directory, os.W_OK):\n try:\n mode = os.stat(directory)[0]\n os.chmod(directory, mode | stat.S_IWUSR)\n except OSError:\n raise TurbiniaException(\n 'Can not add write permissions to {0:s}'.format(directory))\n\n\nclass TurbiniaClient(object):\n \"\"\"Client class 
for Turbinia.\n\n Attributes:\n task_manager (TaskManager): Turbinia task manager\n \"\"\"\n\n def __init__(self, run_local=False):\n config.LoadConfig()\n if run_local:\n self.task_manager = None\n else:\n self.task_manager = task_manager.get_task_manager()\n self.task_manager.setup(server=False)\n\n def create_task(self, task_name):\n \"\"\"Creates a Turbinia Task by name.\n\n Args:\n task_name(string): Name of the Task we are going to run.\n\n Returns:\n TurbiniaTask: An instantiated Task object.\n\n Raises:\n TurbiniaException: When no Task object matching task_name is found.\n \"\"\"\n task_obj = TASK_MAP.get(task_name.lower())\n log.debug('Looking up Task {0:s} by name'.format(task_name))\n if not task_obj:\n raise TurbiniaException('No Task named {0:s} found'.format(task_name))\n return task_obj()\n\n def list_jobs(self):\n \"\"\"List the available jobs.\"\"\"\n # TODO(aarontp): Refactor this out so that we don't need to depend on\n # the task manager from the client.\n log.info('Available Jobs:')\n for job in self.task_manager.jobs:\n log.info('\\t{0:s}'.format(job.name))\n\n def wait_for_request(\n self, instance, project, region, request_id=None, user=None,\n poll_interval=60):\n \"\"\"Polls and waits for Turbinia Request to complete.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the region to execute in.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n poll_interval (int): Interval of seconds between polling cycles.\n \"\"\"\n while True:\n task_results = self.get_task_data(\n instance, project, region, request_id=request_id, user=user)\n completed_count = 0\n uncompleted_count = 0\n for task in task_results:\n if task.get('successful') is not None:\n completed_count += 1\n else:\n uncompleted_count += 1\n\n if completed_count and completed_count == len(task_results):\n break\n\n log.info(\n '{0:d} Tasks found, {1:d} completed. 
Waiting {2:d} seconds.'.format(\n len(task_results), completed_count, poll_interval))\n time.sleep(poll_interval)\n\n log.info('All {0:d} Tasks completed'.format(len(task_results)))\n\n def get_task_data(\n self, instance, project, region, days=0, task_id=None, request_id=None,\n user=None, function_name='gettasks'):\n \"\"\"Gets task data from Google Cloud Functions.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the region to execute in.\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n function_name (string): The GCF function we want to call\n\n Returns:\n List of Task dict objects.\n \"\"\"\n cloud_function = GoogleCloudFunction(project_id=project, region=region)\n func_args = {'instance': instance, 'kind': 'TurbiniaTask'}\n\n if days:\n start_time = datetime.now() - timedelta(days=days)\n # Format this like '1990-01-01T00:00:00z' so we can cast it directly to a\n # javascript Date() object in the cloud function.\n start_string = start_time.strftime('%Y-%m-%dT%H:%M:%S')\n func_args.update({'start_time': start_string})\n elif task_id:\n func_args.update({'task_id': task_id})\n elif request_id:\n func_args.update({'request_id': request_id})\n\n if user:\n func_args.update({'user': user})\n\n response = cloud_function.ExecuteFunction(function_name, func_args)\n if 'result' not in response:\n log.error('No results found')\n if response.get('error', '{}') != '{}':\n msg = 'Error executing Cloud Function: [{0!s}].'.format(\n response.get('error'))\n log.error(msg)\n log.debug('GCF response: {0!s}'.format(response))\n raise TurbiniaException(\n 'Cloud Function {0:s} returned no results.'.format(function_name))\n\n try:\n results = json.loads(response['result'])\n except (TypeError, ValueError) as e:\n raise TurbiniaException(\n 'Could not deserialize result from GCF: [{0!s}]'.format(e))\n\n return results[0]\n\n def format_task_status(\n self, instance, project, region, days=0, task_id=None, request_id=None,\n user=None, all_fields=False):\n \"\"\"Formats the recent history for Turbinia Tasks.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n all_fields (bool): Include all fields for the task, including task,\n request ids and saved file paths.\n\n Returns:\n String of task status\n \"\"\"\n task_results = self.get_task_data(\n instance, project, region, days, task_id, request_id, user)\n num_results = len(task_results)\n results = []\n if not num_results:\n msg = '\\nNo Tasks found.'\n log.info(msg)\n return msg\n\n results.append('\\nRetrieved {0:d} Task results:'.format(num_results))\n for task in task_results:\n if task.get('successful'):\n success = 'Successful'\n elif task.get('successful') is None:\n success = 'Running'\n else:\n success = 'Failed'\n\n status = task.get('status', 'No task status')\n if all_fields:\n results.append(\n '{0:s} request: {1:s} task: {2:s} {3:s} {4:s} {5:s} {6:s}: 
{7:s}'\n .format(\n task.get('last_update'), task.get('request_id'), task.get('id'),\n task.get('name'), task.get('user'), task.get('worker_name'),\n success, status))\n saved_paths = task.get('saved_paths', [])\n if saved_paths is None:\n saved_paths = []\n for path in saved_paths:\n results.append('\\t{0:s}'.format(path))\n else:\n results.append(\n '{0:s} {1:s} {2:s}: {3:s}'.format(\n task.get('last_update'), task.get('name'), success, status))\n\n return '\\n'.join(results)\n\n def run_local_task(self, task_name, request):\n \"\"\"Runs a Turbinia Task locally.\n\n Args:\n task_name(string): Name of the Task we are going to run.\n request (TurbiniaRequest): Object containing request and evidence info.\n\n Returns:\n TurbiniaTaskResult: The result returned by the Task Execution.\n \"\"\"\n task = self.create_task(task_name)\n task.request_id = request.request_id\n task.base_output_dir = config.OUTPUT_DIR\n task.run_local = True\n if not request.evidence:\n raise TurbiniaException('TurbiniaRequest does not contain evidence.')\n log.info('Running Task {0:s} locally'.format(task_name))\n result = task.run_wrapper(request.evidence[0])\n return result\n\n def send_request(self, request):\n \"\"\"Sends a TurbiniaRequest message.\n\n Args:\n request: A TurbiniaRequest object.\n \"\"\"\n self.task_manager.server_pubsub.send_request(request)\n\n def close_tasks(\n self, instance, project, region, request_id=None, task_id=None, user=None,\n requester=None):\n \"\"\"Close Turbinia Tasks based on Request ID.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n request_id (string): The Id of the request we want tasks for.\n task_id (string): The Id of the request we want task for.\n user (string): The user of the request we want tasks for.\n requester (string): The user making the request to close tasks.\n\n Returns: String of closed Task IDs.\n \"\"\"\n cloud_function = GoogleCloudFunction(project_id=project, region=region)\n func_args = {\n 'instance': instance,\n 'kind': 'TurbiniaTask',\n 'request_id': request_id,\n 'task_id': task_id,\n 'user': user,\n 'requester': requester\n }\n response = cloud_function.ExecuteFunction('closetasks', func_args)\n return 'Closed Task IDs: %s' % response.get('result')\n\n\nclass TurbiniaCeleryClient(TurbiniaClient):\n \"\"\"Client class for Turbinia (Celery).\n\n Overriding some things specific to Celery operation.\n\n Attributes:\n redis (RedisStateManager): Redis datastore object\n \"\"\"\n\n def __init__(self, *_, **__):\n super(TurbiniaCeleryClient, self).__init__()\n self.redis = RedisStateManager()\n\n def send_request(self, request):\n \"\"\"Sends a TurbiniaRequest message.\n\n Args:\n request: A TurbiniaRequest object.\n \"\"\"\n self.task_manager.kombu.send_request(request)\n\n # pylint: disable=arguments-differ\n def get_task_data(\n self, instance, _, __, days=0, task_id=None, request_id=None,\n function_name=None):\n \"\"\"Gets task data from Redis.\n\n We keep the same function signature, but ignore arguments passed for GCP.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n\n Returns:\n List of Task dict objects.\n \"\"\"\n return self.redis.get_task_data(instance, 
days, task_id, request_id)\n\n\nclass TurbiniaServer(object):\n \"\"\"Turbinia Server class.\n\n Attributes:\n task_manager (TaskManager): An object to manage turbinia tasks.\n \"\"\"\n\n def __init__(self, jobs_blacklist=None, jobs_whitelist=None):\n \"\"\"Initializes Turbinia Server.\n\n Args:\n jobs_blacklist (Optional[list[str]]): Jobs we will exclude from running\n jobs_whitelist (Optional[list[str]]): The only Jobs we will include to run\n \"\"\"\n config.LoadConfig()\n self.task_manager = task_manager.get_task_manager()\n self.task_manager.setup(jobs_blacklist, jobs_whitelist)\n\n def start(self):\n \"\"\"Start Turbinia Server.\"\"\"\n log.info('Running Turbinia Server.')\n self.task_manager.run()\n\n def add_evidence(self, evidence_):\n \"\"\"Add evidence to be processed.\"\"\"\n self.task_manager.add_evidence(evidence_)\n\n\nclass TurbiniaCeleryWorker(TurbiniaClient):\n \"\"\"Turbinia Celery Worker class.\n\n Attributes:\n worker (celery.app): Celery worker app\n \"\"\"\n\n def __init__(self, *_, **__):\n \"\"\"Initialization for Celery worker.\"\"\"\n super(TurbiniaCeleryWorker, self).__init__()\n check_directory(config.MOUNT_DIR_PREFIX)\n check_directory(config.OUTPUT_DIR)\n check_directory(config.TMP_DIR)\n self.worker = self.task_manager.celery.app\n\n def start(self):\n \"\"\"Start Turbinia Celery Worker.\"\"\"\n log.info('Running Turbinia Celery Worker.')\n argv = ['celery', 'worker', '--loglevel=info', '--pool=solo']\n self.worker.start(argv)\n\n\nclass TurbiniaPsqWorker(object):\n \"\"\"Turbinia PSQ Worker class.\n\n Attributes:\n worker (psq.Worker): PSQ Worker object\n psq (psq.Queue): A Task queue object\n\n Raises:\n TurbiniaException: When errors occur\n \"\"\"\n\n def __init__(self, *_, **__):\n \"\"\"Initialization for PSQ Worker.\"\"\"\n config.LoadConfig()\n psq_publisher = pubsub.PublisherClient()\n psq_subscriber = pubsub.SubscriberClient()\n datastore_client = datastore.Client(project=config.TURBINIA_PROJECT)\n try:\n self.psq = psq.Queue(\n psq_publisher, psq_subscriber, config.TURBINIA_PROJECT,\n name=config.PSQ_TOPIC, storage=psq.DatastoreStorage(datastore_client))\n except exceptions.GoogleCloudError as e:\n msg = 'Error creating PSQ Queue: {0:s}'.format(str(e))\n log.error(msg)\n raise TurbiniaException(msg)\n\n check_directory(config.MOUNT_DIR_PREFIX)\n check_directory(config.OUTPUT_DIR)\n check_directory(config.TMP_DIR)\n\n log.info('Starting PSQ listener on queue {0:s}'.format(self.psq.name))\n self.worker = psq.Worker(queue=self.psq)\n\n def start(self):\n \"\"\"Start Turbinia PSQ Worker.\"\"\"\n log.info('Running Turbinia PSQ Worker.')\n self.worker.listen()\n", "path": "turbinia/client.py" } ]
diff --git a/turbinia/client.py b/turbinia/client.py
index 95d87e653..eb6d8c060 100644
--- a/turbinia/client.py
+++ b/turbinia/client.py
@@ -282,6 +282,8 @@ def format_task_status(
                 task.get('name'), task.get('user'), task.get('worker_name'),
                 success, status))
         saved_paths = task.get('saved_paths', [])
+        if saved_paths is None:
+          saved_paths = []
         for path in saved_paths:
           results.append('\t{0:s}'.format(path))
       else:
turbiniactl -a status breaks on local installations

When running `turbiniactl -a status` with local tasks, an error occurs and a stack trace gets thrown here, where `saved_paths` is incorrectly set to None:
https://github.com/google/turbinia/blob/b65bc0d26f635655ad03ece4d65fae2e2e224915/turbinia/client.py#L250

FYI @ericzinnikas
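For context on the fix in the diff above: `dict.get()` only falls back to its default when the key is missing entirely, so a task record that explicitly stores `saved_paths: None` still returns `None` and the later iteration raises a TypeError. Below is a minimal standalone sketch of the guard, assuming a plain dict-shaped task record; the helper name `format_saved_paths` is invented here for illustration and is not part of Turbinia.

```python
def format_saved_paths(task):
    """Return one indented output line per saved path, tolerating saved_paths=None."""
    # dict.get()'s default only applies when the key is absent; an explicit
    # None stored under 'saved_paths' comes back as-is, so normalize it
    # before iterating.
    saved_paths = task.get('saved_paths', [])
    if saved_paths is None:
        saved_paths = []
    return ['\t{0:s}'.format(path) for path in saved_paths]


if __name__ == '__main__':
    print(format_saved_paths({'saved_paths': None}))              # [] instead of a TypeError
    print(format_saved_paths({'saved_paths': ['/tmp/out.log']}))  # ['\t/tmp/out.log']
```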
akvo__akvo-rsr-1704
[ { "content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nimport logging\nlogger = logging.getLogger('akvo.rsr')\n\nfrom django.conf import settings\nfrom django.db.models.signals import pre_save, post_save, post_delete\nfrom django.contrib.admin.models import LogEntry\n\nfrom akvo.api.models import create_api_key\n\nfrom ..signals import (\n change_name_of_file_on_change, change_name_of_file_on_create,\n create_publishing_status, create_organisation_account,\n create_payment_gateway_selector, donation_completed, act_on_log_entry,\n employment_post_save, employment_pre_save, update_project_budget,\n update_project_funding, create_iati_file)\n\nfrom .benchmark import Benchmark, Benchmarkname\nfrom .budget_item import BudgetItem, BudgetItemLabel, CountryBudgetItem\nfrom .country import Country, RecipientCountry\nfrom .crs_add import CrsAdd, CrsAddOtherFlag\nfrom .category import Category\nfrom .employment import Employment\nfrom .focus_area import FocusArea\nfrom .fss import Fss, FssForecast\nfrom .goal import Goal\nfrom .iati_export import IatiExport\nfrom .indicator import Indicator, IndicatorPeriod\nfrom .invoice import Invoice\nfrom .internal_organisation_id import InternalOrganisationID\nfrom .keyword import Keyword\nfrom .legacy_data import LegacyData\nfrom .link import Link\nfrom .location import (OrganisationLocation, ProjectLocation, ProjectUpdateLocation,\n AdministrativeLocation)\nfrom .organisation import Organisation\nfrom .organisation_account import OrganisationAccount\nfrom .partner_site import PartnerSite\nfrom .partner_type import PartnerType\nfrom .partnership import Partnership\nfrom .payment_gateway import PayPalGateway, MollieGateway, PaymentGatewaySelector\nfrom .planned_disbursement import PlannedDisbursement\nfrom .policy_marker import PolicyMarker\nfrom .project import Project\nfrom .project_comment import ProjectComment\nfrom .project_condition import ProjectCondition\nfrom .project_contact import ProjectContact\nfrom .project_document import ProjectDocument\nfrom .project_update import ProjectUpdate\nfrom .publishing_status import PublishingStatus\nfrom .region import RecipientRegion\nfrom .related_project import RelatedProject\nfrom .result import Result\nfrom .sector import Sector\nfrom .transaction import Transaction, TransactionSector\nfrom .user import User\n\n__all__ = [\n 'Benchmark',\n 'Benchmarkname',\n 'BudgetItem',\n 'BudgetItemLabel',\n 'CountryBudgetItem',\n 'Country',\n 'RecipientCountry',\n 'Category',\n 'CrsAdd',\n 'CrsAddOtherFlag',\n 'Employment',\n 'FocusArea',\n 'Fss',\n 'FssForecast',\n 'Goal',\n 'IatiExport',\n 'Indicator',\n 'IndicatorPeriod',\n 'Invoice',\n 'InternalOrganisationID',\n 'Keyword',\n 'LegacyData',\n 'Link',\n 'OrganisationLocation',\n 'ProjectLocation',\n 'AdministrativeLocation',\n 'ProjectUpdateLocation',\n 'Organisation',\n 'OrganisationAccount',\n 'PartnerSite',\n 'PartnerType',\n 'Partnership',\n 'PayPalGateway',\n 'MollieGateway',\n 'PaymentGatewaySelector',\n 'PlannedDisbursement',\n 'PolicyMarker',\n 'Project',\n 'ProjectComment',\n 'ProjectCondition',\n 'ProjectContact',\n 'ProjectDocument',\n 'ProjectUpdate',\n 'PublishingStatus',\n 'RecipientRegion',\n 'RelatedProject',\n 'Result',\n 'Sector',\n 'Transaction',\n 'TransactionSector',\n 'User',\n]\n\n# Permission 
rules\nimport rules\nfrom ..permissions import (is_rsr_admin, is_org_admin, is_org_user_manager,\n is_org_project_editor, is_org_user, is_self)\n\nrules.add_perm('rsr', rules.always_allow)\n\nrules.add_perm('rsr.add_benchmarkname', is_rsr_admin)\nrules.add_perm('rsr.change_benchmarkname', is_rsr_admin)\n\nrules.add_perm('rsr.add_country', is_rsr_admin)\nrules.add_perm('rsr.change_country', is_rsr_admin)\n\nrules.add_perm('rsr.add_budgetitemlabel', is_rsr_admin)\nrules.add_perm('rsr.change_budgetitemlabel', is_rsr_admin)\n\nrules.add_perm('rsr.add_category', is_rsr_admin)\nrules.add_perm('rsr.change_category', is_rsr_admin)\n\nrules.add_perm('rsr.add_focusarea', is_rsr_admin)\nrules.add_perm('rsr.change_focusarea', is_rsr_admin)\n\nrules.add_perm('rsr.add_indicator', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_indicator', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_indicator', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_indicatorperiod', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_indicatorperiod', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_indicatorperiod', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_keyword', is_rsr_admin)\nrules.add_perm('rsr.change_keyword', is_rsr_admin)\n\nrules.add_perm('rsr.add_partnersite', is_rsr_admin)\nrules.add_perm('rsr.change_partnersite', is_rsr_admin | is_org_admin)\n\nrules.add_perm('rsr.add_partnertype', is_rsr_admin)\nrules.add_perm('rsr.change_partnertype', is_rsr_admin)\n\nrules.add_perm('rsr.change_organisationaccount', is_rsr_admin)\n\nrules.add_perm('rsr.add_projectupdate', is_rsr_admin | is_org_admin | is_org_user_manager |\n is_org_project_editor | is_org_user)\nrules.add_perm('rsr.change_projectupdate', is_rsr_admin)\n\nrules.add_perm('rsr.add_projectupdatelocation', is_rsr_admin)\nrules.add_perm('rsr.change_projectupdatelocation', is_rsr_admin)\nrules.add_perm('rsr.delete_projectupdatelocation', is_rsr_admin)\n\nrules.add_perm('rsr.add_relatedproject', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_relatedproject', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_relatedproject', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_projectcomment', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_projectcomment', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_goal', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_goal', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_goal', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_projectlocation', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_projectlocation', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_projectlocation', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_budgetitem', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_budgetitem', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_budgetitem', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_benchmark', is_rsr_admin | is_org_admin | 
is_org_project_editor)\nrules.add_perm('rsr.change_benchmark', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_benchmark', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_partnership', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_partnership', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_partnership', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_link', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_link', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_link', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_projectcondition', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_projectcondition', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_projectcondition', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_projectcontact', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_projectcontact', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_projectcontact', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_countrybudgetitem', is_rsr_admin | is_org_admin)\nrules.add_perm('rsr.change_countrybudgetitem', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_countrybudgetitem', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_planneddisbursement', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_planneddisbursement', is_rsr_admin | is_org_admin |\n is_org_project_editor)\nrules.add_perm('rsr.delete_planneddisbursement', is_rsr_admin | is_org_admin |\n is_org_project_editor)\n\nrules.add_perm('rsr.add_policymarker', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_policymarker', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_policymarker', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_recipientcountry', is_rsr_admin | is_org_admin)\nrules.add_perm('rsr.change_recipientcountry', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_recipientcountry', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_recipientregion', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_recipientregion', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_recipientregion', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_result', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_result', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_result', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_sector', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_sector', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_sector', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_transaction', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_transaction', is_rsr_admin | is_org_admin | 
is_org_project_editor)\nrules.add_perm('rsr.delete_transaction', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_legacydata', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_legacydata', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_legacydata', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_projectdocument', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_projectdocument', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_projectdocument', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_organisation', is_rsr_admin)\nrules.add_perm('rsr.change_organisation', is_rsr_admin | is_org_admin)\n\nrules.add_perm('rsr.add_organisationlocation', is_rsr_admin | is_org_admin)\nrules.add_perm('rsr.change_organisationlocation', is_rsr_admin | is_org_admin)\nrules.add_perm('rsr.delete_organisationlocation', is_rsr_admin | is_org_admin)\n\nrules.add_perm('rsr.add_project', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_project', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.change_publishingstatus', is_rsr_admin | is_org_admin)\n\nrules.add_perm('rsr.add_user', is_rsr_admin)\nrules.add_perm('rsr.change_user', is_rsr_admin | is_org_admin | is_org_user_manager | is_self)\n\nrules.add_perm('tastypie.change_apikey', is_rsr_admin | is_org_admin | is_org_user_manager |\n is_org_project_editor)\n\nrules.add_perm('rsr.add_employment', is_rsr_admin)\nrules.add_perm('rsr.change_employment', is_rsr_admin | is_org_admin | is_org_user_manager)\n\nrules.add_perm('rsr.iati_management', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.user_management', is_rsr_admin | is_org_admin | is_org_user_manager)\n\nrules.add_perm('rsr.post_updates', is_rsr_admin | is_org_admin | is_org_user_manager |\n is_org_project_editor | is_org_user)\n\n\n# Signals\npre_save.connect(employment_pre_save, sender=Employment)\npost_save.connect(employment_post_save, sender=Employment)\n\npost_save.connect(create_organisation_account, sender=Organisation)\n\npost_save.connect(create_publishing_status, sender=Project)\npost_save.connect(create_payment_gateway_selector, sender=Project)\n\nif getattr(settings, \"DONATION_NOTIFICATION_EMAILS\", True):\n post_save.connect(donation_completed, sender=Invoice)\n\npost_save.connect(change_name_of_file_on_create, sender=Organisation)\npost_save.connect(change_name_of_file_on_create, sender=Project)\npost_save.connect(change_name_of_file_on_create, sender=ProjectUpdate)\npost_save.connect(act_on_log_entry, sender=LogEntry)\n\npre_save.connect(change_name_of_file_on_change, sender=Organisation)\npre_save.connect(change_name_of_file_on_change, sender=Project)\npre_save.connect(change_name_of_file_on_change, sender=ProjectUpdate)\n\npost_save.connect(update_project_budget, sender=BudgetItem)\npost_save.connect(update_project_funding, sender=Invoice)\npost_save.connect(update_project_funding, sender=Partnership)\n\npost_delete.connect(update_project_budget, sender=BudgetItem)\npost_delete.connect(update_project_funding, sender=Invoice)\npost_delete.connect(update_project_funding, sender=Partnership)\n\npost_save.connect(create_api_key, sender=User)\n\npost_save.connect(create_iati_file, sender=IatiExport)\n", "path": "akvo/rsr/models/__init__.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nimport logging\nlogger = logging.getLogger('akvo.rsr')\n\nfrom django.conf import settings\nfrom django.db.models.signals import pre_save, post_save, post_delete\nfrom django.contrib.admin.models import LogEntry\n\nfrom akvo.api.models import create_api_key\n\nfrom ..signals import (\n change_name_of_file_on_change, change_name_of_file_on_create,\n create_publishing_status, create_organisation_account,\n create_payment_gateway_selector, donation_completed, act_on_log_entry,\n employment_post_save, employment_pre_save, update_project_budget,\n update_project_funding, create_iati_file)\n\nfrom .benchmark import Benchmark, Benchmarkname\nfrom .budget_item import BudgetItem, BudgetItemLabel, CountryBudgetItem\nfrom .country import Country, RecipientCountry\nfrom .crs_add import CrsAdd, CrsAddOtherFlag\nfrom .category import Category\nfrom .employment import Employment\nfrom .focus_area import FocusArea\nfrom .fss import Fss, FssForecast\nfrom .goal import Goal\nfrom .iati_export import IatiExport\nfrom .indicator import Indicator, IndicatorPeriod\nfrom .invoice import Invoice\nfrom .internal_organisation_id import InternalOrganisationID\nfrom .keyword import Keyword\nfrom .legacy_data import LegacyData\nfrom .link import Link\nfrom .location import (OrganisationLocation, ProjectLocation, ProjectUpdateLocation,\n AdministrativeLocation)\nfrom .organisation import Organisation\nfrom .organisation_account import OrganisationAccount\nfrom .partner_site import PartnerSite\nfrom .partner_type import PartnerType\nfrom .partnership import Partnership\nfrom .payment_gateway import PayPalGateway, MollieGateway, PaymentGatewaySelector\nfrom .planned_disbursement import PlannedDisbursement\nfrom .policy_marker import PolicyMarker\nfrom .project import Project\nfrom .project_comment import ProjectComment\nfrom .project_condition import ProjectCondition\nfrom .project_contact import ProjectContact\nfrom .project_document import ProjectDocument\nfrom .project_update import ProjectUpdate\nfrom .publishing_status import PublishingStatus\nfrom .region import RecipientRegion\nfrom .related_project import RelatedProject\nfrom .result import Result\nfrom .sector import Sector\nfrom .transaction import Transaction, TransactionSector\nfrom .user import User\n\n__all__ = [\n 'Benchmark',\n 'Benchmarkname',\n 'BudgetItem',\n 'BudgetItemLabel',\n 'CountryBudgetItem',\n 'Country',\n 'RecipientCountry',\n 'Category',\n 'CrsAdd',\n 'CrsAddOtherFlag',\n 'Employment',\n 'FocusArea',\n 'Fss',\n 'FssForecast',\n 'Goal',\n 'IatiExport',\n 'Indicator',\n 'IndicatorPeriod',\n 'Invoice',\n 'InternalOrganisationID',\n 'Keyword',\n 'LegacyData',\n 'Link',\n 'OrganisationLocation',\n 'ProjectLocation',\n 'AdministrativeLocation',\n 'ProjectUpdateLocation',\n 'Organisation',\n 'OrganisationAccount',\n 'PartnerSite',\n 'PartnerType',\n 'Partnership',\n 'PayPalGateway',\n 'MollieGateway',\n 'PaymentGatewaySelector',\n 'PlannedDisbursement',\n 'PolicyMarker',\n 'Project',\n 'ProjectComment',\n 'ProjectCondition',\n 'ProjectContact',\n 'ProjectDocument',\n 'ProjectUpdate',\n 'PublishingStatus',\n 'RecipientRegion',\n 'RelatedProject',\n 'Result',\n 'Sector',\n 'Transaction',\n 'TransactionSector',\n 'User',\n]\n\n# Permission 
rules\nimport rules\nfrom ..permissions import (is_rsr_admin, is_org_admin, is_org_user_manager,\n is_org_project_editor, is_org_user, is_self)\n\nrules.add_perm('rsr', rules.always_allow)\n\nrules.add_perm('rsr.add_benchmarkname', is_rsr_admin)\nrules.add_perm('rsr.change_benchmarkname', is_rsr_admin)\n\nrules.add_perm('rsr.add_country', is_rsr_admin)\nrules.add_perm('rsr.change_country', is_rsr_admin)\n\nrules.add_perm('rsr.add_budgetitemlabel', is_rsr_admin)\nrules.add_perm('rsr.change_budgetitemlabel', is_rsr_admin)\n\nrules.add_perm('rsr.add_category', is_rsr_admin)\nrules.add_perm('rsr.change_category', is_rsr_admin)\n\nrules.add_perm('rsr.add_focusarea', is_rsr_admin)\nrules.add_perm('rsr.change_focusarea', is_rsr_admin)\n\nrules.add_perm('rsr.add_indicator', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_indicator', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_indicator', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_indicatorperiod', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_indicatorperiod', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_indicatorperiod', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_keyword', is_rsr_admin)\nrules.add_perm('rsr.change_keyword', is_rsr_admin)\n\nrules.add_perm('rsr.add_partnersite', is_rsr_admin)\nrules.add_perm('rsr.change_partnersite', is_rsr_admin | is_org_admin)\n\nrules.add_perm('rsr.add_partnertype', is_rsr_admin)\nrules.add_perm('rsr.change_partnertype', is_rsr_admin)\n\nrules.add_perm('rsr.change_organisationaccount', is_rsr_admin)\n\nrules.add_perm('rsr.add_projectupdate', is_rsr_admin | is_org_admin | is_org_user_manager |\n is_org_project_editor | is_org_user)\nrules.add_perm('rsr.change_projectupdate', is_rsr_admin)\n\nrules.add_perm('rsr.add_projectupdatelocation', is_rsr_admin)\nrules.add_perm('rsr.change_projectupdatelocation', is_rsr_admin)\nrules.add_perm('rsr.delete_projectupdatelocation', is_rsr_admin)\n\nrules.add_perm('rsr.add_relatedproject', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_relatedproject', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_relatedproject', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_projectcomment', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_projectcomment', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_goal', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_goal', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_goal', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_projectlocation', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_projectlocation', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_projectlocation', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_budgetitem', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_budgetitem', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_budgetitem', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_benchmark', is_rsr_admin | is_org_admin | 
is_org_project_editor)\nrules.add_perm('rsr.change_benchmark', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_benchmark', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_partnership', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_partnership', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_partnership', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_link', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_link', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_link', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_projectcondition', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_projectcondition', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_projectcondition', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_projectcontact', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_projectcontact', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_projectcontact', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_countrybudgetitem', is_rsr_admin | is_org_admin)\nrules.add_perm('rsr.change_countrybudgetitem', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_countrybudgetitem', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_planneddisbursement', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_planneddisbursement', is_rsr_admin | is_org_admin |\n is_org_project_editor)\nrules.add_perm('rsr.delete_planneddisbursement', is_rsr_admin | is_org_admin |\n is_org_project_editor)\n\nrules.add_perm('rsr.add_policymarker', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_policymarker', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_policymarker', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_recipientcountry', is_rsr_admin | is_org_admin)\nrules.add_perm('rsr.change_recipientcountry', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_recipientcountry', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_recipientregion', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_recipientregion', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_recipientregion', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_result', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_result', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_result', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_sector', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_sector', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_sector', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_transaction', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_transaction', is_rsr_admin | is_org_admin | 
is_org_project_editor)\nrules.add_perm('rsr.delete_transaction', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_legacydata', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_legacydata', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_legacydata', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_projectdocument', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_projectdocument', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.delete_projectdocument', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.add_organisation', is_rsr_admin)\nrules.add_perm('rsr.change_organisation', is_rsr_admin | is_org_admin)\n\nrules.add_perm('rsr.add_organisationlocation', is_rsr_admin | is_org_admin)\nrules.add_perm('rsr.change_organisationlocation', is_rsr_admin | is_org_admin)\nrules.add_perm('rsr.delete_organisationlocation', is_rsr_admin | is_org_admin)\n\nrules.add_perm('rsr.add_project', is_rsr_admin | is_org_admin | is_org_project_editor)\nrules.add_perm('rsr.change_project', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.change_publishingstatus', is_rsr_admin | is_org_admin)\n\nrules.add_perm('rsr.add_user', is_rsr_admin)\nrules.add_perm('rsr.change_user', is_rsr_admin | is_org_admin | is_org_user_manager | is_self)\n\nrules.add_perm('tastypie.change_apikey', is_rsr_admin | is_org_admin | is_org_user_manager |\n is_org_project_editor)\n\nrules.add_perm('rsr.add_employment', is_rsr_admin)\nrules.add_perm('rsr.change_employment', is_rsr_admin | is_org_admin | is_org_user_manager)\nrules.add_perm('rsr.delete_employment', is_rsr_admin | is_org_admin | is_org_user_manager)\n\nrules.add_perm('rsr.iati_management', is_rsr_admin | is_org_admin | is_org_project_editor)\n\nrules.add_perm('rsr.user_management', is_rsr_admin | is_org_admin | is_org_user_manager)\n\nrules.add_perm('rsr.post_updates', is_rsr_admin | is_org_admin | is_org_user_manager |\n is_org_project_editor | is_org_user)\n\n\n# Signals\npre_save.connect(employment_pre_save, sender=Employment)\npost_save.connect(employment_post_save, sender=Employment)\n\npost_save.connect(create_organisation_account, sender=Organisation)\n\npost_save.connect(create_publishing_status, sender=Project)\npost_save.connect(create_payment_gateway_selector, sender=Project)\n\nif getattr(settings, \"DONATION_NOTIFICATION_EMAILS\", True):\n post_save.connect(donation_completed, sender=Invoice)\n\npost_save.connect(change_name_of_file_on_create, sender=Organisation)\npost_save.connect(change_name_of_file_on_create, sender=Project)\npost_save.connect(change_name_of_file_on_create, sender=ProjectUpdate)\npost_save.connect(act_on_log_entry, sender=LogEntry)\n\npre_save.connect(change_name_of_file_on_change, sender=Organisation)\npre_save.connect(change_name_of_file_on_change, sender=Project)\npre_save.connect(change_name_of_file_on_change, sender=ProjectUpdate)\n\npost_save.connect(update_project_budget, sender=BudgetItem)\npost_save.connect(update_project_funding, sender=Invoice)\npost_save.connect(update_project_funding, sender=Partnership)\n\npost_delete.connect(update_project_budget, sender=BudgetItem)\npost_delete.connect(update_project_funding, sender=Invoice)\npost_delete.connect(update_project_funding, sender=Partnership)\n\npost_save.connect(create_api_key, sender=User)\n\npost_save.connect(create_iati_file, 
sender=IatiExport)\n", "path": "akvo/rsr/models/__init__.py" } ]
diff --git a/akvo/rsr/models/__init__.py b/akvo/rsr/models/__init__.py
index 8f6e7071d2..04395819db 100644
--- a/akvo/rsr/models/__init__.py
+++ b/akvo/rsr/models/__init__.py
@@ -265,6 +265,7 @@
 
 rules.add_perm('rsr.add_employment', is_rsr_admin)
 rules.add_perm('rsr.change_employment', is_rsr_admin | is_org_admin | is_org_user_manager)
+rules.add_perm('rsr.delete_employment', is_rsr_admin | is_org_admin | is_org_user_manager)
 
 rules.add_perm('rsr.iati_management', is_rsr_admin | is_org_admin | is_org_project_editor)
User permissions for deleting employments

Currently the organisation Admins and User Managers do not have the right to delete an employment. As they are managing these employments, they should be able to do so.

## Test plan

GIVEN the user management page
WHEN logged in as an Organisation Admin or User Manager
THEN the user should be able to delete employments
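To illustrate the permission rule added in the diff above: the `is_rsr_admin | is_org_admin | is_org_user_manager` expression combines role predicates so that the permission is granted when any single check passes. The following is a small self-contained sketch of that OR-composition; it does not use the actual `rules` library or Django's `User` model, and the `Predicate` class and dict-based users are stand-ins invented for illustration.

```python
class Predicate(object):
    """Tiny stand-in for a rules-style predicate supporting `|` composition."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, user):
        return bool(self.fn(user))

    def __or__(self, other):
        # The combined predicate grants access when either side does.
        return Predicate(lambda user: self(user) or other(user))


is_rsr_admin = Predicate(lambda user: user.get('is_admin', False))
is_org_admin = Predicate(lambda user: 'org_admin' in user.get('roles', ()))
is_org_user_manager = Predicate(lambda user: 'user_manager' in user.get('roles', ()))

PERMISSIONS = {
    'rsr.delete_employment': is_rsr_admin | is_org_admin | is_org_user_manager,
}


def has_perm(perm, user):
    return PERMISSIONS[perm](user)


if __name__ == '__main__':
    print(has_perm('rsr.delete_employment', {'roles': ('user_manager',)}))  # True
    print(has_perm('rsr.delete_employment', {'roles': ('org_user',)}))      # False
```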
deis__deis-1332
[ { "content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nData models for the Deis API.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport etcd\nimport importlib\nimport logging\nimport os\nimport subprocess\n\nfrom celery.canvas import group\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.db import models, connections\nfrom django.db.models import Max\nfrom django.db.models.signals import post_delete\nfrom django.db.models.signals import post_save\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django_fsm import FSMField, transition\nfrom django_fsm.signals import post_transition\nfrom json_field.fields import JSONField\n\nfrom api import fields, tasks\nfrom registry import publish_release\nfrom utils import dict_diff, fingerprint\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef log_event(app, msg, level=logging.INFO):\n msg = \"{}: {}\".format(app.id, msg)\n logger.log(level, msg)\n\n\ndef close_db_connections(func, *args, **kwargs):\n \"\"\"\n Decorator to close db connections during threaded execution\n\n Note this is necessary to work around:\n https://code.djangoproject.com/ticket/22420\n \"\"\"\n def _inner(*args, **kwargs):\n func(*args, **kwargs)\n for conn in connections.all():\n conn.close()\n return _inner\n\n\nclass AuditedModel(models.Model):\n \"\"\"Add created and updated fields to a model.\"\"\"\n\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n class Meta:\n \"\"\"Mark :class:`AuditedModel` as abstract.\"\"\"\n abstract = True\n\n\nclass UuidAuditedModel(AuditedModel):\n \"\"\"Add a UUID primary key to an :class:`AuditedModel`.\"\"\"\n\n uuid = fields.UuidField('UUID', primary_key=True)\n\n class Meta:\n \"\"\"Mark :class:`UuidAuditedModel` as abstract.\"\"\"\n abstract = True\n\n\n@python_2_unicode_compatible\nclass Cluster(UuidAuditedModel):\n \"\"\"\n Cluster used to run jobs\n \"\"\"\n\n CLUSTER_TYPES = (('mock', 'Mock Cluster'),\n ('coreos', 'CoreOS Cluster'),\n ('faulty', 'Faulty Cluster'))\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n id = models.CharField(max_length=128, unique=True)\n type = models.CharField(max_length=16, choices=CLUSTER_TYPES, default='coreos')\n\n domain = models.CharField(max_length=128)\n hosts = models.CharField(max_length=256)\n auth = models.TextField()\n options = JSONField(default='{}', blank=True)\n\n def __str__(self):\n return self.id\n\n def _get_scheduler(self, *args, **kwargs):\n module_name = 'scheduler.' 
+ self.type\n mod = importlib.import_module(module_name)\n return mod.SchedulerClient(self.id, self.hosts, self.auth,\n self.domain, self.options)\n\n _scheduler = property(_get_scheduler)\n\n def create(self):\n \"\"\"\n Initialize a cluster's router and log aggregator\n \"\"\"\n return tasks.create_cluster.delay(self).get()\n\n def destroy(self):\n \"\"\"\n Destroy a cluster's router and log aggregator\n \"\"\"\n return tasks.destroy_cluster.delay(self).get()\n\n\n@python_2_unicode_compatible\nclass App(UuidAuditedModel):\n \"\"\"\n Application used to service requests on behalf of end-users\n \"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n id = models.SlugField(max_length=64, unique=True)\n cluster = models.ForeignKey('Cluster')\n structure = JSONField(default='{}', blank=True)\n\n class Meta:\n permissions = (('use_app', 'Can use app'),)\n\n def __str__(self):\n return self.id\n\n def create(self, *args, **kwargs):\n config = Config.objects.create(owner=self.owner, app=self, values={})\n build = Build.objects.create(owner=self.owner, app=self, image=settings.DEFAULT_BUILD)\n Release.objects.create(version=1, owner=self.owner, app=self, config=config, build=build)\n\n def delete(self, *args, **kwargs):\n for c in self.container_set.all():\n c.destroy()\n return super(App, self).delete(*args, **kwargs)\n\n def deploy(self, release, initial=False):\n tasks.deploy_release.delay(self, release).get()\n if initial:\n # if there is no SHA, assume a docker image is being promoted\n if not release.build.sha:\n self.structure = {'cmd': 1}\n # if a dockerfile exists without a procfile, assume docker workflow\n elif release.build.dockerfile and not release.build.procfile:\n self.structure = {'cmd': 1}\n # if a procfile exists without a web entry, assume docker workflow\n elif release.build.procfile and not 'web' in release.build.procfile:\n self.structure = {'cmd': 1}\n # default to heroku workflow\n else:\n self.structure = {'web': 1}\n self.save()\n self.scale()\n\n def destroy(self, *args, **kwargs):\n return self.delete(*args, **kwargs)\n\n def scale(self, **kwargs): # noqa\n \"\"\"Scale containers up or down to match requested.\"\"\"\n requested_containers = self.structure.copy()\n release = self.release_set.latest()\n # test for available process types\n available_process_types = release.build.procfile or {}\n for container_type in requested_containers.keys():\n if container_type == 'cmd':\n continue # allow docker cmd types in case we don't have the image source\n if not container_type in available_process_types:\n raise EnvironmentError(\n 'Container type {} does not exist in application'.format(container_type))\n msg = 'Containers scaled ' + ' '.join(\n \"{}={}\".format(k, v) for k, v in requested_containers.items())\n # iterate and scale by container type (web, worker, etc)\n changed = False\n to_add, to_remove = [], []\n for container_type in requested_containers.keys():\n containers = list(self.container_set.filter(type=container_type).order_by('created'))\n # increment new container nums off the most recent container\n results = self.container_set.filter(type=container_type).aggregate(Max('num'))\n container_num = (results.get('num__max') or 0) + 1\n requested = requested_containers.pop(container_type)\n diff = requested - len(containers)\n if diff == 0:\n continue\n changed = True\n while diff < 0:\n c = containers.pop()\n to_remove.append(c)\n diff += 1\n while diff > 0:\n c = Container.objects.create(owner=self.owner,\n app=self,\n release=release,\n 
type=container_type,\n num=container_num)\n to_add.append(c)\n container_num += 1\n diff -= 1\n if changed:\n subtasks = []\n if to_add:\n subtasks.append(tasks.start_containers.s(to_add))\n if to_remove:\n subtasks.append(tasks.stop_containers.s(to_remove))\n group(*subtasks).apply_async().join()\n log_event(self, msg)\n return changed\n\n def logs(self):\n \"\"\"Return aggregated log data for this application.\"\"\"\n path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')\n if not os.path.exists(path):\n raise EnvironmentError('Could not locate logs')\n data = subprocess.check_output(['tail', '-n', str(settings.LOG_LINES), path])\n return data\n\n def run(self, command):\n \"\"\"Run a one-off command in an ephemeral app container.\"\"\"\n # TODO: add support for interactive shell\n log_event(self, \"deis run '{}'\".format(command))\n c_num = max([c.num for c in self.container_set.filter(type='admin')] or [0]) + 1\n c = Container.objects.create(owner=self.owner,\n app=self,\n release=self.release_set.latest(),\n type='admin',\n num=c_num)\n rc, output = tasks.run_command.delay(c, command).get()\n return rc, output\n\n\n@python_2_unicode_compatible\nclass Container(UuidAuditedModel):\n \"\"\"\n Docker container used to securely host an application process.\n \"\"\"\n INITIALIZED = 'initialized'\n CREATED = 'created'\n UP = 'up'\n DOWN = 'down'\n DESTROYED = 'destroyed'\n STATE_CHOICES = (\n (INITIALIZED, 'initialized'),\n (CREATED, 'created'),\n (UP, 'up'),\n (DOWN, 'down'),\n (DESTROYED, 'destroyed')\n )\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n release = models.ForeignKey('Release')\n type = models.CharField(max_length=128, blank=True)\n num = models.PositiveIntegerField()\n state = FSMField(default=INITIALIZED, choices=STATE_CHOICES, protected=True)\n\n def short_name(self):\n if self.type:\n return \"{}.{}.{}\".format(self.release.app.id, self.type, self.num)\n return \"{}.{}\".format(self.release.app.id, self.num)\n short_name.short_description = 'Name'\n\n def __str__(self):\n return self.short_name()\n\n class Meta:\n get_latest_by = '-created'\n ordering = ['created']\n\n def _get_job_id(self):\n app = self.app.id\n release = self.release\n version = \"v{}\".format(release.version)\n num = self.num\n c_type = self.type\n if not c_type:\n job_id = \"{app}_{version}.{num}\".format(**locals())\n else:\n job_id = \"{app}_{version}.{c_type}.{num}\".format(**locals())\n return job_id\n\n _job_id = property(_get_job_id)\n\n def _get_scheduler(self):\n return self.app.cluster._scheduler\n\n _scheduler = property(_get_scheduler)\n\n def _get_command(self):\n c_type = self.type\n if c_type:\n # handle special case for Dockerfile deployments\n if c_type == 'cmd':\n return ''\n else:\n return \"start {}\".format(c_type)\n else:\n return ''\n\n _command = property(_get_command)\n\n def _command_announceable(self):\n return self._command.lower() in ['start web', '']\n\n @close_db_connections\n @transition(field=state, source=INITIALIZED, target=CREATED)\n def create(self):\n image = self.release.image\n self._scheduler.create(name=self._job_id,\n image=image,\n command=self._command,\n use_announcer=self._command_announceable())\n\n @close_db_connections\n @transition(field=state,\n source=[CREATED, UP, DOWN],\n target=UP, crashed=DOWN)\n def start(self):\n self._scheduler.start(self._job_id, self._command_announceable())\n\n @close_db_connections\n @transition(field=state,\n source=[INITIALIZED, CREATED, UP, DOWN],\n target=UP,\n 
crashed=DOWN)\n def deploy(self, release):\n old_job_id = self._job_id\n # update release\n self.release = release\n self.save()\n # deploy new container\n new_job_id = self._job_id\n image = self.release.image\n c_type = self.type\n self._scheduler.create(name=new_job_id,\n image=image,\n command=self._command.format(**locals()),\n use_announcer=self._command_announceable())\n self._scheduler.start(new_job_id, self._command_announceable())\n # destroy old container\n self._scheduler.destroy(old_job_id, self._command_announceable())\n\n @close_db_connections\n @transition(field=state, source=UP, target=DOWN)\n def stop(self):\n self._scheduler.stop(self._job_id, self._command_announceable())\n\n @close_db_connections\n @transition(field=state,\n source=[INITIALIZED, CREATED, UP, DOWN],\n target=DESTROYED)\n def destroy(self):\n # TODO: add check for active connections before killing\n self._scheduler.destroy(self._job_id, self._command_announceable())\n\n @transition(field=state,\n source=[INITIALIZED, CREATED, DESTROYED],\n target=DESTROYED)\n def run(self, command):\n \"\"\"Run a one-off command\"\"\"\n rc, output = self._scheduler.run(self._job_id, self.release.image, command)\n return rc, output\n\n\n@python_2_unicode_compatible\nclass Push(UuidAuditedModel):\n \"\"\"\n Instance of a push used to trigger an application build\n \"\"\"\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n sha = models.CharField(max_length=40)\n\n fingerprint = models.CharField(max_length=255)\n receive_user = models.CharField(max_length=255)\n receive_repo = models.CharField(max_length=255)\n\n ssh_connection = models.CharField(max_length=255)\n ssh_original_command = models.CharField(max_length=255)\n\n class Meta:\n get_latest_by = 'created'\n ordering = ['-created']\n unique_together = (('app', 'uuid'),)\n\n def __str__(self):\n return \"{0}-{1}\".format(self.app.id, self.sha[:7])\n\n\n@python_2_unicode_compatible\nclass Build(UuidAuditedModel):\n \"\"\"\n Instance of a software build used by runtime nodes\n \"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n image = models.CharField(max_length=256)\n\n # optional fields populated by builder\n sha = models.CharField(max_length=40, blank=True)\n procfile = JSONField(default='{}', blank=True)\n dockerfile = models.TextField(blank=True)\n\n class Meta:\n get_latest_by = 'created'\n ordering = ['-created']\n unique_together = (('app', 'uuid'),)\n\n def __str__(self):\n return \"{0}-{1}\".format(self.app.id, self.uuid[:7])\n\n\n@python_2_unicode_compatible\nclass Config(UuidAuditedModel):\n \"\"\"\n Set of configuration values applied as environment variables\n during runtime execution of the Application.\n \"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n values = JSONField(default='{}', blank=True)\n\n class Meta:\n get_latest_by = 'created'\n ordering = ['-created']\n unique_together = (('app', 'uuid'),)\n\n def __str__(self):\n return \"{}-{}\".format(self.app.id, self.uuid[:7])\n\n\n@python_2_unicode_compatible\nclass Release(UuidAuditedModel):\n \"\"\"\n Software release deployed by the application platform\n\n Releases contain a :class:`Build` and a :class:`Config`.\n \"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n version = models.PositiveIntegerField()\n summary = models.TextField(blank=True, null=True)\n\n config = models.ForeignKey('Config')\n build = 
models.ForeignKey('Build')\n # NOTE: image contains combined build + config, ready to run\n image = models.CharField(max_length=256)\n\n class Meta:\n get_latest_by = 'created'\n ordering = ['-created']\n unique_together = (('app', 'version'),)\n\n def __str__(self):\n return \"{0}-v{1}\".format(self.app.id, self.version)\n\n def new(self, user, config=None, build=None, summary=None, source_version=None):\n \"\"\"\n Create a new application release using the provided Build and Config\n on behalf of a user.\n\n Releases start at v1 and auto-increment.\n \"\"\"\n if not config:\n config = self.config\n if not build:\n build = self.build\n if not source_version:\n source_version = 'latest'\n else:\n source_version = 'v{}'.format(source_version)\n # prepare release tag\n new_version = self.version + 1\n tag = 'v{}'.format(new_version)\n image = build.image + ':{tag}'.format(**locals())\n # create new release and auto-increment version\n release = Release.objects.create(\n owner=user, app=self.app, config=config,\n build=build, version=new_version, image=image, summary=summary)\n # publish release to registry as new docker image\n repository_path = self.app.id\n publish_release(repository_path,\n config.values,\n tag,\n source_tag=source_version)\n return release\n\n def previous(self):\n \"\"\"\n Return the previous Release to this one.\n\n :return: the previous :class:`Release`, or None\n \"\"\"\n releases = self.app.release_set\n if self.pk:\n releases = releases.exclude(pk=self.pk)\n try:\n # Get the Release previous to this one\n prev_release = releases.latest()\n except Release.DoesNotExist:\n prev_release = None\n return prev_release\n\n def save(self, *args, **kwargs):\n if not self.summary:\n self.summary = ''\n prev_release = self.previous()\n # compare this build to the previous build\n old_build = prev_release.build if prev_release else None\n # if the build changed, log it and who pushed it\n if self.build != old_build:\n if self.build.sha:\n self.summary += \"{} deployed {}\".format(self.build.owner, self.build.sha[:7])\n else:\n self.summary += \"{} deployed {}\".format(self.build.owner, self.build.image)\n # compare this config to the previous config\n old_config = prev_release.config if prev_release else None\n # if the config data changed, log the dict diff\n if self.config != old_config:\n dict1 = self.config.values\n dict2 = old_config.values if old_config else {}\n diff = dict_diff(dict1, dict2)\n # try to be as succinct as possible\n added = ', '.join(k for k in diff.get('added', {}))\n added = 'added ' + added if added else ''\n changed = ', '.join(k for k in diff.get('changed', {}))\n changed = 'changed ' + changed if changed else ''\n deleted = ', '.join(k for k in diff.get('deleted', {}))\n deleted = 'deleted ' + deleted if deleted else ''\n changes = ', '.join(i for i in (added, changed, deleted) if i)\n if changes:\n if self.summary:\n self.summary += ' and '\n self.summary += \"{} {}\".format(self.config.owner, changes)\n if not self.summary:\n if self.version == 1:\n self.summary = \"{} created the initial release\".format(self.owner)\n else:\n self.summary = \"{} changed nothing\".format(self.owner)\n super(Release, self).save(*args, **kwargs)\n\n\n@python_2_unicode_compatible\nclass Domain(AuditedModel):\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n domain = models.TextField(blank=False, null=False, unique=True)\n\n def __str__(self):\n return self.domain\n\n\n@python_2_unicode_compatible\nclass Key(UuidAuditedModel):\n 
\"\"\"An SSH public key.\"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n id = models.CharField(max_length=128)\n public = models.TextField(unique=True)\n\n class Meta:\n verbose_name = 'SSH Key'\n unique_together = (('owner', 'id'))\n\n def __str__(self):\n return \"{}...{}\".format(self.public[:18], self.public[-31:])\n\n\n# define update/delete callbacks for synchronizing\n# models with the configuration management backend\n\ndef _log_build_created(**kwargs):\n if kwargs.get('created'):\n build = kwargs['instance']\n log_event(build.app, \"Build {} created\".format(build))\n\n\ndef _log_release_created(**kwargs):\n if kwargs.get('created'):\n release = kwargs['instance']\n log_event(release.app, \"Release {} created\".format(release))\n\n\ndef _log_config_updated(**kwargs):\n config = kwargs['instance']\n log_event(config.app, \"Config {} updated\".format(config))\n\n\ndef _log_domain_added(**kwargs):\n domain = kwargs['instance']\n log_event(domain.app, \"Domain {} added\".format(domain))\n\n\ndef _log_domain_removed(**kwargs):\n domain = kwargs['instance']\n log_event(domain.app, \"Domain {} removed\".format(domain))\n\n\ndef _etcd_publish_key(**kwargs):\n key = kwargs['instance']\n _etcd_client.write('/deis/builder/users/{}/{}'.format(\n key.owner.username, fingerprint(key.public)), key.public)\n\n\ndef _etcd_purge_key(**kwargs):\n key = kwargs['instance']\n _etcd_client.delete('/deis/builder/users/{}/{}'.format(\n key.owner.username, fingerprint(key.public)))\n\n\ndef _etcd_purge_user(**kwargs):\n username = kwargs['instance'].username\n _etcd_client.delete('/deis/builder/users/{}'.format(username), dir=True, recursive=True)\n\n\ndef _etcd_publish_domains(**kwargs):\n app = kwargs['instance'].app\n app_domains = app.domain_set.all()\n if app_domains:\n _etcd_client.write('/deis/domains/{}'.format(app),\n ' '.join(str(d.domain) for d in app_domains))\n else:\n _etcd_client.delete('/deis/domains/{}'.format(app))\n\n\n# Log significant app-related events\npost_save.connect(_log_build_created, sender=Build, dispatch_uid='api.models.log')\npost_save.connect(_log_release_created, sender=Release, dispatch_uid='api.models.log')\npost_save.connect(_log_config_updated, sender=Config, dispatch_uid='api.models.log')\npost_save.connect(_log_domain_added, sender=Domain, dispatch_uid='api.models.log')\npost_delete.connect(_log_domain_removed, sender=Domain, dispatch_uid='api.models.log')\n\n\n# save FSM transitions as they happen\ndef _save_transition(**kwargs):\n kwargs['instance'].save()\n\npost_transition.connect(_save_transition)\n\n# wire up etcd publishing if we can connect\ntry:\n _etcd_client = etcd.Client(host=settings.ETCD_HOST, port=int(settings.ETCD_PORT))\n _etcd_client.get('/deis')\nexcept etcd.EtcdException:\n logger.log(logging.WARNING, 'Cannot synchronize with etcd cluster')\n _etcd_client = None\n\nif _etcd_client:\n post_save.connect(_etcd_publish_key, sender=Key, dispatch_uid='api.models')\n post_delete.connect(_etcd_purge_key, sender=Key, dispatch_uid='api.models')\n post_delete.connect(_etcd_purge_user, sender=User, dispatch_uid='api.models')\n post_save.connect(_etcd_publish_domains, sender=Domain, dispatch_uid='api.models')\n post_delete.connect(_etcd_publish_domains, sender=Domain, dispatch_uid='api.models')\n", "path": "controller/api/models.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nData models for the Deis API.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport etcd\nimport importlib\nimport logging\nimport os\nimport subprocess\n\nfrom celery.canvas import group\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.db import models, connections\nfrom django.db.models import Max\nfrom django.db.models.signals import post_delete\nfrom django.db.models.signals import post_save\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django_fsm import FSMField, transition\nfrom django_fsm.signals import post_transition\nfrom json_field.fields import JSONField\n\nfrom api import fields, tasks\nfrom registry import publish_release\nfrom utils import dict_diff, fingerprint\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef log_event(app, msg, level=logging.INFO):\n msg = \"{}: {}\".format(app.id, msg)\n logger.log(level, msg)\n\n\ndef close_db_connections(func, *args, **kwargs):\n \"\"\"\n Decorator to close db connections during threaded execution\n\n Note this is necessary to work around:\n https://code.djangoproject.com/ticket/22420\n \"\"\"\n def _inner(*args, **kwargs):\n func(*args, **kwargs)\n for conn in connections.all():\n conn.close()\n return _inner\n\n\nclass AuditedModel(models.Model):\n \"\"\"Add created and updated fields to a model.\"\"\"\n\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n class Meta:\n \"\"\"Mark :class:`AuditedModel` as abstract.\"\"\"\n abstract = True\n\n\nclass UuidAuditedModel(AuditedModel):\n \"\"\"Add a UUID primary key to an :class:`AuditedModel`.\"\"\"\n\n uuid = fields.UuidField('UUID', primary_key=True)\n\n class Meta:\n \"\"\"Mark :class:`UuidAuditedModel` as abstract.\"\"\"\n abstract = True\n\n\n@python_2_unicode_compatible\nclass Cluster(UuidAuditedModel):\n \"\"\"\n Cluster used to run jobs\n \"\"\"\n\n CLUSTER_TYPES = (('mock', 'Mock Cluster'),\n ('coreos', 'CoreOS Cluster'),\n ('faulty', 'Faulty Cluster'))\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n id = models.CharField(max_length=128, unique=True)\n type = models.CharField(max_length=16, choices=CLUSTER_TYPES, default='coreos')\n\n domain = models.CharField(max_length=128)\n hosts = models.CharField(max_length=256)\n auth = models.TextField()\n options = JSONField(default='{}', blank=True)\n\n def __str__(self):\n return self.id\n\n def _get_scheduler(self, *args, **kwargs):\n module_name = 'scheduler.' 
+ self.type\n mod = importlib.import_module(module_name)\n return mod.SchedulerClient(self.id, self.hosts, self.auth,\n self.domain, self.options)\n\n _scheduler = property(_get_scheduler)\n\n def create(self):\n \"\"\"\n Initialize a cluster's router and log aggregator\n \"\"\"\n return tasks.create_cluster.delay(self).get()\n\n def destroy(self):\n \"\"\"\n Destroy a cluster's router and log aggregator\n \"\"\"\n return tasks.destroy_cluster.delay(self).get()\n\n\n@python_2_unicode_compatible\nclass App(UuidAuditedModel):\n \"\"\"\n Application used to service requests on behalf of end-users\n \"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n id = models.SlugField(max_length=64, unique=True)\n cluster = models.ForeignKey('Cluster')\n structure = JSONField(default='{}', blank=True)\n\n class Meta:\n permissions = (('use_app', 'Can use app'),)\n\n def __str__(self):\n return self.id\n\n def create(self, *args, **kwargs):\n config = Config.objects.create(owner=self.owner, app=self, values={})\n build = Build.objects.create(owner=self.owner, app=self, image=settings.DEFAULT_BUILD)\n Release.objects.create(version=1, owner=self.owner, app=self, config=config, build=build)\n\n def delete(self, *args, **kwargs):\n for c in self.container_set.all():\n c.destroy()\n return super(App, self).delete(*args, **kwargs)\n\n def deploy(self, release, initial=False):\n tasks.deploy_release.delay(self, release).get()\n if initial:\n # if there is no SHA, assume a docker image is being promoted\n if not release.build.sha:\n self.structure = {'cmd': 1}\n # if a dockerfile exists without a procfile, assume docker workflow\n elif release.build.dockerfile and not release.build.procfile:\n self.structure = {'cmd': 1}\n # if a procfile exists without a web entry, assume docker workflow\n elif release.build.procfile and not 'web' in release.build.procfile:\n self.structure = {'cmd': 1}\n # default to heroku workflow\n else:\n self.structure = {'web': 1}\n self.save()\n self.scale()\n\n def destroy(self, *args, **kwargs):\n return self.delete(*args, **kwargs)\n\n def scale(self, **kwargs): # noqa\n \"\"\"Scale containers up or down to match requested.\"\"\"\n requested_containers = self.structure.copy()\n release = self.release_set.latest()\n # test for available process types\n available_process_types = release.build.procfile or {}\n for container_type in requested_containers.keys():\n if container_type == 'cmd':\n continue # allow docker cmd types in case we don't have the image source\n if not container_type in available_process_types:\n raise EnvironmentError(\n 'Container type {} does not exist in application'.format(container_type))\n msg = 'Containers scaled ' + ' '.join(\n \"{}={}\".format(k, v) for k, v in requested_containers.items())\n # iterate and scale by container type (web, worker, etc)\n changed = False\n to_add, to_remove = [], []\n for container_type in requested_containers.keys():\n containers = list(self.container_set.filter(type=container_type).order_by('created'))\n # increment new container nums off the most recent container\n results = self.container_set.filter(type=container_type).aggregate(Max('num'))\n container_num = (results.get('num__max') or 0) + 1\n requested = requested_containers.pop(container_type)\n diff = requested - len(containers)\n if diff == 0:\n continue\n changed = True\n while diff < 0:\n c = containers.pop()\n to_remove.append(c)\n diff += 1\n while diff > 0:\n c = Container.objects.create(owner=self.owner,\n app=self,\n release=release,\n 
type=container_type,\n num=container_num)\n to_add.append(c)\n container_num += 1\n diff -= 1\n if changed:\n subtasks = []\n if to_add:\n subtasks.append(tasks.start_containers.s(to_add))\n if to_remove:\n subtasks.append(tasks.stop_containers.s(to_remove))\n group(*subtasks).apply_async().join()\n log_event(self, msg)\n return changed\n\n def logs(self):\n \"\"\"Return aggregated log data for this application.\"\"\"\n path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')\n if not os.path.exists(path):\n raise EnvironmentError('Could not locate logs')\n data = subprocess.check_output(['tail', '-n', str(settings.LOG_LINES), path])\n return data\n\n def run(self, command):\n \"\"\"Run a one-off command in an ephemeral app container.\"\"\"\n # TODO: add support for interactive shell\n log_event(self, \"deis run '{}'\".format(command))\n c_num = max([c.num for c in self.container_set.filter(type='admin')] or [0]) + 1\n c = Container.objects.create(owner=self.owner,\n app=self,\n release=self.release_set.latest(),\n type='admin',\n num=c_num)\n rc, output = tasks.run_command.delay(c, command).get()\n return rc, output\n\n\n@python_2_unicode_compatible\nclass Container(UuidAuditedModel):\n \"\"\"\n Docker container used to securely host an application process.\n \"\"\"\n INITIALIZED = 'initialized'\n CREATED = 'created'\n UP = 'up'\n DOWN = 'down'\n DESTROYED = 'destroyed'\n STATE_CHOICES = (\n (INITIALIZED, 'initialized'),\n (CREATED, 'created'),\n (UP, 'up'),\n (DOWN, 'down'),\n (DESTROYED, 'destroyed')\n )\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n release = models.ForeignKey('Release')\n type = models.CharField(max_length=128, blank=True)\n num = models.PositiveIntegerField()\n state = FSMField(default=INITIALIZED, choices=STATE_CHOICES, protected=True)\n\n def short_name(self):\n if self.type:\n return \"{}.{}.{}\".format(self.release.app.id, self.type, self.num)\n return \"{}.{}\".format(self.release.app.id, self.num)\n short_name.short_description = 'Name'\n\n def __str__(self):\n return self.short_name()\n\n class Meta:\n get_latest_by = '-created'\n ordering = ['created']\n\n def _get_job_id(self):\n app = self.app.id\n release = self.release\n version = \"v{}\".format(release.version)\n num = self.num\n c_type = self.type\n if not c_type:\n job_id = \"{app}_{version}.{num}\".format(**locals())\n else:\n job_id = \"{app}_{version}.{c_type}.{num}\".format(**locals())\n return job_id\n\n _job_id = property(_get_job_id)\n\n def _get_scheduler(self):\n return self.app.cluster._scheduler\n\n _scheduler = property(_get_scheduler)\n\n def _get_command(self):\n c_type = self.type\n if c_type:\n # handle special case for Dockerfile deployments\n if c_type == 'cmd':\n return ''\n else:\n return \"start {}\".format(c_type)\n else:\n return ''\n\n _command = property(_get_command)\n\n def _command_announceable(self):\n return self._command.lower() in ['start web', '']\n\n @close_db_connections\n @transition(field=state, source=INITIALIZED, target=CREATED)\n def create(self):\n image = self.release.image\n self._scheduler.create(name=self._job_id,\n image=image,\n command=self._command,\n use_announcer=self._command_announceable())\n\n @close_db_connections\n @transition(field=state,\n source=[CREATED, UP, DOWN],\n target=UP, crashed=DOWN)\n def start(self):\n self._scheduler.start(self._job_id, self._command_announceable())\n\n @close_db_connections\n @transition(field=state,\n source=[INITIALIZED, CREATED, UP, DOWN],\n target=UP,\n 
crashed=DOWN)\n def deploy(self, release):\n old_job_id = self._job_id\n # update release\n self.release = release\n self.save()\n # deploy new container\n new_job_id = self._job_id\n image = self.release.image\n c_type = self.type\n self._scheduler.create(name=new_job_id,\n image=image,\n command=self._command.format(**locals()),\n use_announcer=self._command_announceable())\n self._scheduler.start(new_job_id, self._command_announceable())\n # destroy old container\n self._scheduler.destroy(old_job_id, self._command_announceable())\n\n @close_db_connections\n @transition(field=state, source=UP, target=DOWN)\n def stop(self):\n self._scheduler.stop(self._job_id, self._command_announceable())\n\n @close_db_connections\n @transition(field=state,\n source=[INITIALIZED, CREATED, UP, DOWN],\n target=DESTROYED)\n def destroy(self):\n # TODO: add check for active connections before killing\n self._scheduler.destroy(self._job_id, self._command_announceable())\n\n @transition(field=state,\n source=[INITIALIZED, CREATED, DESTROYED],\n target=DESTROYED)\n def run(self, command):\n \"\"\"Run a one-off command\"\"\"\n rc, output = self._scheduler.run(self._job_id, self.release.image, command)\n return rc, output\n\n\n@python_2_unicode_compatible\nclass Push(UuidAuditedModel):\n \"\"\"\n Instance of a push used to trigger an application build\n \"\"\"\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n sha = models.CharField(max_length=40)\n\n fingerprint = models.CharField(max_length=255)\n receive_user = models.CharField(max_length=255)\n receive_repo = models.CharField(max_length=255)\n\n ssh_connection = models.CharField(max_length=255)\n ssh_original_command = models.CharField(max_length=255)\n\n class Meta:\n get_latest_by = 'created'\n ordering = ['-created']\n unique_together = (('app', 'uuid'),)\n\n def __str__(self):\n return \"{0}-{1}\".format(self.app.id, self.sha[:7])\n\n\n@python_2_unicode_compatible\nclass Build(UuidAuditedModel):\n \"\"\"\n Instance of a software build used by runtime nodes\n \"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n image = models.CharField(max_length=256)\n\n # optional fields populated by builder\n sha = models.CharField(max_length=40, blank=True)\n procfile = JSONField(default='{}', blank=True)\n dockerfile = models.TextField(blank=True)\n\n class Meta:\n get_latest_by = 'created'\n ordering = ['-created']\n unique_together = (('app', 'uuid'),)\n\n def __str__(self):\n return \"{0}-{1}\".format(self.app.id, self.uuid[:7])\n\n\n@python_2_unicode_compatible\nclass Config(UuidAuditedModel):\n \"\"\"\n Set of configuration values applied as environment variables\n during runtime execution of the Application.\n \"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n values = JSONField(default='{}', blank=True)\n\n class Meta:\n get_latest_by = 'created'\n ordering = ['-created']\n unique_together = (('app', 'uuid'),)\n\n def __str__(self):\n return \"{}-{}\".format(self.app.id, self.uuid[:7])\n\n\n@python_2_unicode_compatible\nclass Release(UuidAuditedModel):\n \"\"\"\n Software release deployed by the application platform\n\n Releases contain a :class:`Build` and a :class:`Config`.\n \"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n version = models.PositiveIntegerField()\n summary = models.TextField(blank=True, null=True)\n\n config = models.ForeignKey('Config')\n build = 
models.ForeignKey('Build')\n # NOTE: image contains combined build + config, ready to run\n image = models.CharField(max_length=256)\n\n class Meta:\n get_latest_by = 'created'\n ordering = ['-created']\n unique_together = (('app', 'version'),)\n\n def __str__(self):\n return \"{0}-v{1}\".format(self.app.id, self.version)\n\n def new(self, user, config=None, build=None, summary=None, source_version=None):\n \"\"\"\n Create a new application release using the provided Build and Config\n on behalf of a user.\n\n Releases start at v1 and auto-increment.\n \"\"\"\n if not config:\n config = self.config\n if not build:\n build = self.build\n if not source_version:\n source_version = 'latest'\n else:\n source_version = 'v{}'.format(source_version)\n # prepare release tag\n new_version = self.version + 1\n tag = 'v{}'.format(new_version)\n image = build.image + ':{tag}'.format(**locals())\n # create new release and auto-increment version\n release = Release.objects.create(\n owner=user, app=self.app, config=config,\n build=build, version=new_version, image=image, summary=summary)\n # publish release to registry as new docker image\n repository_path = self.app.id\n publish_release(repository_path,\n config.values,\n tag,\n source_tag=source_version)\n return release\n\n def previous(self):\n \"\"\"\n Return the previous Release to this one.\n\n :return: the previous :class:`Release`, or None\n \"\"\"\n releases = self.app.release_set\n if self.pk:\n releases = releases.exclude(pk=self.pk)\n try:\n # Get the Release previous to this one\n prev_release = releases.latest()\n except Release.DoesNotExist:\n prev_release = None\n return prev_release\n\n def save(self, *args, **kwargs):\n if not self.summary:\n self.summary = ''\n prev_release = self.previous()\n # compare this build to the previous build\n old_build = prev_release.build if prev_release else None\n # if the build changed, log it and who pushed it\n if self.build != old_build:\n if self.build.sha:\n self.summary += \"{} deployed {}\".format(self.build.owner, self.build.sha[:7])\n else:\n self.summary += \"{} deployed {}\".format(self.build.owner, self.build.image)\n # compare this config to the previous config\n old_config = prev_release.config if prev_release else None\n # if the config data changed, log the dict diff\n if self.config != old_config:\n dict1 = self.config.values\n dict2 = old_config.values if old_config else {}\n diff = dict_diff(dict1, dict2)\n # try to be as succinct as possible\n added = ', '.join(k for k in diff.get('added', {}))\n added = 'added ' + added if added else ''\n changed = ', '.join(k for k in diff.get('changed', {}))\n changed = 'changed ' + changed if changed else ''\n deleted = ', '.join(k for k in diff.get('deleted', {}))\n deleted = 'deleted ' + deleted if deleted else ''\n changes = ', '.join(i for i in (added, changed, deleted) if i)\n if changes:\n if self.summary:\n self.summary += ' and '\n self.summary += \"{} {}\".format(self.config.owner, changes)\n if not self.summary:\n if self.version == 1:\n self.summary = \"{} created the initial release\".format(self.owner)\n else:\n self.summary = \"{} changed nothing\".format(self.owner)\n super(Release, self).save(*args, **kwargs)\n\n\n@python_2_unicode_compatible\nclass Domain(AuditedModel):\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n domain = models.TextField(blank=False, null=False, unique=True)\n\n def __str__(self):\n return self.domain\n\n\n@python_2_unicode_compatible\nclass Key(UuidAuditedModel):\n 
\"\"\"An SSH public key.\"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n id = models.CharField(max_length=128)\n public = models.TextField(unique=True)\n\n class Meta:\n verbose_name = 'SSH Key'\n unique_together = (('owner', 'id'))\n\n def __str__(self):\n return \"{}...{}\".format(self.public[:18], self.public[-31:])\n\n\n# define update/delete callbacks for synchronizing\n# models with the configuration management backend\n\ndef _log_build_created(**kwargs):\n if kwargs.get('created'):\n build = kwargs['instance']\n log_event(build.app, \"Build {} created\".format(build))\n\n\ndef _log_release_created(**kwargs):\n if kwargs.get('created'):\n release = kwargs['instance']\n log_event(release.app, \"Release {} created\".format(release))\n\n\ndef _log_config_updated(**kwargs):\n config = kwargs['instance']\n log_event(config.app, \"Config {} updated\".format(config))\n\n\ndef _log_domain_added(**kwargs):\n domain = kwargs['instance']\n log_event(domain.app, \"Domain {} added\".format(domain))\n\n\ndef _log_domain_removed(**kwargs):\n domain = kwargs['instance']\n log_event(domain.app, \"Domain {} removed\".format(domain))\n\n\ndef _etcd_publish_key(**kwargs):\n key = kwargs['instance']\n _etcd_client.write('/deis/builder/users/{}/{}'.format(\n key.owner.username, fingerprint(key.public)), key.public)\n\n\ndef _etcd_purge_key(**kwargs):\n key = kwargs['instance']\n _etcd_client.delete('/deis/builder/users/{}/{}'.format(\n key.owner.username, fingerprint(key.public)))\n\n\ndef _etcd_purge_user(**kwargs):\n username = kwargs['instance'].username\n try:\n _etcd_client.delete(\n '/deis/builder/users/{}'.format(username), dir=True, recursive=True)\n except KeyError:\n # If _etcd_publish_key() wasn't called, there is no user dir to delete.\n pass\n\n\ndef _etcd_publish_domains(**kwargs):\n app = kwargs['instance'].app\n app_domains = app.domain_set.all()\n if app_domains:\n _etcd_client.write('/deis/domains/{}'.format(app),\n ' '.join(str(d.domain) for d in app_domains))\n else:\n _etcd_client.delete('/deis/domains/{}'.format(app))\n\n\n# Log significant app-related events\npost_save.connect(_log_build_created, sender=Build, dispatch_uid='api.models.log')\npost_save.connect(_log_release_created, sender=Release, dispatch_uid='api.models.log')\npost_save.connect(_log_config_updated, sender=Config, dispatch_uid='api.models.log')\npost_save.connect(_log_domain_added, sender=Domain, dispatch_uid='api.models.log')\npost_delete.connect(_log_domain_removed, sender=Domain, dispatch_uid='api.models.log')\n\n\n# save FSM transitions as they happen\ndef _save_transition(**kwargs):\n kwargs['instance'].save()\n\npost_transition.connect(_save_transition)\n\n# wire up etcd publishing if we can connect\ntry:\n _etcd_client = etcd.Client(host=settings.ETCD_HOST, port=int(settings.ETCD_PORT))\n _etcd_client.get('/deis')\nexcept etcd.EtcdException:\n logger.log(logging.WARNING, 'Cannot synchronize with etcd cluster')\n _etcd_client = None\n\nif _etcd_client:\n post_save.connect(_etcd_publish_key, sender=Key, dispatch_uid='api.models')\n post_delete.connect(_etcd_purge_key, sender=Key, dispatch_uid='api.models')\n post_delete.connect(_etcd_purge_user, sender=User, dispatch_uid='api.models')\n post_save.connect(_etcd_publish_domains, sender=Domain, dispatch_uid='api.models')\n post_delete.connect(_etcd_publish_domains, sender=Domain, dispatch_uid='api.models')\n", "path": "controller/api/models.py" } ]
diff --git a/controller/api/models.py b/controller/api/models.py index 0e0e0c5704..0440a430a3 100644 --- a/controller/api/models.py +++ b/controller/api/models.py @@ -618,7 +618,12 @@ def _etcd_purge_key(**kwargs): def _etcd_purge_user(**kwargs): username = kwargs['instance'].username - _etcd_client.delete('/deis/builder/users/{}'.format(username), dir=True, recursive=True) + try: + _etcd_client.delete( + '/deis/builder/users/{}'.format(username), dir=True, recursive=True) + except KeyError: + # If _etcd_publish_key() wasn't called, there is no user dir to delete. + pass def _etcd_publish_domains(**kwargs):
deis auth:cancel doesn't delete the account steps to reproduce the issue ``` deis register http://deis.54.193.35.8.xip.io --username=Test --password=asdf1234 [email protected] Registered Test Logged in as Test sivarams-MacBook-Pro:integration ram$ deis auth:cancel Please log in again in order to cancel this account username: Test password: Logged in as Test Cancel account "Test" at http://deis.54.193.35.8.xip.io? (y/n) y Account cancelled sivarams-MacBook-Pro:integration ram$ deis register http://deis.54.193.35.8.xip.io --username=Test --password=asdf1234 [email protected] Registration failed {"username": ["User with this Username already exists."]} ``` auth:cancel says account cancelled but when I try to register with the same account it's not succeeding . controller logs ``` 2014-07-01 14:12:00 [119] [DEBUG] POST /api/auth/register 2014-07-01 14:12:01 [119] [DEBUG] GET /api/auth/login/ 2014-07-01 14:12:01 [120] [DEBUG] POST /api/auth/login/ 2014-07-01 14:12:14 [120] [DEBUG] GET /api/auth/login/ 2014-07-01 14:12:14 [120] [DEBUG] POST /api/auth/login/ 2014-07-01 14:12:16 [121] [DEBUG] DELETE /api/auth/cancel ERROR Internal Server Error: /api/auth/cancel Traceback (most recent call last): File "/usr/local/lib/python2.7/dist-packages/django/core/handlers/base.py", line 114, in get_response response = wrapped_callback(request, *callback_args, **callback_kwargs) File "/usr/local/lib/python2.7/dist-packages/rest_framework/viewsets.py", line 78, in view return self.dispatch(request, *args, **kwargs) File "/usr/local/lib/python2.7/dist-packages/django/views/decorators/csrf.py", line 57, in wrapped_view return view_func(*args, **kwargs) File "/usr/local/lib/python2.7/dist-packages/rest_framework/views.py", line 400, in dispatch response = self.handle_exception(exc) File "/usr/local/lib/python2.7/dist-packages/rest_framework/views.py", line 397, in dispatch response = handler(request, *args, **kwargs) File "/app/api/views.py", line 162, in destroy obj.delete() File "/usr/local/lib/python2.7/dist-packages/django/db/models/base.py", line 695, in delete collector.delete() File "/usr/local/lib/python2.7/dist-packages/django/db/models/deletion.py", line 282, in delete sender=model, instance=obj, using=self.using File "/usr/local/lib/python2.7/dist-packages/django/dispatch/dispatcher.py", line 185, in send response = receiver(signal=self, sender=sender, **named) File "/app/api/models.py", line 613, in _etcd_purge_user _etcd_client.delete('/deis/builder/users/{}'.format(username), dir=True, recursive=True) File "/usr/local/lib/python2.7/dist-packages/etcd/client.py", line 343, in delete self.key_endpoint + key, self._MDELETE, kwds) File "/usr/local/lib/python2.7/dist-packages/etcd/client.py", line 533, in api_execute return self._handle_server_response(response) File "/usr/local/lib/python2.7/dist-packages/etcd/client.py", line 549, in _handle_server_response etcd.EtcdError.handle(**r) File "/usr/local/lib/python2.7/dist-packages/etcd/__init__.py", line 110, in handle raise exc(msg) KeyError: 'Key not found : /deis/builder/users/Test' ```
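The deis record above pairs the `_etcd_purge_user` fix (see the pr_diff) with the traceback that motivated it: python-etcd's `delete()` raises `KeyError('Key not found : ...')` when the user never registered an SSH key, so `/deis/builder/users/<username>` was never written and the unguarded delete took down the whole `auth:cancel` request. Below is a minimal, self-contained sketch of that guard. `FakeEtcdClient` and `purge_user` are illustrative stand-ins (not deis or python-etcd code); the stub only mimics the KeyError behaviour shown in the traceback.

```python
class FakeEtcdClient:
    """Stand-in for python-etcd's Client; only mimics the KeyError on a missing key."""

    def __init__(self):
        self._store = {}

    def write(self, key, value):
        self._store[key] = value

    def delete(self, key, dir=False, recursive=False):
        matches = [k for k in self._store if k == key or k.startswith(key + '/')]
        if not matches:
            # python-etcd surfaces a missing key as KeyError('Key not found : ...')
            raise KeyError('Key not found : {}'.format(key))
        for k in matches:
            del self._store[k]


def purge_user(client, username):
    """Delete a user's builder keys, tolerating users that never published one."""
    try:
        client.delete('/deis/builder/users/{}'.format(username),
                      dir=True, recursive=True)
    except KeyError:
        # Nothing was ever published for this user, so there is nothing to purge.
        pass


if __name__ == '__main__':
    client = FakeEtcdClient()
    purge_user(client, 'Test')    # previously raised KeyError and broke auth:cancel
    client.write('/deis/builder/users/Alice/abc123', 'ssh-rsa AAAA...')
    purge_user(client, 'Alice')   # removes the published key as before
```

The same pattern is what the patch applies inside the `post_delete` signal handler, so cancelling an account that never published a key no longer aborts the Django delete.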
xonsh__xonsh-5328
[ { "content": "import functools\nimport json\nimport re\nimport shutil\nimport subprocess\nimport textwrap\nfrom pathlib import Path\n\nfrom xonsh.built_ins import XSH\nfrom xonsh.completers.tools import RichCompletion, contextual_command_completer\nfrom xonsh.parsers.completion_context import CommandContext\n\n\[email protected]\ndef get_man_completions_path() -> Path:\n env = XSH.env or {}\n datadir = Path(env[\"XONSH_DATA_DIR\"]) / \"generated_completions\" / \"man\"\n if datadir.exists() and (not datadir.is_dir()):\n shutil.move(datadir, datadir.with_suffix(\".bkp\"))\n if not datadir.exists():\n datadir.mkdir(exist_ok=True, parents=True)\n return datadir\n\n\ndef _get_man_page(cmd: str):\n \"\"\"without control characters\"\"\"\n env = XSH.env.detype()\n manpage = subprocess.Popen(\n [\"man\", cmd], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, env=env\n )\n # This is a trick to get rid of reverse line feeds\n return subprocess.check_output([\"col\", \"-b\"], stdin=manpage.stdout, env=env)\n\n\[email protected]\ndef _man_option_string_regex():\n return re.compile(\n r\"(?:(,\\s?)|^|(\\sor\\s))(?P<option>-[\\w]|--[\\w-]+)(?=\\[?(\\s|,|=\\w+|$))\"\n )\n\n\ndef generate_options_of(cmd: str):\n out = _get_man_page(cmd)\n if not out:\n return\n\n def get_headers(text: str):\n \"\"\"split as header-body based on indent\"\"\"\n if not text:\n return\n header = \"\"\n body = []\n for line in textwrap.dedent(text.replace(\"\\n\\t\", \"\\n \")).splitlines():\n if not line.strip():\n continue\n if line.startswith((\" \", \"\\t\")):\n body.append(line)\n else:\n if header or body:\n yield header, body\n\n # found new section\n header = line.strip()\n body = []\n if header or body:\n yield header, body\n\n def split_options_string(text: str):\n text = text.strip()\n regex = _man_option_string_regex()\n\n regex.findall(text)\n options = []\n for match in regex.finditer(text):\n option = match.groupdict().pop(\"option\", None)\n if option:\n options.append(option)\n text = text[match.end() :]\n return options, text.strip()\n\n def get_option_section():\n option_sect = dict(get_headers(out.decode()))\n small_names = {k.lower(): k for k in option_sect}\n for head in (\n \"options\",\n \"command options\",\n \"description\",\n ): # prefer sections in this order\n if head in small_names:\n title = small_names[head]\n return \"\\n\".join(option_sect[title])\n\n def get_options(text):\n \"\"\"finally get the options\"\"\"\n # return old section if\n for opt, lines in get_headers(text):\n # todo: some have [+-] or such vague notations\n if opt.startswith(\"-\"):\n # sometime a single line will have both desc and options\n option_strings, rest = split_options_string(opt)\n descs = []\n if rest:\n descs.append(rest)\n if lines:\n descs.append(textwrap.dedent(\"\\n\".join(lines)))\n if option_strings:\n yield \". 
\".join(descs), tuple(option_strings)\n elif lines:\n # sometimes the options are nested inside subheaders\n yield from get_options(\"\\n\".join(lines))\n\n yield from get_options(get_option_section())\n\n\[email protected]_cache(maxsize=10)\ndef _parse_man_page_options(cmd: str) -> \"dict[str, tuple[str, ...]]\":\n path = get_man_completions_path() / f\"{cmd}.json\"\n if path.exists():\n return json.loads(path.read_text())\n options = dict(generate_options_of(cmd))\n path.write_text(json.dumps(options))\n return options\n\n\n@contextual_command_completer\ndef complete_from_man(context: CommandContext):\n \"\"\"\n Completes an option name, based on the contents of the associated man\n page.\n \"\"\"\n\n if context.arg_index == 0 or not context.prefix.startswith(\"-\"):\n return\n cmd = context.args[0].value\n\n def completions():\n for desc, opts in _parse_man_page_options(cmd).items():\n yield RichCompletion(\n value=opts[-1], display=\", \".join(opts), description=desc\n )\n\n return completions(), False\n", "path": "xonsh/completers/man.py" } ]
[ { "content": "import functools\nimport json\nimport re\nimport shutil\nimport subprocess\nimport textwrap\nfrom pathlib import Path\n\nfrom xonsh.built_ins import XSH\nfrom xonsh.completers.tools import RichCompletion, contextual_command_completer\nfrom xonsh.parsers.completion_context import CommandContext\n\n\[email protected]\ndef get_man_completions_path() -> Path:\n env = XSH.env or {}\n datadir = Path(env[\"XONSH_DATA_DIR\"]) / \"generated_completions\" / \"man\"\n if datadir.exists() and (not datadir.is_dir()):\n shutil.move(datadir, datadir.with_suffix(\".bkp\"))\n if not datadir.exists():\n datadir.mkdir(exist_ok=True, parents=True)\n return datadir\n\n\ndef _get_man_page(cmd: str):\n \"\"\"without control characters\"\"\"\n env = XSH.env.detype()\n manpage = subprocess.Popen(\n [\"man\", cmd], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, env=env\n )\n # This is a trick to get rid of reverse line feeds\n return subprocess.check_output([\"col\", \"-b\"], stdin=manpage.stdout, env=env)\n\n\[email protected]\ndef _man_option_string_regex():\n return re.compile(\n r\"(?:(,\\s?)|^|(\\sor\\s))(?P<option>-[\\w]|--[\\w-]+)(?=\\[?(\\s|,|=\\w+|$))\"\n )\n\n\ndef generate_options_of(cmd: str):\n out = _get_man_page(cmd)\n if not out:\n return\n\n def get_headers(text: str):\n \"\"\"split as header-body based on indent\"\"\"\n if not text:\n return\n header = \"\"\n body = []\n for line in textwrap.dedent(text.replace(\"\\n\\t\", \"\\n \")).splitlines():\n if not line.strip():\n continue\n if line.startswith((\" \", \"\\t\")):\n body.append(line)\n else:\n if header or body:\n yield header, body\n\n # found new section\n header = line.strip()\n body = []\n if header or body:\n yield header, body\n\n def split_options_string(text: str):\n text = text.strip()\n regex = _man_option_string_regex()\n\n regex.findall(text)\n options = []\n for match in regex.finditer(text):\n option = match.groupdict().pop(\"option\", None)\n if option:\n options.append(option)\n text = text[match.end() :]\n return options, text.strip()\n\n def get_option_section():\n option_sect = dict(get_headers(out.decode()))\n small_names = {k.lower(): k for k in option_sect}\n for head in (\n \"options\",\n \"command options\",\n \"description\",\n ): # prefer sections in this order\n if head in small_names:\n title = small_names[head]\n return \"\\n\".join(option_sect[title])\n\n def get_options(text):\n \"\"\"finally get the options\"\"\"\n # return old section if\n for opt, lines in get_headers(text):\n # todo: some have [+-] or such vague notations\n if opt.startswith(\"-\"):\n # sometime a single line will have both desc and options\n option_strings, rest = split_options_string(opt)\n descs = []\n if rest:\n descs.append(rest)\n if lines:\n descs.append(textwrap.dedent(\"\\n\".join(lines)))\n if option_strings:\n yield \". 
\".join(descs), tuple(option_strings)\n elif lines:\n # sometimes the options are nested inside subheaders\n yield from get_options(\"\\n\".join(lines))\n\n yield from get_options(get_option_section())\n\n\[email protected]_cache(maxsize=10)\ndef _parse_man_page_options(cmd: str) -> \"dict[str, tuple[str, ...]]\":\n path = get_man_completions_path() / Path(cmd).with_suffix(\".json\").name\n if path.exists():\n return json.loads(path.read_text())\n options = dict(generate_options_of(cmd))\n path.write_text(json.dumps(options))\n return options\n\n\n@contextual_command_completer\ndef complete_from_man(context: CommandContext):\n \"\"\"\n Completes an option name, based on the contents of the associated man\n page.\n \"\"\"\n\n if context.arg_index == 0 or not context.prefix.startswith(\"-\"):\n return\n cmd = context.args[0].value\n\n def completions():\n for desc, opts in _parse_man_page_options(cmd).items():\n yield RichCompletion(\n value=opts[-1], display=\", \".join(opts), description=desc\n )\n\n return completions(), False\n", "path": "xonsh/completers/man.py" } ]
diff --git a/news/fix-abs-path-completions-on-keypress.rst b/news/fix-abs-path-completions-on-keypress.rst new file mode 100644 index 0000000000..8e7e072b3d --- /dev/null +++ b/news/fix-abs-path-completions-on-keypress.rst @@ -0,0 +1,23 @@ +**Added:** + +* <news item> + +**Changed:** + +* <news item> + +**Deprecated:** + +* <news item> + +**Removed:** + +* <news item> + +**Fixed:** + +* Fixed an issue with completions when using absolute paths to commands and having $UPDATE_COMPLETIONS_ON_KEYPRESS set to True. https://github.com/xonsh/xonsh/issues/5127 + +**Security:** + +* <news item> diff --git a/xonsh/completers/man.py b/xonsh/completers/man.py index a2e56f13d7..a0e57bfd92 100644 --- a/xonsh/completers/man.py +++ b/xonsh/completers/man.py @@ -114,7 +114,7 @@ def get_options(text): @functools.lru_cache(maxsize=10) def _parse_man_page_options(cmd: str) -> "dict[str, tuple[str, ...]]": - path = get_man_completions_path() / f"{cmd}.json" + path = get_man_completions_path() / Path(cmd).with_suffix(".json").name if path.exists(): return json.loads(path.read_text()) options = dict(generate_options_of(cmd))
Unexpected exception while updating completions <!--- Provide a general summary of the issue in the Title above --> When I set $UPDATE_COMPLETIONS_ON_KEYPRESS = True and type for instance /usr/bin/ls -a in terminal, following exception is thrown: "Exception [Errno 13] Permission denied: '/usr/bin/ls.json'" <!--- If you have a question along the lines of "How do I do this Bash command in xonsh" please first look over the Bash to Xonsh translation guide: https://xon.sh/bash_to_xsh.html If you don't find an answer there, please do open an issue! --> ## xonfig <details> ``` +------------------+----------------------+ | xonsh | 0.13.4 | | Python | 3.8.10 | | PLY | 3.11 | | have readline | True | | prompt toolkit | 3.0.36 | | shell type | prompt_toolkit | | history backend | json | | pygments | 2.14.0 | | on posix | True | | on linux | True | | distro | ubuntu | | on wsl | False | | on darwin | False | | on windows | False | | on cygwin | False | | on msys2 | False | | is superuser | False | | default encoding | utf-8 | | xonsh encoding | utf-8 | | encoding errors | surrogateescape | | xontrib | [] | | RC file 1 | /home/ralis/.xonshrc | +------------------+----------------------+ ``` </details> ## Expected Behavior <!--- Tell us what should happen --> The warning should be either more subtle or no completion suggestions should be shown. ## Current Behavior <!--- Tell us what happens instead of the expected behavior --> Huge multi-line error is printed. <!--- If part of your bug report is a traceback, please first enter debug mode before triggering the error To enter debug mode, set the environment variable `XONSH_DEBUG=1` _before_ starting `xonsh`. On Linux and OSX, an easy way to to do this is to run `env XONSH_DEBUG=1 xonsh` --> ### Traceback (if applicable) <details> ``` Unhandled exception in event loop: File "/home/ralis/.local/lib/python3.8/site-packages/prompt_toolkit/buffer.py", line 1939, in new_coroutine await coroutine(*a, **kw) File "/home/ralis/.local/lib/python3.8/site-packages/prompt_toolkit/buffer.py", line 1763, in async_completer async for completion in async_generator: File "/home/ralis/.local/lib/python3.8/site-packages/prompt_toolkit/completion/base.py", line 326, in get_completions_async async for completion in completer.get_completions_async( File "/home/ralis/.local/lib/python3.8/site-packages/prompt_toolkit/completion/base.py", line 202, in get_completions_async for item in self.get_completions(document, complete_event): File "/usr/local/lib/python3.8/dist-packages/xonsh/ptk_shell/completer.py", line 58, in get_completions completions, plen = self.completer.complete( File "/usr/local/lib/python3.8/dist-packages/xonsh/completer.py", line 121, in complete return self.complete_from_context( File "/usr/local/lib/python3.8/dist-packages/xonsh/completer.py", line 272, in complete_from_context for comp in self.generate_completions( File "/usr/local/lib/python3.8/dist-packages/xonsh/completer.py", line 233, in generate_completions for comp in res: File "/usr/local/lib/python3.8/dist-packages/xonsh/completers/man.py", line 137, in completions for desc, opts in _parse_man_page_options(cmd).items(): File "/usr/local/lib/python3.8/dist-packages/xonsh/completers/man.py", line 121, in _parse_man_page_options path.write_text(json.dumps(options)) File "/usr/lib/python3.8/pathlib.py", line 1255, in write_text with self.open(mode='w', encoding=encoding, errors=errors) as f: File "/usr/lib/python3.8/pathlib.py", line 1222, in open return io.open(self, mode, buffering, encoding, errors, 
newline, File "/usr/lib/python3.8/pathlib.py", line 1078, in _opener return self._accessor.open(self, flags, mode) Exception [Errno 13] Permission denied: '/usr/bin/ls.json' ``` </details> ## Steps to Reproduce <!--- Please try to write out a minimal reproducible snippet to trigger the bug, it will help us fix it! --> ```xsh $UPDATE_COMPLETIONS_ON_KEYPRESS = True /usr/bin/ls - # exception after typing ``` ## For community ⬇️ **Please click the 👍 reaction instead of leaving a `+1` or 👍 comment**
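The xonsh record above boils down to a path-handling bug: `_parse_man_page_options` built the cache file name with `f"{cmd}.json"`, so an absolute command such as `/usr/bin/ls` produced `/usr/bin/ls.json`, and joining that absolute string onto the cache directory discards the directory entirely, hence the `Permission denied`. The patch keeps only the basename via `Path(cmd).with_suffix(".json").name`. A small pathlib-only sketch of the before/after behaviour follows; `cache_file_for` and the example data directory are assumptions for illustration, not xonsh's actual API.

```python
from pathlib import Path


def cache_file_for(datadir: Path, cmd: str) -> Path:
    """New behaviour: keep only the command's basename when naming the cache file."""
    return datadir / Path(cmd).with_suffix(".json").name


# Example cache directory; the real one is derived from $XONSH_DATA_DIR.
datadir = Path.home() / ".local/share/xonsh/generated_completions/man"

print(cache_file_for(datadir, "ls"))           # .../man/ls.json
print(cache_file_for(datadir, "/usr/bin/ls"))  # .../man/ls.json, stays in the cache dir

# Old behaviour for comparison: f"{cmd}.json" keeps the leading directories, and
# joining an absolute path onto datadir discards datadir entirely.
cmd = "/usr/bin/ls"
print(datadir / f"{cmd}.json")                 # /usr/bin/ls.json -> Permission denied
```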
dbt-labs__dbt-core-3351
[ { "content": "import json\nimport re\nfrom contextlib import contextmanager\nfrom dataclasses import dataclass\nfrom functools import lru_cache\nimport agate\nfrom requests.exceptions import ConnectionError\nfrom typing import Optional, Any, Dict, Tuple\n\nimport google.auth\nimport google.auth.exceptions\nimport google.cloud.bigquery\nimport google.cloud.exceptions\nfrom google.api_core import retry, client_info\nfrom google.auth import impersonated_credentials\nfrom google.oauth2 import (\n credentials as GoogleCredentials,\n service_account as GoogleServiceAccountCredentials\n)\n\nfrom dbt.utils import format_bytes, format_rows_number\nfrom dbt.clients import agate_helper, gcloud\nfrom dbt.tracking import active_user\nfrom dbt.contracts.connection import ConnectionState, AdapterResponse\nfrom dbt.exceptions import (\n FailedToConnectException, RuntimeException, DatabaseException\n)\nfrom dbt.adapters.base import BaseConnectionManager, Credentials\nfrom dbt.logger import GLOBAL_LOGGER as logger\nfrom dbt.version import __version__ as dbt_version\n\nfrom dbt.dataclass_schema import StrEnum\n\n\nBQ_QUERY_JOB_SPLIT = '-----Query Job SQL Follows-----'\n\nWRITE_TRUNCATE = google.cloud.bigquery.job.WriteDisposition.WRITE_TRUNCATE\n\nREOPENABLE_ERRORS = (\n ConnectionResetError,\n ConnectionError,\n)\n\nRETRYABLE_ERRORS = (\n google.cloud.exceptions.ServerError,\n google.cloud.exceptions.BadRequest,\n ConnectionResetError,\n ConnectionError,\n)\n\n\n@lru_cache()\ndef get_bigquery_defaults(scopes=None) -> Tuple[Any, Optional[str]]:\n \"\"\"\n Returns (credentials, project_id)\n\n project_id is returned available from the environment; otherwise None\n \"\"\"\n # Cached, because the underlying implementation shells out, taking ~1s\n return google.auth.default(scopes=scopes)\n\n\nclass Priority(StrEnum):\n Interactive = 'interactive'\n Batch = 'batch'\n\n\nclass BigQueryConnectionMethod(StrEnum):\n OAUTH = 'oauth'\n SERVICE_ACCOUNT = 'service-account'\n SERVICE_ACCOUNT_JSON = 'service-account-json'\n OAUTH_SECRETS = 'oauth-secrets'\n\n\n@dataclass\nclass BigQueryAdapterResponse(AdapterResponse):\n bytes_processed: Optional[int] = None\n\n\n@dataclass\nclass BigQueryCredentials(Credentials):\n method: BigQueryConnectionMethod\n # BigQuery allows an empty database / project, where it defers to the\n # environment for the project\n database: Optional[str]\n timeout_seconds: Optional[int] = 300\n location: Optional[str] = None\n priority: Optional[Priority] = None\n retries: Optional[int] = 1\n maximum_bytes_billed: Optional[int] = None\n impersonate_service_account: Optional[str] = None\n\n # Keyfile json creds\n keyfile: Optional[str] = None\n keyfile_json: Optional[Dict[str, Any]] = None\n\n # oauth-secrets\n token: Optional[str] = None\n refresh_token: Optional[str] = None\n client_id: Optional[str] = None\n client_secret: Optional[str] = None\n token_uri: Optional[str] = None\n\n _ALIASES = {\n 'project': 'database',\n 'dataset': 'schema',\n }\n\n @property\n def type(self):\n return 'bigquery'\n\n def _connection_keys(self):\n return ('method', 'database', 'schema', 'location', 'priority',\n 'timeout_seconds', 'maximum_bytes_billed')\n\n @classmethod\n def __pre_deserialize__(cls, d: Dict[Any, Any]) -> Dict[Any, Any]:\n # We need to inject the correct value of the database (aka project) at\n # this stage, ref\n # https://github.com/fishtown-analytics/dbt/pull/2908#discussion_r532927436.\n\n # `database` is an alias of `project` in BigQuery\n if 'database' not in d:\n _, database = 
get_bigquery_defaults()\n d['database'] = database\n return d\n\n\nclass BigQueryConnectionManager(BaseConnectionManager):\n TYPE = 'bigquery'\n\n SCOPE = ('https://www.googleapis.com/auth/bigquery',\n 'https://www.googleapis.com/auth/cloud-platform',\n 'https://www.googleapis.com/auth/drive')\n\n QUERY_TIMEOUT = 300\n RETRIES = 1\n DEFAULT_INITIAL_DELAY = 1.0 # Seconds\n DEFAULT_MAXIMUM_DELAY = 1.0 # Seconds\n\n @classmethod\n def handle_error(cls, error, message):\n error_msg = \"\\n\".join([item['message'] for item in error.errors])\n raise DatabaseException(error_msg)\n\n def clear_transaction(self):\n pass\n\n @contextmanager\n def exception_handler(self, sql):\n try:\n yield\n\n except google.cloud.exceptions.BadRequest as e:\n message = \"Bad request while running query\"\n self.handle_error(e, message)\n\n except google.cloud.exceptions.Forbidden as e:\n message = \"Access denied while running query\"\n self.handle_error(e, message)\n\n except google.auth.exceptions.RefreshError as e:\n message = \"Unable to generate access token, if you're using \" \\\n \"impersonate_service_account, make sure your \" \\\n 'initial account has the \"roles/' \\\n 'iam.serviceAccountTokenCreator\" role on the ' \\\n 'account you are trying to impersonate.\\n\\n' \\\n f'{str(e)}'\n raise RuntimeException(message)\n\n except Exception as e:\n logger.debug(\"Unhandled error while running:\\n{}\".format(sql))\n logger.debug(e)\n if isinstance(e, RuntimeException):\n # during a sql query, an internal to dbt exception was raised.\n # this sounds a lot like a signal handler and probably has\n # useful information, so raise it without modification.\n raise\n exc_message = str(e)\n # the google bigquery library likes to add the query log, which we\n # don't want to log. 
Hopefully they never change this!\n if BQ_QUERY_JOB_SPLIT in exc_message:\n exc_message = exc_message.split(BQ_QUERY_JOB_SPLIT)[0].strip()\n raise RuntimeException(exc_message)\n\n def cancel_open(self) -> None:\n pass\n\n @classmethod\n def close(cls, connection):\n connection.state = ConnectionState.CLOSED\n\n return connection\n\n def begin(self):\n pass\n\n def commit(self):\n pass\n\n @classmethod\n def get_bigquery_credentials(cls, profile_credentials):\n method = profile_credentials.method\n creds = GoogleServiceAccountCredentials.Credentials\n\n if method == BigQueryConnectionMethod.OAUTH:\n credentials, _ = get_bigquery_defaults(scopes=cls.SCOPE)\n return credentials\n\n elif method == BigQueryConnectionMethod.SERVICE_ACCOUNT:\n keyfile = profile_credentials.keyfile\n return creds.from_service_account_file(keyfile, scopes=cls.SCOPE)\n\n elif method == BigQueryConnectionMethod.SERVICE_ACCOUNT_JSON:\n details = profile_credentials.keyfile_json\n return creds.from_service_account_info(details, scopes=cls.SCOPE)\n\n elif method == BigQueryConnectionMethod.OAUTH_SECRETS:\n return GoogleCredentials.Credentials(\n token=profile_credentials.token,\n refresh_token=profile_credentials.refresh_token,\n client_id=profile_credentials.client_id,\n client_secret=profile_credentials.client_secret,\n token_uri=profile_credentials.token_uri,\n scopes=cls.SCOPE\n )\n\n error = ('Invalid `method` in profile: \"{}\"'.format(method))\n raise FailedToConnectException(error)\n\n @classmethod\n def get_impersonated_bigquery_credentials(cls, profile_credentials):\n source_credentials = cls.get_bigquery_credentials(profile_credentials)\n return impersonated_credentials.Credentials(\n source_credentials=source_credentials,\n target_principal=profile_credentials.impersonate_service_account,\n target_scopes=list(cls.SCOPE),\n lifetime=profile_credentials.timeout_seconds,\n )\n\n @classmethod\n def get_bigquery_client(cls, profile_credentials):\n if profile_credentials.impersonate_service_account:\n creds =\\\n cls.get_impersonated_bigquery_credentials(profile_credentials)\n else:\n creds = cls.get_bigquery_credentials(profile_credentials)\n database = profile_credentials.database\n location = getattr(profile_credentials, 'location', None)\n\n info = client_info.ClientInfo(user_agent=f'dbt-{dbt_version}')\n return google.cloud.bigquery.Client(\n database,\n creds,\n location=location,\n client_info=info,\n )\n\n @classmethod\n def open(cls, connection):\n if connection.state == 'open':\n logger.debug('Connection is already open, skipping open.')\n return connection\n\n try:\n handle = cls.get_bigquery_client(connection.credentials)\n\n except google.auth.exceptions.DefaultCredentialsError:\n logger.info(\"Please log into GCP to continue\")\n gcloud.setup_default_credentials()\n\n handle = cls.get_bigquery_client(connection.credentials)\n\n except Exception as e:\n logger.debug(\"Got an error when attempting to create a bigquery \"\n \"client: '{}'\".format(e))\n\n connection.handle = None\n connection.state = 'fail'\n\n raise FailedToConnectException(str(e))\n\n connection.handle = handle\n connection.state = 'open'\n return connection\n\n @classmethod\n def get_timeout(cls, conn):\n credentials = conn.credentials\n return credentials.timeout_seconds\n\n @classmethod\n def get_retries(cls, conn) -> int:\n credentials = conn.credentials\n if credentials.retries is not None:\n return credentials.retries\n else:\n return 1\n\n @classmethod\n def get_table_from_response(cls, resp):\n column_names = [field.name for 
field in resp.schema]\n return agate_helper.table_from_data_flat(resp, column_names)\n\n def raw_execute(self, sql, fetch=False, *, use_legacy_sql=False):\n conn = self.get_thread_connection()\n client = conn.handle\n\n logger.debug('On {}: {}', conn.name, sql)\n\n if self.profile.query_comment.job_label:\n query_comment = self.query_header.comment.query_comment\n labels = self._labels_from_query_comment(query_comment)\n else:\n labels = {}\n\n if active_user:\n labels['dbt_invocation_id'] = active_user.invocation_id\n\n job_params = {'use_legacy_sql': use_legacy_sql, 'labels': labels}\n\n priority = conn.credentials.priority\n if priority == Priority.Batch:\n job_params['priority'] = google.cloud.bigquery.QueryPriority.BATCH\n else:\n job_params[\n 'priority'] = google.cloud.bigquery.QueryPriority.INTERACTIVE\n\n maximum_bytes_billed = conn.credentials.maximum_bytes_billed\n if maximum_bytes_billed is not None and maximum_bytes_billed != 0:\n job_params['maximum_bytes_billed'] = maximum_bytes_billed\n\n def fn():\n return self._query_and_results(client, sql, conn, job_params)\n\n query_job, iterator = self._retry_and_handle(msg=sql, conn=conn, fn=fn)\n\n return query_job, iterator\n\n def execute(\n self, sql, auto_begin=False, fetch=None\n ) -> Tuple[BigQueryAdapterResponse, agate.Table]:\n sql = self._add_query_comment(sql)\n # auto_begin is ignored on bigquery, and only included for consistency\n query_job, iterator = self.raw_execute(sql, fetch=fetch)\n\n if fetch:\n table = self.get_table_from_response(iterator)\n else:\n table = agate_helper.empty_table()\n\n message = 'OK'\n code = None\n num_rows = None\n bytes_processed = None\n\n if query_job.statement_type == 'CREATE_VIEW':\n code = 'CREATE VIEW'\n\n elif query_job.statement_type == 'CREATE_TABLE_AS_SELECT':\n conn = self.get_thread_connection()\n client = conn.handle\n query_table = client.get_table(query_job.destination)\n code = 'CREATE TABLE'\n num_rows = query_table.num_rows\n bytes_processed = query_job.total_bytes_processed\n message = '{} ({} rows, {} processed)'.format(\n code,\n format_rows_number(num_rows),\n format_bytes(bytes_processed)\n )\n\n elif query_job.statement_type == 'SCRIPT':\n code = 'SCRIPT'\n bytes_processed = query_job.total_bytes_processed\n message = f'{code} ({format_bytes(bytes_processed)} processed)'\n\n elif query_job.statement_type in ['INSERT', 'DELETE', 'MERGE']:\n code = query_job.statement_type\n num_rows = query_job.num_dml_affected_rows\n bytes_processed = query_job.total_bytes_processed\n message = '{} ({} rows, {} processed)'.format(\n code,\n format_rows_number(num_rows),\n format_bytes(bytes_processed),\n )\n\n response = BigQueryAdapterResponse(\n _message=message,\n rows_affected=num_rows,\n code=code,\n bytes_processed=bytes_processed\n )\n\n return response, table\n\n def get_partitions_metadata(self, table):\n def standard_to_legacy(table):\n return table.project + ':' + table.dataset + '.' + table.identifier\n\n legacy_sql = 'SELECT * FROM ['\\\n + standard_to_legacy(table) + '$__PARTITIONS_SUMMARY__]'\n\n sql = self._add_query_comment(legacy_sql)\n # auto_begin is ignored on bigquery, and only included for consistency\n _, iterator =\\\n self.raw_execute(sql, fetch='fetch_result', use_legacy_sql=True)\n return self.get_table_from_response(iterator)\n\n def create_bigquery_table(self, database, schema, table_name, callback,\n sql):\n \"\"\"Create a bigquery table. 
The caller must supply a callback\n that takes one argument, a `google.cloud.bigquery.Table`, and mutates\n it.\n \"\"\"\n conn = self.get_thread_connection()\n client = conn.handle\n\n view_ref = self.table_ref(database, schema, table_name, conn)\n view = google.cloud.bigquery.Table(view_ref)\n callback(view)\n\n def fn():\n return client.create_table(view)\n self._retry_and_handle(msg=sql, conn=conn, fn=fn)\n\n def create_view(self, database, schema, table_name, sql):\n def callback(table):\n table.view_query = sql\n table.view_use_legacy_sql = False\n\n self.create_bigquery_table(database, schema, table_name, callback, sql)\n\n def create_table(self, database, schema, table_name, sql):\n conn = self.get_thread_connection()\n client = conn.handle\n\n table_ref = self.table_ref(database, schema, table_name, conn)\n job_params = {'destination': table_ref,\n 'write_disposition': WRITE_TRUNCATE}\n\n timeout = self.get_timeout(conn)\n\n def fn():\n return self._query_and_results(client, sql, conn, job_params,\n timeout=timeout)\n self._retry_and_handle(msg=sql, conn=conn, fn=fn)\n\n def create_date_partitioned_table(self, database, schema, table_name):\n def callback(table):\n table.partitioning_type = 'DAY'\n\n self.create_bigquery_table(database, schema, table_name, callback,\n 'CREATE DAY PARTITIONED TABLE')\n\n def copy_bq_table(self, source, destination, write_disposition):\n conn = self.get_thread_connection()\n client = conn.handle\n\n source_ref = self.table_ref(\n source.database, source.schema, source.table, conn)\n destination_ref = self.table_ref(\n destination.database, destination.schema, destination.table, conn)\n\n logger.debug(\n 'Copying table \"{}\" to \"{}\" with disposition: \"{}\"',\n source_ref.path, destination_ref.path, write_disposition)\n\n def copy_and_results():\n job_config = google.cloud.bigquery.CopyJobConfig(\n write_disposition=write_disposition)\n copy_job = client.copy_table(\n source_ref, destination_ref, job_config=job_config)\n iterator = copy_job.result(timeout=self.get_timeout(conn))\n return copy_job, iterator\n\n self._retry_and_handle(\n msg='copy table \"{}\" to \"{}\"'.format(\n source_ref.path, destination_ref.path),\n conn=conn, fn=copy_and_results)\n\n @staticmethod\n def dataset(database, schema, conn):\n dataset_ref = conn.handle.dataset(schema, database)\n return google.cloud.bigquery.Dataset(dataset_ref)\n\n @staticmethod\n def dataset_from_id(dataset_id):\n return google.cloud.bigquery.Dataset.from_string(dataset_id)\n\n def table_ref(self, database, schema, table_name, conn):\n dataset = self.dataset(database, schema, conn)\n return dataset.table(table_name)\n\n def get_bq_table(self, database, schema, identifier):\n \"\"\"Get a bigquery table for a schema/model.\"\"\"\n conn = self.get_thread_connection()\n table_ref = self.table_ref(database, schema, identifier, conn)\n return conn.handle.get_table(table_ref)\n\n def drop_dataset(self, database, schema):\n conn = self.get_thread_connection()\n dataset = self.dataset(database, schema, conn)\n client = conn.handle\n\n def fn():\n return client.delete_dataset(\n dataset, delete_contents=True, not_found_ok=True)\n\n self._retry_and_handle(\n msg='drop dataset', conn=conn, fn=fn)\n\n def create_dataset(self, database, schema):\n conn = self.get_thread_connection()\n client = conn.handle\n dataset = self.dataset(database, schema, conn)\n\n def fn():\n return client.create_dataset(dataset, exists_ok=True)\n self._retry_and_handle(msg='create dataset', conn=conn, fn=fn)\n\n def 
_query_and_results(self, client, sql, conn, job_params, timeout=None):\n \"\"\"Query the client and wait for results.\"\"\"\n # Cannot reuse job_config if destination is set and ddl is used\n job_config = google.cloud.bigquery.QueryJobConfig(**job_params)\n query_job = client.query(sql, job_config=job_config)\n iterator = query_job.result(timeout=timeout)\n\n return query_job, iterator\n\n def _retry_and_handle(self, msg, conn, fn):\n \"\"\"retry a function call within the context of exception_handler.\"\"\"\n def reopen_conn_on_error(error):\n if isinstance(error, REOPENABLE_ERRORS):\n logger.warning('Reopening connection after {!r}', error)\n self.close(conn)\n self.open(conn)\n return\n\n with self.exception_handler(msg):\n return retry.retry_target(\n target=fn,\n predicate=_ErrorCounter(self.get_retries(conn)).count_error,\n sleep_generator=self._retry_generator(),\n deadline=None,\n on_error=reopen_conn_on_error)\n\n def _retry_generator(self):\n \"\"\"Generates retry intervals that exponentially back off.\"\"\"\n return retry.exponential_sleep_generator(\n initial=self.DEFAULT_INITIAL_DELAY,\n maximum=self.DEFAULT_MAXIMUM_DELAY)\n\n def _labels_from_query_comment(self, comment: str) -> Dict:\n try:\n comment_labels = json.loads(comment)\n except (TypeError, ValueError):\n return {'query_comment': _sanitize_label(comment)}\n return {\n _sanitize_label(key): _sanitize_label(str(value))\n for key, value in comment_labels.items()\n }\n\n\nclass _ErrorCounter(object):\n \"\"\"Counts errors seen up to a threshold then raises the next error.\"\"\"\n\n def __init__(self, retries):\n self.retries = retries\n self.error_count = 0\n\n def count_error(self, error):\n if self.retries == 0:\n return False # Don't log\n self.error_count += 1\n if _is_retryable(error) and self.error_count <= self.retries:\n logger.debug(\n 'Retry attempt {} of {} after error: {}',\n self.error_count, self.retries, repr(error))\n return True\n else:\n return False\n\n\ndef _is_retryable(error):\n \"\"\"Return true for errors that are unlikely to occur again if retried.\"\"\"\n if isinstance(error, RETRYABLE_ERRORS):\n return True\n elif isinstance(error, google.api_core.exceptions.Forbidden) and any(\n e['reason'] == 'rateLimitExceeded' for e in error.errors):\n return True\n return False\n\n\n_SANITIZE_LABEL_PATTERN = re.compile(r\"[^a-z0-9_-]\")\n\n\ndef _sanitize_label(value: str) -> str:\n \"\"\"Return a legal value for a BigQuery label.\"\"\"\n value = value.strip().lower()\n value = _SANITIZE_LABEL_PATTERN.sub(\"_\", value)\n return value\n", "path": "plugins/bigquery/dbt/adapters/bigquery/connections.py" } ]
[ { "content": "import json\nimport re\nfrom contextlib import contextmanager\nfrom dataclasses import dataclass\nfrom functools import lru_cache\nimport agate\nfrom requests.exceptions import ConnectionError\nfrom typing import Optional, Any, Dict, Tuple\n\nimport google.auth\nimport google.auth.exceptions\nimport google.cloud.bigquery\nimport google.cloud.exceptions\nfrom google.api_core import retry, client_info\nfrom google.auth import impersonated_credentials\nfrom google.oauth2 import (\n credentials as GoogleCredentials,\n service_account as GoogleServiceAccountCredentials\n)\n\nfrom dbt.utils import format_bytes, format_rows_number\nfrom dbt.clients import agate_helper, gcloud\nfrom dbt.tracking import active_user\nfrom dbt.contracts.connection import ConnectionState, AdapterResponse\nfrom dbt.exceptions import (\n FailedToConnectException, RuntimeException, DatabaseException\n)\nfrom dbt.adapters.base import BaseConnectionManager, Credentials\nfrom dbt.logger import GLOBAL_LOGGER as logger\nfrom dbt.version import __version__ as dbt_version\n\nfrom dbt.dataclass_schema import StrEnum\n\n\nBQ_QUERY_JOB_SPLIT = '-----Query Job SQL Follows-----'\n\nWRITE_TRUNCATE = google.cloud.bigquery.job.WriteDisposition.WRITE_TRUNCATE\n\nREOPENABLE_ERRORS = (\n ConnectionResetError,\n ConnectionError,\n)\n\nRETRYABLE_ERRORS = (\n google.cloud.exceptions.ServerError,\n google.cloud.exceptions.BadRequest,\n ConnectionResetError,\n ConnectionError,\n)\n\n\n@lru_cache()\ndef get_bigquery_defaults(scopes=None) -> Tuple[Any, Optional[str]]:\n \"\"\"\n Returns (credentials, project_id)\n\n project_id is returned available from the environment; otherwise None\n \"\"\"\n # Cached, because the underlying implementation shells out, taking ~1s\n return google.auth.default(scopes=scopes)\n\n\nclass Priority(StrEnum):\n Interactive = 'interactive'\n Batch = 'batch'\n\n\nclass BigQueryConnectionMethod(StrEnum):\n OAUTH = 'oauth'\n SERVICE_ACCOUNT = 'service-account'\n SERVICE_ACCOUNT_JSON = 'service-account-json'\n OAUTH_SECRETS = 'oauth-secrets'\n\n\n@dataclass\nclass BigQueryAdapterResponse(AdapterResponse):\n bytes_processed: Optional[int] = None\n\n\n@dataclass\nclass BigQueryCredentials(Credentials):\n method: BigQueryConnectionMethod\n # BigQuery allows an empty database / project, where it defers to the\n # environment for the project\n database: Optional[str]\n timeout_seconds: Optional[int] = 300\n location: Optional[str] = None\n priority: Optional[Priority] = None\n retries: Optional[int] = 1\n maximum_bytes_billed: Optional[int] = None\n impersonate_service_account: Optional[str] = None\n\n # Keyfile json creds\n keyfile: Optional[str] = None\n keyfile_json: Optional[Dict[str, Any]] = None\n\n # oauth-secrets\n token: Optional[str] = None\n refresh_token: Optional[str] = None\n client_id: Optional[str] = None\n client_secret: Optional[str] = None\n token_uri: Optional[str] = None\n\n _ALIASES = {\n 'project': 'database',\n 'dataset': 'schema',\n }\n\n @property\n def type(self):\n return 'bigquery'\n\n def _connection_keys(self):\n return ('method', 'database', 'schema', 'location', 'priority',\n 'timeout_seconds', 'maximum_bytes_billed')\n\n @classmethod\n def __pre_deserialize__(cls, d: Dict[Any, Any]) -> Dict[Any, Any]:\n # We need to inject the correct value of the database (aka project) at\n # this stage, ref\n # https://github.com/fishtown-analytics/dbt/pull/2908#discussion_r532927436.\n\n # `database` is an alias of `project` in BigQuery\n if 'database' not in d:\n _, database = 
get_bigquery_defaults()\n d['database'] = database\n return d\n\n\nclass BigQueryConnectionManager(BaseConnectionManager):\n TYPE = 'bigquery'\n\n SCOPE = ('https://www.googleapis.com/auth/bigquery',\n 'https://www.googleapis.com/auth/cloud-platform',\n 'https://www.googleapis.com/auth/drive')\n\n QUERY_TIMEOUT = 300\n RETRIES = 1\n DEFAULT_INITIAL_DELAY = 1.0 # Seconds\n DEFAULT_MAXIMUM_DELAY = 1.0 # Seconds\n\n @classmethod\n def handle_error(cls, error, message):\n error_msg = \"\\n\".join([item['message'] for item in error.errors])\n raise DatabaseException(error_msg)\n\n def clear_transaction(self):\n pass\n\n @contextmanager\n def exception_handler(self, sql):\n try:\n yield\n\n except google.cloud.exceptions.BadRequest as e:\n message = \"Bad request while running query\"\n self.handle_error(e, message)\n\n except google.cloud.exceptions.Forbidden as e:\n message = \"Access denied while running query\"\n self.handle_error(e, message)\n\n except google.auth.exceptions.RefreshError as e:\n message = \"Unable to generate access token, if you're using \" \\\n \"impersonate_service_account, make sure your \" \\\n 'initial account has the \"roles/' \\\n 'iam.serviceAccountTokenCreator\" role on the ' \\\n 'account you are trying to impersonate.\\n\\n' \\\n f'{str(e)}'\n raise RuntimeException(message)\n\n except Exception as e:\n logger.debug(\"Unhandled error while running:\\n{}\".format(sql))\n logger.debug(e)\n if isinstance(e, RuntimeException):\n # during a sql query, an internal to dbt exception was raised.\n # this sounds a lot like a signal handler and probably has\n # useful information, so raise it without modification.\n raise\n exc_message = str(e)\n # the google bigquery library likes to add the query log, which we\n # don't want to log. 
Hopefully they never change this!\n if BQ_QUERY_JOB_SPLIT in exc_message:\n exc_message = exc_message.split(BQ_QUERY_JOB_SPLIT)[0].strip()\n raise RuntimeException(exc_message)\n\n def cancel_open(self) -> None:\n pass\n\n @classmethod\n def close(cls, connection):\n connection.state = ConnectionState.CLOSED\n\n return connection\n\n def begin(self):\n pass\n\n def commit(self):\n pass\n\n @classmethod\n def get_bigquery_credentials(cls, profile_credentials):\n method = profile_credentials.method\n creds = GoogleServiceAccountCredentials.Credentials\n\n if method == BigQueryConnectionMethod.OAUTH:\n credentials, _ = get_bigquery_defaults(scopes=cls.SCOPE)\n return credentials\n\n elif method == BigQueryConnectionMethod.SERVICE_ACCOUNT:\n keyfile = profile_credentials.keyfile\n return creds.from_service_account_file(keyfile, scopes=cls.SCOPE)\n\n elif method == BigQueryConnectionMethod.SERVICE_ACCOUNT_JSON:\n details = profile_credentials.keyfile_json\n return creds.from_service_account_info(details, scopes=cls.SCOPE)\n\n elif method == BigQueryConnectionMethod.OAUTH_SECRETS:\n return GoogleCredentials.Credentials(\n token=profile_credentials.token,\n refresh_token=profile_credentials.refresh_token,\n client_id=profile_credentials.client_id,\n client_secret=profile_credentials.client_secret,\n token_uri=profile_credentials.token_uri,\n scopes=cls.SCOPE\n )\n\n error = ('Invalid `method` in profile: \"{}\"'.format(method))\n raise FailedToConnectException(error)\n\n @classmethod\n def get_impersonated_bigquery_credentials(cls, profile_credentials):\n source_credentials = cls.get_bigquery_credentials(profile_credentials)\n return impersonated_credentials.Credentials(\n source_credentials=source_credentials,\n target_principal=profile_credentials.impersonate_service_account,\n target_scopes=list(cls.SCOPE),\n lifetime=profile_credentials.timeout_seconds,\n )\n\n @classmethod\n def get_bigquery_client(cls, profile_credentials):\n if profile_credentials.impersonate_service_account:\n creds =\\\n cls.get_impersonated_bigquery_credentials(profile_credentials)\n else:\n creds = cls.get_bigquery_credentials(profile_credentials)\n database = profile_credentials.database\n location = getattr(profile_credentials, 'location', None)\n\n info = client_info.ClientInfo(user_agent=f'dbt-{dbt_version}')\n return google.cloud.bigquery.Client(\n database,\n creds,\n location=location,\n client_info=info,\n )\n\n @classmethod\n def open(cls, connection):\n if connection.state == 'open':\n logger.debug('Connection is already open, skipping open.')\n return connection\n\n try:\n handle = cls.get_bigquery_client(connection.credentials)\n\n except google.auth.exceptions.DefaultCredentialsError:\n logger.info(\"Please log into GCP to continue\")\n gcloud.setup_default_credentials()\n\n handle = cls.get_bigquery_client(connection.credentials)\n\n except Exception as e:\n logger.debug(\"Got an error when attempting to create a bigquery \"\n \"client: '{}'\".format(e))\n\n connection.handle = None\n connection.state = 'fail'\n\n raise FailedToConnectException(str(e))\n\n connection.handle = handle\n connection.state = 'open'\n return connection\n\n @classmethod\n def get_timeout(cls, conn):\n credentials = conn.credentials\n return credentials.timeout_seconds\n\n @classmethod\n def get_retries(cls, conn) -> int:\n credentials = conn.credentials\n if credentials.retries is not None:\n return credentials.retries\n else:\n return 1\n\n @classmethod\n def get_table_from_response(cls, resp):\n column_names = [field.name for 
field in resp.schema]\n return agate_helper.table_from_data_flat(resp, column_names)\n\n def raw_execute(self, sql, fetch=False, *, use_legacy_sql=False):\n conn = self.get_thread_connection()\n client = conn.handle\n\n logger.debug('On {}: {}', conn.name, sql)\n\n if self.profile.query_comment and self.profile.query_comment.job_label:\n query_comment = self.query_header.comment.query_comment\n labels = self._labels_from_query_comment(query_comment)\n else:\n labels = {}\n\n if active_user:\n labels['dbt_invocation_id'] = active_user.invocation_id\n\n job_params = {'use_legacy_sql': use_legacy_sql, 'labels': labels}\n\n priority = conn.credentials.priority\n if priority == Priority.Batch:\n job_params['priority'] = google.cloud.bigquery.QueryPriority.BATCH\n else:\n job_params[\n 'priority'] = google.cloud.bigquery.QueryPriority.INTERACTIVE\n\n maximum_bytes_billed = conn.credentials.maximum_bytes_billed\n if maximum_bytes_billed is not None and maximum_bytes_billed != 0:\n job_params['maximum_bytes_billed'] = maximum_bytes_billed\n\n def fn():\n return self._query_and_results(client, sql, conn, job_params)\n\n query_job, iterator = self._retry_and_handle(msg=sql, conn=conn, fn=fn)\n\n return query_job, iterator\n\n def execute(\n self, sql, auto_begin=False, fetch=None\n ) -> Tuple[BigQueryAdapterResponse, agate.Table]:\n sql = self._add_query_comment(sql)\n # auto_begin is ignored on bigquery, and only included for consistency\n query_job, iterator = self.raw_execute(sql, fetch=fetch)\n\n if fetch:\n table = self.get_table_from_response(iterator)\n else:\n table = agate_helper.empty_table()\n\n message = 'OK'\n code = None\n num_rows = None\n bytes_processed = None\n\n if query_job.statement_type == 'CREATE_VIEW':\n code = 'CREATE VIEW'\n\n elif query_job.statement_type == 'CREATE_TABLE_AS_SELECT':\n conn = self.get_thread_connection()\n client = conn.handle\n query_table = client.get_table(query_job.destination)\n code = 'CREATE TABLE'\n num_rows = query_table.num_rows\n bytes_processed = query_job.total_bytes_processed\n message = '{} ({} rows, {} processed)'.format(\n code,\n format_rows_number(num_rows),\n format_bytes(bytes_processed)\n )\n\n elif query_job.statement_type == 'SCRIPT':\n code = 'SCRIPT'\n bytes_processed = query_job.total_bytes_processed\n message = f'{code} ({format_bytes(bytes_processed)} processed)'\n\n elif query_job.statement_type in ['INSERT', 'DELETE', 'MERGE']:\n code = query_job.statement_type\n num_rows = query_job.num_dml_affected_rows\n bytes_processed = query_job.total_bytes_processed\n message = '{} ({} rows, {} processed)'.format(\n code,\n format_rows_number(num_rows),\n format_bytes(bytes_processed),\n )\n\n response = BigQueryAdapterResponse(\n _message=message,\n rows_affected=num_rows,\n code=code,\n bytes_processed=bytes_processed\n )\n\n return response, table\n\n def get_partitions_metadata(self, table):\n def standard_to_legacy(table):\n return table.project + ':' + table.dataset + '.' + table.identifier\n\n legacy_sql = 'SELECT * FROM ['\\\n + standard_to_legacy(table) + '$__PARTITIONS_SUMMARY__]'\n\n sql = self._add_query_comment(legacy_sql)\n # auto_begin is ignored on bigquery, and only included for consistency\n _, iterator =\\\n self.raw_execute(sql, fetch='fetch_result', use_legacy_sql=True)\n return self.get_table_from_response(iterator)\n\n def create_bigquery_table(self, database, schema, table_name, callback,\n sql):\n \"\"\"Create a bigquery table. 
The caller must supply a callback\n that takes one argument, a `google.cloud.bigquery.Table`, and mutates\n it.\n \"\"\"\n conn = self.get_thread_connection()\n client = conn.handle\n\n view_ref = self.table_ref(database, schema, table_name, conn)\n view = google.cloud.bigquery.Table(view_ref)\n callback(view)\n\n def fn():\n return client.create_table(view)\n self._retry_and_handle(msg=sql, conn=conn, fn=fn)\n\n def create_view(self, database, schema, table_name, sql):\n def callback(table):\n table.view_query = sql\n table.view_use_legacy_sql = False\n\n self.create_bigquery_table(database, schema, table_name, callback, sql)\n\n def create_table(self, database, schema, table_name, sql):\n conn = self.get_thread_connection()\n client = conn.handle\n\n table_ref = self.table_ref(database, schema, table_name, conn)\n job_params = {'destination': table_ref,\n 'write_disposition': WRITE_TRUNCATE}\n\n timeout = self.get_timeout(conn)\n\n def fn():\n return self._query_and_results(client, sql, conn, job_params,\n timeout=timeout)\n self._retry_and_handle(msg=sql, conn=conn, fn=fn)\n\n def create_date_partitioned_table(self, database, schema, table_name):\n def callback(table):\n table.partitioning_type = 'DAY'\n\n self.create_bigquery_table(database, schema, table_name, callback,\n 'CREATE DAY PARTITIONED TABLE')\n\n def copy_bq_table(self, source, destination, write_disposition):\n conn = self.get_thread_connection()\n client = conn.handle\n\n source_ref = self.table_ref(\n source.database, source.schema, source.table, conn)\n destination_ref = self.table_ref(\n destination.database, destination.schema, destination.table, conn)\n\n logger.debug(\n 'Copying table \"{}\" to \"{}\" with disposition: \"{}\"',\n source_ref.path, destination_ref.path, write_disposition)\n\n def copy_and_results():\n job_config = google.cloud.bigquery.CopyJobConfig(\n write_disposition=write_disposition)\n copy_job = client.copy_table(\n source_ref, destination_ref, job_config=job_config)\n iterator = copy_job.result(timeout=self.get_timeout(conn))\n return copy_job, iterator\n\n self._retry_and_handle(\n msg='copy table \"{}\" to \"{}\"'.format(\n source_ref.path, destination_ref.path),\n conn=conn, fn=copy_and_results)\n\n @staticmethod\n def dataset(database, schema, conn):\n dataset_ref = conn.handle.dataset(schema, database)\n return google.cloud.bigquery.Dataset(dataset_ref)\n\n @staticmethod\n def dataset_from_id(dataset_id):\n return google.cloud.bigquery.Dataset.from_string(dataset_id)\n\n def table_ref(self, database, schema, table_name, conn):\n dataset = self.dataset(database, schema, conn)\n return dataset.table(table_name)\n\n def get_bq_table(self, database, schema, identifier):\n \"\"\"Get a bigquery table for a schema/model.\"\"\"\n conn = self.get_thread_connection()\n table_ref = self.table_ref(database, schema, identifier, conn)\n return conn.handle.get_table(table_ref)\n\n def drop_dataset(self, database, schema):\n conn = self.get_thread_connection()\n dataset = self.dataset(database, schema, conn)\n client = conn.handle\n\n def fn():\n return client.delete_dataset(\n dataset, delete_contents=True, not_found_ok=True)\n\n self._retry_and_handle(\n msg='drop dataset', conn=conn, fn=fn)\n\n def create_dataset(self, database, schema):\n conn = self.get_thread_connection()\n client = conn.handle\n dataset = self.dataset(database, schema, conn)\n\n def fn():\n return client.create_dataset(dataset, exists_ok=True)\n self._retry_and_handle(msg='create dataset', conn=conn, fn=fn)\n\n def 
_query_and_results(self, client, sql, conn, job_params, timeout=None):\n \"\"\"Query the client and wait for results.\"\"\"\n # Cannot reuse job_config if destination is set and ddl is used\n job_config = google.cloud.bigquery.QueryJobConfig(**job_params)\n query_job = client.query(sql, job_config=job_config)\n iterator = query_job.result(timeout=timeout)\n\n return query_job, iterator\n\n def _retry_and_handle(self, msg, conn, fn):\n \"\"\"retry a function call within the context of exception_handler.\"\"\"\n def reopen_conn_on_error(error):\n if isinstance(error, REOPENABLE_ERRORS):\n logger.warning('Reopening connection after {!r}', error)\n self.close(conn)\n self.open(conn)\n return\n\n with self.exception_handler(msg):\n return retry.retry_target(\n target=fn,\n predicate=_ErrorCounter(self.get_retries(conn)).count_error,\n sleep_generator=self._retry_generator(),\n deadline=None,\n on_error=reopen_conn_on_error)\n\n def _retry_generator(self):\n \"\"\"Generates retry intervals that exponentially back off.\"\"\"\n return retry.exponential_sleep_generator(\n initial=self.DEFAULT_INITIAL_DELAY,\n maximum=self.DEFAULT_MAXIMUM_DELAY)\n\n def _labels_from_query_comment(self, comment: str) -> Dict:\n try:\n comment_labels = json.loads(comment)\n except (TypeError, ValueError):\n return {'query_comment': _sanitize_label(comment)}\n return {\n _sanitize_label(key): _sanitize_label(str(value))\n for key, value in comment_labels.items()\n }\n\n\nclass _ErrorCounter(object):\n \"\"\"Counts errors seen up to a threshold then raises the next error.\"\"\"\n\n def __init__(self, retries):\n self.retries = retries\n self.error_count = 0\n\n def count_error(self, error):\n if self.retries == 0:\n return False # Don't log\n self.error_count += 1\n if _is_retryable(error) and self.error_count <= self.retries:\n logger.debug(\n 'Retry attempt {} of {} after error: {}',\n self.error_count, self.retries, repr(error))\n return True\n else:\n return False\n\n\ndef _is_retryable(error):\n \"\"\"Return true for errors that are unlikely to occur again if retried.\"\"\"\n if isinstance(error, RETRYABLE_ERRORS):\n return True\n elif isinstance(error, google.api_core.exceptions.Forbidden) and any(\n e['reason'] == 'rateLimitExceeded' for e in error.errors):\n return True\n return False\n\n\n_SANITIZE_LABEL_PATTERN = re.compile(r\"[^a-z0-9_-]\")\n\n\ndef _sanitize_label(value: str) -> str:\n \"\"\"Return a legal value for a BigQuery label.\"\"\"\n value = value.strip().lower()\n value = _SANITIZE_LABEL_PATTERN.sub(\"_\", value)\n return value\n", "path": "plugins/bigquery/dbt/adapters/bigquery/connections.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index 562a46b4ca6..bd672ed05f0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ - Fix unique_id generation for generic tests so tests with the same FQN but different configuration will run. ([#3254](https://github.com/fishtown-analytics/dbt/issues/3254), [#3335](https://github.com/fishtown-analytics/dbt/issues/3335)) - Update the snowflake adapter to only comment on a column if it exists when using the persist_docs config ([#3039](https://github.com/fishtown-analytics/dbt/issues/3039), [#3149](https://github.com/fishtown-analytics/dbt/pull/3149)) - Separate `compiled_path` from `build_path`, and print the former alongside node error messages ([#1985](https://github.com/fishtown-analytics/dbt/issues/1985), [#3327](https://github.com/fishtown-analytics/dbt/pull/3327)) +- Fix exception caused when running `dbt debug` with BigQuery connections ([#3314](https://github.com/fishtown-analytics/dbt/issues/3314), [#3351](https://github.com/fishtown-analytics/dbt/pull/3351)) ### Under the hood - Added logic for registry requests to raise a timeout error after a response hangs out for 30 seconds and 5 attempts have been made to reach the endpoint ([#3177](https://github.com/fishtown-analytics/dbt/issues/3177), [#3275](https://github.com/fishtown-analytics/dbt/pull/3275)) diff --git a/plugins/bigquery/dbt/adapters/bigquery/connections.py b/plugins/bigquery/dbt/adapters/bigquery/connections.py index e0140c38c01..137b02d4f48 100644 --- a/plugins/bigquery/dbt/adapters/bigquery/connections.py +++ b/plugins/bigquery/dbt/adapters/bigquery/connections.py @@ -309,7 +309,7 @@ def raw_execute(self, sql, fetch=False, *, use_legacy_sql=False): logger.debug('On {}: {}', conn.name, sql) - if self.profile.query_comment.job_label: + if self.profile.query_comment and self.profile.query_comment.job_label: query_comment = self.query_header.comment.query_comment labels = self._labels_from_query_comment(query_comment) else: diff --git a/test/integration/049_dbt_debug_test/test_debug.py b/test/integration/049_dbt_debug_test/test_debug.py index 8a5fbd774f3..74fabfcbc7b 100644 --- a/test/integration/049_dbt_debug_test/test_debug.py +++ b/test/integration/049_dbt_debug_test/test_debug.py @@ -64,6 +64,11 @@ def test_postgres_ok(self): self.run_dbt(['debug']) self.assertNotIn('ERROR', self.capsys.readouterr().out) + @use_profile('bigquery') + def test_bigquery_ok(self): + self.run_dbt(['debug']) + self.assertNotIn('ERROR', self.capsys.readouterr().out) + @use_profile('postgres') def test_postgres_nopass(self): self.run_dbt(['debug', '--target', 'nopass'], expect_pass=False)
'NoneType' object has no attribute 'job_label' I am trying to configure the BigQuery connection profile. Somehow I get the following error message after running `dbt debug`: >'NoneType' object has no attribute 'job_label' I'm not sure how to debug this. Can someone give me a hint, please? ![image](https://user-images.githubusercontent.com/14008800/116947235-4d994f00-acd0-11eb-8eda-171b4fb638f4.png)
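The fix in the diff above comes down to a None guard before reading `job_label`: `profile.query_comment` can be unset, so it must be truth-checked before its attribute is accessed. Below is a minimal, self-contained sketch of that guarded access; the `QueryComment` and `Profile` dataclasses here are hypothetical stand-ins for illustration, not dbt's real classes.

```python
# Sketch of the None-guard pattern applied in the diff above.
# QueryComment and Profile are hypothetical stand-ins, not dbt's actual objects.
from dataclasses import dataclass
from typing import Optional


@dataclass
class QueryComment:
    job_label: bool = False


@dataclass
class Profile:
    # query_comment may be absent (None) when no query-comment is configured
    query_comment: Optional[QueryComment] = None


def wants_job_labels(profile: Profile) -> bool:
    # Before the fix: `profile.query_comment.job_label` raised
    # "'NoneType' object has no attribute 'job_label'" when query_comment was None.
    # After the fix: short-circuit on None first, then read job_label.
    return bool(profile.query_comment and profile.query_comment.job_label)


if __name__ == "__main__":
    print(wants_job_labels(Profile()))                              # False, no crash
    print(wants_job_labels(Profile(QueryComment(job_label=True))))  # True
```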
dj-stripe__dj-stripe-980
[ { "content": "import stripe\nfrom django.db import models, transaction\nfrom stripe.error import InvalidRequestError\n\nfrom .. import enums\nfrom .. import settings as djstripe_settings\nfrom ..exceptions import StripeObjectManipulationException\nfrom ..fields import (\n JSONField,\n StripeCurrencyCodeField,\n StripeDecimalCurrencyAmountField,\n StripeEnumField,\n)\nfrom .base import StripeModel, logger\nfrom .core import Customer\n\n\nclass DjstripePaymentMethod(models.Model):\n \"\"\"\n An internal model that abstracts the legacy Card and BankAccount\n objects with Source objects.\n\n Contains two fields: `id` and `type`:\n - `id` is the id of the Stripe object.\n - `type` can be `card`, `bank_account` or `source`.\n \"\"\"\n\n id = models.CharField(max_length=255, primary_key=True)\n type = models.CharField(max_length=12, db_index=True)\n\n @classmethod\n def from_stripe_object(cls, data):\n source_type = data[\"object\"]\n model = cls._model_for_type(source_type)\n\n with transaction.atomic():\n model.sync_from_stripe_data(data)\n instance, _ = cls.objects.get_or_create(\n id=data[\"id\"], defaults={\"type\": source_type}\n )\n\n return instance\n\n @classmethod\n def _get_or_create_source(cls, data, source_type):\n try:\n model = cls._model_for_type(source_type)\n model._get_or_create_from_stripe_object(data)\n except ValueError as e:\n # This may happen if we have source types we don't know about.\n # Let's not make dj-stripe entirely unusable if that happens.\n logger.warning(\"Could not sync source of type %r: %s\", source_type, e)\n\n return cls.objects.get_or_create(id=data[\"id\"], defaults={\"type\": source_type})\n\n @classmethod\n def _model_for_type(cls, type):\n if type == \"card\":\n return Card\n elif type == \"source\":\n return Source\n elif type == \"bank_account\":\n return BankAccount\n\n raise ValueError(\"Unknown source type: {}\".format(type))\n\n @property\n def object_model(self):\n return self._model_for_type(self.type)\n\n def resolve(self):\n return self.object_model.objects.get(id=self.id)\n\n\nclass LegacySourceMixin:\n \"\"\"\n Mixin for functionality shared between the legacy Card & BankAccount sources\n \"\"\"\n\n @classmethod\n def _get_customer_from_kwargs(cls, **kwargs):\n if \"customer\" not in kwargs or not isinstance(kwargs[\"customer\"], Customer):\n raise StripeObjectManipulationException(\n \"{}s must be manipulated through a Customer. 
\"\n \"Pass a Customer object into this call.\".format(cls.__name__)\n )\n\n customer = kwargs[\"customer\"]\n del kwargs[\"customer\"]\n\n return customer, kwargs\n\n @classmethod\n def _api_create(cls, api_key=djstripe_settings.STRIPE_SECRET_KEY, **kwargs):\n # OVERRIDING the parent version of this function\n # Cards & Bank Accounts must be manipulated through a customer or account.\n # TODO: When managed accounts are supported, this method needs to\n # check if either a customer or account is supplied to determine\n # the correct object to use.\n\n customer, clean_kwargs = cls._get_customer_from_kwargs(**kwargs)\n\n return customer.api_retrieve().sources.create(api_key=api_key, **clean_kwargs)\n\n @classmethod\n def api_list(cls, api_key=djstripe_settings.STRIPE_SECRET_KEY, **kwargs):\n # OVERRIDING the parent version of this function\n # Cards & Bank Accounts must be manipulated through a customer or account.\n # TODO: When managed accounts are supported, this method needs to\n # check if either a customer or account is supplied to determine\n # the correct object to use.\n\n customer, clean_kwargs = cls._get_customer_from_kwargs(**kwargs)\n\n return (\n customer.api_retrieve(api_key=api_key)\n .sources.list(object=cls.stripe_class.OBJECT_NAME, **clean_kwargs)\n .auto_paging_iter()\n )\n\n def get_stripe_dashboard_url(self):\n return self.customer.get_stripe_dashboard_url()\n\n def remove(self):\n \"\"\"\n Removes a legacy source from this customer's account.\n \"\"\"\n\n # First, wipe default source on all customers that use this card.\n Customer.objects.filter(default_source=self.id).update(default_source=None)\n\n try:\n self._api_delete()\n except InvalidRequestError as exc:\n if \"No such source:\" in str(exc) or \"No such customer:\" in str(exc):\n # The exception was thrown because the stripe customer or card\n # was already deleted on the stripe side, ignore the exception\n pass\n else:\n # The exception was raised for another reason, re-raise it\n raise\n\n self.delete()\n\n def api_retrieve(self, api_key=None):\n # OVERRIDING the parent version of this function\n # Cards & Banks Accounts must be manipulated through a customer or account.\n # TODO: When managed accounts are supported, this method needs to check if\n # either a customer or account is supplied to determine the\n # correct object to use.\n api_key = api_key or self.default_api_key\n customer = self.customer.api_retrieve(api_key=api_key)\n\n # If the customer is deleted, the sources attribute will be absent.\n # eg. {\"id\": \"cus_XXXXXXXX\", \"deleted\": True}\n if \"sources\" not in customer:\n # We fake a native stripe InvalidRequestError so that it's caught\n # like an invalid ID error.\n raise InvalidRequestError(\"No such source: %s\" % (self.id), \"id\")\n\n return customer.sources.retrieve(self.id, expand=self.expand_fields)\n\n\nclass BankAccount(LegacySourceMixin, StripeModel):\n stripe_class = stripe.BankAccount\n\n account = models.ForeignKey(\n \"Account\",\n on_delete=models.PROTECT,\n null=True,\n blank=True,\n related_name=\"bank_account\",\n help_text=\"The account the charge was made on behalf of. 
Null here indicates \"\n \"that this value was never set.\",\n )\n account_holder_name = models.TextField(\n max_length=5000,\n default=\"\",\n blank=True,\n help_text=\"The name of the person or business that owns the bank account.\",\n )\n account_holder_type = StripeEnumField(\n enum=enums.BankAccountHolderType,\n help_text=\"The type of entity that holds the account.\",\n )\n bank_name = models.CharField(\n max_length=255,\n help_text=\"Name of the bank associated with the routing number \"\n \"(e.g., `WELLS FARGO`).\",\n )\n country = models.CharField(\n max_length=2,\n help_text=\"Two-letter ISO code representing the country the bank account \"\n \"is located in.\",\n )\n currency = StripeCurrencyCodeField()\n customer = models.ForeignKey(\n \"Customer\", on_delete=models.SET_NULL, null=True, related_name=\"bank_account\"\n )\n default_for_currency = models.NullBooleanField(\n help_text=\"Whether this external account is the default account for \"\n \"its currency.\"\n )\n fingerprint = models.CharField(\n max_length=16,\n help_text=(\n \"Uniquely identifies this particular bank account. \"\n \"You can use this attribute to check whether two bank accounts are \"\n \"the same.\"\n ),\n )\n last4 = models.CharField(max_length=4)\n routing_number = models.CharField(\n max_length=255, help_text=\"The routing transit number for the bank account.\"\n )\n status = StripeEnumField(enum=enums.BankAccountStatus)\n\n\nclass Card(LegacySourceMixin, StripeModel):\n \"\"\"\n You can store multiple cards on a customer in order to charge the customer later.\n\n This is a legacy model which only applies to the \"v2\" Stripe API (eg. Checkout.js).\n You should strive to use the Stripe \"v3\" API (eg. Stripe Elements).\n Also see: https://stripe.com/docs/stripe-js/elements/migrating\n When using Elements, you will not be using Card objects. Instead, you will use\n Source objects.\n A Source object of type \"card\" is equivalent to a Card object. 
However, Card\n objects cannot be converted into Source objects by Stripe at this time.\n\n Stripe documentation: https://stripe.com/docs/api/python#cards\n \"\"\"\n\n stripe_class = stripe.Card\n\n address_city = models.TextField(\n max_length=5000,\n blank=True,\n default=\"\",\n help_text=\"City/District/Suburb/Town/Village.\",\n )\n address_country = models.TextField(\n max_length=5000, blank=True, default=\"\", help_text=\"Billing address country.\"\n )\n address_line1 = models.TextField(\n max_length=5000,\n blank=True,\n default=\"\",\n help_text=\"Street address/PO Box/Company name.\",\n )\n address_line1_check = StripeEnumField(\n enum=enums.CardCheckResult,\n blank=True,\n default=\"\",\n help_text=\"If `address_line1` was provided, results of the check.\",\n )\n address_line2 = models.TextField(\n max_length=5000,\n blank=True,\n default=\"\",\n help_text=\"Apartment/Suite/Unit/Building.\",\n )\n address_state = models.TextField(\n max_length=5000,\n blank=True,\n default=\"\",\n help_text=\"State/County/Province/Region.\",\n )\n address_zip = models.TextField(\n max_length=5000, blank=True, default=\"\", help_text=\"ZIP or postal code.\"\n )\n address_zip_check = StripeEnumField(\n enum=enums.CardCheckResult,\n blank=True,\n default=\"\",\n help_text=\"If `address_zip` was provided, results of the check.\",\n )\n brand = StripeEnumField(enum=enums.CardBrand, help_text=\"Card brand.\")\n country = models.CharField(\n max_length=2,\n default=\"\",\n blank=True,\n help_text=\"Two-letter ISO code representing the country of the card.\",\n )\n customer = models.ForeignKey(\n \"Customer\", on_delete=models.SET_NULL, null=True, related_name=\"legacy_cards\"\n )\n cvc_check = StripeEnumField(\n enum=enums.CardCheckResult,\n default=\"\",\n blank=True,\n help_text=\"If a CVC was provided, results of the check.\",\n )\n dynamic_last4 = models.CharField(\n max_length=4,\n default=\"\",\n blank=True,\n help_text=\"(For tokenized numbers only.) 
The last four digits of the device \"\n \"account number.\",\n )\n exp_month = models.IntegerField(help_text=\"Card expiration month.\")\n exp_year = models.IntegerField(help_text=\"Card expiration year.\")\n fingerprint = models.CharField(\n default=\"\",\n blank=True,\n max_length=16,\n help_text=\"Uniquely identifies this particular card number.\",\n )\n funding = StripeEnumField(\n enum=enums.CardFundingType, help_text=\"Card funding type.\"\n )\n last4 = models.CharField(max_length=4, help_text=\"Last four digits of Card number.\")\n name = models.TextField(\n max_length=5000, default=\"\", blank=True, help_text=\"Cardholder name.\"\n )\n tokenization_method = StripeEnumField(\n enum=enums.CardTokenizationMethod,\n default=\"\",\n blank=True,\n help_text=\"If the card number is tokenized, this is the method that was used.\",\n )\n\n def str_parts(self):\n return [\n \"brand={brand}\".format(brand=self.brand),\n \"last4={last4}\".format(last4=self.last4),\n \"exp_month={exp_month}\".format(exp_month=self.exp_month),\n \"exp_year={exp_year}\".format(exp_year=self.exp_year),\n ] + super().str_parts()\n\n @classmethod\n def create_token(\n cls,\n number,\n exp_month,\n exp_year,\n cvc,\n api_key=djstripe_settings.STRIPE_SECRET_KEY,\n **kwargs\n ):\n \"\"\"\n Creates a single use token that wraps the details of a credit card.\n This token can be used in place of a credit card dictionary with any API method.\n These tokens can only be used once: by creating a new charge object,\n or attaching them to a customer.\n (Source: https://stripe.com/docs/api/python#create_card_token)\n\n :param exp_month: The card's expiration month.\n :type exp_month: Two digit int\n :param exp_year: The card's expiration year.\n :type exp_year: Two or Four digit int\n :param number: The card number\n :type number: string without any separators (no spaces)\n :param cvc: Card security code.\n :type cvc: string\n \"\"\"\n\n card = {\n \"number\": number,\n \"exp_month\": exp_month,\n \"exp_year\": exp_year,\n \"cvc\": cvc,\n }\n card.update(kwargs)\n\n return stripe.Token.create(api_key=api_key, card=card)\n\n\nclass Source(StripeModel):\n \"\"\"\n Stripe documentation: https://stripe.com/docs/api#sources\n \"\"\"\n\n amount = StripeDecimalCurrencyAmountField(\n null=True,\n blank=True,\n help_text=(\n \"Amount associated with the source. \"\n \"This is the amount for which the source will be chargeable once ready. \"\n \"Required for `single_use` sources.\"\n ),\n )\n client_secret = models.CharField(\n max_length=255,\n help_text=(\n \"The client secret of the source. \"\n \"Used for client-side retrieval using a publishable key.\"\n ),\n )\n currency = StripeCurrencyCodeField(default=\"\", blank=True)\n flow = StripeEnumField(\n enum=enums.SourceFlow, help_text=\"The authentication flow of the source.\"\n )\n owner = JSONField(\n help_text=(\n \"Information about the owner of the payment instrument that may be \"\n \"used or required by particular source types.\"\n )\n )\n statement_descriptor = models.CharField(\n max_length=255,\n default=\"\",\n blank=True,\n help_text=\"Extra information about a source. This will appear on your \"\n \"customer's statement every time you charge the source.\",\n )\n status = StripeEnumField(\n enum=enums.SourceStatus,\n help_text=\"The status of the source. 
Only `chargeable` sources can be used \"\n \"to create a charge.\",\n )\n type = StripeEnumField(enum=enums.SourceType, help_text=\"The type of the source.\")\n usage = StripeEnumField(\n enum=enums.SourceUsage,\n help_text=\"Whether this source should be reusable or not. \"\n \"Some source types may or may not be reusable by construction, \"\n \"while other may leave the option at creation.\",\n )\n\n # Flows\n code_verification = JSONField(\n null=True,\n blank=True,\n help_text=\"Information related to the code verification flow. \"\n \"Present if the source is authenticated by a verification code \"\n \"(`flow` is `code_verification`).\",\n )\n receiver = JSONField(\n null=True,\n blank=True,\n help_text=\"Information related to the receiver flow. \"\n \"Present if the source is a receiver (`flow` is `receiver`).\",\n )\n redirect = JSONField(\n null=True,\n blank=True,\n help_text=\"Information related to the redirect flow. \"\n \"Present if the source is authenticated by a redirect (`flow` is `redirect`).\",\n )\n\n source_data = JSONField(help_text=\"The data corresponding to the source type.\")\n\n customer = models.ForeignKey(\n \"Customer\",\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n related_name=\"sources\",\n )\n\n stripe_class = stripe.Source\n stripe_dashboard_item_name = \"sources\"\n\n @classmethod\n def _manipulate_stripe_object_hook(cls, data):\n # The source_data dict is an alias of all the source types\n data[\"source_data\"] = data[data[\"type\"]]\n return data\n\n def _attach_objects_hook(self, cls, data):\n customer = cls._stripe_object_to_customer(target_cls=Customer, data=data)\n if customer:\n self.customer = customer\n else:\n self.customer = None\n\n def detach(self):\n \"\"\"\n Detach the source from its customer.\n \"\"\"\n\n # First, wipe default source on all customers that use this.\n Customer.objects.filter(default_source=self.id).update(default_source=None)\n\n try:\n # TODO - we could use the return value of sync_from_stripe_data\n # or call its internals - self._sync/_attach_objects_hook etc here\n # to update `self` at this point?\n self.sync_from_stripe_data(self.api_retrieve().detach())\n return True\n except (InvalidRequestError, NotImplementedError):\n # The source was already detached. Resyncing.\n # NotImplementedError is an artifact of stripe-python<2.0\n # https://github.com/stripe/stripe-python/issues/376\n self.sync_from_stripe_data(self.api_retrieve())\n return False\n\n\nclass PaymentMethod(StripeModel):\n \"\"\"\n Stripe documentation: https://stripe.com/docs/api#payment_methods\n \"\"\"\n\n billing_details = JSONField(\n help_text=(\n \"Billing information associated with the PaymentMethod that may be used or \"\n \"required by particular types of payment methods.\"\n )\n )\n card = JSONField(\n help_text=\"If this is a card PaymentMethod, this hash contains details \"\n \"about the card.\"\n )\n card_present = JSONField(\n null=True,\n blank=True,\n help_text=\"If this is an card_present PaymentMethod, this hash contains \"\n \"details about the Card Present payment method.\",\n )\n customer = models.ForeignKey(\n \"Customer\",\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n related_name=\"payment_methods\",\n help_text=\"Customer to which this PaymentMethod is saved.\"\n \"This will not be set when the PaymentMethod has not been saved to a Customer.\",\n )\n type = models.CharField(\n max_length=255,\n null=True,\n blank=True,\n help_text=\"The type of the PaymentMethod. 
An additional hash is included \"\n \"on the PaymentMethod with a name matching this value. It contains additional \"\n \"information specific to the PaymentMethod type.\",\n )\n\n stripe_class = stripe.PaymentMethod\n\n @classmethod\n def attach(\n cls, payment_method, customer, api_key=djstripe_settings.STRIPE_SECRET_KEY\n ):\n \"\"\"\n Attach a payment method to a customer\n :param payment_method:\n :type payment_method: str, PaymentMethod\n :param customer:\n :type customer: str, Customer\n :param api_key:\n :return:\n \"\"\"\n\n if isinstance(payment_method, StripeModel):\n payment_method = payment_method.id\n\n if isinstance(customer, StripeModel):\n customer = customer.id\n\n extra_kwargs = {}\n if not isinstance(payment_method, stripe.PaymentMethod):\n # send api_key if we're not passing in a Stripe object\n # avoids \"Received unknown parameter: api_key\" since api uses the\n # key cached in the Stripe object\n extra_kwargs = {\"api_key\": api_key}\n\n stripe_payment_method = stripe.PaymentMethod.attach(\n payment_method, customer=customer, **extra_kwargs\n )\n return cls.sync_from_stripe_data(stripe_payment_method)\n", "path": "djstripe/models/payment_methods.py" } ]
[ { "content": "import stripe\nfrom django.db import models, transaction\nfrom stripe.error import InvalidRequestError\n\nfrom .. import enums\nfrom .. import settings as djstripe_settings\nfrom ..exceptions import StripeObjectManipulationException\nfrom ..fields import (\n JSONField,\n StripeCurrencyCodeField,\n StripeDecimalCurrencyAmountField,\n StripeEnumField,\n)\nfrom .base import StripeModel, logger\nfrom .core import Customer\n\n\nclass DjstripePaymentMethod(models.Model):\n \"\"\"\n An internal model that abstracts the legacy Card and BankAccount\n objects with Source objects.\n\n Contains two fields: `id` and `type`:\n - `id` is the id of the Stripe object.\n - `type` can be `card`, `bank_account` or `source`.\n \"\"\"\n\n id = models.CharField(max_length=255, primary_key=True)\n type = models.CharField(max_length=12, db_index=True)\n\n @classmethod\n def from_stripe_object(cls, data):\n source_type = data[\"object\"]\n model = cls._model_for_type(source_type)\n\n with transaction.atomic():\n model.sync_from_stripe_data(data)\n instance, _ = cls.objects.get_or_create(\n id=data[\"id\"], defaults={\"type\": source_type}\n )\n\n return instance\n\n @classmethod\n def _get_or_create_source(cls, data, source_type):\n try:\n model = cls._model_for_type(source_type)\n model._get_or_create_from_stripe_object(data)\n except ValueError as e:\n # This may happen if we have source types we don't know about.\n # Let's not make dj-stripe entirely unusable if that happens.\n logger.warning(\"Could not sync source of type %r: %s\", source_type, e)\n\n return cls.objects.get_or_create(id=data[\"id\"], defaults={\"type\": source_type})\n\n @classmethod\n def _model_for_type(cls, type):\n if type == \"card\":\n return Card\n elif type == \"source\":\n return Source\n elif type == \"bank_account\":\n return BankAccount\n\n raise ValueError(\"Unknown source type: {}\".format(type))\n\n @property\n def object_model(self):\n return self._model_for_type(self.type)\n\n def resolve(self):\n return self.object_model.objects.get(id=self.id)\n\n\nclass LegacySourceMixin:\n \"\"\"\n Mixin for functionality shared between the legacy Card & BankAccount sources\n \"\"\"\n\n @classmethod\n def _get_customer_from_kwargs(cls, **kwargs):\n if \"customer\" not in kwargs or not isinstance(kwargs[\"customer\"], Customer):\n raise StripeObjectManipulationException(\n \"{}s must be manipulated through a Customer. 
\"\n \"Pass a Customer object into this call.\".format(cls.__name__)\n )\n\n customer = kwargs[\"customer\"]\n del kwargs[\"customer\"]\n\n return customer, kwargs\n\n @classmethod\n def _api_create(cls, api_key=djstripe_settings.STRIPE_SECRET_KEY, **kwargs):\n # OVERRIDING the parent version of this function\n # Cards & Bank Accounts must be manipulated through a customer or account.\n # TODO: When managed accounts are supported, this method needs to\n # check if either a customer or account is supplied to determine\n # the correct object to use.\n\n customer, clean_kwargs = cls._get_customer_from_kwargs(**kwargs)\n\n return customer.api_retrieve().sources.create(api_key=api_key, **clean_kwargs)\n\n @classmethod\n def api_list(cls, api_key=djstripe_settings.STRIPE_SECRET_KEY, **kwargs):\n # OVERRIDING the parent version of this function\n # Cards & Bank Accounts must be manipulated through a customer or account.\n # TODO: When managed accounts are supported, this method needs to\n # check if either a customer or account is supplied to determine\n # the correct object to use.\n\n customer, clean_kwargs = cls._get_customer_from_kwargs(**kwargs)\n\n return (\n customer.api_retrieve(api_key=api_key)\n .sources.list(object=cls.stripe_class.OBJECT_NAME, **clean_kwargs)\n .auto_paging_iter()\n )\n\n def get_stripe_dashboard_url(self):\n return self.customer.get_stripe_dashboard_url()\n\n def remove(self):\n \"\"\"\n Removes a legacy source from this customer's account.\n \"\"\"\n\n # First, wipe default source on all customers that use this card.\n Customer.objects.filter(default_source=self.id).update(default_source=None)\n\n try:\n self._api_delete()\n except InvalidRequestError as exc:\n if \"No such source:\" in str(exc) or \"No such customer:\" in str(exc):\n # The exception was thrown because the stripe customer or card\n # was already deleted on the stripe side, ignore the exception\n pass\n else:\n # The exception was raised for another reason, re-raise it\n raise\n\n self.delete()\n\n def api_retrieve(self, api_key=None):\n # OVERRIDING the parent version of this function\n # Cards & Banks Accounts must be manipulated through a customer or account.\n # TODO: When managed accounts are supported, this method needs to check if\n # either a customer or account is supplied to determine the\n # correct object to use.\n api_key = api_key or self.default_api_key\n customer = self.customer.api_retrieve(api_key=api_key)\n\n # If the customer is deleted, the sources attribute will be absent.\n # eg. {\"id\": \"cus_XXXXXXXX\", \"deleted\": True}\n if \"sources\" not in customer:\n # We fake a native stripe InvalidRequestError so that it's caught\n # like an invalid ID error.\n raise InvalidRequestError(\"No such source: %s\" % (self.id), \"id\")\n\n return customer.sources.retrieve(self.id, expand=self.expand_fields)\n\n\nclass BankAccount(LegacySourceMixin, StripeModel):\n stripe_class = stripe.BankAccount\n\n account = models.ForeignKey(\n \"Account\",\n on_delete=models.PROTECT,\n null=True,\n blank=True,\n related_name=\"bank_account\",\n help_text=\"The account the charge was made on behalf of. 
Null here indicates \"\n \"that this value was never set.\",\n )\n account_holder_name = models.TextField(\n max_length=5000,\n default=\"\",\n blank=True,\n help_text=\"The name of the person or business that owns the bank account.\",\n )\n account_holder_type = StripeEnumField(\n enum=enums.BankAccountHolderType,\n help_text=\"The type of entity that holds the account.\",\n )\n bank_name = models.CharField(\n max_length=255,\n help_text=\"Name of the bank associated with the routing number \"\n \"(e.g., `WELLS FARGO`).\",\n )\n country = models.CharField(\n max_length=2,\n help_text=\"Two-letter ISO code representing the country the bank account \"\n \"is located in.\",\n )\n currency = StripeCurrencyCodeField()\n customer = models.ForeignKey(\n \"Customer\", on_delete=models.SET_NULL, null=True, related_name=\"bank_account\"\n )\n default_for_currency = models.NullBooleanField(\n help_text=\"Whether this external account is the default account for \"\n \"its currency.\"\n )\n fingerprint = models.CharField(\n max_length=16,\n help_text=(\n \"Uniquely identifies this particular bank account. \"\n \"You can use this attribute to check whether two bank accounts are \"\n \"the same.\"\n ),\n )\n last4 = models.CharField(max_length=4)\n routing_number = models.CharField(\n max_length=255, help_text=\"The routing transit number for the bank account.\"\n )\n status = StripeEnumField(enum=enums.BankAccountStatus)\n\n\nclass Card(LegacySourceMixin, StripeModel):\n \"\"\"\n You can store multiple cards on a customer in order to charge the customer later.\n\n This is a legacy model which only applies to the \"v2\" Stripe API (eg. Checkout.js).\n You should strive to use the Stripe \"v3\" API (eg. Stripe Elements).\n Also see: https://stripe.com/docs/stripe-js/elements/migrating\n When using Elements, you will not be using Card objects. Instead, you will use\n Source objects.\n A Source object of type \"card\" is equivalent to a Card object. 
However, Card\n objects cannot be converted into Source objects by Stripe at this time.\n\n Stripe documentation: https://stripe.com/docs/api/python#cards\n \"\"\"\n\n stripe_class = stripe.Card\n\n address_city = models.TextField(\n max_length=5000,\n blank=True,\n default=\"\",\n help_text=\"City/District/Suburb/Town/Village.\",\n )\n address_country = models.TextField(\n max_length=5000, blank=True, default=\"\", help_text=\"Billing address country.\"\n )\n address_line1 = models.TextField(\n max_length=5000,\n blank=True,\n default=\"\",\n help_text=\"Street address/PO Box/Company name.\",\n )\n address_line1_check = StripeEnumField(\n enum=enums.CardCheckResult,\n blank=True,\n default=\"\",\n help_text=\"If `address_line1` was provided, results of the check.\",\n )\n address_line2 = models.TextField(\n max_length=5000,\n blank=True,\n default=\"\",\n help_text=\"Apartment/Suite/Unit/Building.\",\n )\n address_state = models.TextField(\n max_length=5000,\n blank=True,\n default=\"\",\n help_text=\"State/County/Province/Region.\",\n )\n address_zip = models.TextField(\n max_length=5000, blank=True, default=\"\", help_text=\"ZIP or postal code.\"\n )\n address_zip_check = StripeEnumField(\n enum=enums.CardCheckResult,\n blank=True,\n default=\"\",\n help_text=\"If `address_zip` was provided, results of the check.\",\n )\n brand = StripeEnumField(enum=enums.CardBrand, help_text=\"Card brand.\")\n country = models.CharField(\n max_length=2,\n default=\"\",\n blank=True,\n help_text=\"Two-letter ISO code representing the country of the card.\",\n )\n customer = models.ForeignKey(\n \"Customer\", on_delete=models.SET_NULL, null=True, related_name=\"legacy_cards\"\n )\n cvc_check = StripeEnumField(\n enum=enums.CardCheckResult,\n default=\"\",\n blank=True,\n help_text=\"If a CVC was provided, results of the check.\",\n )\n dynamic_last4 = models.CharField(\n max_length=4,\n default=\"\",\n blank=True,\n help_text=\"(For tokenized numbers only.) 
The last four digits of the device \"\n \"account number.\",\n )\n exp_month = models.IntegerField(help_text=\"Card expiration month.\")\n exp_year = models.IntegerField(help_text=\"Card expiration year.\")\n fingerprint = models.CharField(\n default=\"\",\n blank=True,\n max_length=16,\n help_text=\"Uniquely identifies this particular card number.\",\n )\n funding = StripeEnumField(\n enum=enums.CardFundingType, help_text=\"Card funding type.\"\n )\n last4 = models.CharField(max_length=4, help_text=\"Last four digits of Card number.\")\n name = models.TextField(\n max_length=5000, default=\"\", blank=True, help_text=\"Cardholder name.\"\n )\n tokenization_method = StripeEnumField(\n enum=enums.CardTokenizationMethod,\n default=\"\",\n blank=True,\n help_text=\"If the card number is tokenized, this is the method that was used.\",\n )\n\n def str_parts(self):\n return [\n \"brand={brand}\".format(brand=self.brand),\n \"last4={last4}\".format(last4=self.last4),\n \"exp_month={exp_month}\".format(exp_month=self.exp_month),\n \"exp_year={exp_year}\".format(exp_year=self.exp_year),\n ] + super().str_parts()\n\n @classmethod\n def create_token(\n cls,\n number,\n exp_month,\n exp_year,\n cvc,\n api_key=djstripe_settings.STRIPE_SECRET_KEY,\n **kwargs\n ):\n \"\"\"\n Creates a single use token that wraps the details of a credit card.\n This token can be used in place of a credit card dictionary with any API method.\n These tokens can only be used once: by creating a new charge object,\n or attaching them to a customer.\n (Source: https://stripe.com/docs/api/python#create_card_token)\n\n :param exp_month: The card's expiration month.\n :type exp_month: Two digit int\n :param exp_year: The card's expiration year.\n :type exp_year: Two or Four digit int\n :param number: The card number\n :type number: string without any separators (no spaces)\n :param cvc: Card security code.\n :type cvc: string\n \"\"\"\n\n card = {\n \"number\": number,\n \"exp_month\": exp_month,\n \"exp_year\": exp_year,\n \"cvc\": cvc,\n }\n card.update(kwargs)\n\n return stripe.Token.create(api_key=api_key, card=card)\n\n\nclass Source(StripeModel):\n \"\"\"\n Stripe documentation: https://stripe.com/docs/api#sources\n \"\"\"\n\n amount = StripeDecimalCurrencyAmountField(\n null=True,\n blank=True,\n help_text=(\n \"Amount associated with the source. \"\n \"This is the amount for which the source will be chargeable once ready. \"\n \"Required for `single_use` sources.\"\n ),\n )\n client_secret = models.CharField(\n max_length=255,\n help_text=(\n \"The client secret of the source. \"\n \"Used for client-side retrieval using a publishable key.\"\n ),\n )\n currency = StripeCurrencyCodeField(default=\"\", blank=True)\n flow = StripeEnumField(\n enum=enums.SourceFlow, help_text=\"The authentication flow of the source.\"\n )\n owner = JSONField(\n help_text=(\n \"Information about the owner of the payment instrument that may be \"\n \"used or required by particular source types.\"\n )\n )\n statement_descriptor = models.CharField(\n max_length=255,\n default=\"\",\n blank=True,\n help_text=\"Extra information about a source. This will appear on your \"\n \"customer's statement every time you charge the source.\",\n )\n status = StripeEnumField(\n enum=enums.SourceStatus,\n help_text=\"The status of the source. 
Only `chargeable` sources can be used \"\n \"to create a charge.\",\n )\n type = StripeEnumField(enum=enums.SourceType, help_text=\"The type of the source.\")\n usage = StripeEnumField(\n enum=enums.SourceUsage,\n help_text=\"Whether this source should be reusable or not. \"\n \"Some source types may or may not be reusable by construction, \"\n \"while other may leave the option at creation.\",\n )\n\n # Flows\n code_verification = JSONField(\n null=True,\n blank=True,\n help_text=\"Information related to the code verification flow. \"\n \"Present if the source is authenticated by a verification code \"\n \"(`flow` is `code_verification`).\",\n )\n receiver = JSONField(\n null=True,\n blank=True,\n help_text=\"Information related to the receiver flow. \"\n \"Present if the source is a receiver (`flow` is `receiver`).\",\n )\n redirect = JSONField(\n null=True,\n blank=True,\n help_text=\"Information related to the redirect flow. \"\n \"Present if the source is authenticated by a redirect (`flow` is `redirect`).\",\n )\n\n source_data = JSONField(help_text=\"The data corresponding to the source type.\")\n\n customer = models.ForeignKey(\n \"Customer\",\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n related_name=\"sources\",\n )\n\n stripe_class = stripe.Source\n stripe_dashboard_item_name = \"sources\"\n\n @classmethod\n def _manipulate_stripe_object_hook(cls, data):\n # The source_data dict is an alias of all the source types\n data[\"source_data\"] = data[data[\"type\"]]\n return data\n\n def _attach_objects_hook(self, cls, data):\n customer = cls._stripe_object_to_customer(target_cls=Customer, data=data)\n if customer:\n self.customer = customer\n else:\n self.customer = None\n\n def detach(self):\n \"\"\"\n Detach the source from its customer.\n \"\"\"\n\n # First, wipe default source on all customers that use this.\n Customer.objects.filter(default_source=self.id).update(default_source=None)\n\n try:\n # TODO - we could use the return value of sync_from_stripe_data\n # or call its internals - self._sync/_attach_objects_hook etc here\n # to update `self` at this point?\n self.sync_from_stripe_data(self.api_retrieve().detach())\n return True\n except (InvalidRequestError, NotImplementedError):\n # The source was already detached. Resyncing.\n # NotImplementedError is an artifact of stripe-python<2.0\n # https://github.com/stripe/stripe-python/issues/376\n self.sync_from_stripe_data(self.api_retrieve())\n return False\n\n\nclass PaymentMethod(StripeModel):\n \"\"\"\n Stripe documentation: https://stripe.com/docs/api#payment_methods\n \"\"\"\n\n billing_details = JSONField(\n help_text=(\n \"Billing information associated with the PaymentMethod that may be used or \"\n \"required by particular types of payment methods.\"\n )\n )\n card = JSONField(\n help_text=\"If this is a card PaymentMethod, this hash contains details \"\n \"about the card.\"\n )\n card_present = JSONField(\n null=True,\n blank=True,\n help_text=\"If this is an card_present PaymentMethod, this hash contains \"\n \"details about the Card Present payment method.\",\n )\n customer = models.ForeignKey(\n \"Customer\",\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n related_name=\"payment_methods\",\n help_text=\"Customer to which this PaymentMethod is saved.\"\n \"This will not be set when the PaymentMethod has not been saved to a Customer.\",\n )\n type = models.CharField(\n max_length=255,\n null=True,\n blank=True,\n help_text=\"The type of the PaymentMethod. 
An additional hash is included \"\n \"on the PaymentMethod with a name matching this value. It contains additional \"\n \"information specific to the PaymentMethod type.\",\n )\n\n stripe_class = stripe.PaymentMethod\n\n def _attach_objects_hook(self, cls, data):\n customer = cls._stripe_object_to_customer(target_cls=Customer, data=data)\n if customer:\n self.customer = customer\n else:\n self.customer = None\n\n @classmethod\n def attach(\n cls, payment_method, customer, api_key=djstripe_settings.STRIPE_SECRET_KEY\n ):\n \"\"\"\n Attach a payment method to a customer\n :param payment_method:\n :type payment_method: str, PaymentMethod\n :param customer:\n :type customer: str, Customer\n :param api_key:\n :return:\n \"\"\"\n\n if isinstance(payment_method, StripeModel):\n payment_method = payment_method.id\n\n if isinstance(customer, StripeModel):\n customer = customer.id\n\n extra_kwargs = {}\n if not isinstance(payment_method, stripe.PaymentMethod):\n # send api_key if we're not passing in a Stripe object\n # avoids \"Received unknown parameter: api_key\" since api uses the\n # key cached in the Stripe object\n extra_kwargs = {\"api_key\": api_key}\n\n stripe_payment_method = stripe.PaymentMethod.attach(\n payment_method, customer=customer, **extra_kwargs\n )\n return cls.sync_from_stripe_data(stripe_payment_method)\n", "path": "djstripe/models/payment_methods.py" } ]
diff --git a/djstripe/models/payment_methods.py b/djstripe/models/payment_methods.py index d6d9fd3d84..07634f9f14 100644 --- a/djstripe/models/payment_methods.py +++ b/djstripe/models/payment_methods.py @@ -527,6 +527,13 @@ class PaymentMethod(StripeModel): stripe_class = stripe.PaymentMethod + def _attach_objects_hook(self, cls, data): + customer = cls._stripe_object_to_customer(target_cls=Customer, data=data) + if customer: + self.customer = customer + else: + self.customer = None + @classmethod def attach( cls, payment_method, customer, api_key=djstripe_settings.STRIPE_SECRET_KEY diff --git a/tests/test_payment_method.py b/tests/test_payment_method.py index 3b0c478a03..096ee633c8 100644 --- a/tests/test_payment_method.py +++ b/tests/test_payment_method.py @@ -78,3 +78,24 @@ def test_attach_synced(self, attach_mock): "djstripe.Customer.default_payment_method", }, ) + + def test_sync_null_customer(self): + payment_method = PaymentMethod.sync_from_stripe_data( + deepcopy(FAKE_PAYMENT_METHOD_I) + ) + + self.assertIsNotNone(payment_method.customer) + + # simulate remote detach + fake_payment_method_no_customer = deepcopy(FAKE_PAYMENT_METHOD_I) + fake_payment_method_no_customer["customer"] = None + + payment_method = PaymentMethod.sync_from_stripe_data( + fake_payment_method_no_customer + ) + + self.assertIsNone(payment_method.customer) + + self.assert_fks( + payment_method, expected_blank_fks={"djstripe.PaymentMethod.customer"} + )
PaymentMethod detachment doesn't work because it doesn't unset customer **Describe the bug** I'm using 4a828a48092a3904094917776ce725ec9aa3fce5 (after the #914 merge) and trying to set up an SCA complaint flow. As I said in #941, the events don't sync this model. Though even if I sync it manually (using sync_from_stripe_data), it doesn't work correctly for the detached payment_method case. The customer isn't actually detached (the content of the stripe event is `customer=null`) **To Reproduce** Steps to reproduce the behavior: 1. Detach a payment method from the stripe api 2. Try to sync the payment method object: `payment_method.sync_from_stripe_data(payment_method.api_retrieve())` 3. Observe that the customer association isn't removed even though stripe returns `customer=null`. **Expected behavior** The payment method should get detached from the customer. Example event: ``` { "object": { "id": "pm_1F8uMTHa6wE0PhFmIrAlCMuB", "object": "payment_method", "billing_details": { "address": { "city": null, "country": "GB", "line1": null, "line2": null, "postal_code": "W1", "state": null }, "email": "[email protected]", "name": "Jeo", "phone": null }, "card": { "brand": "visa", "checks": { "address_line1_check": null, "address_postal_code_check": "pass", "cvc_check": "pass" }, "country": "US", "exp_month": 2, "exp_year": 2022, "fingerprint": "No6qZ6uMjc9xCthT", "funding": "credit", "generated_from": null, "last4": "4242", "three_d_secure_usage": { "supported": true }, "wallet": null }, "created": 1566157701, "customer": null, "livemode": false, "metadata": { }, "type": "card" }, "previous_attributes": { "customer": "cus_FXgo0IlbgH2cQS" } }``` **Environment** - dj-stripe version: master at 4a828a48092a3904094917776ce725ec9aa3fce5 - Your Stripe account's default API version: [e.g. 2019-02-19 - shown as "default" on https://dashboard.stripe.com/developers] - Database: Postgres - Python version: 3.7.4 - Django version: 2.2.4 **Can you reproduce the issue with the latest version of master?** Yes **Additional context** Add any other context about the problem here.
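The diff above addresses this by giving `PaymentMethod` an `_attach_objects_hook` that clears the relation whenever the synced payload carries `customer: null`. A minimal standalone sketch of that pattern follows; the function and helper names are illustrative stand-ins, not dj-stripe's actual API:

```
# Illustrative sketch only: mirror a nullable remote relation during a sync.
# `lookup_customer` stands in for dj-stripe's _stripe_object_to_customer helper.
def sync_customer_relation(payment_method, stripe_data, lookup_customer):
    customer_id = stripe_data.get("customer")
    if customer_id:
        payment_method.customer = lookup_customer(customer_id)
    else:
        # customer is null in the payload -> the method was detached remotely,
        # so the stale local foreign key must be cleared explicitly.
        payment_method.customer = None
```

Without the explicit `None` branch, a re-sync leaves whatever customer was previously linked in place, which matches the behaviour reported above.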
ansible__ansible-modules-extras-3417
[ { "content": "#!/usr/bin/python\n# encoding: utf-8\n\n# (c) 2015, Jose Armesto <[email protected]>\n#\n# This file is part of Ansible\n#\n# This module is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This software is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this software. If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = \"\"\"\n---\nmodule: ec2_lc_find\nshort_description: Find AWS Autoscaling Launch Configurations\ndescription:\n - Returns list of matching Launch Configurations for a given name, along with other useful information\n - Results can be sorted and sliced\n - It depends on boto\n - Based on the work by Tom Bamford (https://github.com/tombamford)\n\nversion_added: \"2.2\"\nauthor: \"Jose Armesto (@fiunchinho)\"\noptions:\n region:\n description:\n - The AWS region to use.\n required: true\n aliases: ['aws_region', 'ec2_region']\n name_regex:\n description:\n - A Launch Configuration to match\n - It'll be compiled as regex\n required: True\n sort_order:\n description:\n - Order in which to sort results.\n choices: ['ascending', 'descending']\n default: 'ascending'\n required: false\n limit:\n description:\n - How many results to show.\n - Corresponds to Python slice notation like list[:limit].\n default: null\n required: false\nrequirements:\n - \"python >= 2.6\"\n - boto3\n\"\"\"\n\nEXAMPLES = '''\n# Note: These examples do not set authentication details, see the AWS Guide for details.\n\n# Search for the Launch Configurations that start with \"app\"\n- ec2_lc_find:\n name_regex: app.*\n sort_order: descending\n limit: 2\n'''\n\nRETURN = '''\nimage_id:\n description: AMI id\n returned: when Launch Configuration was found\n type: string\n sample: \"ami-0d75df7e\"\nuser_data:\n description: User data used to start instance\n returned: when Launch Configuration was found\n type: string\n user_data: \"ZXhwb3J0IENMT1VE\"\nname:\n description: Name of the AMI\n returned: when Launch Configuration was found\n type: string\n sample: \"myapp-v123\"\narn:\n description: Name of the AMI\n returned: when Launch Configuration was found\n type: string\n sample: \"arn:aws:autoscaling:eu-west-1:12345:launchConfiguration:d82f050e-e315:launchConfigurationName/yourproject\"\ninstance_type:\n description: Type of ec2 instance\n returned: when Launch Configuration was found\n type: string\n sample: \"t2.small\"\ncreated_time:\n description: When it was created\n returned: when Launch Configuration was found\n type: string\n sample: \"2016-06-29T14:59:22.222000+00:00\"\nebs_optimized:\n description: Launch Configuration EBS optimized property\n returned: when Launch Configuration was found\n type: boolean\n sample: False\ninstance_monitoring:\n description: Launch Configuration instance monitoring property\n returned: when Launch Configuration was found\n type: string\n sample: {\"Enabled\": false}\nclassic_link_vpc_security_groups:\n description: Launch Configuration classic link vpc security groups property\n returned: when Launch Configuration was found\n type: list\n sample: []\nblock_device_mappings:\n description: Launch 
Configuration block device mappings property\n returned: when Launch Configuration was found\n type: list\n sample: []\nkeyname:\n description: Launch Configuration ssh key\n returned: when Launch Configuration was found\n type: string\n sample: mykey\nsecurity_groups:\n description: Launch Configuration security groups\n returned: when Launch Configuration was found\n type: list\n sample: []\nkernel_id:\n description: Launch Configuration kernel to use\n returned: when Launch Configuration was found\n type: string\n sample: ''\nram_disk_id:\n description: Launch Configuration ram disk property\n returned: when Launch Configuration was found\n type: string\n sample: ''\nassociate_public_address:\n description: Assign public address or not\n returned: when Launch Configuration was found\n type: boolean\n sample: True\n...\n'''\n\n\ndef find_launch_configs(client, module):\n name_regex = module.params.get('name_regex')\n sort_order = module.params.get('sort_order')\n limit = module.params.get('limit')\n\n paginator = client.get_paginator('describe_launch_configurations')\n\n response_iterator = paginator.paginate(\n PaginationConfig={\n 'MaxItems': 1000,\n 'PageSize': 100\n }\n )\n\n results = []\n\n for response in response_iterator:\n response['LaunchConfigurations'] = filter(lambda lc: re.compile(name_regex).match(lc['LaunchConfigurationName']),\n response['LaunchConfigurations'])\n\n for lc in response['LaunchConfigurations']:\n data = {\n 'name': lc['LaunchConfigurationName'],\n 'arn': lc['LaunchConfigurationARN'],\n 'created_time': lc['CreatedTime'],\n 'user_data': lc['UserData'],\n 'instance_type': lc['InstanceType'],\n 'image_id': lc['ImageId'],\n 'ebs_optimized': lc['EbsOptimized'],\n 'instance_monitoring': lc['InstanceMonitoring'],\n 'classic_link_vpc_security_groups': lc['ClassicLinkVPCSecurityGroups'],\n 'block_device_mappings': lc['BlockDeviceMappings'],\n 'keyname': lc['KeyName'],\n 'security_groups': lc['SecurityGroups'],\n 'kernel_id': lc['KernelId'],\n 'ram_disk_id': lc['RamdiskId'],\n 'associate_public_address': lc['AssociatePublicIpAddress'],\n }\n\n results.append(data)\n\n results.sort(key=lambda e: e['name'], reverse=(sort_order == 'descending'))\n\n if limit:\n results = results[:int(limit)]\n\n module.exit_json(changed=False, results=results)\n\n\ndef main():\n argument_spec = ec2_argument_spec()\n argument_spec.update(dict(\n region=dict(required=True, aliases=['aws_region', 'ec2_region']),\n name_regex=dict(required=True),\n sort_order=dict(required=False, default='ascending', choices=['ascending', 'descending']),\n limit=dict(required=False, type='int'),\n )\n )\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n )\n\n region, ec2_url, aws_connect_params = get_aws_connection_info(module, True)\n\n client = boto3_conn(module=module, conn_type='client', resource='autoscaling', region=region, **aws_connect_params)\n find_launch_configs(client, module)\n\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.ec2 import *\n\nif __name__ == '__main__':\n main()\n", "path": "cloud/amazon/ec2_lc_find.py" } ]
[ { "content": "#!/usr/bin/python\n# encoding: utf-8\n\n# (c) 2015, Jose Armesto <[email protected]>\n#\n# This file is part of Ansible\n#\n# This module is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This software is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this software. If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = \"\"\"\n---\nmodule: ec2_lc_find\nshort_description: Find AWS Autoscaling Launch Configurations\ndescription:\n - Returns list of matching Launch Configurations for a given name, along with other useful information\n - Results can be sorted and sliced\n - It depends on boto\n - Based on the work by Tom Bamford (https://github.com/tombamford)\n\nversion_added: \"2.2\"\nauthor: \"Jose Armesto (@fiunchinho)\"\noptions:\n region:\n description:\n - The AWS region to use.\n required: true\n aliases: ['aws_region', 'ec2_region']\n name_regex:\n description:\n - A Launch Configuration to match\n - It'll be compiled as regex\n required: True\n sort_order:\n description:\n - Order in which to sort results.\n choices: ['ascending', 'descending']\n default: 'ascending'\n required: false\n limit:\n description:\n - How many results to show.\n - Corresponds to Python slice notation like list[:limit].\n default: null\n required: false\nrequirements:\n - \"python >= 2.6\"\n - boto3\n\"\"\"\n\nEXAMPLES = '''\n# Note: These examples do not set authentication details, see the AWS Guide for details.\n\n# Search for the Launch Configurations that start with \"app\"\n- ec2_lc_find:\n name_regex: app.*\n sort_order: descending\n limit: 2\n'''\n\nRETURN = '''\nimage_id:\n description: AMI id\n returned: when Launch Configuration was found\n type: string\n sample: \"ami-0d75df7e\"\nuser_data:\n description: User data used to start instance\n returned: when Launch Configuration was found\n type: string\n user_data: \"ZXhwb3J0IENMT1VE\"\nname:\n description: Name of the AMI\n returned: when Launch Configuration was found\n type: string\n sample: \"myapp-v123\"\narn:\n description: Name of the AMI\n returned: when Launch Configuration was found\n type: string\n sample: \"arn:aws:autoscaling:eu-west-1:12345:launchConfiguration:d82f050e-e315:launchConfigurationName/yourproject\"\ninstance_type:\n description: Type of ec2 instance\n returned: when Launch Configuration was found\n type: string\n sample: \"t2.small\"\ncreated_time:\n description: When it was created\n returned: when Launch Configuration was found\n type: string\n sample: \"2016-06-29T14:59:22.222000+00:00\"\nebs_optimized:\n description: Launch Configuration EBS optimized property\n returned: when Launch Configuration was found\n type: boolean\n sample: False\ninstance_monitoring:\n description: Launch Configuration instance monitoring property\n returned: when Launch Configuration was found\n type: string\n sample: {\"Enabled\": false}\nclassic_link_vpc_security_groups:\n description: Launch Configuration classic link vpc security groups property\n returned: when Launch Configuration was found\n type: list\n sample: []\nblock_device_mappings:\n description: Launch 
Configuration block device mappings property\n returned: when Launch Configuration was found\n type: list\n sample: []\nkeyname:\n description: Launch Configuration ssh key\n returned: when Launch Configuration was found\n type: string\n sample: mykey\nsecurity_groups:\n description: Launch Configuration security groups\n returned: when Launch Configuration was found\n type: list\n sample: []\nkernel_id:\n description: Launch Configuration kernel to use\n returned: when Launch Configuration was found\n type: string\n sample: ''\nram_disk_id:\n description: Launch Configuration ram disk property\n returned: when Launch Configuration was found\n type: string\n sample: ''\nassociate_public_address:\n description: Assign public address or not\n returned: when Launch Configuration was found\n type: boolean\n sample: True\n...\n'''\n\n\ndef find_launch_configs(client, module):\n name_regex = module.params.get('name_regex')\n sort_order = module.params.get('sort_order')\n limit = module.params.get('limit')\n\n paginator = client.get_paginator('describe_launch_configurations')\n\n response_iterator = paginator.paginate(\n PaginationConfig={\n 'MaxItems': 1000,\n 'PageSize': 100\n }\n )\n\n results = []\n\n for response in response_iterator:\n response['LaunchConfigurations'] = filter(lambda lc: re.compile(name_regex).match(lc['LaunchConfigurationName']),\n response['LaunchConfigurations'])\n\n for lc in response['LaunchConfigurations']:\n data = {\n 'name': lc['LaunchConfigurationName'],\n 'arn': lc['LaunchConfigurationARN'],\n 'created_time': lc['CreatedTime'],\n 'user_data': lc['UserData'],\n 'instance_type': lc['InstanceType'],\n 'image_id': lc['ImageId'],\n 'ebs_optimized': lc['EbsOptimized'],\n 'instance_monitoring': lc['InstanceMonitoring'],\n 'classic_link_vpc_security_groups': lc['ClassicLinkVPCSecurityGroups'],\n 'block_device_mappings': lc['BlockDeviceMappings'],\n 'keyname': lc['KeyName'],\n 'security_groups': lc['SecurityGroups'],\n 'kernel_id': lc['KernelId'],\n 'ram_disk_id': lc['RamdiskId'],\n 'associate_public_address': lc.get('AssociatePublicIpAddress', False),\n }\n\n results.append(data)\n\n results.sort(key=lambda e: e['name'], reverse=(sort_order == 'descending'))\n\n if limit:\n results = results[:int(limit)]\n\n module.exit_json(changed=False, results=results)\n\n\ndef main():\n argument_spec = ec2_argument_spec()\n argument_spec.update(dict(\n region=dict(required=True, aliases=['aws_region', 'ec2_region']),\n name_regex=dict(required=True),\n sort_order=dict(required=False, default='ascending', choices=['ascending', 'descending']),\n limit=dict(required=False, type='int'),\n )\n )\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n )\n\n region, ec2_url, aws_connect_params = get_aws_connection_info(module, True)\n\n client = boto3_conn(module=module, conn_type='client', resource='autoscaling', region=region, **aws_connect_params)\n find_launch_configs(client, module)\n\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.ec2 import *\n\nif __name__ == '__main__':\n main()\n", "path": "cloud/amazon/ec2_lc_find.py" } ]
diff --git a/cloud/amazon/ec2_lc_find.py b/cloud/amazon/ec2_lc_find.py index f18bdfb42be..e5463e4ce80 100644 --- a/cloud/amazon/ec2_lc_find.py +++ b/cloud/amazon/ec2_lc_find.py @@ -184,7 +184,7 @@ def find_launch_configs(client, module): 'security_groups': lc['SecurityGroups'], 'kernel_id': lc['KernelId'], 'ram_disk_id': lc['RamdiskId'], - 'associate_public_address': lc['AssociatePublicIpAddress'], + 'associate_public_address': lc.get('AssociatePublicIpAddress', False), } results.append(data)
ec2_lc_find not returning AssociatePublicIpAddress ##### ISSUE TYPE Bug Report ##### COMPONENT NAME ec2_lc_find ##### ANSIBLE VERSION ``` ansible 2.2.0.0 config file = /home/centos/ansiblebase/ansible.cfg configured module search path = Default w/o overrides ``` ##### CONFIGURATION No significant changes ##### OS / ENVIRONMENT Started with Ansible Tower 3.0.3 on CentOS 7 x86_64 Did a yum update on ansible to 2.2. Did pip install boto3. ##### SUMMARY Running ec2_lc_find fails with a missing key for AssociatePublicIpAddress ##### STEPS TO REPRODUCE ``` - ec2_lc_find: region: "{{ region }}" name_regex: lc_name-*" sort_order: ascending limit: 3 register: old_lc_result ``` <!--- You can also paste gist.github.com links for larger files --> ##### EXPECTED RESULTS Correctly returns load configurations matching regex. ##### ACTUAL RESULTS ``` An exception occurred during task execution. To see the full traceback, use -vvv. The error was: KeyError: 'AssociatePublicIpAddress' fatal: [localhost]: FAILED! => {"changed": false, "failed": true, "module_stderr": "Traceback (most recent call last):\n File \"/tmp/ansible_dJ3ho0/ansible_module_ec2_lc_find.py\", line 225, in <module>\n main()\n File \"/tmp/ansible_dJ3ho0/ansible_module_ec2_lc_find.py\", line 217, in main\n find_launch_configs(client, module)\n File \"/tmp/ansible_dJ3ho0/ansible_module_ec2_lc_find.py\", line 187, in find_launch_configs\n 'associate_public_address': lc['AssociatePublicIpAddress'],\nKeyError: 'AssociatePublicIpAddress'\n", "module_stdout": "", "msg": "MODULE FAILURE"} ```
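The one-line patch above swaps the direct `lc['AssociatePublicIpAddress']` lookup for `dict.get()` with a `False` default, since `describe_launch_configurations` can omit that key entirely. A quick standalone illustration of the difference; the sample dict below is made up for demonstration:

```
# A launch configuration as boto3 may return it when the attribute was never
# set -- note the missing 'AssociatePublicIpAddress' key (sample data only).
lc = {
    "LaunchConfigurationName": "lc_name-001",
    "ImageId": "ami-0d75df7e",
}

try:
    lc["AssociatePublicIpAddress"]            # direct indexing
except KeyError as err:
    print("direct lookup fails:", err)        # the failure in the traceback above

# The patched lookup never raises and defaults to False:
print(lc.get("AssociatePublicIpAddress", False))   # -> False
```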
zulip__zulip-5407
[ { "content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport logging\nimport re\nimport scrapy\n\nfrom scrapy import Request\nfrom scrapy.linkextractors import IGNORED_EXTENSIONS\nfrom scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor\nfrom scrapy.utils.url import url_has_any_extension\n\nfrom typing import Any, Generator, List, Optional, Tuple\n\nEXCLUDED_URLS = [\n # Google calendar returns 404s on HEAD requests unconditionally\n 'https://calendar.google.com/calendar/[email protected]',\n # Returns 409 errors to HEAD requests frequently\n 'https://medium.freecodecamp.com',\n]\n\n\nclass BaseDocumentationSpider(scrapy.Spider):\n name = None # type: Optional[str]\n # Exclude domain address.\n deny_domains = [] # type: List[str]\n start_urls = [] # type: List[str]\n deny = [] # type: List[str]\n file_extensions = ['.' + ext for ext in IGNORED_EXTENSIONS] # type: List[str]\n tags = ('a', 'area', 'img')\n attrs = ('href', 'src')\n\n def __init__(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n super(BaseDocumentationSpider, self).__init__(*args, **kwargs)\n self.has_error = False\n\n def _set_error_state(self):\n # type: () -> None\n self.has_error = True\n\n def _has_extension(self, url):\n # type: (str) -> bool\n return url_has_any_extension(url, self.file_extensions)\n\n def _is_external_url(self, url):\n # type: (str) -> bool\n return url.startswith('http') or self._has_extension(url)\n\n def check_existing(self, response):\n # type: (Any) -> None\n self.log(response)\n\n def check_permalink(self, response):\n # type: (Any) -> None\n self.log(response)\n xpath_template = \"//*[@id='{permalink}' or @name='{permalink}']\"\n m = re.match(r\".+\\#(?P<permalink>.*)$\", response.request.url) # Get anchor value.\n if not m:\n return\n permalink = m.group('permalink')\n # Check permalink existing on response page.\n if not response.selector.xpath(xpath_template.format(permalink=permalink)):\n self._set_error_state()\n raise Exception(\n \"Permalink #{} is not found on page {}\".format(permalink, response.request.url))\n\n def parse(self, response):\n # type: (Any) -> Generator[Request, None, None]\n self.log(response)\n for link in LxmlLinkExtractor(deny_domains=self.deny_domains, deny_extensions=['doc'],\n tags=self.tags, attrs=self.attrs, deny=self.deny,\n canonicalize=False).extract_links(response):\n callback = self.parse # type: Any\n dont_filter = False\n method = 'GET'\n if self._is_external_url(link.url):\n callback = self.check_existing\n method = 'HEAD'\n elif '#' in link.url:\n dont_filter = True\n callback = self.check_permalink\n yield Request(link.url, method=method, callback=callback, dont_filter=dont_filter,\n errback=self.error_callback)\n\n def retry_request_with_get(self, request):\n # type: (Request) -> Generator[Request, None, None]\n request.method = 'GET'\n request.dont_filter = True\n yield request\n\n def exclude_error(self, url):\n # type: (str) -> bool\n if url in EXCLUDED_URLS:\n return True\n return False\n\n def error_callback(self, failure):\n # type: (Any) -> Optional[Generator[Any, None, None]]\n if hasattr(failure.value, 'response') and failure.value.response:\n response = failure.value.response\n if self.exclude_error(response.url):\n return None\n if response.status == 404:\n self._set_error_state()\n raise Exception('Page not found: {}'.format(response))\n if response.status == 405 and response.request.method == 'HEAD':\n # Method 'HEAD' not allowed, repeat request with 'GET'\n return 
self.retry_request_with_get(response.request)\n self.log(\"Error! Please check link: {}\".format(response), logging.ERROR)\n elif isinstance(failure.type, IOError):\n self._set_error_state()\n else:\n raise Exception(failure.value)\n return None\n", "path": "tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py" } ]
[ { "content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport logging\nimport re\nimport scrapy\n\nfrom scrapy import Request\nfrom scrapy.linkextractors import IGNORED_EXTENSIONS\nfrom scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor\nfrom scrapy.utils.url import url_has_any_extension\n\nfrom typing import Any, Generator, List, Optional, Tuple\n\nEXCLUDED_URLS = [\n # Google calendar returns 404s on HEAD requests unconditionally\n 'https://calendar.google.com/calendar/[email protected]',\n # Returns 409 errors to HEAD requests frequently\n 'https://medium.freecodecamp.com',\n # Returns 404 to HEAD requests unconditionally\n 'https://www.git-tower.com/blog/command-line-cheat-sheet/',\n]\n\n\nclass BaseDocumentationSpider(scrapy.Spider):\n name = None # type: Optional[str]\n # Exclude domain address.\n deny_domains = [] # type: List[str]\n start_urls = [] # type: List[str]\n deny = [] # type: List[str]\n file_extensions = ['.' + ext for ext in IGNORED_EXTENSIONS] # type: List[str]\n tags = ('a', 'area', 'img')\n attrs = ('href', 'src')\n\n def __init__(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n super(BaseDocumentationSpider, self).__init__(*args, **kwargs)\n self.has_error = False\n\n def _set_error_state(self):\n # type: () -> None\n self.has_error = True\n\n def _has_extension(self, url):\n # type: (str) -> bool\n return url_has_any_extension(url, self.file_extensions)\n\n def _is_external_url(self, url):\n # type: (str) -> bool\n return url.startswith('http') or self._has_extension(url)\n\n def check_existing(self, response):\n # type: (Any) -> None\n self.log(response)\n\n def check_permalink(self, response):\n # type: (Any) -> None\n self.log(response)\n xpath_template = \"//*[@id='{permalink}' or @name='{permalink}']\"\n m = re.match(r\".+\\#(?P<permalink>.*)$\", response.request.url) # Get anchor value.\n if not m:\n return\n permalink = m.group('permalink')\n # Check permalink existing on response page.\n if not response.selector.xpath(xpath_template.format(permalink=permalink)):\n self._set_error_state()\n raise Exception(\n \"Permalink #{} is not found on page {}\".format(permalink, response.request.url))\n\n def parse(self, response):\n # type: (Any) -> Generator[Request, None, None]\n self.log(response)\n for link in LxmlLinkExtractor(deny_domains=self.deny_domains, deny_extensions=['doc'],\n tags=self.tags, attrs=self.attrs, deny=self.deny,\n canonicalize=False).extract_links(response):\n callback = self.parse # type: Any\n dont_filter = False\n method = 'GET'\n if self._is_external_url(link.url):\n callback = self.check_existing\n method = 'HEAD'\n elif '#' in link.url:\n dont_filter = True\n callback = self.check_permalink\n yield Request(link.url, method=method, callback=callback, dont_filter=dont_filter,\n errback=self.error_callback)\n\n def retry_request_with_get(self, request):\n # type: (Request) -> Generator[Request, None, None]\n request.method = 'GET'\n request.dont_filter = True\n yield request\n\n def exclude_error(self, url):\n # type: (str) -> bool\n if url in EXCLUDED_URLS:\n return True\n return False\n\n def error_callback(self, failure):\n # type: (Any) -> Optional[Generator[Any, None, None]]\n if hasattr(failure.value, 'response') and failure.value.response:\n response = failure.value.response\n if self.exclude_error(response.url):\n return None\n if response.status == 404:\n self._set_error_state()\n raise Exception('Page not found: {}'.format(response))\n if response.status == 405 and response.request.method 
== 'HEAD':\n # Method 'HEAD' not allowed, repeat request with 'GET'\n return self.retry_request_with_get(response.request)\n self.log(\"Error! Please check link: {}\".format(response), logging.ERROR)\n elif isinstance(failure.type, IOError):\n self._set_error_state()\n else:\n raise Exception(failure.value)\n return None\n", "path": "tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py" } ]
diff --git a/docs/fixing-commits.md b/docs/fixing-commits.md new file mode 100644 index 0000000000000..9bd258475a910 --- /dev/null +++ b/docs/fixing-commits.md @@ -0,0 +1,35 @@ +# Fixing Commits +This is mostly from +[here](https://help.github.com/articles/changing-a-commit-message/#rewriting-the-most-recent-commit-message). + +## Fixing the last commit +### Changing the last commit message +1. `git commit --amend -m "New Message"` + +### Changing the last commit +1. Make your changes to the files +2. Run `git add <filename>` to add one file or `git add <filename1> <filename2> ...` to add multiple files +3. `git commit --amend` + +## Fixing older commits +### Changing commit messages +1. `git rebase -i HEAD~5` (if, for example, you are editing some of the last five commits) +2. For each commit that you want to change the message, change `pick` to `reword`, and save +3. Change the commit messages + +### Deleting old commits +1. `git rebase -i HEAD~n` where `n` is the number of commits you are looking at +2. For each commit that you want to delete, change `pick` to `drop`, and save + +## Squashing commits +Sometimes, you want to make one commit out of a bunch of commits. To do this, + +1. `git rebase -i HEAD~n` where `n` is the number of commits you are interested in +2. Change `pick` to `squash` on the lines containing the commits you want to squash and save + +## Reordering commits +1. `git rebase -i HEAD~n` where `n` is the number of commits you are interested in +2. Reorder the lines containing the commits and save + +## Pushing commits after tidying them +1. `git push origin +my-feature-branch` (Note the `+` there and substitute your actual branch name.) diff --git a/docs/git-cheat-sheet-detailed.md b/docs/git-cheat-sheet-detailed.md new file mode 100644 index 0000000000000..21c578b0c9de8 --- /dev/null +++ b/docs/git-cheat-sheet-detailed.md @@ -0,0 +1,54 @@ +# Git Cheat Sheet (Detailed) + +See also +[fixing commits][fix-commit] + +Commands: + +- add + - `git add foo.py`: add `foo.py` to the staging area + - `git add foo.py bar.py`: add `foo.py` AND `bar.py` to the staging area +- checkout + - `git checkout -b new-branch-name`: create branch `new-branch-name` and switch/checkout to that new branch + - `git checkout master`: switch to your `master` branch + - `git checkout old-branch-name`: switch to an existing branch `old-branch-name` +- commit + - `git commit --amend`: changing the last commit message. 
Read more [here][fix-commit] +- config + - `git config --global core.editor nano`: set core editor to `nano` (you can set this to `vim` or others) + - `git config --global core.symlinks true`: allow symbolic links +- diff + - `git diff`: display the changes you have made to all files + - `git diff --cached`: display the changes you have made to staged files + - `git diff HEAD~2..`: display the 2 most recent changes you have made to files +- fetch + - `git fetch origin`: fetch origin repository + - `git fetch upstream`: fetch upstream repository +- grep + - `git grep update_unread_counts -- '*.js'`: search all files (ending in `.js`) for `update_unread_counts` +- log + - `git log`: show commit logs +- pull + - **do not use for Zulip** +- push + - `git push origin +branch-name`: push your commits to your origin repository +- rebase + - `git rebase -i HEAD~3`: interactive rebasing current branch with first three items on HEAD + - `git rebase -i master`: interactive rebasing current branch with master branch + - `git rebase upstream/master`: rebasing current branch with master branch from upstream repository +- reflog + - `git reflog | head -10`: manage reference logs for the past 10 commits +- remote + - `git remote -v`: display your origin and upstream repositories +- reset + - `git reset HEAD~2`: reset two most recent commits +- rm + - `git rm oops.txt`: remove `oops.txt` +- show + - `git show HEAD`: display most recent commit + - `git show HEAD~~~`: display third most recent commit + - `git show master`: display most recent commit on `master` +- status + - `git status`: show the working tree status, unstaged and staged files + +[fix-commit]: fixing-commits.html diff --git a/docs/git-cheat-sheet.md b/docs/git-cheat-sheet.md new file mode 100644 index 0000000000000..1b8524d239e51 --- /dev/null +++ b/docs/git-cheat-sheet.md @@ -0,0 +1,52 @@ +# Git Cheat Sheet + +See also [fixing commits][fix-commit] + +Commands: + +- add + - `git add foo.py` +- checkout + - `git checkout -b new-branch-name` + - `git checkout master` + - `git checkout old-branch-name` +- commit + - `git commit --amend` +- config + - `git config --global core.editor nano` + - `git config --global core.symlinks true` +- diff + - `git diff` + - `git diff --cached` + - `git diff HEAD~2..` +- fetch + - `git fetch origin` + - `git fetch upstream` +- grep + - `git grep update_unread_counts -- '*.js'` +- log + - `git log` +- pull + - **do not use for Zulip** +- push + - `git push origin +branch-name` +- rebase + - `git rebase -i HEAD~3` + - `git rebase -i master` + - `git rebase upstream/master` +- reflog + - `git reflog | head -10` +- remote + - `git remote -v` +- reset + - `git reset HEAD~2` +- rm + - `git rm oops.txt` +- show + - `git show HEAD` + - `git show HEAD~~~` + - `git show master` +- status + - `git status` + +[fix-commit]: fixing-commits.html diff --git a/docs/images/shell-screenshot.png b/docs/images/shell-screenshot.png new file mode 100644 index 0000000000000..cda46476b8a88 Binary files /dev/null and b/docs/images/shell-screenshot.png differ diff --git a/docs/index.rst b/docs/index.rst index 62b60816c1c64..f1749f156433e 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -84,6 +84,11 @@ Contents: life-of-a-request reading-list screenshot-and-gif-software + fixing-commits + git-cheat-sheet-detailed + git-cheat-sheet + shell-tips + working-copies .. 
_code-contribution-guide: diff --git a/docs/shell-tips.md b/docs/shell-tips.md new file mode 100644 index 0000000000000..606d266a92182 --- /dev/null +++ b/docs/shell-tips.md @@ -0,0 +1,338 @@ +# Shell tips + +The *shell* is a **command line interpreter**. To use it you can open a +*terminal* (sometimes called a *console*). This is how most terminal windows +look like: + +![An example shell window](images/shell-screenshot.png) + +If you haven't used it before, you should probably take a look at +[this tutorial](http://linuxcommand.org/lc3_learning_the_shell.php). + +If you're using Windows, +[these videos](https://www.youtube.com/playlist?list=PL6gx4Cwl9DGDV6SnbINlVUd0o2xT4JbMu) +may be useful too, but keep in mind that the following tips only apply to +Linux/macOS environments (Unix shells). You can also use a tool, for example +[Cygwin](https://www.cygwin.com/), to have a Unix-like shell on Windows. + +## The prompt (`$`) + +When searching Google, or Zulip's docs, you'll find commands that begin +with a dollar sign `$` or a dollar sign preceded by some text +(e.g. `(venv)john@laptop:~$`). + +This is called the **prompt**, and it's only an indicator that the shell is +awaiting new orders. The prompt can contain useful information, let's look +at `(venv)john@laptop:~$`: + +- `(venv)` informs the user that they're currently in a virtual environment +(more on [Python virtual + environments](http://docs.python-guide.org/en/latest/dev/virtualenvs/)) +- the `john` before `@` is the username +- the `laptop` is the host machine name +- the `~` after the colon informs the user they're currently in the home +folder of the user `john` + +You shouldn't type the prompt or the text preceding it, since it isn't a +part of the commands. + +## Tilde character (`~`) + +It's very frequent to see the tilde (`~`) in paths. The tilde is an +abbreviation for your home directory (`/home/YOUR_USERNAME` most of the times). + +That's why the following is exactly the same, if the user running it is +`john`: + +``` +$ cd ~ +$ cd /home/john +``` + +## Change directory (`cd`) + +When you're using the shell, you work inside a directory (the one specified in +the prompt). This way you can point to files relative to your current +directory, instead of writing the whole path. + +Imagine you have a file called `ideas.txt` inside `/home/john/notes/`, and +you want to edit it using `nano`. You could use: + +``` +$ nano /home/john/notes/ideas.txt +``` + +However, that isn't very practical, especially if you are working with +longer paths. + +That's why it's very useful to change the path where you are currently +located (usually known as **working directory**). To do that, you use `cd` +(**c**hange **d**irectory): + +``` +$ cd /home/john/notes/ +~/notes$ nano ideas.txt +``` + +Or, if you're the user `john`: `cd ~/notes`. + +You can now access to all the files inside `/home/john/notes` directly, without +needing to type the whole path. + +[Relative paths](http://www.linuxnix.com/abslute-path-vs-relative-path-in-linuxunix/) +make it much easier to move through files and directories, too. + +## Running commands as root (`sudo`) + +You may have noticed that many commands begin with `sudo`. This informs the +shell that the following command must be run as the root - a user that by +default has access to all commands and files on a Unix operating system (i.e. +a user with administrator privileges). That's why you may be asked for a +password in those cases: the system verifies you have permission to act as +the *root* user. 
+ +In case you were wondering, the name `sudo` comes from **s**uper **u**ser +**do**. + +## Escaping characters + +Some characters cannot be used directly in the shell, because they have a +special meaning. Consider the following example: + +``` +$ echo "He said hello" +He said hello +``` + +What if you wanted to display double quotes? You can't use +`echo "He said "hello""`, because in that case you're using the +double quotes for two different purposes: + +- Delimiting the string you want to use, from `He` to `"hello"`. +- Quoting something, by literally printing `"`. + +You have to specify which double quotes are used in each case. When you want +one of those "special characters" to be literally printed, that's called +**character escaping**. To escape a character, simply add a backslash (`\`) +before it. + +Returning to our example: + +``` +$ echo "He said \"hello\"" +He said "hello" +``` + +As you can see, the double quotes with the backslash are shown, but the ones +without it are used as string delimiters. + +Double quotes aren't the only case of special characters. Some others are `$`, +`#`, `{` or `}`, but there are many more. The backslash itself can be escaped +as well, using the same procedure: `\\`. + +## Sequencing commands + +It's also possible to run multiple commands in a single line. For that purpose, +the shell provides two different separators: + +- **Semicolon `;`**: runs a command, and once it has finished, runs the next + one: + + ``` + $ echo "Hello"; echo "World!" + Hello + World! + ``` + +- **Double ampersand `&&`**: runs a command, and **only if** it finished + without errors, it proceeds with the next one: + + ``` + $ qwfvijwe && echo "Hello" + qwfvijwe: command not found + ``` + + Notice that it doesn't print `Hello` at the end, because the previous + command (`qwfvijwe`) returned an error. + + When using an incorrect command with a semicolon, the `Hello` will still + be printed: + + ``` + $ qwfvijwe; echo "Hello" + qwfvijwe: command not found + Hello + ``` + +## Splitting commands into multiple lines + +Sometimes you end up with a very long command, that is hard to read and may +be unclear. This is a problem, especially if you want to share that command, +e.g. in a documentation file. + +In those cases, you can use a backslash at the end of each line, to inform the +shell "wait, there's more on the next line". + +This is an example, taken from the docs on how to install the Zulip development +environment: + +``` +sudo apt-get -y purge vagrant && \ +wget https://releases.hashicorp.com/vagrant/1.8.6/vagrant_1.8.6_x86_64.deb && \ +sudo dpkg -i vagrant*.deb && \ +sudo apt-get -y install build-essential git ruby lxc lxc-templates cgroup-lite redir && \ +vagrant plugin install vagrant-lxc && \ +vagrant lxc sudoers +``` + +It's all a single command, joined using the double ampersand explained in +[Sequencing commands](#sequencing-commands). If you're typing it manually, +you don't need to include the backslashes, just write it all on the same line, +and hit <kbd>ENTER</kbd>/<kbd>RETURN</kbd> at the end. + +If you think about it, what is happening here is actually another case of +character escaping. The newline character (the one that appears when you hit +<kbd>ENTER</kbd>) usually means "read this command". However, here we want to +literally have the newline character, and thus the `\<newline>`. + +The newline character is invisible (we only see a line break), but it's still +there! 
+ +## Arguments + +Most commands need additional data to work, like a path or a file. That extra +information is called an **argument**, and it's specified after the name of the +command, like this: + +``` +$ cd /home/john/notes +``` + +Here, the command is `cd`, and the first (and only) argument is +`/home/john/notes`: + +- `cd` - *command*: changes your current directory. + +- `/home/john/notes` - *argument*: the directory where you want to go. + +In each command the arguments are specified in different ways, and have +different meanings. + +Sometimes, a command can accept arguments indicated with dashes. Here's another +example of arguments usage: + +``` +$ nano -C /home/john/backups --mouse todo.txt +``` + +As you can see, some arguments imply that more information has to be specified, +while others don't. + +In this case, we're saying: "Bash, use the app `nano` to open the file +`todo.txt`, enabling mouse support, and saving the backup files to +`/home/john/backups`". The different parts are: + +- `nano` - *command*: program that allows editing text easily. + +- `-C` - *argument*: needs you to indicate where the backups should be stored, + and thus you have to add an additional argument after it, to specify the + directory (`/home/john/backups` in the example). + +- `--mouse` - *argument*: is just an option you set, `nano` doesn't need + anything else to make it work. Thus, there isn't any extra argument for that. + +Note that the `todo.txt` is the file we want to open! It has nothing to do with +the previous argument. This will probably clarify it (taken from `nano`'s +help): + +``` +Usage: nano [OPTIONS] [FILE]... +``` + +So, in the options you indicate the arguments, and `FILE` is... well, the file. + +Don't worry, you don't have to memorize the meaning of +all the arguments for every single command. There are +[tools](#understanding-commands) that help you with that :wink:. + +## Shebang + +You can run some files directly, without specifying a program to interpret +them. + +That's why you may have seen cases when some Python scripts are called with +`python`: + +``` +$ python my_program.py +``` + +While other times, `python` isn't used: + +``` +$ ./my_program.py +``` + +In the latter, it's skipped because `my_program.py` already specifies in it +which interpreter should be used (in this case, `python`). + +This is indicated in the very first line of the script files, and it's called +a **shebang**. In Python scripts, it looks like this: + +``` +#!/usr/bin/env python +``` + +With this, you're telling the shell: "if I tell you to run this, ask +`/usr/bin/env python` how to understand it". + +`/usr/bin/env` is a way to identify where `python` is installed. If it was in +`/usr/bin/python`, you could use the shebang `#!/usr/bin/python`, but `env` +allows more flexibility (since not everyone has their Python interpreter +there). + +Another example of shebang is the one used in Bash scripts. In those cases, +`#!/bin/sh` is used. + +The result is that the shell calls the program specified in the shebang, with +the script as a parameter. So, returning to our example with `my_program.py`, +when you run `./my_program.py`, what happens under the hood is: + +``` +$ /usr/bin/env python my_program.py +``` + +## Understanding commands + +Frequently, you may find commands that you don't understand, or don't +know what they do. You can use `man <command>` to see the **man**ual page for +that specific command. 
Also, you may find useful +[explainshell](http://explainshell.com/), a webpage that explains what most +commands do, part by part. + +## Cheatsheet + +There are many more commands in the shell, besides the ones explained in this +file. +[Here](https://www.git-tower.com/blog/command-line-cheat-sheet/) you can find +a simple yet useful cheatsheet, created by Tower, that could help you +understand and remember what other common commands do (e.g. `ls`). + +## Git + +Probably at this point you've heard about Git. It's basically a tool that most +developers use to manage all the changes in their code. + +At first it seems like magic, but once you get the basic concepts you find it +extremely useful and even easy to use (at least the 99% of the time). + +To learn more about how to use it, read +[our docs](http://zulip.readthedocs.io/en/latest/git-guide.html) on Git and +Github. + +[This cheatsheet][git-cheat-detailed] +will be useful in your journey, as well. + +![Git - XKCD 1597](https://imgs.xkcd.com/comics/git.png) + +[git-cheat-detailed]: git-cheat-sheet-detailed.html diff --git a/docs/working-copies.md b/docs/working-copies.md new file mode 100644 index 0000000000000..52a0f9cfa6f26 --- /dev/null +++ b/docs/working-copies.md @@ -0,0 +1,53 @@ +# Working copies + +When you work on Zulip code, there are three working copies +of the Zulip git repo that you are generally concerned with: + +- local copy: This lives on your laptop or your remove dev instance. +- forked copy: This lives on GitHub, and it's tied to your account. +- official Zulip repo: https://github.com/zulip/zulip + +We sometimes call the forked copy the **origin** remote. + +We sometimes call the official repo the **upstream** remote. + +When you work on Zulip code, you will end up moving code between +the various working copies. + +## Workflows + +Sometimes you need to get commits. Here are some scenarios: + +- You may fork the official Zulip repo to your GitHub fork. +- You may fetch commits from the offical Zulip repo to your local copy. +- You occasionally may fetch commits from your forked copy. + +Sometimes you want to publish commits. Here are scenarios: + +- You push code from your local copy to your GitHub fork. (You usually + want to put the commit on a feature branch.) +- You submit a PR to the official Zulip repo. + +Finally, the Zulip core team will occasionally want your changes! + +- The Zulip core team can accept your changes and add them to + the official repo, usually on the master branch. + +## Names + +We call remote working copies of the repository by these short +names. + +- **origin**: This is your fork. +- **upstream**: This is the official Zulip repo. + +## Relevant git commands + +The following commands are useful for moving commits between +working copies: + +- `git fetch`: This grabs code from another repo to your local copy. +- `git push`: This pushes code from your local repo to one of the remotes. +- `git remote`: This helps you configure short names for remotes. +- `git pull`: **Do not use this, please**! 
+ diff --git a/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py b/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py index 29d88ae4e360d..408504f7043d0 100644 --- a/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py +++ b/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py @@ -17,6 +17,8 @@ 'https://calendar.google.com/calendar/[email protected]', # Returns 409 errors to HEAD requests frequently 'https://medium.freecodecamp.com', + # Returns 404 to HEAD requests unconditionally + 'https://www.git-tower.com/blog/command-line-cheat-sheet/', ]
Pull GCI docs into main Zulip repo. Some of our assets, like git cheatsheets, currently live in the zulip-gci repo. We should move them to the main Zulip repo and link from appropriate places. cc @synicalsyntax @lonerz @tommyip
pre-commit__pre-commit-1113
[ { "content": "from __future__ import unicode_literals\n\nimport contextlib\nimport os.path\n\nimport toml\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import Var\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\n\n\nENVIRONMENT_DIR = 'rustenv'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef get_env_patch(target_dir):\n return (\n (\n 'PATH',\n (os.path.join(target_dir, 'bin'), os.pathsep, Var('PATH')),\n ),\n )\n\n\[email protected]\ndef in_env(prefix):\n target_dir = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n with envcontext(get_env_patch(target_dir)):\n yield\n\n\ndef _add_dependencies(cargo_toml_path, additional_dependencies):\n with open(cargo_toml_path, 'r+') as f:\n cargo_toml = toml.load(f)\n cargo_toml.setdefault('dependencies', {})\n for dep in additional_dependencies:\n name, _, spec = dep.partition(':')\n cargo_toml['dependencies'][name] = spec or '*'\n f.seek(0)\n toml.dump(cargo_toml, f)\n f.truncate()\n\n\ndef install_environment(prefix, version, additional_dependencies):\n helpers.assert_version_default('rust', version)\n directory = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n\n # There are two cases where we might want to specify more dependencies:\n # as dependencies for the library being built, and as binary packages\n # to be `cargo install`'d.\n #\n # Unlike e.g. Python, if we just `cargo install` a library, it won't be\n # used for compilation. And if we add a crate providing a binary to the\n # `Cargo.toml`, the binary won't be built.\n #\n # Because of this, we allow specifying \"cli\" dependencies by prefixing\n # with 'cli:'.\n cli_deps = {\n dep for dep in additional_dependencies if dep.startswith('cli:')\n }\n lib_deps = set(additional_dependencies) - cli_deps\n\n if len(lib_deps) > 0:\n _add_dependencies(prefix.path('Cargo.toml'), lib_deps)\n\n with clean_path_on_failure(directory):\n packages_to_install = {()}\n for cli_dep in cli_deps:\n cli_dep = cli_dep[len('cli:'):]\n package, _, version = cli_dep.partition(':')\n if version != '':\n packages_to_install.add((package, '--version', version))\n else:\n packages_to_install.add((package,))\n\n for package in packages_to_install:\n cmd_output(\n 'cargo', 'install', '--bins', '--root', directory, *package,\n cwd=prefix.prefix_dir\n )\n\n\ndef run_hook(hook, file_args):\n with in_env(hook.prefix):\n return helpers.run_xargs(hook, helpers.to_cmd(hook), file_args)\n", "path": "pre_commit/languages/rust.py" } ]
[ { "content": "from __future__ import unicode_literals\n\nimport contextlib\nimport os.path\n\nimport toml\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import Var\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\n\n\nENVIRONMENT_DIR = 'rustenv'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef get_env_patch(target_dir):\n return (\n (\n 'PATH',\n (os.path.join(target_dir, 'bin'), os.pathsep, Var('PATH')),\n ),\n )\n\n\[email protected]\ndef in_env(prefix):\n target_dir = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n with envcontext(get_env_patch(target_dir)):\n yield\n\n\ndef _add_dependencies(cargo_toml_path, additional_dependencies):\n with open(cargo_toml_path, 'r+') as f:\n cargo_toml = toml.load(f)\n cargo_toml.setdefault('dependencies', {})\n for dep in additional_dependencies:\n name, _, spec = dep.partition(':')\n cargo_toml['dependencies'][name] = spec or '*'\n f.seek(0)\n toml.dump(cargo_toml, f)\n f.truncate()\n\n\ndef install_environment(prefix, version, additional_dependencies):\n helpers.assert_version_default('rust', version)\n directory = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n\n # There are two cases where we might want to specify more dependencies:\n # as dependencies for the library being built, and as binary packages\n # to be `cargo install`'d.\n #\n # Unlike e.g. Python, if we just `cargo install` a library, it won't be\n # used for compilation. And if we add a crate providing a binary to the\n # `Cargo.toml`, the binary won't be built.\n #\n # Because of this, we allow specifying \"cli\" dependencies by prefixing\n # with 'cli:'.\n cli_deps = {\n dep for dep in additional_dependencies if dep.startswith('cli:')\n }\n lib_deps = set(additional_dependencies) - cli_deps\n\n if len(lib_deps) > 0:\n _add_dependencies(prefix.path('Cargo.toml'), lib_deps)\n\n with clean_path_on_failure(directory):\n packages_to_install = {('--path', '.')}\n for cli_dep in cli_deps:\n cli_dep = cli_dep[len('cli:'):]\n package, _, version = cli_dep.partition(':')\n if version != '':\n packages_to_install.add((package, '--version', version))\n else:\n packages_to_install.add((package,))\n\n for package in packages_to_install:\n cmd_output(\n 'cargo', 'install', '--bins', '--root', directory, *package,\n cwd=prefix.prefix_dir\n )\n\n\ndef run_hook(hook, file_args):\n with in_env(hook.prefix):\n return helpers.run_xargs(hook, helpers.to_cmd(hook), file_args)\n", "path": "pre_commit/languages/rust.py" } ]
diff --git a/pre_commit/languages/rust.py b/pre_commit/languages/rust.py index e09d0078f..4b25a9d10 100644 --- a/pre_commit/languages/rust.py +++ b/pre_commit/languages/rust.py @@ -73,7 +73,7 @@ def install_environment(prefix, version, additional_dependencies): _add_dependencies(prefix.path('Cargo.toml'), lib_deps) with clean_path_on_failure(directory): - packages_to_install = {()} + packages_to_install = {('--path', '.')} for cli_dep in cli_deps: cli_dep = cli_dep[len('cli:'):] package, _, version = cli_dep.partition(':')
rust hook requires `--path` argument Cargo has changed how packages get installed and requires an extra `--path <destination>` argument. Symptom: ``` [INFO] Initializing environment for https://github.com/nix-community/nixpkgs-fmt. [INFO] Installing environment for https://github.com/nix-community/nixpkgs-fmt. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... An unexpected error has occurred: CalledProcessError: Command: ('/nix/store/fcc3x8zwq1c0667xjs7bkn6ay8j4fdpz-rust-1.38.0-nightly-2019-08-07-ad7c55e1f/bin/cargo', 'install', '--bins', '--root', '/home/zimbatm/.cache/pre-commit/repoeft6xm6t/rustenv-default') Return code: 101 Expected return code: 0 Output: (none) Errors: error: Using `cargo install` to install the binaries for the package in current working directory is no longer supported, use `cargo install --path .` instead. Use `cargo build` if you want to simply build the package. ``` I guess the fix should be done here: https://github.com/pre-commit/pre-commit/blob/9c6a1d80d6b94c86a1785a40a51389e83accac3e/pre_commit/languages/rust.py#L87 Do we want to make pre-commit compatible with multiple versions of cargo or just the latest one? /cc @asottile @chriskuehl
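A self-contained sketch of the argument change the diff above makes (`cargo_install_args` is a hypothetical helper, not part of pre-commit's API): the hook repository itself is now installed with an explicit `--path .`, while `cli:`-prefixed dependencies are still installed by crate name.

```python
def cargo_install_args(root, cli_dep=None):
    """Build the argument list for one `cargo install` call.

    cli_dep is None       -> install the hook repository itself (`--path .`)
    cli_dep = 'name:ver'  -> install a named crate, optionally pinned
    """
    args = ['cargo', 'install', '--bins', '--root', root]
    if cli_dep is None:
        # newer cargo refuses a bare `cargo install` for the working
        # directory and requires an explicit `--path .`
        args += ['--path', '.']
    else:
        package, _, version = cli_dep.partition(':')
        args.append(package)
        if version:
            args += ['--version', version]
    return args


# the first call corresponds to installing the hook repo, the second to a
# hypothetical `cli:` dependency pinned to a version
print(cargo_install_args('/tmp/rustenv'))
print(cargo_install_args('/tmp/rustenv', 'shellharden:3.1.0'))
```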
statsmodels__statsmodels-3044
[ { "content": "from statsmodels.compat.numpy import recarray_select\nfrom statsmodels.compat.python import (range, StringIO, urlopen,\n HTTPError, URLError, lrange,\n cPickle, urljoin, BytesIO, long, PY3)\nimport sys\nimport shutil\nfrom os import environ\nfrom os import makedirs\nfrom os.path import expanduser\nfrom os.path import exists\nfrom os.path import join\n\nimport numpy as np\nfrom numpy import array\nfrom pandas import read_csv, DataFrame, Index\n\n\ndef webuse(data, baseurl='http://www.stata-press.com/data/r11/', as_df=True):\n \"\"\"\n Download and return an example dataset from Stata.\n\n Parameters\n ----------\n data : str\n Name of dataset to fetch.\n baseurl : str\n The base URL to the stata datasets.\n as_df : bool\n If True, returns a `pandas.DataFrame`\n\n Returns\n -------\n dta : Record Array\n A record array containing the Stata dataset.\n\n Examples\n --------\n >>> dta = webuse('auto')\n\n Notes\n -----\n Make sure baseurl has trailing forward slash. Doesn't do any\n error checking in response URLs.\n \"\"\"\n # lazy imports\n from statsmodels.iolib import genfromdta\n\n url = urljoin(baseurl, data+'.dta')\n dta = urlopen(url)\n dta = BytesIO(dta.read()) # make it truly file-like\n if as_df: # could make this faster if we don't process dta twice?\n return DataFrame.from_records(genfromdta(dta))\n else:\n return genfromdta(dta)\n\n\nclass Dataset(dict):\n def __init__(self, **kw):\n # define some default attributes, so pylint can find them\n self.endog = None\n self.exog = None\n self.data = None\n self.names = None\n\n dict.__init__(self, kw)\n self.__dict__ = self\n # Some datasets have string variables. If you want a raw_data\n # attribute you must create this in the dataset's load function.\n try: # some datasets have string variables\n self.raw_data = self.data.view((float, len(self.names)))\n except:\n pass\n\n def __repr__(self):\n return str(self.__class__)\n\n\ndef process_recarray(data, endog_idx=0, exog_idx=None, stack=True, dtype=None):\n names = list(data.dtype.names)\n\n if isinstance(endog_idx, (int, long)):\n endog = array(data[names[endog_idx]], dtype=dtype)\n endog_name = names[endog_idx]\n endog_idx = [endog_idx]\n else:\n endog_name = [names[i] for i in endog_idx]\n\n if stack:\n endog = np.column_stack(data[field] for field in endog_name)\n else:\n endog = data[endog_name]\n\n if exog_idx is None:\n exog_name = [names[i] for i in range(len(names))\n if i not in endog_idx]\n else:\n exog_name = [names[i] for i in exog_idx]\n\n if stack:\n exog = np.column_stack(data[field] for field in exog_name)\n else:\n exog = recarray_select(data, exog_name)\n\n if dtype:\n endog = endog.astype(dtype)\n exog = exog.astype(dtype)\n\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name=endog_name, exog_name=exog_name)\n\n return dataset\n\n\ndef process_recarray_pandas(data, endog_idx=0, exog_idx=None, dtype=None,\n index_idx=None):\n\n data = DataFrame(data, dtype=dtype)\n names = data.columns\n\n if isinstance(endog_idx, (int, long)):\n endog_name = names[endog_idx]\n endog = data[endog_name]\n if exog_idx is None:\n exog = data.drop([endog_name], axis=1)\n else:\n exog = data.filter(names[exog_idx])\n else:\n endog = data.ix[:, endog_idx]\n endog_name = list(endog.columns)\n if exog_idx is None:\n exog = data.drop(endog_name, axis=1)\n elif isinstance(exog_idx, (int, long)):\n exog = data.filter([names[exog_idx]])\n else:\n exog = data.filter(names[exog_idx])\n\n if index_idx is not None: # NOTE: will have to be improved for 
dates\n endog.index = Index(data.ix[:, index_idx])\n exog.index = Index(data.ix[:, index_idx])\n data = data.set_index(names[index_idx])\n\n exog_name = list(exog.columns)\n dataset = Dataset(data=data, names=list(names), endog=endog, exog=exog,\n endog_name=endog_name, exog_name=exog_name)\n return dataset\n\n\ndef _maybe_reset_index(data):\n \"\"\"\n All the Rdatasets have the integer row.labels from R if there is no\n real index. Strip this for a zero-based index\n \"\"\"\n if data.index.equals(Index(lrange(1, len(data) + 1))):\n data = data.reset_index(drop=True)\n return data\n\n\ndef _get_cache(cache):\n if cache is False:\n # do not do any caching or load from cache\n cache = None\n elif cache is True: # use default dir for cache\n cache = get_data_home(None)\n else:\n cache = get_data_home(cache)\n return cache\n\n\ndef _cache_it(data, cache_path):\n if PY3:\n # for some reason encode(\"zip\") won't work for me in Python 3?\n import zlib\n # use protocol 2 so can open with python 2.x if cached in 3.x\n open(cache_path, \"wb\").write(zlib.compress(cPickle.dumps(data,\n protocol=2)))\n else:\n open(cache_path, \"wb\").write(cPickle.dumps(data).encode(\"zip\"))\n\n\ndef _open_cache(cache_path):\n if PY3:\n # NOTE: don't know why but decode('zip') doesn't work on my\n # Python 3 build\n import zlib\n data = zlib.decompress(open(cache_path, 'rb').read())\n # return as bytes object encoded in utf-8 for cross-compat of cached\n data = cPickle.loads(data).encode('utf-8')\n else:\n data = open(cache_path, 'rb').read().decode('zip')\n data = cPickle.loads(data)\n return data\n\n\ndef _urlopen_cached(url, cache):\n \"\"\"\n Tries to load data from cache location otherwise downloads it. If it\n downloads the data and cache is not None then it will put the downloaded\n data in the cache path.\n \"\"\"\n from_cache = False\n if cache is not None:\n cache_path = join(cache,\n url.split(\"://\")[-1].replace('/', ',') + \".zip\")\n try:\n data = _open_cache(cache_path)\n from_cache = True\n except:\n pass\n\n # not using the cache or didn't find it in cache\n if not from_cache:\n data = urlopen(url).read()\n if cache is not None: # then put it in the cache\n _cache_it(data, cache_path)\n return data, from_cache\n\n\ndef _get_data(base_url, dataname, cache, extension=\"csv\"):\n url = base_url + (dataname + \".%s\") % extension\n try:\n data, from_cache = _urlopen_cached(url, cache)\n except HTTPError as err:\n if '404' in str(err):\n raise ValueError(\"Dataset %s was not found.\" % dataname)\n else:\n raise err\n\n data = data.decode('utf-8', 'strict')\n return StringIO(data), from_cache\n\n\ndef _get_dataset_meta(dataname, package, cache):\n # get the index, you'll probably want this cached because you have\n # to download info about all the data to get info about any of the data...\n index_url = (\"https://raw.github.com/vincentarelbundock/Rdatasets/master/\"\n \"datasets.csv\")\n data, _ = _urlopen_cached(index_url, cache)\n # Python 3\n if PY3: # pragma: no cover\n data = data.decode('utf-8', 'strict')\n index = read_csv(StringIO(data))\n idx = np.logical_and(index.Item == dataname, index.Package == package)\n dataset_meta = index.ix[idx]\n return dataset_meta[\"Title\"].item()\n\n\ndef get_rdataset(dataname, package=\"datasets\", cache=False):\n \"\"\"download and return R dataset\n\n Parameters\n ----------\n dataname : str\n The name of the dataset you want to download\n package : str\n The package in which the dataset is found. 
The default is the core\n 'datasets' package.\n cache : bool or str\n If True, will download this data into the STATSMODELS_DATA folder.\n The default location is a folder called statsmodels_data in the\n user home folder. Otherwise, you can specify a path to a folder to\n use for caching the data. If False, the data will not be cached.\n\n Returns\n -------\n dataset : Dataset instance\n A `statsmodels.data.utils.Dataset` instance. This objects has\n attributes::\n\n * data - A pandas DataFrame containing the data\n * title - The dataset title\n * package - The package from which the data came\n * from_cache - Whether not cached data was retrieved\n * __doc__ - The verbatim R documentation.\n\n\n Notes\n -----\n If the R dataset has an integer index. This is reset to be zero-based.\n Otherwise the index is preserved. The caching facilities are dumb. That\n is, no download dates, e-tags, or otherwise identifying information\n is checked to see if the data should be downloaded again or not. If the\n dataset is in the cache, it's used.\n \"\"\"\n # NOTE: use raw github bc html site might not be most up to date\n data_base_url = (\"https://raw.github.com/vincentarelbundock/Rdatasets/\"\n \"master/csv/\"+package+\"/\")\n docs_base_url = (\"https://raw.github.com/vincentarelbundock/Rdatasets/\"\n \"master/doc/\"+package+\"/rst/\")\n cache = _get_cache(cache)\n data, from_cache = _get_data(data_base_url, dataname, cache)\n data = read_csv(data, index_col=0)\n data = _maybe_reset_index(data)\n\n title = _get_dataset_meta(dataname, package, cache)\n doc, _ = _get_data(docs_base_url, dataname, cache, \"rst\")\n\n return Dataset(data=data, __doc__=doc.read(), package=package, title=title,\n from_cache=from_cache)\n\n# The below function were taken from sklearn\n\n\ndef get_data_home(data_home=None):\n \"\"\"Return the path of the statsmodels data dir.\n\n This folder is used by some large dataset loaders to avoid\n downloading the data several times.\n\n By default the data dir is set to a folder named 'statsmodels_data'\n in the user home folder.\n\n Alternatively, it can be set by the 'STATSMODELS_DATA' environment\n variable or programatically by giving an explit folder path. The\n '~' symbol is expanded to the user home folder.\n\n If the folder does not already exist, it is automatically created.\n \"\"\"\n if data_home is None:\n data_home = environ.get('STATSMODELS_DATA',\n join('~', 'statsmodels_data'))\n data_home = expanduser(data_home)\n if not exists(data_home):\n makedirs(data_home)\n return data_home\n\n\ndef clear_data_home(data_home=None):\n \"\"\"Delete all the content of the data home cache.\"\"\"\n data_home = get_data_home(data_home)\n shutil.rmtree(data_home)\n\ndef check_internet():\n \"\"\"Check if internet is available\"\"\"\n try:\n urlopen(\"https://github.com\")\n except URLError as err:\n return False\n return True\n", "path": "statsmodels/datasets/utils.py" } ]
[ { "content": "from statsmodels.compat.numpy import recarray_select\nfrom statsmodels.compat.python import (range, StringIO, urlopen,\n HTTPError, URLError, lrange,\n cPickle, urljoin, BytesIO, long, PY3)\nimport sys\nimport shutil\nfrom os import environ\nfrom os import makedirs\nfrom os.path import expanduser\nfrom os.path import exists\nfrom os.path import join\n\nimport numpy as np\nfrom numpy import array\nfrom pandas import read_csv, DataFrame, Index\n\n\ndef webuse(data, baseurl='http://www.stata-press.com/data/r11/', as_df=True):\n \"\"\"\n Download and return an example dataset from Stata.\n\n Parameters\n ----------\n data : str\n Name of dataset to fetch.\n baseurl : str\n The base URL to the stata datasets.\n as_df : bool\n If True, returns a `pandas.DataFrame`\n\n Returns\n -------\n dta : Record Array\n A record array containing the Stata dataset.\n\n Examples\n --------\n >>> dta = webuse('auto')\n\n Notes\n -----\n Make sure baseurl has trailing forward slash. Doesn't do any\n error checking in response URLs.\n \"\"\"\n # lazy imports\n from statsmodels.iolib import genfromdta\n\n url = urljoin(baseurl, data+'.dta')\n dta = urlopen(url)\n dta = BytesIO(dta.read()) # make it truly file-like\n if as_df: # could make this faster if we don't process dta twice?\n return DataFrame.from_records(genfromdta(dta))\n else:\n return genfromdta(dta)\n\n\nclass Dataset(dict):\n def __init__(self, **kw):\n # define some default attributes, so pylint can find them\n self.endog = None\n self.exog = None\n self.data = None\n self.names = None\n\n dict.__init__(self, kw)\n self.__dict__ = self\n # Some datasets have string variables. If you want a raw_data\n # attribute you must create this in the dataset's load function.\n try: # some datasets have string variables\n self.raw_data = self.data.view((float, len(self.names)))\n except:\n pass\n\n def __repr__(self):\n return str(self.__class__)\n\n\ndef process_recarray(data, endog_idx=0, exog_idx=None, stack=True, dtype=None):\n names = list(data.dtype.names)\n\n if isinstance(endog_idx, (int, long)):\n endog = array(data[names[endog_idx]], dtype=dtype)\n endog_name = names[endog_idx]\n endog_idx = [endog_idx]\n else:\n endog_name = [names[i] for i in endog_idx]\n\n if stack:\n endog = np.column_stack(data[field] for field in endog_name)\n else:\n endog = data[endog_name]\n\n if exog_idx is None:\n exog_name = [names[i] for i in range(len(names))\n if i not in endog_idx]\n else:\n exog_name = [names[i] for i in exog_idx]\n\n if stack:\n exog = np.column_stack(data[field] for field in exog_name)\n else:\n exog = recarray_select(data, exog_name)\n\n if dtype:\n endog = endog.astype(dtype)\n exog = exog.astype(dtype)\n\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name=endog_name, exog_name=exog_name)\n\n return dataset\n\n\ndef process_recarray_pandas(data, endog_idx=0, exog_idx=None, dtype=None,\n index_idx=None):\n\n data = DataFrame(data, dtype=dtype)\n names = data.columns\n\n if isinstance(endog_idx, (int, long)):\n endog_name = names[endog_idx]\n endog = data[endog_name]\n if exog_idx is None:\n exog = data.drop([endog_name], axis=1)\n else:\n exog = data.filter(names[exog_idx])\n else:\n endog = data.ix[:, endog_idx]\n endog_name = list(endog.columns)\n if exog_idx is None:\n exog = data.drop(endog_name, axis=1)\n elif isinstance(exog_idx, (int, long)):\n exog = data.filter([names[exog_idx]])\n else:\n exog = data.filter(names[exog_idx])\n\n if index_idx is not None: # NOTE: will have to be improved for 
dates\n endog.index = Index(data.ix[:, index_idx])\n exog.index = Index(data.ix[:, index_idx])\n data = data.set_index(names[index_idx])\n\n exog_name = list(exog.columns)\n dataset = Dataset(data=data, names=list(names), endog=endog, exog=exog,\n endog_name=endog_name, exog_name=exog_name)\n return dataset\n\n\ndef _maybe_reset_index(data):\n \"\"\"\n All the Rdatasets have the integer row.labels from R if there is no\n real index. Strip this for a zero-based index\n \"\"\"\n if data.index.equals(Index(lrange(1, len(data) + 1))):\n data = data.reset_index(drop=True)\n return data\n\n\ndef _get_cache(cache):\n if cache is False:\n # do not do any caching or load from cache\n cache = None\n elif cache is True: # use default dir for cache\n cache = get_data_home(None)\n else:\n cache = get_data_home(cache)\n return cache\n\n\ndef _cache_it(data, cache_path):\n if PY3:\n # for some reason encode(\"zip\") won't work for me in Python 3?\n import zlib\n # use protocol 2 so can open with python 2.x if cached in 3.x\n open(cache_path, \"wb\").write(zlib.compress(cPickle.dumps(data,\n protocol=2)))\n else:\n open(cache_path, \"wb\").write(cPickle.dumps(data).encode(\"zip\"))\n\n\ndef _open_cache(cache_path):\n if PY3:\n # NOTE: don't know why but decode('zip') doesn't work on my\n # Python 3 build\n import zlib\n data = zlib.decompress(open(cache_path, 'rb').read())\n # return as bytes object encoded in utf-8 for cross-compat of cached\n data = cPickle.loads(data).encode('utf-8')\n else:\n data = open(cache_path, 'rb').read().decode('zip')\n data = cPickle.loads(data)\n return data\n\n\ndef _urlopen_cached(url, cache):\n \"\"\"\n Tries to load data from cache location otherwise downloads it. If it\n downloads the data and cache is not None then it will put the downloaded\n data in the cache path.\n \"\"\"\n from_cache = False\n if cache is not None:\n cache_path = join(cache,\n url.split(\"://\")[-1].replace('/', ',') + \".zip\")\n try:\n data = _open_cache(cache_path)\n from_cache = True\n except:\n pass\n\n # not using the cache or didn't find it in cache\n if not from_cache:\n data = urlopen(url).read()\n if cache is not None: # then put it in the cache\n _cache_it(data, cache_path)\n return data, from_cache\n\n\ndef _get_data(base_url, dataname, cache, extension=\"csv\"):\n url = base_url + (dataname + \".%s\") % extension\n try:\n data, from_cache = _urlopen_cached(url, cache)\n except HTTPError as err:\n if '404' in str(err):\n raise ValueError(\"Dataset %s was not found.\" % dataname)\n else:\n raise err\n\n data = data.decode('utf-8', 'strict')\n return StringIO(data), from_cache\n\n\ndef _get_dataset_meta(dataname, package, cache):\n # get the index, you'll probably want this cached because you have\n # to download info about all the data to get info about any of the data...\n index_url = (\"https://raw.github.com/vincentarelbundock/Rdatasets/master/\"\n \"datasets.csv\")\n data, _ = _urlopen_cached(index_url, cache)\n # Python 3\n if PY3: # pragma: no cover\n data = data.decode('utf-8', 'strict')\n index = read_csv(StringIO(data))\n idx = np.logical_and(index.Item == dataname, index.Package == package)\n dataset_meta = index.ix[idx]\n return dataset_meta[\"Title\"].item()\n\n\ndef get_rdataset(dataname, package=\"datasets\", cache=False):\n \"\"\"download and return R dataset\n\n Parameters\n ----------\n dataname : str\n The name of the dataset you want to download\n package : str\n The package in which the dataset is found. 
The default is the core\n 'datasets' package.\n cache : bool or str\n If True, will download this data into the STATSMODELS_DATA folder.\n The default location is a folder called statsmodels_data in the\n user home folder. Otherwise, you can specify a path to a folder to\n use for caching the data. If False, the data will not be cached.\n\n Returns\n -------\n dataset : Dataset instance\n A `statsmodels.data.utils.Dataset` instance. This objects has\n attributes::\n\n * data - A pandas DataFrame containing the data\n * title - The dataset title\n * package - The package from which the data came\n * from_cache - Whether not cached data was retrieved\n * __doc__ - The verbatim R documentation.\n\n\n Notes\n -----\n If the R dataset has an integer index. This is reset to be zero-based.\n Otherwise the index is preserved. The caching facilities are dumb. That\n is, no download dates, e-tags, or otherwise identifying information\n is checked to see if the data should be downloaded again or not. If the\n dataset is in the cache, it's used.\n \"\"\"\n # NOTE: use raw github bc html site might not be most up to date\n data_base_url = (\"https://raw.github.com/vincentarelbundock/Rdatasets/\"\n \"master/csv/\"+package+\"/\")\n docs_base_url = (\"https://raw.github.com/vincentarelbundock/Rdatasets/\"\n \"master/doc/\"+package+\"/rst/\")\n cache = _get_cache(cache)\n data, from_cache = _get_data(data_base_url, dataname, cache)\n data = read_csv(data, index_col=0)\n data = _maybe_reset_index(data)\n\n title = _get_dataset_meta(dataname, package, cache)\n doc, _ = _get_data(docs_base_url, dataname, cache, \"rst\")\n\n return Dataset(data=data, __doc__=doc.read(), package=package, title=title,\n from_cache=from_cache)\n\n# The below function were taken from sklearn\n\n\ndef get_data_home(data_home=None):\n \"\"\"Return the path of the statsmodels data dir.\n\n This folder is used by some large dataset loaders to avoid\n downloading the data several times.\n\n By default the data dir is set to a folder named 'statsmodels_data'\n in the user home folder.\n\n Alternatively, it can be set by the 'STATSMODELS_DATA' environment\n variable or programatically by giving an explit folder path. The\n '~' symbol is expanded to the user home folder.\n\n If the folder does not already exist, it is automatically created.\n \"\"\"\n if data_home is None:\n data_home = environ.get('STATSMODELS_DATA',\n join('~', 'statsmodels_data'))\n data_home = expanduser(data_home)\n if not exists(data_home):\n makedirs(data_home)\n return data_home\n\n\ndef clear_data_home(data_home=None):\n \"\"\"Delete all the content of the data home cache.\"\"\"\n data_home = get_data_home(data_home)\n shutil.rmtree(data_home)\n\ndef check_internet(url=None):\n \"\"\"Check if internet is available\"\"\"\n url = \"https://github.com\" if url is None else url\n try:\n urlopen(url)\n except URLError as err:\n return False\n return True\n", "path": "statsmodels/datasets/utils.py" } ]
diff --git a/statsmodels/datasets/tests/test_utils.py b/statsmodels/datasets/tests/test_utils.py index 23115b5142f..35671743d9f 100644 --- a/statsmodels/datasets/tests/test_utils.py +++ b/statsmodels/datasets/tests/test_utils.py @@ -1,38 +1,48 @@ from statsmodels.compat.python import PY3 import os -from statsmodels.datasets import get_rdataset, webuse, check_internet + +from nose import SkipTest from numpy.testing import assert_, assert_array_equal, dec +from statsmodels.datasets import get_rdataset, webuse, check_internet, utils + cur_dir = os.path.dirname(os.path.abspath(__file__)) -dec.skipif(PY3, 'Not testable on Python 3.x') def test_get_rdataset(): # smoke test + test_url = "https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/master/csv/datasets/cars.csv" + internet_available = check_internet(test_url) + if not internet_available: + raise SkipTest('Unable to retrieve file - skipping test') + duncan = get_rdataset("Duncan", "car", cache=cur_dir) + assert_(isinstance(duncan, utils.Dataset)) if not PY3: #NOTE: there's no way to test both since the cached files were #created with Python 2.x, they're strings, but Python 3 expects #bytes and the index file path is hard-coded so both can't live #side by side - duncan = get_rdataset("Duncan", "car", cache=cur_dir) assert_(duncan.from_cache) -#internet_available = check_internet() -#@dec.skipif(not internet_available) -def t_est_webuse(): +def test_webuse(): # test copied and adjusted from iolib/tests/test_foreign from statsmodels.iolib.tests.results.macrodata import macrodata_result as res2 - #base_gh = "http://github.com/statsmodels/statsmodels/raw/master/statsmodels/datasets/macrodata/" - base_gh = "http://www.statsmodels.org/devel/_static/" + base_gh = "http://github.com/statsmodels/statsmodels/raw/master/statsmodels/datasets/macrodata/" + internet_available = check_internet(base_gh) + if not internet_available: + raise SkipTest('Unable to retrieve file - skipping test') res1 = webuse('macrodata', baseurl=base_gh, as_df=False) - assert_array_equal(res1 == res2, True) + assert_array_equal(res1, res2) + -#@dec.skipif(not internet_available) -def t_est_webuse_pandas(): +def test_webuse_pandas(): # test copied and adjusted from iolib/tests/test_foreign from pandas.util.testing import assert_frame_equal from statsmodels.datasets import macrodata dta = macrodata.load_pandas().data base_gh = "http://github.com/statsmodels/statsmodels/raw/master/statsmodels/datasets/macrodata/" + internet_available = check_internet(base_gh) + if not internet_available: + raise SkipTest('Unable to retrieve file - skipping test') res1 = webuse('macrodata', baseurl=base_gh) res1 = res1.astype(float) assert_frame_equal(res1, dta) diff --git a/statsmodels/datasets/utils.py b/statsmodels/datasets/utils.py index b5a223d73d6..ca1fb64a8cc 100644 --- a/statsmodels/datasets/utils.py +++ b/statsmodels/datasets/utils.py @@ -329,10 +329,11 @@ def clear_data_home(data_home=None): data_home = get_data_home(data_home) shutil.rmtree(data_home) -def check_internet(): +def check_internet(url=None): """Check if internet is available""" + url = "https://github.com" if url is None else url try: - urlopen("https://github.com") + urlopen(url) except URLError as err: return False return True
TST disable test_webuse again See #2233: one file is being downloaded from SourceForge, which has frequent connection problems; the download from our source on GitHub worked without problems. But I want to get 0.7 out instead of being distracted by network problems.
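A minimal sketch of the skip-on-no-network pattern the diff above introduces, assuming Python 3's `urllib` rather than the statsmodels compat shims; `test_webuse_like` is a hypothetical stand-in for the real nose tests, which raise `SkipTest` instead of printing.

```python
from urllib.error import URLError
from urllib.request import urlopen


def check_internet(url=None):
    """Return True if `url` (default: https://github.com) is reachable."""
    url = "https://github.com" if url is None else url
    try:
        urlopen(url, timeout=10)
    except URLError:
        # HTTPError subclasses URLError, so 4xx/5xx also lead to a skip
        return False
    return True


def test_webuse_like():
    # probe the exact location the test needs, not just "the internet"
    base_gh = ("http://github.com/statsmodels/statsmodels/raw/master/"
               "statsmodels/datasets/macrodata/")
    if not check_internet(base_gh):
        print("Unable to retrieve file - skipping test")
        return
    # ... download macrodata from base_gh and compare with stored results


if __name__ == '__main__':
    test_webuse_like()
```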
geopandas__geopandas-94
[ { "content": "try:\n from collections import OrderedDict\nexcept ImportError:\n # Python 2.6\n from ordereddict import OrderedDict\nfrom collections import defaultdict\nimport json\nimport os\nimport sys\n\nimport numpy as np\nfrom pandas import DataFrame, Series\nfrom shapely.geometry import mapping, shape\nfrom shapely.geometry.base import BaseGeometry\nfrom six import string_types\nfrom six import string_types, iteritems\n\nfrom geopandas import GeoSeries\nfrom geopandas.base import GeoPandasBase\nfrom geopandas.plotting import plot_dataframe\nimport geopandas.io\n\n\nDEFAULT_GEO_COLUMN_NAME = 'geometry'\nPY3 = sys.version_info[0] == 3\n\n\nclass GeoDataFrame(GeoPandasBase, DataFrame):\n \"\"\"\n A GeoDataFrame object is a pandas.DataFrame that has a column\n with geometry. In addition to the standard DataFrame constructor arguments,\n GeoDataFrame also accepts the following keyword arguments:\n\n Keyword Arguments\n -----------------\n crs : str (optional)\n Coordinate system\n geometry : str or array (optional)\n If str, column to use as geometry. If array, will be set as 'geometry'\n column on GeoDataFrame.\n \"\"\"\n _metadata = ['crs', '_geometry_column_name']\n _geometry_column_name = DEFAULT_GEO_COLUMN_NAME\n\n def __init__(self, *args, **kwargs):\n crs = kwargs.pop('crs', None)\n geometry = kwargs.pop('geometry', None)\n super(GeoDataFrame, self).__init__(*args, **kwargs)\n self.crs = crs\n if geometry is not None:\n self.set_geometry(geometry, inplace=True)\n\n def __setattr__(self, attr, val):\n # have to special case geometry b/c pandas tries to use as column...\n if attr == 'geometry':\n object.__setattr__(self, attr, val)\n else:\n super(GeoDataFrame, self).__setattr__(attr, val)\n\n def _get_geometry(self):\n if self._geometry_column_name not in self:\n raise AttributeError(\"No geometry data set yet (expected in\"\n \" column '%s'.\" % self._geometry_column_name)\n return self[self._geometry_column_name]\n\n def _set_geometry(self, col):\n # TODO: Use pandas' core.common.is_list_like() here.\n if not isinstance(col, (list, np.ndarray, Series)):\n raise ValueError(\"Must use a list-like to set the geometry\"\n \" property\")\n\n self.set_geometry(col, inplace=True)\n\n geometry = property(fget=_get_geometry, fset=_set_geometry,\n doc=\"Geometry data for GeoDataFrame\")\n\n def set_geometry(self, col, drop=False, inplace=False, crs=None):\n \"\"\"\n Set the GeoDataFrame geometry using either an existing column or\n the specified input. By default yields a new object.\n\n The original geometry column is replaced with the input.\n\n Parameters\n ----------\n keys : column label or array\n drop : boolean, default True\n Delete column to be used as the new geometry\n inplace : boolean, default False\n Modify the GeoDataFrame in place (do not create a new object)\n crs : str/result of fion.get_crs (optional)\n Coordinate system to use. If passed, overrides both DataFrame and\n col's crs. 
Otherwise, tries to get crs from passed col values or\n DataFrame.\n\n Examples\n --------\n >>> df1 = df.set_geometry([Point(0,0), Point(1,1), Point(2,2)])\n >>> df2 = df.set_geometry('geom1')\n\n Returns\n -------\n geodataframe : GeoDataFrame\n \"\"\"\n # Most of the code here is taken from DataFrame.set_index()\n if inplace:\n frame = self\n else:\n frame = self.copy()\n\n if not crs:\n crs = getattr(col, 'crs', self.crs)\n\n to_remove = None\n geo_column_name = DEFAULT_GEO_COLUMN_NAME\n if isinstance(col, Series):\n level = col.values\n elif isinstance(col, (list, np.ndarray)):\n level = col\n elif hasattr(col, 'ndim') and col.ndim != 1:\n raise ValueError(\"Must pass array with one dimension only.\")\n else:\n try:\n level = frame[col].values\n except KeyError:\n raise ValueError(\"Unknown column %s\" % col)\n except:\n raise\n if drop:\n to_remove = col\n geo_column_name = DEFAULT_GEO_COLUMN_NAME\n else:\n geo_column_name = col\n\n if to_remove:\n del frame[to_remove]\n\n if isinstance(level, GeoSeries) and level.crs != crs:\n # Avoids caching issues/crs sharing issues\n level = level.copy()\n level.crs = crs\n\n # Check that we are using a listlike of geometries\n if not all(isinstance(item, BaseGeometry) for item in level):\n raise TypeError(\"Input geometry column must contain valid geometry objects.\")\n frame[geo_column_name] = level\n frame._geometry_column_name = geo_column_name\n frame.crs = crs\n\n if not inplace:\n return frame\n\n @classmethod\n def from_file(cls, filename, **kwargs):\n \"\"\"\n Alternate constructor to create a GeoDataFrame from a file.\n \n Example:\n df = geopandas.GeoDataFrame.from_file('nybb.shp')\n\n Wraps geopandas.read_file(). For additional help, see read_file()\n\n \"\"\"\n return geopandas.io.file.read_file(filename, **kwargs)\n\n @classmethod\n def from_features(cls, features, crs=None):\n \"\"\"\n Alternate constructor to create GeoDataFrame from an iterable of\n features. Each element must be a feature dictionary or implement\n the __geo_interface__.\n See: https://gist.github.com/sgillies/2217756\n\n Note: This method does not attempt to align rows. Properties that are\n not present in all features of the source file will not be properly\n aligned. This should be fixed.\n\n \"\"\"\n geoms = []\n columns = defaultdict(lambda: [])\n for f in features:\n if hasattr(f, \"__geo_interface__\"):\n f = f.__geo_interface__\n else:\n f = f\n\n geoms.append(shape(f['geometry']))\n for key, value in f['properties'].iteritems():\n columns[key].append(value)\n geom = GeoSeries(geoms)\n df = GeoDataFrame(columns)\n df['geometry'] = geom\n df.crs = crs\n return df\n\n @classmethod\n def from_postgis(cls, sql, con, geom_col='geom', crs=None, index_col=None,\n coerce_float=True, params=None):\n \"\"\"\n Alternate constructor to create a GeoDataFrame from a sql query\n containing a geometry column.\n\n Example:\n df = geopandas.GeoDataFrame.from_postgis(con,\n \"SELECT geom, highway FROM roads;\")\n\n Wraps geopandas.read_postgis(). For additional help, see read_postgis()\n\n \"\"\"\n return geopandas.io.sql.read_postgis(sql, con, geom_col, crs, index_col, \n coerce_float, params)\n\n\n def to_json(self, na='null', **kwargs):\n \"\"\"Returns a GeoJSON representation of the GeoDataFrame.\n\n Parameters\n ----------\n na : {'null', 'drop', 'keep'}, default 'null'\n Indicates how to output missing (NaN) values in the GeoDataFrame\n * null: ouput the missing entries as JSON null\n * drop: remove the property from the feature. 
This applies to\n each feature individually so that features may have\n different properties\n * keep: output the missing entries as NaN\n \n The remaining *kwargs* are passed to json.dumps().\n \"\"\"\n def fill_none(row):\n \"\"\"\n Takes in a Series, converts to a dictionary with null values\n set to None\n\n \"\"\"\n na_keys = row.index[row.isnull()]\n d = row.to_dict()\n for k in na_keys:\n d[k] = None\n return d\n\n # na_methods must take in a Series and return dict-like\n na_methods = {'null': fill_none,\n 'drop': lambda row: row.dropna(),\n 'keep': lambda row: row}\n\n if na not in na_methods:\n raise ValueError('Unknown na method {}'.format(na))\n f = na_methods[na]\n\n def feature(i, row):\n row = f(row)\n return {\n 'id': str(i),\n 'type': 'Feature',\n 'properties':\n dict((k, v) for k, v in iteritems(row) if k != self._geometry_column_name),\n 'geometry': mapping(row[self._geometry_column_name]) }\n\n return json.dumps(\n {'type': 'FeatureCollection',\n 'features': [feature(i, row) for i, row in self.iterrows()]},\n **kwargs )\n \n def to_file(self, filename, driver=\"ESRI Shapefile\", **kwargs):\n \"\"\"\n Write this GeoDataFrame to an OGR data source\n \n A dictionary of supported OGR providers is available via:\n >>> import fiona\n >>> fiona.supported_drivers\n\n Parameters\n ----------\n filename : string \n File path or file handle to write to.\n driver : string, default 'ESRI Shapefile'\n The OGR format driver used to write the vector file.\n\n The *kwargs* are passed to fiona.open and can be used to write \n to multi-layer data, store data within archives (zip files), etc.\n \"\"\"\n import fiona\n def convert_type(in_type):\n if in_type == object:\n return 'str'\n return type(np.asscalar(np.zeros(1, in_type))).__name__\n \n def feature(i, row):\n return {\n 'id': str(i),\n 'type': 'Feature',\n 'properties':\n dict((k, v) for k, v in iteritems(row) if k != 'geometry'),\n 'geometry': mapping(row['geometry']) }\n \n properties = OrderedDict([(col, convert_type(_type)) for col, _type \n in zip(self.columns, self.dtypes) if col!='geometry'])\n # Need to check geom_types before we write to file... \n # Some (most?) providers expect a single geometry type: \n # Point, LineString, or Polygon\n geom_types = self['geometry'].geom_type.unique()\n from os.path import commonprefix # To find longest common prefix\n geom_type = commonprefix([g[::-1] for g in geom_types])[::-1] # Reverse\n if geom_type == '': # No common suffix = mixed geometry types\n raise ValueError(\"Geometry column cannot contains mutiple \"\n \"geometry types when writing to file.\")\n schema = {'geometry': geom_type, 'properties': properties}\n filename = os.path.abspath(os.path.expanduser(filename))\n with fiona.open(filename, 'w', driver=driver, crs=self.crs, \n schema=schema, **kwargs) as c:\n for i, row in self.iterrows():\n c.write(feature(i, row))\n\n def to_crs(self, crs=None, epsg=None, inplace=False):\n \"\"\"Transform geometries to a new coordinate reference system\n\n This method will transform all points in all objects. It has\n no notion or projecting entire geometries. All segments\n joining points are assumed to be lines in the current\n projection, not geodesics. 
Objects crossing the dateline (or\n other projection boundary) will have undesirable behavior.\n \"\"\"\n if inplace:\n df = self\n else:\n df = self.copy()\n geom = df.geometry.to_crs(crs=crs, epsg=epsg)\n df.geometry = geom\n df.crs = geom.crs\n if not inplace:\n return df\n\n def __getitem__(self, key):\n \"\"\"\n If the result is a column containing only 'geometry', return a\n GeoSeries. If it's a DataFrame with a 'geometry' column, return a\n GeoDataFrame.\n \"\"\"\n result = super(GeoDataFrame, self).__getitem__(key)\n geo_col = self._geometry_column_name\n if isinstance(key, string_types) and key == geo_col:\n result.__class__ = GeoSeries\n result.crs = self.crs\n elif isinstance(result, DataFrame) and geo_col in result:\n result.__class__ = GeoDataFrame\n result.crs = self.crs\n result._geometry_column_name = geo_col\n elif isinstance(result, DataFrame) and geo_col not in result:\n result.__class__ = DataFrame\n result.crs = self.crs\n return result\n\n #\n # Implement pandas methods\n #\n\n @property\n def _constructor(self):\n return GeoDataFrame\n\n def __finalize__(self, other, method=None, **kwargs):\n \"\"\" propagate metadata from other to self \"\"\"\n # NOTE: backported from pandas master (upcoming v0.13)\n for name in self._metadata:\n object.__setattr__(self, name, getattr(other, name, None))\n return self\n\n def copy(self, deep=True):\n \"\"\"\n Make a copy of this GeoDataFrame object\n\n Parameters\n ----------\n deep : boolean, default True\n Make a deep copy, i.e. also copy data\n\n Returns\n -------\n copy : GeoDataFrame\n \"\"\"\n # FIXME: this will likely be unnecessary in pandas >= 0.13\n data = self._data\n if deep:\n data = data.copy()\n return GeoDataFrame(data).__finalize__(self)\n\n def plot(self, *args, **kwargs):\n return plot_dataframe(self, *args, **kwargs)\n\ndef _dataframe_set_geometry(self, col, drop=False, inplace=False, crs=None):\n if inplace:\n raise ValueError(\"Can't do inplace setting when converting from\"\n \" DataFrame to GeoDataFrame\")\n gf = GeoDataFrame(self)\n # this will copy so that BlockManager gets copied\n return gf.set_geometry(col, drop=drop, inplace=False, crs=crs)\n\nif PY3:\n DataFrame.set_geometry = _dataframe_set_geometry\nelse:\n import types\n DataFrame.set_geometry = types.MethodType(_dataframe_set_geometry, None,\n DataFrame)\n", "path": "geopandas/geodataframe.py" } ]
[ { "content": "try:\n from collections import OrderedDict\nexcept ImportError:\n # Python 2.6\n from ordereddict import OrderedDict\nfrom collections import defaultdict\nimport json\nimport os\nimport sys\n\nimport numpy as np\nfrom pandas import DataFrame, Series\nfrom shapely.geometry import mapping, shape\nfrom shapely.geometry.base import BaseGeometry\nfrom six import string_types\nfrom six import string_types, iteritems\n\nfrom geopandas import GeoSeries\nfrom geopandas.base import GeoPandasBase\nfrom geopandas.plotting import plot_dataframe\nimport geopandas.io\n\n\nDEFAULT_GEO_COLUMN_NAME = 'geometry'\nPY3 = sys.version_info[0] == 3\n\n\nclass GeoDataFrame(GeoPandasBase, DataFrame):\n \"\"\"\n A GeoDataFrame object is a pandas.DataFrame that has a column\n with geometry. In addition to the standard DataFrame constructor arguments,\n GeoDataFrame also accepts the following keyword arguments:\n\n Keyword Arguments\n -----------------\n crs : str (optional)\n Coordinate system\n geometry : str or array (optional)\n If str, column to use as geometry. If array, will be set as 'geometry'\n column on GeoDataFrame.\n \"\"\"\n _metadata = ['crs', '_geometry_column_name']\n _geometry_column_name = DEFAULT_GEO_COLUMN_NAME\n\n def __init__(self, *args, **kwargs):\n crs = kwargs.pop('crs', None)\n geometry = kwargs.pop('geometry', None)\n super(GeoDataFrame, self).__init__(*args, **kwargs)\n self.crs = crs\n if geometry is not None:\n self.set_geometry(geometry, inplace=True)\n\n def __setattr__(self, attr, val):\n # have to special case geometry b/c pandas tries to use as column...\n if attr == 'geometry':\n object.__setattr__(self, attr, val)\n else:\n super(GeoDataFrame, self).__setattr__(attr, val)\n\n def _get_geometry(self):\n if self._geometry_column_name not in self:\n raise AttributeError(\"No geometry data set yet (expected in\"\n \" column '%s'.\" % self._geometry_column_name)\n return self[self._geometry_column_name]\n\n def _set_geometry(self, col):\n # TODO: Use pandas' core.common.is_list_like() here.\n if not isinstance(col, (list, np.ndarray, Series)):\n raise ValueError(\"Must use a list-like to set the geometry\"\n \" property\")\n\n self.set_geometry(col, inplace=True)\n\n geometry = property(fget=_get_geometry, fset=_set_geometry,\n doc=\"Geometry data for GeoDataFrame\")\n\n def set_geometry(self, col, drop=False, inplace=False, crs=None):\n \"\"\"\n Set the GeoDataFrame geometry using either an existing column or\n the specified input. By default yields a new object.\n\n The original geometry column is replaced with the input.\n\n Parameters\n ----------\n keys : column label or array\n drop : boolean, default True\n Delete column to be used as the new geometry\n inplace : boolean, default False\n Modify the GeoDataFrame in place (do not create a new object)\n crs : str/result of fion.get_crs (optional)\n Coordinate system to use. If passed, overrides both DataFrame and\n col's crs. 
Otherwise, tries to get crs from passed col values or\n DataFrame.\n\n Examples\n --------\n >>> df1 = df.set_geometry([Point(0,0), Point(1,1), Point(2,2)])\n >>> df2 = df.set_geometry('geom1')\n\n Returns\n -------\n geodataframe : GeoDataFrame\n \"\"\"\n # Most of the code here is taken from DataFrame.set_index()\n if inplace:\n frame = self\n else:\n frame = self.copy()\n\n if not crs:\n crs = getattr(col, 'crs', self.crs)\n\n to_remove = None\n geo_column_name = DEFAULT_GEO_COLUMN_NAME\n if isinstance(col, Series):\n level = col.values\n elif isinstance(col, (list, np.ndarray)):\n level = col\n elif hasattr(col, 'ndim') and col.ndim != 1:\n raise ValueError(\"Must pass array with one dimension only.\")\n else:\n try:\n level = frame[col].values\n except KeyError:\n raise ValueError(\"Unknown column %s\" % col)\n except:\n raise\n if drop:\n to_remove = col\n geo_column_name = DEFAULT_GEO_COLUMN_NAME\n else:\n geo_column_name = col\n\n if to_remove:\n del frame[to_remove]\n\n if isinstance(level, GeoSeries) and level.crs != crs:\n # Avoids caching issues/crs sharing issues\n level = level.copy()\n level.crs = crs\n\n # Check that we are using a listlike of geometries\n if not all(isinstance(item, BaseGeometry) for item in level):\n raise TypeError(\"Input geometry column must contain valid geometry objects.\")\n frame[geo_column_name] = level\n frame._geometry_column_name = geo_column_name\n frame.crs = crs\n\n if not inplace:\n return frame\n\n @classmethod\n def from_file(cls, filename, **kwargs):\n \"\"\"\n Alternate constructor to create a GeoDataFrame from a file.\n \n Example:\n df = geopandas.GeoDataFrame.from_file('nybb.shp')\n\n Wraps geopandas.read_file(). For additional help, see read_file()\n\n \"\"\"\n return geopandas.io.file.read_file(filename, **kwargs)\n\n @classmethod\n def from_features(cls, features, crs=None):\n \"\"\"\n Alternate constructor to create GeoDataFrame from an iterable of\n features. Each element must be a feature dictionary or implement\n the __geo_interface__.\n See: https://gist.github.com/sgillies/2217756\n\n Note: This method does not attempt to align rows. Properties that are\n not present in all features of the source file will not be properly\n aligned. This should be fixed.\n\n \"\"\"\n geoms = []\n columns = defaultdict(lambda: [])\n for f in features:\n if hasattr(f, \"__geo_interface__\"):\n f = f.__geo_interface__\n else:\n f = f\n\n geoms.append(shape(f['geometry']))\n for key, value in f['properties'].iteritems():\n columns[key].append(value)\n geom = GeoSeries(geoms)\n df = GeoDataFrame(columns)\n df['geometry'] = geom\n df.crs = crs\n return df\n\n @classmethod\n def from_postgis(cls, sql, con, geom_col='geom', crs=None, index_col=None,\n coerce_float=True, params=None):\n \"\"\"\n Alternate constructor to create a GeoDataFrame from a sql query\n containing a geometry column.\n\n Example:\n df = geopandas.GeoDataFrame.from_postgis(con,\n \"SELECT geom, highway FROM roads;\")\n\n Wraps geopandas.read_postgis(). For additional help, see read_postgis()\n\n \"\"\"\n return geopandas.io.sql.read_postgis(sql, con, geom_col, crs, index_col, \n coerce_float, params)\n\n\n def to_json(self, na='null', **kwargs):\n \"\"\"Returns a GeoJSON representation of the GeoDataFrame.\n\n Parameters\n ----------\n na : {'null', 'drop', 'keep'}, default 'null'\n Indicates how to output missing (NaN) values in the GeoDataFrame\n * null: ouput the missing entries as JSON null\n * drop: remove the property from the feature. 
This applies to\n each feature individually so that features may have\n different properties\n * keep: output the missing entries as NaN\n \n The remaining *kwargs* are passed to json.dumps().\n \"\"\"\n def fill_none(row):\n \"\"\"\n Takes in a Series, converts to a dictionary with null values\n set to None\n\n \"\"\"\n na_keys = row.index[row.isnull()]\n d = row.to_dict()\n for k in na_keys:\n d[k] = None\n return d\n\n # na_methods must take in a Series and return dict-like\n na_methods = {'null': fill_none,\n 'drop': lambda row: row.dropna(),\n 'keep': lambda row: row}\n\n if na not in na_methods:\n raise ValueError('Unknown na method {}'.format(na))\n f = na_methods[na]\n\n def feature(i, row):\n row = f(row)\n return {\n 'id': str(i),\n 'type': 'Feature',\n 'properties':\n dict((k, v) for k, v in iteritems(row) if k != self._geometry_column_name),\n 'geometry': mapping(row[self._geometry_column_name]) }\n\n return json.dumps(\n {'type': 'FeatureCollection',\n 'features': [feature(i, row) for i, row in self.iterrows()]},\n **kwargs )\n \n def to_file(self, filename, driver=\"ESRI Shapefile\", **kwargs):\n \"\"\"\n Write this GeoDataFrame to an OGR data source\n \n A dictionary of supported OGR providers is available via:\n >>> import fiona\n >>> fiona.supported_drivers\n\n Parameters\n ----------\n filename : string \n File path or file handle to write to.\n driver : string, default 'ESRI Shapefile'\n The OGR format driver used to write the vector file.\n\n The *kwargs* are passed to fiona.open and can be used to write \n to multi-layer data, store data within archives (zip files), etc.\n \"\"\"\n import fiona\n def convert_type(in_type):\n if in_type == object:\n return 'str'\n out_type = type(np.asscalar(np.zeros(1, in_type))).__name__\n if out_type == 'long':\n out_type = 'int'\n return out_type\n \n def feature(i, row):\n return {\n 'id': str(i),\n 'type': 'Feature',\n 'properties':\n dict((k, v) for k, v in iteritems(row) if k != 'geometry'),\n 'geometry': mapping(row['geometry']) }\n \n properties = OrderedDict([(col, convert_type(_type)) for col, _type \n in zip(self.columns, self.dtypes) if col!='geometry'])\n # Need to check geom_types before we write to file... \n # Some (most?) providers expect a single geometry type: \n # Point, LineString, or Polygon\n geom_types = self['geometry'].geom_type.unique()\n from os.path import commonprefix # To find longest common prefix\n geom_type = commonprefix([g[::-1] for g in geom_types])[::-1] # Reverse\n if geom_type == '': # No common suffix = mixed geometry types\n raise ValueError(\"Geometry column cannot contains mutiple \"\n \"geometry types when writing to file.\")\n schema = {'geometry': geom_type, 'properties': properties}\n filename = os.path.abspath(os.path.expanduser(filename))\n with fiona.open(filename, 'w', driver=driver, crs=self.crs, \n schema=schema, **kwargs) as c:\n for i, row in self.iterrows():\n c.write(feature(i, row))\n\n def to_crs(self, crs=None, epsg=None, inplace=False):\n \"\"\"Transform geometries to a new coordinate reference system\n\n This method will transform all points in all objects. It has\n no notion or projecting entire geometries. All segments\n joining points are assumed to be lines in the current\n projection, not geodesics. 
Objects crossing the dateline (or\n other projection boundary) will have undesirable behavior.\n \"\"\"\n if inplace:\n df = self\n else:\n df = self.copy()\n geom = df.geometry.to_crs(crs=crs, epsg=epsg)\n df.geometry = geom\n df.crs = geom.crs\n if not inplace:\n return df\n\n def __getitem__(self, key):\n \"\"\"\n If the result is a column containing only 'geometry', return a\n GeoSeries. If it's a DataFrame with a 'geometry' column, return a\n GeoDataFrame.\n \"\"\"\n result = super(GeoDataFrame, self).__getitem__(key)\n geo_col = self._geometry_column_name\n if isinstance(key, string_types) and key == geo_col:\n result.__class__ = GeoSeries\n result.crs = self.crs\n elif isinstance(result, DataFrame) and geo_col in result:\n result.__class__ = GeoDataFrame\n result.crs = self.crs\n result._geometry_column_name = geo_col\n elif isinstance(result, DataFrame) and geo_col not in result:\n result.__class__ = DataFrame\n result.crs = self.crs\n return result\n\n #\n # Implement pandas methods\n #\n\n @property\n def _constructor(self):\n return GeoDataFrame\n\n def __finalize__(self, other, method=None, **kwargs):\n \"\"\" propagate metadata from other to self \"\"\"\n # NOTE: backported from pandas master (upcoming v0.13)\n for name in self._metadata:\n object.__setattr__(self, name, getattr(other, name, None))\n return self\n\n def copy(self, deep=True):\n \"\"\"\n Make a copy of this GeoDataFrame object\n\n Parameters\n ----------\n deep : boolean, default True\n Make a deep copy, i.e. also copy data\n\n Returns\n -------\n copy : GeoDataFrame\n \"\"\"\n # FIXME: this will likely be unnecessary in pandas >= 0.13\n data = self._data\n if deep:\n data = data.copy()\n return GeoDataFrame(data).__finalize__(self)\n\n def plot(self, *args, **kwargs):\n return plot_dataframe(self, *args, **kwargs)\n\ndef _dataframe_set_geometry(self, col, drop=False, inplace=False, crs=None):\n if inplace:\n raise ValueError(\"Can't do inplace setting when converting from\"\n \" DataFrame to GeoDataFrame\")\n gf = GeoDataFrame(self)\n # this will copy so that BlockManager gets copied\n return gf.set_geometry(col, drop=drop, inplace=False, crs=crs)\n\nif PY3:\n DataFrame.set_geometry = _dataframe_set_geometry\nelse:\n import types\n DataFrame.set_geometry = types.MethodType(_dataframe_set_geometry, None,\n DataFrame)\n", "path": "geopandas/geodataframe.py" } ]
diff --git a/geopandas/geodataframe.py b/geopandas/geodataframe.py index 4c74bac174..988c8bd120 100644 --- a/geopandas/geodataframe.py +++ b/geopandas/geodataframe.py @@ -283,7 +283,10 @@ def to_file(self, filename, driver="ESRI Shapefile", **kwargs): def convert_type(in_type): if in_type == object: return 'str' - return type(np.asscalar(np.zeros(1, in_type))).__name__ + out_type = type(np.asscalar(np.zeros(1, in_type))).__name__ + if out_type == 'long': + out_type = 'int' + return out_type def feature(i, row): return { diff --git a/tests/test_geodataframe.py b/tests/test_geodataframe.py index 2d5f60fbe7..98efdca26b 100644 --- a/tests/test_geodataframe.py +++ b/tests/test_geodataframe.py @@ -268,6 +268,17 @@ def test_to_file(self): self.assertTrue(len(df) == 5) self.assertTrue(np.alltrue(df['BoroName'].values == self.boros)) + def test_to_file_types(self): + """ Test various integer type columns (GH#93) """ + tempfilename = os.path.join(self.tempdir, 'int.shp') + int_types = [np.int, np.int8, np.int16, np.int32, np.int64, np.intp, + np.uint8, np.uint16, np.uint32, np.uint64, np.long] + geometry = self.df2.geometry + data = dict((str(i), np.arange(len(geometry), dtype=dtype)) + for i, dtype in enumerate(int_types)) + df = GeoDataFrame(data, geometry=geometry) + df.to_file(tempfilename) + def test_mixed_types_to_file(self): """ Test that mixed geometry types raise error when writing to file """ tempfilename = os.path.join(self.tempdir, 'test.shp')
to_file(): 'long' isn't a valid Fiona property type The question http://gis.stackexchange.com/questions/89206/geopandas-error-when-writing-to-file-valueerror-long-is-not-in-list revealed a bug to me. If you pass schema={'geometry': 'Point', 'properties': {'foo': 'long'}} into fiona.open(), the type 'long' isn't found at https://github.com/Toblerity/Fiona/blob/master/src/fiona/ogrext.pyx#L973. OGR doesn't distinguish between long and int, so converting 'long' to 'int' within Fiona may help... ## But :) Fiona will always return 'int' in the .schema attribute and this could cause trouble for programs that pass 'long' and expect it to stick. So, let's fix up geopandas so it always uses 'int' and never 'long'.
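A self-contained sketch of the dtype-to-schema-type mapping discussed above, assuming NumPy. The geopandas code of that era used `np.asscalar`, which newer NumPy versions have removed, so `.item()` stands in for it here; the `'long'` branch only ever triggers under Python 2.

```python
import numpy as np


def convert_type(in_type):
    """Map a column dtype to a Fiona schema type name ('str', 'int', 'float')."""
    if in_type == object:
        return 'str'
    # build a zero of the given dtype, convert to a Python scalar, and use
    # the scalar's type name as the schema type
    out_type = type(np.zeros(1, in_type)[0].item()).__name__
    if out_type == 'long':  # Python 2 only: normalize to the name Fiona accepts
        out_type = 'int'
    return out_type


if __name__ == '__main__':
    for dtype in (np.int32, np.int64, np.uint64, np.float64, object):
        print(dtype, '->', convert_type(dtype))
```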
cloud-custodian__cloud-custodian-5339
[ { "content": "# Copyright 2016-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom concurrent.futures import as_completed\nfrom datetime import datetime, timedelta\n\nfrom c7n.actions import BaseAction\nfrom c7n.exceptions import PolicyValidationError\nfrom c7n.filters import Filter, MetricsFilter\nfrom c7n.filters.iamaccess import CrossAccountAccessFilter\nfrom c7n.query import QueryResourceManager, ChildResourceManager, TypeInfo\nfrom c7n.manager import resources\nfrom c7n.resolver import ValuesFrom\nfrom c7n.tags import universal_augment\nfrom c7n.utils import type_schema, local_session, chunks, get_retry\n\n\[email protected]('alarm')\nclass Alarm(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'cloudwatch'\n arn_type = 'alarm'\n enum_spec = ('describe_alarms', 'MetricAlarms', None)\n id = 'AlarmArn'\n filter_name = 'AlarmNames'\n filter_type = 'list'\n name = 'AlarmName'\n date = 'AlarmConfigurationUpdatedTimestamp'\n config_type = 'AWS::CloudWatch::Alarm'\n\n retry = staticmethod(get_retry(('Throttled',)))\n\n\[email protected]_registry.register('delete')\nclass AlarmDelete(BaseAction):\n \"\"\"Delete a cloudwatch alarm.\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: cloudwatch-delete-stale-alarms\n resource: alarm\n filters:\n - type: value\n value_type: age\n key: StateUpdatedTimestamp\n value: 30\n op: ge\n - StateValue: INSUFFICIENT_DATA\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = ('cloudwatch:DeleteAlarms',)\n\n def process(self, resources):\n client = local_session(\n self.manager.session_factory).client('cloudwatch')\n\n for resource_set in chunks(resources, size=100):\n self.manager.retry(\n client.delete_alarms,\n AlarmNames=[r['AlarmName'] for r in resource_set])\n\n\[email protected]('event-rule')\nclass EventRule(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'events'\n arn_type = 'event-rule'\n enum_spec = ('list_rules', 'Rules', None)\n name = \"Name\"\n id = \"Name\"\n filter_name = \"NamePrefix\"\n filter_type = \"scalar\"\n\n\[email protected]_registry.register('metrics')\nclass EventRuleMetrics(MetricsFilter):\n\n def get_dimensions(self, resource):\n return [{'Name': 'RuleName', 'Value': resource['Name']}]\n\n\[email protected]('event-rule-target')\nclass EventRuleTarget(ChildResourceManager):\n\n class resource_type(TypeInfo):\n service = 'events'\n arn = False\n arn_type = 'event-rule-target'\n enum_spec = ('list_targets_by_rule', 'Targets', None)\n parent_spec = ('event-rule', 'Rule', True)\n name = id = 'Id'\n\n\[email protected]_registry.register('cross-account')\nclass CrossAccountFilter(CrossAccountAccessFilter):\n\n schema = type_schema(\n 'cross-account',\n # white list accounts\n whitelist_from=ValuesFrom.schema,\n whitelist={'type': 'array', 'items': {'type': 'string'}})\n\n # dummy permission\n permissions = ('events:ListTargetsByRule',)\n\n def __call__(self, r):\n account_id = r['Arn'].split(':', 5)[4]\n return account_id not in self.accounts\n\n\[email protected]_registry.register('delete')\nclass DeleteTarget(BaseAction):\n\n schema = type_schema('delete')\n permissions = ('events:RemoveTargets',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('events')\n rule_targets = {}\n for r in resources:\n rule_targets.setdefault(r['c7n:parent-id'], []).append(r['Id'])\n\n for rule_id, target_ids in rule_targets.items():\n client.remove_targets(\n Ids=target_ids,\n Rule=rule_id)\n\n\[email protected]('log-group')\nclass LogGroup(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'logs'\n arn_type = 'log-group'\n enum_spec = ('describe_log_groups', 'logGroups', None)\n name = 'logGroupName'\n id = 'arn'\n filter_name = 'logGroupNamePrefix'\n filter_type = 'scalar'\n dimension = 'LogGroupName'\n date = 'creationTime'\n universal_taggable = True\n\n def augment(self, resources):\n resources = universal_augment(self, resources)\n for r in resources:\n r['creationTime'] = r['creationTime'] / 1000.0\n return resources\n\n def get_arns(self, resources):\n # log group arn in resource describe has ':*' suffix, not all\n # apis can use that form, so normalize to standard arn.\n return [r['arn'][:-2] for r in resources]\n\n\[email protected]_registry.register('retention')\nclass Retention(BaseAction):\n \"\"\"Action to set the retention period (in days) for CloudWatch log groups\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: cloudwatch-set-log-group-retention\n resource: log-group\n actions:\n - type: retention\n days: 200\n \"\"\"\n\n schema = type_schema('retention', days={'type': 'integer'})\n permissions = ('logs:PutRetentionPolicy',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('logs')\n days = self.data['days']\n for r in resources:\n client.put_retention_policy(\n logGroupName=r['logGroupName'],\n retentionInDays=days)\n\n\[email protected]_registry.register('delete')\nclass Delete(BaseAction):\n \"\"\"\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudwatch-delete-stale-log-group\n resource: log-group\n filters:\n - type: last-write\n days: 182.5\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = ('logs:DeleteLogGroup',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('logs')\n for r in resources:\n client.delete_log_group(logGroupName=r['logGroupName'])\n\n\[email protected]_registry.register('last-write')\nclass LastWriteDays(Filter):\n \"\"\"Filters CloudWatch log groups by last write\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudwatch-stale-groups\n resource: log-group\n filters:\n - type: last-write\n days: 60\n \"\"\"\n\n schema = type_schema(\n 'last-write', days={'type': 'number'})\n permissions = ('logs:DescribeLogStreams',)\n\n def process(self, resources, event=None):\n client = local_session(self.manager.session_factory).client('logs')\n self.date_threshold = datetime.utcnow() - timedelta(\n days=self.data['days'])\n return [r for r in resources if self.check_group(client, r)]\n\n def check_group(self, client, group):\n streams = client.describe_log_streams(\n logGroupName=group['logGroupName'],\n orderBy='LastEventTime',\n descending=True,\n limit=3).get('logStreams')\n group['streams'] = streams\n if not streams:\n last_timestamp = group['creationTime']\n elif streams[0]['storedBytes'] == 0:\n last_timestamp = streams[0]['creationTime']\n else:\n last_timestamp = streams[0]['lastIngestionTime']\n\n last_write = datetime.fromtimestamp(last_timestamp / 1000.0)\n group['lastWrite'] = last_write\n return self.date_threshold > last_write\n\n\[email protected]_registry.register('cross-account')\nclass LogCrossAccountFilter(CrossAccountAccessFilter):\n\n schema = type_schema(\n 'cross-account',\n # white list accounts\n whitelist_from=ValuesFrom.schema,\n whitelist={'type': 'array', 'items': {'type': 'string'}})\n\n permissions = ('logs:DescribeSubscriptionFilters',)\n\n def process(self, resources, event=None):\n client = local_session(self.manager.session_factory).client('logs')\n accounts = self.get_accounts()\n results = []\n with self.executor_factory(max_workers=1) as w:\n futures = []\n for rset in chunks(resources, 50):\n futures.append(\n w.submit(\n self.process_resource_set, client, accounts, rset))\n for f in as_completed(futures):\n if f.exception():\n self.log.error(\n \"Error checking log groups cross-account %s\",\n f.exception())\n continue\n results.extend(f.result())\n return results\n\n def process_resource_set(self, client, accounts, resources):\n results = []\n for r in resources:\n found = False\n filters = self.manager.retry(\n client.describe_subscription_filters,\n logGroupName=r['logGroupName']).get('subscriptionFilters', ())\n for f in filters:\n if 'destinationArn' not in f:\n continue\n account_id = f['destinationArn'].split(':', 5)[4]\n if account_id not in 
accounts:\n r.setdefault('c7n:CrossAccountViolations', []).append(\n account_id)\n found = True\n if found:\n results.append(r)\n return results\n\n\[email protected]_registry.register('set-encryption')\nclass EncryptLogGroup(BaseAction):\n \"\"\"Encrypt/Decrypt a log group\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: encrypt-log-group\n resource: log-group\n filters:\n - kmsKeyId: absent\n actions:\n - type: set-encryption\n kms-key: alias/mylogkey\n state: True\n\n - name: decrypt-log-group\n resource: log-group\n filters:\n - kmsKeyId: kms:key:arn\n actions:\n - type: set-encryption\n state: False\n \"\"\"\n schema = type_schema(\n 'set-encryption',\n **{'kms-key': {'type': 'string'},\n 'state': {'type': 'boolean'}})\n permissions = (\n 'logs:AssociateKmsKey', 'logs:DisassociateKmsKey', 'kms:DescribeKey')\n\n def validate(self):\n if not self.data.get('state', True):\n return self\n key = self.data.get('kms-key', '')\n if not key:\n raise ValueError('Must specify either a KMS key ARN or Alias')\n if 'alias/' not in key and ':key/' not in key:\n raise PolicyValidationError(\n \"Invalid kms key format %s\" % key)\n return self\n\n def resolve_key(self, key):\n if not key:\n return\n\n # Qualified arn for key\n if key.startswith('arn:') and ':key/' in key:\n return key\n\n # Alias\n key = local_session(\n self.manager.session_factory).client(\n 'kms').describe_key(\n KeyId=key)['KeyMetadata']['Arn']\n return key\n\n def process(self, resources):\n session = local_session(self.manager.session_factory)\n client = session.client('logs')\n\n state = self.data.get('state', True)\n key = self.resolve_key(self.data.get('kms-key'))\n\n for r in resources:\n try:\n if state:\n client.associate_kms_key(\n logGroupName=r['logGroupName'], kmsKeyId=key)\n else:\n client.disassociate_kms_key(logGroupName=r['logGroupName'])\n except client.exceptions.ResourceNotFoundException:\n continue\n", "path": "c7n/resources/cw.py" } ]
[ { "content": "# Copyright 2016-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom concurrent.futures import as_completed\nfrom datetime import datetime, timedelta\n\nfrom c7n.actions import BaseAction\nfrom c7n.exceptions import PolicyValidationError\nfrom c7n.filters import Filter, MetricsFilter\nfrom c7n.filters.iamaccess import CrossAccountAccessFilter\nfrom c7n.query import QueryResourceManager, ChildResourceManager, TypeInfo\nfrom c7n.manager import resources\nfrom c7n.resolver import ValuesFrom\nfrom c7n.tags import universal_augment\nfrom c7n.utils import type_schema, local_session, chunks, get_retry\n\n\[email protected]('alarm')\nclass Alarm(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'cloudwatch'\n arn_type = 'alarm'\n enum_spec = ('describe_alarms', 'MetricAlarms', None)\n id = 'AlarmArn'\n filter_name = 'AlarmNames'\n filter_type = 'list'\n name = 'AlarmName'\n date = 'AlarmConfigurationUpdatedTimestamp'\n config_type = 'AWS::CloudWatch::Alarm'\n\n retry = staticmethod(get_retry(('Throttled',)))\n\n\[email protected]_registry.register('delete')\nclass AlarmDelete(BaseAction):\n \"\"\"Delete a cloudwatch alarm.\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: cloudwatch-delete-stale-alarms\n resource: alarm\n filters:\n - type: value\n value_type: age\n key: StateUpdatedTimestamp\n value: 30\n op: ge\n - StateValue: INSUFFICIENT_DATA\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = ('cloudwatch:DeleteAlarms',)\n\n def process(self, resources):\n client = local_session(\n self.manager.session_factory).client('cloudwatch')\n\n for resource_set in chunks(resources, size=100):\n self.manager.retry(\n client.delete_alarms,\n AlarmNames=[r['AlarmName'] for r in resource_set])\n\n\[email protected]('event-rule')\nclass EventRule(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'events'\n arn_type = 'event-rule'\n enum_spec = ('list_rules', 'Rules', None)\n name = \"Name\"\n id = \"Name\"\n filter_name = \"NamePrefix\"\n filter_type = \"scalar\"\n\n\[email protected]_registry.register('metrics')\nclass EventRuleMetrics(MetricsFilter):\n\n def get_dimensions(self, resource):\n return [{'Name': 'RuleName', 'Value': resource['Name']}]\n\n\[email protected]('event-rule-target')\nclass EventRuleTarget(ChildResourceManager):\n\n class resource_type(TypeInfo):\n service = 'events'\n arn = False\n arn_type = 'event-rule-target'\n enum_spec = ('list_targets_by_rule', 'Targets', None)\n parent_spec = ('event-rule', 'Rule', True)\n name = id = 'Id'\n\n\[email protected]_registry.register('cross-account')\nclass CrossAccountFilter(CrossAccountAccessFilter):\n\n schema = type_schema(\n 'cross-account',\n # white list accounts\n whitelist_from=ValuesFrom.schema,\n whitelist={'type': 'array', 'items': {'type': 'string'}})\n\n # dummy permission\n permissions = ('events:ListTargetsByRule',)\n\n def __call__(self, r):\n account_id = r['Arn'].split(':', 5)[4]\n return account_id not in self.accounts\n\n\[email protected]_registry.register('delete')\nclass DeleteTarget(BaseAction):\n\n schema = type_schema('delete')\n permissions = ('events:RemoveTargets',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('events')\n rule_targets = {}\n for r in resources:\n rule_targets.setdefault(r['c7n:parent-id'], []).append(r['Id'])\n\n for rule_id, target_ids in rule_targets.items():\n client.remove_targets(\n Ids=target_ids,\n Rule=rule_id)\n\n\[email protected]('log-group')\nclass LogGroup(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'logs'\n arn_type = 'log-group'\n enum_spec = ('describe_log_groups', 'logGroups', None)\n name = 'logGroupName'\n id = 'arn'\n filter_name = 'logGroupNamePrefix'\n filter_type = 'scalar'\n dimension = 'LogGroupName'\n date = 'creationTime'\n universal_taggable = True\n\n def augment(self, resources):\n resources = universal_augment(self, resources)\n for r in resources:\n r['creationTime'] = r['creationTime'] / 1000.0\n return resources\n\n def get_arns(self, resources):\n # log group arn in resource describe has ':*' suffix, not all\n # apis can use that form, so normalize to standard arn.\n return [r['arn'][:-2] for r in resources]\n\n\[email protected]_registry.register('retention')\nclass Retention(BaseAction):\n \"\"\"Action to set the retention period (in days) for CloudWatch log groups\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: cloudwatch-set-log-group-retention\n resource: log-group\n actions:\n - type: retention\n days: 200\n \"\"\"\n\n schema = type_schema('retention', days={'type': 'integer'})\n permissions = ('logs:PutRetentionPolicy',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('logs')\n days = self.data['days']\n for r in resources:\n self.manager.retry(\n client.put_retention_policy,\n logGroupName=r['logGroupName'],\n retentionInDays=days)\n\n\[email protected]_registry.register('delete')\nclass Delete(BaseAction):\n \"\"\"\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudwatch-delete-stale-log-group\n resource: log-group\n filters:\n - type: last-write\n days: 182.5\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = ('logs:DeleteLogGroup',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('logs')\n for r in resources:\n client.delete_log_group(logGroupName=r['logGroupName'])\n\n\[email protected]_registry.register('last-write')\nclass LastWriteDays(Filter):\n \"\"\"Filters CloudWatch log groups by last write\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudwatch-stale-groups\n resource: log-group\n filters:\n - type: last-write\n days: 60\n \"\"\"\n\n schema = type_schema(\n 'last-write', days={'type': 'number'})\n permissions = ('logs:DescribeLogStreams',)\n\n def process(self, resources, event=None):\n client = local_session(self.manager.session_factory).client('logs')\n self.date_threshold = datetime.utcnow() - timedelta(\n days=self.data['days'])\n return [r for r in resources if self.check_group(client, r)]\n\n def check_group(self, client, group):\n streams = client.describe_log_streams(\n logGroupName=group['logGroupName'],\n orderBy='LastEventTime',\n descending=True,\n limit=3).get('logStreams')\n group['streams'] = streams\n if not streams:\n last_timestamp = group['creationTime']\n elif streams[0]['storedBytes'] == 0:\n last_timestamp = streams[0]['creationTime']\n else:\n last_timestamp = streams[0]['lastIngestionTime']\n\n last_write = datetime.fromtimestamp(last_timestamp / 1000.0)\n group['lastWrite'] = last_write\n return self.date_threshold > last_write\n\n\[email protected]_registry.register('cross-account')\nclass LogCrossAccountFilter(CrossAccountAccessFilter):\n\n schema = type_schema(\n 'cross-account',\n # white list accounts\n whitelist_from=ValuesFrom.schema,\n whitelist={'type': 'array', 'items': {'type': 'string'}})\n\n permissions = ('logs:DescribeSubscriptionFilters',)\n\n def process(self, resources, event=None):\n client = local_session(self.manager.session_factory).client('logs')\n accounts = self.get_accounts()\n results = []\n with self.executor_factory(max_workers=1) as w:\n futures = []\n for rset in chunks(resources, 50):\n futures.append(\n w.submit(\n self.process_resource_set, client, accounts, rset))\n for f in as_completed(futures):\n if f.exception():\n self.log.error(\n \"Error checking log groups cross-account %s\",\n f.exception())\n continue\n results.extend(f.result())\n return results\n\n def process_resource_set(self, client, accounts, resources):\n results = []\n for r in resources:\n found = False\n filters = self.manager.retry(\n client.describe_subscription_filters,\n logGroupName=r['logGroupName']).get('subscriptionFilters', ())\n for f in filters:\n if 'destinationArn' not in f:\n continue\n account_id = f['destinationArn'].split(':', 
5)[4]\n if account_id not in accounts:\n r.setdefault('c7n:CrossAccountViolations', []).append(\n account_id)\n found = True\n if found:\n results.append(r)\n return results\n\n\[email protected]_registry.register('set-encryption')\nclass EncryptLogGroup(BaseAction):\n \"\"\"Encrypt/Decrypt a log group\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: encrypt-log-group\n resource: log-group\n filters:\n - kmsKeyId: absent\n actions:\n - type: set-encryption\n kms-key: alias/mylogkey\n state: True\n\n - name: decrypt-log-group\n resource: log-group\n filters:\n - kmsKeyId: kms:key:arn\n actions:\n - type: set-encryption\n state: False\n \"\"\"\n schema = type_schema(\n 'set-encryption',\n **{'kms-key': {'type': 'string'},\n 'state': {'type': 'boolean'}})\n permissions = (\n 'logs:AssociateKmsKey', 'logs:DisassociateKmsKey', 'kms:DescribeKey')\n\n def validate(self):\n if not self.data.get('state', True):\n return self\n key = self.data.get('kms-key', '')\n if not key:\n raise ValueError('Must specify either a KMS key ARN or Alias')\n if 'alias/' not in key and ':key/' not in key:\n raise PolicyValidationError(\n \"Invalid kms key format %s\" % key)\n return self\n\n def resolve_key(self, key):\n if not key:\n return\n\n # Qualified arn for key\n if key.startswith('arn:') and ':key/' in key:\n return key\n\n # Alias\n key = local_session(\n self.manager.session_factory).client(\n 'kms').describe_key(\n KeyId=key)['KeyMetadata']['Arn']\n return key\n\n def process(self, resources):\n session = local_session(self.manager.session_factory)\n client = session.client('logs')\n\n state = self.data.get('state', True)\n key = self.resolve_key(self.data.get('kms-key'))\n\n for r in resources:\n try:\n if state:\n client.associate_kms_key(\n logGroupName=r['logGroupName'], kmsKeyId=key)\n else:\n client.disassociate_kms_key(logGroupName=r['logGroupName'])\n except client.exceptions.ResourceNotFoundException:\n continue\n", "path": "c7n/resources/cw.py" } ]
diff --git a/c7n/resources/cw.py b/c7n/resources/cw.py
index 688df0d21f9..6536d1f3b48 100644
--- a/c7n/resources/cw.py
+++ b/c7n/resources/cw.py
@@ -196,7 +196,8 @@ def process(self, resources):
         client = local_session(self.manager.session_factory).client('logs')
         days = self.data['days']
         for r in resources:
-            client.put_retention_policy(
+            self.manager.retry(
+                client.put_retention_policy,
                 logGroupName=r['logGroupName'],
                 retentionInDays=days)
 
Put retention days on CloudWatch logs, with the traceback.

Here is the policy that I'm running periodically; I'm getting throttling errors:

policies:
  - name: Custodian-loggroup-retention
    resource: log-group
    description: |
      Checks log groups weekly and sets the log retention for log groups that don't have log retention set.
    mode:
      type: periodic
      schedule: "cron(0 12 ? * 2 *)"
      role: CustodianLambdaExecutionRole
      packages:
        - boto3
        - botocore
    filters:
      - type: value
        key: retentionInDays
        value: absent
    actions:
      - type: retention
        days: 400

**Here is the traceback.**

[DEBUG] 2020-02-10T16:00:25.670Z fc012410-e0d7-49a5-a52f-c8a996ffb8e8 metric:ResourceCount Count:820 policy:custodian-loggroup-retention restype:log-group scope:policy
[DEBUG] 2020-02-10T16:00:25.670Z fc012410-e0d7-49a5-a52f-c8a996ffb8e8 metric:PolicyException Count:1 policy:custodian-loggroup-retention restype:log-group
[DEBUG] 2020-02-10T16:00:25.670Z fc012410-e0d7-49a5-a52f-c8a996ffb8e8 metric:ApiCalls Count:110 policy:custodian-loggroup-retention restype:log-group
[ERROR] 2020-02-10T16:00:25.670Z fc012410-e0d7-49a5-a52f-c8a996ffb8e8 Error while executing policy
Traceback (most recent call last):
  File "/var/task/c7n/policy.py", line 320, in run
    results = a.process(resources)
  File "/var/task/c7n/resources/cw.py", line 201, in process
    retentionInDays=days)
  File "/var/task/botocore/client.py", line 276, in _api_call
    return self._make_api_call(operation_name, kwargs)
  File "/var/task/botocore/client.py", line 586, in _make_api_call
    raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (ThrottlingException) when calling the PutRetentionPolicy operation (reached max retries: 4): Rate exceeded
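The diff above addresses this by routing `put_retention_policy` through the resource manager's retry helper, so throttled calls are retried with backoff instead of aborting the policy run. As a rough illustration of the same idea outside Cloud Custodian, here is a minimal sketch of retrying a throttled CloudWatch Logs call with exponential backoff; the function name, constant, and defaults are illustrative only and not part of either codebase.

```python
# Minimal sketch: retry a throttled boto3 call with exponential backoff.
# Assumes boto3/botocore are installed and AWS credentials are configured;
# `set_retention_with_backoff` and THROTTLE_CODES are illustrative names.
import time

import boto3
from botocore.exceptions import ClientError

THROTTLE_CODES = ("Throttling", "ThrottlingException", "RequestLimitExceeded")


def set_retention_with_backoff(log_group_name, days, max_attempts=5):
    client = boto3.client("logs")
    for attempt in range(max_attempts):
        try:
            return client.put_retention_policy(
                logGroupName=log_group_name,
                retentionInDays=days)
        except ClientError as error:
            code = error.response.get("Error", {}).get("Code", "")
            if code not in THROTTLE_CODES or attempt == max_attempts - 1:
                raise
            # Back off 1s, 2s, 4s, ... before the next attempt.
            time.sleep(2 ** attempt)
```

Cloud Custodian's own `get_retry`/`self.manager.retry` helpers, visible elsewhere in `c7n/resources/cw.py` above, appear to wrap the same kind of backoff behaviour for the error codes they are configured with.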
ray-project__ray-8491
[ { "content": "from collections import deque, OrderedDict\nimport numpy as np\n\nfrom ray.rllib.utils import try_import_tf\n\ntf = try_import_tf()\n\n\ndef unflatten(vector, shapes):\n i = 0\n arrays = []\n for shape in shapes:\n size = np.prod(shape, dtype=np.int)\n array = vector[i:(i + size)].reshape(shape)\n arrays.append(array)\n i += size\n assert len(vector) == i, \"Passed weight does not have the correct shape.\"\n return arrays\n\n\nclass TensorFlowVariables:\n \"\"\"A class used to set and get weights for Tensorflow networks.\n\n Attributes:\n sess (tf.Session): The tensorflow session used to run assignment.\n variables (Dict[str, tf.Variable]): Extracted variables from the loss\n or additional variables that are passed in.\n placeholders (Dict[str, tf.placeholders]): Placeholders for weights.\n assignment_nodes (Dict[str, tf.Tensor]): Nodes that assign weights.\n \"\"\"\n\n def __init__(self, output, sess=None, input_variables=None):\n \"\"\"Creates TensorFlowVariables containing extracted variables.\n\n The variables are extracted by performing a BFS search on the\n dependency graph with loss as the root node. After the tree is\n traversed and those variables are collected, we append input_variables\n to the collected variables. For each variable in the list, the\n variable has a placeholder and assignment operation created for it.\n\n Args:\n output (tf.Operation, List[tf.Operation]): The tensorflow\n operation to extract all variables from.\n sess (tf.Session): Session used for running the get and set\n methods.\n input_variables (List[tf.Variables]): Variables to include in the\n list.\n \"\"\"\n self.sess = sess\n if not isinstance(output, (list, tuple)):\n output = [output]\n queue = deque(output)\n variable_names = []\n explored_inputs = set(output)\n\n # We do a BFS on the dependency graph of the input function to find\n # the variables.\n while len(queue) != 0:\n tf_obj = queue.popleft()\n if tf_obj is None:\n continue\n # The object put into the queue is not necessarily an operation,\n # so we want the op attribute to get the operation underlying the\n # object. 
Only operations contain the inputs that we can explore.\n if hasattr(tf_obj, \"op\"):\n tf_obj = tf_obj.op\n for input_op in tf_obj.inputs:\n if input_op not in explored_inputs:\n queue.append(input_op)\n explored_inputs.add(input_op)\n # Tensorflow control inputs can be circular, so we keep track of\n # explored operations.\n for control in tf_obj.control_inputs:\n if control not in explored_inputs:\n queue.append(control)\n explored_inputs.add(control)\n if (\"Variable\" in tf_obj.node_def.op\n or \"VarHandle\" in tf_obj.node_def.op):\n variable_names.append(tf_obj.node_def.name)\n self.variables = OrderedDict()\n variable_list = [\n v for v in tf.global_variables()\n if v.op.node_def.name in variable_names\n ]\n if input_variables is not None:\n variable_list += input_variables\n for v in variable_list:\n self.variables[v.op.node_def.name] = v\n\n self.placeholders = {}\n self.assignment_nodes = {}\n\n # Create new placeholders to put in custom weights.\n for k, var in self.variables.items():\n self.placeholders[k] = tf.placeholder(\n var.value().dtype,\n var.get_shape().as_list(),\n name=\"Placeholder_\" + k)\n self.assignment_nodes[k] = var.assign(self.placeholders[k])\n\n def set_session(self, sess):\n \"\"\"Sets the current session used by the class.\n\n Args:\n sess (tf.Session): Session to set the attribute with.\n \"\"\"\n self.sess = sess\n\n def get_flat_size(self):\n \"\"\"Returns the total length of all of the flattened variables.\n\n Returns:\n The length of all flattened variables concatenated.\n \"\"\"\n return sum(\n np.prod(v.get_shape().as_list()) for v in self.variables.values())\n\n def _check_sess(self):\n \"\"\"Checks if the session is set, and if not throw an error message.\"\"\"\n assert self.sess is not None, (\"The session is not set. 
Set the \"\n \"session either by passing it into the \"\n \"TensorFlowVariables constructor or by \"\n \"calling set_session(sess).\")\n\n def get_flat(self):\n \"\"\"Gets the weights and returns them as a flat array.\n\n Returns:\n 1D Array containing the flattened weights.\n \"\"\"\n self._check_sess()\n return np.concatenate([\n v.eval(session=self.sess).flatten()\n for v in self.variables.values()\n ])\n\n def set_flat(self, new_weights):\n \"\"\"Sets the weights to new_weights, converting from a flat array.\n\n Note:\n You can only set all weights in the network using this function,\n i.e., the length of the array must match get_flat_size.\n\n Args:\n new_weights (np.ndarray): Flat array containing weights.\n \"\"\"\n self._check_sess()\n shapes = [v.get_shape().as_list() for v in self.variables.values()]\n arrays = unflatten(new_weights, shapes)\n placeholders = [\n self.placeholders[k] for k, v in self.variables.items()\n ]\n self.sess.run(\n list(self.assignment_nodes.values()),\n feed_dict=dict(zip(placeholders, arrays)))\n\n def get_weights(self):\n \"\"\"Returns a dictionary containing the weights of the network.\n\n Returns:\n Dictionary mapping variable names to their weights.\n \"\"\"\n self._check_sess()\n return {\n k: v.eval(session=self.sess)\n for k, v in self.variables.items()\n }\n\n def set_weights(self, new_weights):\n \"\"\"Sets the weights to new_weights.\n\n Note:\n Can set subsets of variables as well, by only passing in the\n variables you want to be set.\n\n Args:\n new_weights (Dict): Dictionary mapping variable names to their\n weights.\n \"\"\"\n self._check_sess()\n assign_list = [\n self.assignment_nodes[name] for name in new_weights.keys()\n if name in self.assignment_nodes\n ]\n assert assign_list, (\"No variables in the input matched those in the \"\n \"network. Possible cause: Two networks were \"\n \"defined in the same TensorFlow graph. To fix \"\n \"this, place each network definition in its own \"\n \"tf.Graph.\")\n self.sess.run(\n assign_list,\n feed_dict={\n self.placeholders[name]: value\n for (name, value) in new_weights.items()\n if name in self.placeholders\n })\n", "path": "python/ray/experimental/tf_utils.py" } ]
[ { "content": "from collections import deque, OrderedDict\nimport numpy as np\n\nfrom ray.rllib.utils import try_import_tf\n\ntf = try_import_tf()\n\n\ndef unflatten(vector, shapes):\n i = 0\n arrays = []\n for shape in shapes:\n size = np.prod(shape, dtype=np.int)\n array = vector[i:(i + size)].reshape(shape)\n arrays.append(array)\n i += size\n assert len(vector) == i, \"Passed weight does not have the correct shape.\"\n return arrays\n\n\nclass TensorFlowVariables:\n \"\"\"A class used to set and get weights for Tensorflow networks.\n\n Attributes:\n sess (tf.Session): The tensorflow session used to run assignment.\n variables (Dict[str, tf.Variable]): Extracted variables from the loss\n or additional variables that are passed in.\n placeholders (Dict[str, tf.placeholders]): Placeholders for weights.\n assignment_nodes (Dict[str, tf.Tensor]): Nodes that assign weights.\n \"\"\"\n\n def __init__(self, output, sess=None, input_variables=None):\n \"\"\"Creates TensorFlowVariables containing extracted variables.\n\n The variables are extracted by performing a BFS search on the\n dependency graph with loss as the root node. After the tree is\n traversed and those variables are collected, we append input_variables\n to the collected variables. For each variable in the list, the\n variable has a placeholder and assignment operation created for it.\n\n Args:\n output (tf.Operation, List[tf.Operation]): The tensorflow\n operation to extract all variables from.\n sess (tf.Session): Session used for running the get and set\n methods.\n input_variables (List[tf.Variables]): Variables to include in the\n list.\n \"\"\"\n self.sess = sess\n if not isinstance(output, (list, tuple)):\n output = [output]\n queue = deque(output)\n variable_names = []\n explored_inputs = set(output)\n\n # We do a BFS on the dependency graph of the input function to find\n # the variables.\n while len(queue) != 0:\n tf_obj = queue.popleft()\n if tf_obj is None:\n continue\n # The object put into the queue is not necessarily an operation,\n # so we want the op attribute to get the operation underlying the\n # object. 
Only operations contain the inputs that we can explore.\n if hasattr(tf_obj, \"op\"):\n tf_obj = tf_obj.op\n for input_op in tf_obj.inputs:\n if input_op not in explored_inputs:\n queue.append(input_op)\n explored_inputs.add(input_op)\n # Tensorflow control inputs can be circular, so we keep track of\n # explored operations.\n for control in tf_obj.control_inputs:\n if control not in explored_inputs:\n queue.append(control)\n explored_inputs.add(control)\n if (\"Variable\" in tf_obj.node_def.op\n or \"VarHandle\" in tf_obj.node_def.op):\n variable_names.append(tf_obj.node_def.name)\n self.variables = OrderedDict()\n variable_list = [\n v for v in tf.global_variables()\n if v.op.node_def.name in variable_names\n ]\n if input_variables is not None:\n variable_list += input_variables\n for v in variable_list:\n self.variables[v.op.node_def.name] = v\n\n self.placeholders = {}\n self.assignment_nodes = {}\n\n # Create new placeholders to put in custom weights.\n for k, var in self.variables.items():\n self.placeholders[k] = tf.placeholder(\n var.value().dtype,\n var.get_shape().as_list(),\n name=\"Placeholder_\" + k)\n self.assignment_nodes[k] = var.assign(self.placeholders[k])\n\n def set_session(self, sess):\n \"\"\"Sets the current session used by the class.\n\n Args:\n sess (tf.Session): Session to set the attribute with.\n \"\"\"\n self.sess = sess\n\n def get_flat_size(self):\n \"\"\"Returns the total length of all of the flattened variables.\n\n Returns:\n The length of all flattened variables concatenated.\n \"\"\"\n return sum(\n np.prod(v.get_shape().as_list()) for v in self.variables.values())\n\n def _check_sess(self):\n \"\"\"Checks if the session is set, and if not throw an error message.\"\"\"\n assert self.sess is not None, (\"The session is not set. 
Set the \"\n \"session either by passing it into the \"\n \"TensorFlowVariables constructor or by \"\n \"calling set_session(sess).\")\n\n def get_flat(self):\n \"\"\"Gets the weights and returns them as a flat array.\n\n Returns:\n 1D Array containing the flattened weights.\n \"\"\"\n self._check_sess()\n return np.concatenate([\n v.eval(session=self.sess).flatten()\n for v in self.variables.values()\n ])\n\n def set_flat(self, new_weights):\n \"\"\"Sets the weights to new_weights, converting from a flat array.\n\n Note:\n You can only set all weights in the network using this function,\n i.e., the length of the array must match get_flat_size.\n\n Args:\n new_weights (np.ndarray): Flat array containing weights.\n \"\"\"\n self._check_sess()\n shapes = [v.get_shape().as_list() for v in self.variables.values()]\n arrays = unflatten(new_weights, shapes)\n placeholders = [\n self.placeholders[k] for k, v in self.variables.items()\n ]\n self.sess.run(\n list(self.assignment_nodes.values()),\n feed_dict=dict(zip(placeholders, arrays)))\n\n def get_weights(self):\n \"\"\"Returns a dictionary containing the weights of the network.\n\n Returns:\n Dictionary mapping variable names to their weights.\n \"\"\"\n self._check_sess()\n return self.sess.run(self.variables)\n\n def set_weights(self, new_weights):\n \"\"\"Sets the weights to new_weights.\n\n Note:\n Can set subsets of variables as well, by only passing in the\n variables you want to be set.\n\n Args:\n new_weights (Dict): Dictionary mapping variable names to their\n weights.\n \"\"\"\n self._check_sess()\n assign_list = [\n self.assignment_nodes[name] for name in new_weights.keys()\n if name in self.assignment_nodes\n ]\n assert assign_list, (\"No variables in the input matched those in the \"\n \"network. Possible cause: Two networks were \"\n \"defined in the same TensorFlow graph. To fix \"\n \"this, place each network definition in its own \"\n \"tf.Graph.\")\n self.sess.run(\n assign_list,\n feed_dict={\n self.placeholders[name]: value\n for (name, value) in new_weights.items()\n if name in self.placeholders\n })\n", "path": "python/ray/experimental/tf_utils.py" } ]
diff --git a/python/ray/experimental/tf_utils.py b/python/ray/experimental/tf_utils.py
index bdddc6a96b5b..8fa0d455297a 100644
--- a/python/ray/experimental/tf_utils.py
+++ b/python/ray/experimental/tf_utils.py
@@ -161,10 +161,7 @@ def get_weights(self):
             Dictionary mapping variable names to their weights.
         """
         self._check_sess()
-        return {
-            k: v.eval(session=self.sess)
-            for k, v in self.variables.items()
-        }
+        return self.sess.run(self.variables)
 
     def set_weights(self, new_weights):
         """Sets the weights to new_weights.
Time to initialize a policy grows linearly with the number of agents <!-- General questions should be asked on the mailing list [email protected]. Questions about how to use Ray should be asked on [StackOverflow](https://stackoverflow.com/questions/tagged/ray). Before submitting an issue, please fill out the following form. --> ### System information - **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Linux Ubuntu 18.04 - **Ray installed from (source or binary)**: Binary - **Ray version**: 0.7.4 - **Python version**: 3.7.4 - **Exact command to reproduce**: N/A <!-- You can obtain the Ray version with python -c "import ray; print(ray.__version__)" --> ### Describe the problem <!-- Describe the problem clearly here. --> I noticed that in multi agent settings, the time to initialize a policy per agent increases as more agents are initialized. In the sample output I provided below, you can see that the time to initialize a single DynamicTFPolicy grows from 4.6 seconds to 15.3 seconds from the first agent to the tenth agent created. Line 291 of `rllib/policy/dynamic_tf_policy.py` is ```python self._sess.run(tf.global_variables_initializer()) ``` which I believe will run one time for each agent initialized. If I'm not mistaken, this means that every variable in the computation graph is being initialized each time that we initialize a DynamicTFPolicy. If initializing a DynamicTFPolicy adds new variables to the computation graph (as I believe it does), this would explain why the time to initialize a DynamicTFPolicy grows over time: We are initializing every variable in the computation graph, and the computation graph is growing. My question is, why does line 291 run a global variables initializer? Is there a reason for this that I can't see inside this method? How hard would it be to modify this to only initialize variables in the individual policy that we care to initialize? I'm asking this because as detailed in #5753, I'm trying to modify rllib to allow initialization and removal of policies during training. The overhead incurred by this initialization quickly slows the training script down enough to be useless. Also, if anyone knows what the resource bottleneck is for policy initialization, that would be very helpful to know for when we're picking new hardware. Does it need a ton of cores to run in parallel, or more memory, or a bigger GPU or more GPUs or something? Thanks. ### Source code / logs <!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. --> ``` (pytorch) root@e3a955e42cae:~/bees/bees# python trainer.py settings/settings.json 2019-10-23 12:38:04,168 WARNING worker.py:1426 -- WARNING: Not updating worker name since `setproctitle` is not installed. Install this with `pip install setproctitle` (or ray[debug]) to enable monitoring of worker processes. 2019-10-23 12:38:04,169 INFO resource_spec.py:205 -- Starting Ray with 3.52 GiB memory available for workers and up to 1.78 GiB for objects. You can adjust these settings with ray.init(memory=<bytes>, object_store_memory=<bytes>). 2019-10-23 12:38:05,354 INFO trainer.py:344 -- Tip: set 'eager': true or the --eager flag to enable TensorFlow eager execution 2019-10-23 12:38:05,754 WARNING ppo.py:149 -- Using the simple minibatch optimizer. 
This will significantly reduce performance, consider simple_optimizer=False. DTFP: 4.604122s DTFP: 4.856234s DTFP: 5.630484s DTFP: 6.850456s DTFP: 7.856700s DTFP: 9.624164s DTFP: 10.894944s DTFP: 12.129192s DTFP: 14.210247s DTFP: 15.342738s ``` Line 130 in `tf_policy_template.py` (modified to print debug output above) ```python t = time.time() DynamicTFPolicy.__init__( self, obs_space, action_space, config, loss_fn, stats_fn=stats_fn, grad_stats_fn=grad_stats_fn, before_loss_init=before_loss_init_wrapper, make_model=make_model, action_sampler_fn=action_sampler_fn, existing_model=existing_model, existing_inputs=existing_inputs, get_batch_divisibility_req=get_batch_divisibility_req, obs_include_prev_action_reward=obs_include_prev_action_reward) print("DTFP: %fs" % (time.time() - t)) ``` Snippet of trainer script used. ```python # pylint: disable=invalid-name if __name__ == "__main__": ray.init() # Get ``settings`` file for now. settings_file = sys.argv[1] with open(settings_file, "r") as f: settings = json.load(f) env_config = settings["env"] time_steps = env_config["time_steps"] space_env = create_env(settings) env = create_env(settings) # Register environment register_env("world", lambda _: env) # Build environment instance to get ``obs_space``. obs_space = space_env.observation_space act_space = space_env.action_space # You can also have multiple policies per trainer, but here we just # show one each for PPO and DQN. policies: Dict[str, Tuple[Any, gym.Space, gym.Space, Dict[Any, Any]]] = { "0": (PPOTFPolicy, obs_space, act_space, {}), "1": (PPOTFPolicy, obs_space, act_space, {}), "2": (PPOTFPolicy, obs_space, act_space, {}), "3": (PPOTFPolicy, obs_space, act_space, {}), "4": (PPOTFPolicy, obs_space, act_space, {}), "5": (PPOTFPolicy, obs_space, act_space, {}), "6": (PPOTFPolicy, obs_space, act_space, {}), "7": (PPOTFPolicy, obs_space, act_space, {}), "8": (PPOTFPolicy, obs_space, act_space, {}), "9": (PPOTFPolicy, obs_space, act_space, {}), } def policy_mapping_fn(agent_id: int) -> str: """ Returns the given agent's policy identifier. """ return str(agent_id) ppo_trainer = PPOTrainer( env="bee_world", config={ "multiagent": { "policies": policies, "policy_mapping_fn": policy_mapping_fn, "policies_to_train": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"], }, "simple_optimizer": True, # Disable filters, otherwise we would need to synchronize those # as well to the DQN agent. "observation_filter": "NoFilter", "num_workers": 2, "num_gpus": 1, "train_batch_size": 2, "sample_batch_size": 1, "sgd_minibatch_size": 2, }, ) ```
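Two separate costs show up in this report. The diff above trims per-variable session overhead by fetching all weights in a single `sess.run(self.variables)` call instead of calling `.eval()` once per variable. The linear growth in initialization time, however, comes from running `tf.global_variables_initializer()` once per policy: every call re-initializes every variable in a graph that keeps growing as policies are added. A common TF1-era workaround is to initialize only the variables created since the last initialization. The snippet below is a minimal sketch of that idea, not the actual RLlib change; `build_policy_variables` is a hypothetical stand-in for whatever code adds a new policy's variables to the graph.

```python
# Sketch: initialize only variables added since the last initialization,
# instead of re-running tf.global_variables_initializer() over everything.
# `build_policy_variables` is a hypothetical stand-in for policy construction.
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()


def build_policy_variables(name):
    # Pretend this builds one policy's network and returns its variables.
    with tf.variable_scope(name):
        return tf.get_variable("w", shape=[256, 256])


sess = tf.Session()
seen = set()  # names of variables we have already initialized

for i in range(3):
    build_policy_variables("policy_%d" % i)
    new_vars = [v for v in tf.global_variables() if v.name not in seen]
    # Only the freshly created variables get an init op, so the cost stays
    # proportional to the new policy rather than to the whole graph.
    sess.run(tf.variables_initializer(new_vars))
    seen.update(v.name for v in new_vars)
```

If needed, `tf.report_uninitialized_variables()` can be run afterwards to confirm that nothing in the graph was left uninitialized.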
bookwyrm-social__bookwyrm-1639
[ { "content": "\"\"\"ISNI author checking utilities\"\"\"\nimport xml.etree.ElementTree as ET\nimport requests\n\nfrom bookwyrm import activitypub, models\n\n\ndef request_isni_data(search_index, search_term, max_records=5):\n \"\"\"Request data from the ISNI API\"\"\"\n\n search_string = f'{search_index}=\"{search_term}\"'\n query_params = {\n \"query\": search_string,\n \"version\": \"1.1\",\n \"operation\": \"searchRetrieve\",\n \"recordSchema\": \"isni-b\",\n \"maximumRecords\": max_records,\n \"startRecord\": \"1\",\n \"recordPacking\": \"xml\",\n \"sortKeys\": \"RLV,pica,0,,\",\n }\n result = requests.get(\"http://isni.oclc.org/sru/\", params=query_params, timeout=10)\n # the OCLC ISNI server asserts the payload is encoded\n # in latin1, but we know better\n result.encoding = \"utf-8\"\n return result.text\n\n\ndef make_name_string(element):\n \"\"\"create a string of form 'personal_name surname'\"\"\"\n\n # NOTE: this will often be incorrect, many naming systems\n # list \"surname\" before personal name\n forename = element.find(\".//forename\")\n surname = element.find(\".//surname\")\n if forename is not None:\n return \"\".join([forename.text, \" \", surname.text])\n return surname.text\n\n\ndef get_other_identifier(element, code):\n \"\"\"Get other identifiers associated with an author from their ISNI record\"\"\"\n\n identifiers = element.findall(\".//otherIdentifierOfIdentity\")\n for section_head in identifiers:\n if (\n section_head.find(\".//type\") is not None\n and section_head.find(\".//type\").text == code\n and section_head.find(\".//identifier\") is not None\n ):\n return section_head.find(\".//identifier\").text\n\n # if we can't find it in otherIdentifierOfIdentity,\n # try sources\n for source in element.findall(\".//sources\"):\n code_of_source = source.find(\".//codeOfSource\")\n if code_of_source is not None and code_of_source.text.lower() == code.lower():\n return source.find(\".//sourceIdentifier\").text\n\n return \"\"\n\n\ndef get_external_information_uri(element, match_string):\n \"\"\"Get URLs associated with an author from their ISNI record\"\"\"\n\n sources = element.findall(\".//externalInformation\")\n for source in sources:\n information = source.find(\".//information\")\n uri = source.find(\".//URI\")\n if (\n uri is not None\n and information is not None\n and information.text.lower() == match_string.lower()\n ):\n return uri.text\n return \"\"\n\n\ndef find_authors_by_name(name_string, description=False):\n \"\"\"Query the ISNI database for possible author matches by name\"\"\"\n\n payload = request_isni_data(\"pica.na\", name_string)\n # parse xml\n root = ET.fromstring(payload)\n # build list of possible authors\n possible_authors = []\n for element in root.iter(\"responseRecord\"):\n personal_name = element.find(\".//forename/..\")\n if not personal_name:\n continue\n\n author = get_author_from_isni(element.find(\".//isniUnformatted\").text)\n\n if bool(description):\n\n titles = []\n # prefer title records from LoC+ coop, Australia, Ireland, or Singapore\n # in that order\n for source in [\"LCNACO\", \"NLA\", \"N6I\", \"NLB\"]:\n for parent in element.findall(f'.//titleOfWork/[@source=\"{source}\"]'):\n titles.append(parent.find(\".//title\"))\n for parent in element.findall(f'.//titleOfWork[@subsource=\"{source}\"]'):\n titles.append(parent.find(\".//title\"))\n # otherwise just grab the first title listing\n titles.append(element.find(\".//title\"))\n\n if titles is not None:\n # some of the \"titles\" in ISNI are a little ...iffy\n # '@' is 
used by ISNI/OCLC to index the starting point ignoring stop words\n # (e.g. \"The @Government of no one\")\n title_elements = [\n e for e in titles if not e.text.replace(\"@\", \"\").isnumeric()\n ]\n if len(title_elements):\n author.bio = title_elements[0].text.replace(\"@\", \"\")\n else:\n author.bio = None\n\n possible_authors.append(author)\n\n return possible_authors\n\n\ndef get_author_from_isni(isni):\n \"\"\"Find data to populate a new author record from their ISNI\"\"\"\n\n payload = request_isni_data(\"pica.isn\", isni)\n # parse xml\n root = ET.fromstring(payload)\n # there should only be a single responseRecord\n # but let's use the first one just in case\n element = root.find(\".//responseRecord\")\n name = make_name_string(element.find(\".//forename/..\"))\n viaf = get_other_identifier(element, \"viaf\")\n # use a set to dedupe aliases in ISNI\n aliases = set()\n aliases_element = element.findall(\".//personalNameVariant\")\n for entry in aliases_element:\n aliases.add(make_name_string(entry))\n # aliases needs to be list not set\n aliases = list(aliases)\n bio = element.find(\".//nameTitle\")\n bio = bio.text if bio is not None else \"\"\n wikipedia = get_external_information_uri(element, \"Wikipedia\")\n\n author = activitypub.Author(\n id=element.find(\".//isniURI\").text,\n name=name,\n isni=isni,\n viafId=viaf,\n aliases=aliases,\n bio=bio,\n wikipediaLink=wikipedia,\n )\n\n return author\n\n\ndef build_author_from_isni(match_value):\n \"\"\"Build basic author class object from ISNI URL\"\"\"\n\n # if it is an isni value get the data\n if match_value.startswith(\"https://isni.org/isni/\"):\n isni = match_value.replace(\"https://isni.org/isni/\", \"\")\n return {\"author\": get_author_from_isni(isni)}\n # otherwise it's a name string\n return {}\n\n\ndef augment_author_metadata(author, isni):\n \"\"\"Update any missing author fields from ISNI data\"\"\"\n\n isni_author = get_author_from_isni(isni)\n isni_author.to_model(model=models.Author, instance=author, overwrite=False)\n\n # we DO want to overwrite aliases because we're adding them to the\n # existing aliases and ISNI will usually have more.\n # We need to dedupe because ISNI records often have lots of dupe aliases\n aliases = set(isni_author.aliases)\n for alias in author.aliases:\n aliases.add(alias)\n author.aliases = list(aliases)\n author.save()\n", "path": "bookwyrm/utils/isni.py" } ]
[ { "content": "\"\"\"ISNI author checking utilities\"\"\"\nimport xml.etree.ElementTree as ET\nimport requests\n\nfrom bookwyrm import activitypub, models\n\n\ndef request_isni_data(search_index, search_term, max_records=5):\n \"\"\"Request data from the ISNI API\"\"\"\n\n search_string = f'{search_index}=\"{search_term}\"'\n query_params = {\n \"query\": search_string,\n \"version\": \"1.1\",\n \"operation\": \"searchRetrieve\",\n \"recordSchema\": \"isni-b\",\n \"maximumRecords\": max_records,\n \"startRecord\": \"1\",\n \"recordPacking\": \"xml\",\n \"sortKeys\": \"RLV,pica,0,,\",\n }\n result = requests.get(\"http://isni.oclc.org/sru/\", params=query_params, timeout=15)\n # the OCLC ISNI server asserts the payload is encoded\n # in latin1, but we know better\n result.encoding = \"utf-8\"\n return result.text\n\n\ndef make_name_string(element):\n \"\"\"create a string of form 'personal_name surname'\"\"\"\n\n # NOTE: this will often be incorrect, many naming systems\n # list \"surname\" before personal name\n forename = element.find(\".//forename\")\n surname = element.find(\".//surname\")\n if forename is not None:\n return \"\".join([forename.text, \" \", surname.text])\n return surname.text\n\n\ndef get_other_identifier(element, code):\n \"\"\"Get other identifiers associated with an author from their ISNI record\"\"\"\n\n identifiers = element.findall(\".//otherIdentifierOfIdentity\")\n for section_head in identifiers:\n if (\n section_head.find(\".//type\") is not None\n and section_head.find(\".//type\").text == code\n and section_head.find(\".//identifier\") is not None\n ):\n return section_head.find(\".//identifier\").text\n\n # if we can't find it in otherIdentifierOfIdentity,\n # try sources\n for source in element.findall(\".//sources\"):\n code_of_source = source.find(\".//codeOfSource\")\n if code_of_source is not None and code_of_source.text.lower() == code.lower():\n return source.find(\".//sourceIdentifier\").text\n\n return \"\"\n\n\ndef get_external_information_uri(element, match_string):\n \"\"\"Get URLs associated with an author from their ISNI record\"\"\"\n\n sources = element.findall(\".//externalInformation\")\n for source in sources:\n information = source.find(\".//information\")\n uri = source.find(\".//URI\")\n if (\n uri is not None\n and information is not None\n and information.text.lower() == match_string.lower()\n ):\n return uri.text\n return \"\"\n\n\ndef find_authors_by_name(name_string, description=False):\n \"\"\"Query the ISNI database for possible author matches by name\"\"\"\n\n payload = request_isni_data(\"pica.na\", name_string)\n # parse xml\n root = ET.fromstring(payload)\n # build list of possible authors\n possible_authors = []\n for element in root.iter(\"responseRecord\"):\n personal_name = element.find(\".//forename/..\")\n if not personal_name:\n continue\n\n author = get_author_from_isni(element.find(\".//isniUnformatted\").text)\n\n if bool(description):\n\n titles = []\n # prefer title records from LoC+ coop, Australia, Ireland, or Singapore\n # in that order\n for source in [\"LCNACO\", \"NLA\", \"N6I\", \"NLB\"]:\n for parent in element.findall(f'.//titleOfWork/[@source=\"{source}\"]'):\n titles.append(parent.find(\".//title\"))\n for parent in element.findall(f'.//titleOfWork[@subsource=\"{source}\"]'):\n titles.append(parent.find(\".//title\"))\n # otherwise just grab the first title listing\n titles.append(element.find(\".//title\"))\n\n if titles is not None:\n # some of the \"titles\" in ISNI are a little ...iffy\n # '@' is 
used by ISNI/OCLC to index the starting point ignoring stop words\n # (e.g. \"The @Government of no one\")\n title_elements = [\n e for e in titles if not e.text.replace(\"@\", \"\").isnumeric()\n ]\n if len(title_elements):\n author.bio = title_elements[0].text.replace(\"@\", \"\")\n else:\n author.bio = None\n\n possible_authors.append(author)\n\n return possible_authors\n\n\ndef get_author_from_isni(isni):\n \"\"\"Find data to populate a new author record from their ISNI\"\"\"\n\n payload = request_isni_data(\"pica.isn\", isni)\n # parse xml\n root = ET.fromstring(payload)\n # there should only be a single responseRecord\n # but let's use the first one just in case\n element = root.find(\".//responseRecord\")\n name = make_name_string(element.find(\".//forename/..\"))\n viaf = get_other_identifier(element, \"viaf\")\n # use a set to dedupe aliases in ISNI\n aliases = set()\n aliases_element = element.findall(\".//personalNameVariant\")\n for entry in aliases_element:\n aliases.add(make_name_string(entry))\n # aliases needs to be list not set\n aliases = list(aliases)\n bio = element.find(\".//nameTitle\")\n bio = bio.text if bio is not None else \"\"\n wikipedia = get_external_information_uri(element, \"Wikipedia\")\n\n author = activitypub.Author(\n id=element.find(\".//isniURI\").text,\n name=name,\n isni=isni,\n viafId=viaf,\n aliases=aliases,\n bio=bio,\n wikipediaLink=wikipedia,\n )\n\n return author\n\n\ndef build_author_from_isni(match_value):\n \"\"\"Build basic author class object from ISNI URL\"\"\"\n\n # if it is an isni value get the data\n if match_value.startswith(\"https://isni.org/isni/\"):\n isni = match_value.replace(\"https://isni.org/isni/\", \"\")\n return {\"author\": get_author_from_isni(isni)}\n # otherwise it's a name string\n return {}\n\n\ndef augment_author_metadata(author, isni):\n \"\"\"Update any missing author fields from ISNI data\"\"\"\n\n isni_author = get_author_from_isni(isni)\n isni_author.to_model(model=models.Author, instance=author, overwrite=False)\n\n # we DO want to overwrite aliases because we're adding them to the\n # existing aliases and ISNI will usually have more.\n # We need to dedupe because ISNI records often have lots of dupe aliases\n aliases = set(isni_author.aliases)\n for alias in author.aliases:\n aliases.add(alias)\n author.aliases = list(aliases)\n author.save()\n", "path": "bookwyrm/utils/isni.py" } ]
diff --git a/bookwyrm/templates/directory/directory.html b/bookwyrm/templates/directory/directory.html index 9753c4c0f8..c3ddb3c5c4 100644 --- a/bookwyrm/templates/directory/directory.html +++ b/bookwyrm/templates/directory/directory.html @@ -18,7 +18,7 @@ <h1 class="title"> </p> <form name="directory" method="POST" action="{% url 'directory' %}"> {% csrf_token %} - <button class="button is-primary" type="submit">Join Directory</button> + <button class="button is-primary" type="submit">{% trans "Join Directory" %}</button> <p class="help"> {% url 'prefs-profile' as path %} {% blocktrans with path=path %}You can opt-out at any time in your <a href="{{ path }}">profile settings.</a>{% endblocktrans %} @@ -28,7 +28,7 @@ <h1 class="title"> <div class="column is-narrow"> {% trans "Dismiss message" as button_text %} <button type="button" class="delete set-display" data-id="hide_join_directory" data-value="true"> - <span>Dismiss message</span> + <span>{% trans "Dismiss message" %}</span> </button> </div> </div></div> diff --git a/bookwyrm/templates/get_started/book_preview.html b/bookwyrm/templates/get_started/book_preview.html index 893e7593a2..8a20d0d773 100644 --- a/bookwyrm/templates/get_started/book_preview.html +++ b/bookwyrm/templates/get_started/book_preview.html @@ -4,9 +4,14 @@ <div class="select is-small mt-1 mb-3"> <select name="{{ book.id }}" aria-label="{% blocktrans with book_title=book.title %}Have you read {{ book_title }}?{% endblocktrans %}"> - <option disabled selected value>Add to your books</option> + <option disabled selected value>{% trans 'Add to your books' %}</option> {% for shelf in user_shelves %} - <option value="{{ shelf.id }}">{{ shelf.name }}</option> + <option value="{{ shelf.id }}"> + {% if shelf.identifier == 'to-read' %}{% trans "To Read" %} + {% elif shelf.identifier == 'reading' %}{% trans "Currently Reading" %} + {% elif shelf.identifier == 'read' %}{% trans "Read" %} + {% else %}{{ shelf.name }}{% endif %} + </option> {% endfor %} </select> </div> diff --git a/bookwyrm/templates/search/book.html b/bookwyrm/templates/search/book.html index 704f055bf6..66adb8c837 100644 --- a/bookwyrm/templates/search/book.html +++ b/bookwyrm/templates/search/book.html @@ -39,7 +39,7 @@ <header class="columns is-mobile"> <div class="column"> <h3 class="title is-5"> - Results from + {% trans 'Results from' %} <a href="{{ result_set.connector.base_url }}" target="_blank">{{ result_set.connector.name|default:result_set.connector.identifier }}</a> </h3> </div> diff --git a/bookwyrm/templates/shelf/shelf.html b/bookwyrm/templates/shelf/shelf.html index 01d41aa079..0184ab1d88 100644 --- a/bookwyrm/templates/shelf/shelf.html +++ b/bookwyrm/templates/shelf/shelf.html @@ -80,7 +80,10 @@ <h1 class="title"> <div class="block columns is-mobile"> <div class="column"> <h2 class="title is-3"> - {{ shelf.name }} + {% if shelf.identifier == 'to-read' %}{% trans "To Read" %} + {% elif shelf.identifier == 'reading' %}{% trans "Currently Reading" %} + {% elif shelf.identifier == 'read' %}{% trans "Read" %} + {% else %}{{ shelf.name }}{% endif %} <span class="subtitle"> {% include 'snippets/privacy-icons.html' with item=shelf %} </span> diff --git a/bookwyrm/templates/snippets/shelf_selector.html b/bookwyrm/templates/snippets/shelf_selector.html index 4b32f5a8e2..3ee6fa92c6 100644 --- a/bookwyrm/templates/snippets/shelf_selector.html +++ b/bookwyrm/templates/snippets/shelf_selector.html @@ -22,7 +22,15 @@ <input type="hidden" name="book" value="{{ book.id }}"> <input type="hidden" 
name="change-shelf-from" value="{{ current.identifier }}"> <input type="hidden" name="shelf" value="{{ shelf.identifier }}"> - <button class="button is-fullwidth is-small shelf-option is-radiusless is-white" type="submit" {% if shelf in book.shelf_set.all %} disabled {% endif %}><span>{{ shelf.name }}</span></button> + + <button class="button is-fullwidth is-small shelf-option is-radiusless is-white" type="submit" {% if shelf.identifier == current.identifier %}disabled{% endif %}> + <span> + {% if shelf.identifier == 'to-read' %}{% trans "To Read" %} + {% elif shelf.identifier == 'reading' %}{% trans "Currently Reading" %} + {% elif shelf.identifier == 'read' %}{% trans "Read" %} + {% else %}{{ shelf.name }}{% endif %} + </span> + </button> </form> </li> {% else%} diff --git a/bookwyrm/templates/user/user.html b/bookwyrm/templates/user/user.html index 36e646aa9e..ccc4a44ea8 100755 --- a/bookwyrm/templates/user/user.html +++ b/bookwyrm/templates/user/user.html @@ -29,8 +29,13 @@ <h2 class="title"> <div class="columns is-mobile scroll-x"> {% for shelf in shelves %} <div class="column is-narrow"> - <h3>{{ shelf.name }} - {% if shelf.size > 3 %}<small>(<a href="{{ shelf.local_path }}">{% blocktrans with size=shelf.size %}View all {{ size }}{% endblocktrans %}</a>)</small>{% endif %}</h3> + <h3> + {% if shelf.name == 'To Read' %}{% trans "To Read" %} + {% elif shelf.name == 'Currently Reading' %}{% trans "Currently Reading" %} + {% elif shelf.name == 'Read' %}{% trans "Read" %} + {% else %}{{ shelf.name }}{% endif %} + {% if shelf.size > 3 %}<small>(<a href="{{ shelf.local_path }}">{% blocktrans with size=shelf.size %}View all {{ size }}{% endblocktrans %}</a>)</small>{% endif %} + </h3> <div class="is-mobile field is-grouped"> {% for book in shelf.books %} <div class="control"> @@ -49,7 +54,8 @@ <h3>{{ shelf.name }} {% if goal %} <div class="block"> - <h2 class="title">{% now 'Y' %} Reading Goal</h2> + {% now 'Y' as current_year%} + <h2 class="title">{% blocktrans %}{{ current_year }} Reading Goal{% endblocktrans %}</h2> {% include 'snippets/goal_progress.html' with goal=goal %} </div> {% endif %} diff --git a/bookwyrm/utils/isni.py b/bookwyrm/utils/isni.py index a35c3f2493..65d20c85cc 100644 --- a/bookwyrm/utils/isni.py +++ b/bookwyrm/utils/isni.py @@ -19,7 +19,7 @@ def request_isni_data(search_index, search_term, max_records=5): "recordPacking": "xml", "sortKeys": "RLV,pica,0,,", } - result = requests.get("http://isni.oclc.org/sru/", params=query_params, timeout=10) + result = requests.get("http://isni.oclc.org/sru/", params=query_params, timeout=15) # the OCLC ISNI server asserts the payload is encoded # in latin1, but we know better result.encoding = "utf-8"
Some translation strings missing/not applied

**Describe the bug**
Some translations are not being applied.

**To Reproduce**
Change the Bookwyrm language to something other than English (tested with Lithuanian) and visit:
https://ziurkes.group.lt/user/athinkingmeat/books/read
https://ziurkes.group.lt/user/athinkingmeat/books/reading
https://ziurkes.group.lt/user/athinkingmeat/books/to-read

**Expected behavior**
All of these pages should show the "read", "currently reading" and "to read" strings translated, but they are shown in English.

**Screenshots**
![image](https://user-images.githubusercontent.com/81133/143778383-c5e6f8b2-4925-4060-a886-e40b9b6361fb.png)
![image](https://user-images.githubusercontent.com/81133/143778390-5ba3cc06-6744-4a60-a855-27fb3202f690.png)
![image](https://user-images.githubusercontent.com/81133/143778375-e799c900-1737-4025-8d1e-80ea99b3df81.png)

**Instance**
https://ziurkes.group.lt/

**Additional context**
This is probably a problem with other languages as well.
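The accompanying diff fixes this by wrapping the default shelf names in `{% trans %}` tags wherever templates previously printed `shelf.name` directly; the stored names ("To Read", "Currently Reading", "Read") never pass through the translation machinery on their own. An alternative way to express that mapping once, instead of repeating the `if`/`elif` chain in every template, would be a small helper like the sketch below. It assumes a configured Django project, and the helper name and constant are hypothetical, not part of the actual patch.

```python
# Sketch of a display-name helper for BookWyrm's default shelves.
# Assumes a configured Django project with locale files for these strings;
# `shelf_display_name` and DEFAULT_SHELF_NAMES are hypothetical names.
from django.utils.translation import gettext_lazy as _

DEFAULT_SHELF_NAMES = {
    "to-read": _("To Read"),
    "reading": _("Currently Reading"),
    "read": _("Read"),
}


def shelf_display_name(shelf):
    """Translate the built-in shelves, fall back to user-chosen names."""
    return DEFAULT_SHELF_NAMES.get(shelf.identifier, shelf.name)
```

Templates could then call this helper (for example via a template filter) instead of branching on `shelf.identifier` in each place that renders a shelf name.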
edgedb__edgedb-6313
[ { "content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\n\"\"\"IR compiler context.\"\"\"\n\nfrom __future__ import annotations\nfrom typing import *\n\nimport collections\nimport contextlib\nimport dataclasses\nimport enum\nimport uuid\n\nimport immutables as immu\n\nfrom edb.common import compiler\n\nfrom edb.pgsql import ast as pgast\nfrom edb.pgsql import params as pgparams\n\nfrom . import aliases\n\nif TYPE_CHECKING:\n from edb.ir import ast as irast\n\n\nclass ContextSwitchMode(enum.Enum):\n TRANSPARENT = enum.auto()\n SUBREL = enum.auto()\n NEWREL = enum.auto()\n SUBSTMT = enum.auto()\n NEWSCOPE = enum.auto()\n\n\nclass ShapeFormat(enum.Enum):\n SERIALIZED = enum.auto()\n FLAT = enum.auto()\n\n\nclass OutputFormat(enum.Enum):\n #: Result data output in PostgreSQL format.\n NATIVE = enum.auto()\n #: Result data output as a single JSON string.\n JSON = enum.auto()\n #: Result data output as a single PostgreSQL JSONB type value.\n JSONB = enum.auto()\n #: Result data output as a JSON string for each element in returned set.\n JSON_ELEMENTS = enum.auto()\n #: None mode: query result not returned, cardinality of result set\n #: is returned instead.\n NONE = enum.auto()\n #: Like NATIVE, but objects without an explicit shape are serialized\n #: as UUIDs.\n NATIVE_INTERNAL = enum.auto()\n\n\nNO_STMT = pgast.SelectStmt()\n\n\nOverlayEntry = tuple[\n str,\n Union[pgast.BaseRelation, pgast.CommonTableExpr],\n 'irast.PathId',\n]\n\n\[email protected](kw_only=True)\nclass RelOverlays:\n \"\"\"Container for relation overlays.\n\n These track \"overlays\" that can be registered for different types,\n in the context of DML.\n\n Consider the query:\n with X := (\n insert Person {\n name := \"Sully\",\n notes := assert_distinct({\n (insert Note {name := \"1\"}),\n (select Note filter .name = \"2\"),\n }),\n }),\n select X { name, notes: {name} };\n\n When we go to select X, we find the source of that set without any\n trouble (it's the result of the actual insert statement, more or\n less; in any case, it's in a CTE that we then include).\n\n Handling the notes are trickier, though:\n * The links aren't in the link table yet, but only in a CTE.\n (In similar update cases, with things like +=, they might be mixed\n between both.)\n * Some of the actual Note objects aren't in the table yet, just an insert\n CTE. But some *are*, so we need to union them.\n\n We solve these problems using overlays:\n * Whenever we do DML (or reference WITH-bound DML),\n we register overlays describing the changes done\n to *all of the enclosing DML*. 
So here, the Note insert's overlays\n get registered both for the Note insert and for the Person insert.\n * When we try to compile a root set or pointer, we see if it is connected\n to a DML statement, and if so we apply the overlays.\n\n The overlay itself is simply a sequence of operations on relations\n and CTEs that mix in the new data. In the obvious insert cases,\n these consist of unioning the new data in.\n\n This system works decently well but is also a little broken: I\n think that both the \"all of the enclosing DML\" and the \"see if it\n is connected to a DML statement\" have dangers; see Issue #3030.\n\n In relctx, see range_for_material_objtype, range_for_ptrref, and\n range_from_queryset (which those two call) for details on how\n overlays are applied.\n Overlays are added to with relctx.add_type_rel_overlay\n and relctx.add_ptr_rel_overlay.\n\n\n ===== NOTE ON MUTABILITY:\n In typical use, the overlays are mutable: nested DML adds overlays\n that are then consumed by code in enclosing contexts.\n\n In some places, however, we need to temporarily customize the\n overlay environment (during policy and trigger compilation, for\n example).\n\n The original version of overlays were implemented as a dict of\n dicts of lists. Doing temporary customizations required doing\n at least some copying. Doing a full deep copy always felt excessive\n but doing anything short of that left me constantly terrified.\n\n So instead we represent the overlays as a mutable object that\n contains immutable maps. When we add overlays, we update the maps\n and then reassign their values.\n\n When we want to do a temporary adjustment, we can cheaply make a\n fresh RelOverlays object and then modify that without touching the\n original.\n \"\"\"\n\n #: Relations used to \"overlay\" the main table for\n #: the type. Mostly used with DML statements.\n type: immu.Map[\n Optional[irast.MutatingLikeStmt],\n immu.Map[\n uuid.UUID,\n tuple[OverlayEntry, ...],\n ],\n ] = immu.Map()\n\n #: Relations used to \"overlay\" the main table for\n #: the pointer. 
Mostly used with DML statements.\n ptr: immu.Map[\n Optional[irast.MutatingLikeStmt],\n immu.Map[\n Tuple[uuid.UUID, str],\n Tuple[\n Tuple[\n str,\n Union[pgast.BaseRelation, pgast.CommonTableExpr],\n irast.PathId,\n ], ...\n ],\n ],\n ] = immu.Map()\n\n def copy(self) -> RelOverlays:\n return RelOverlays(type=self.type, ptr=self.ptr)\n\n\nclass CompilerContextLevel(compiler.ContextLevel):\n #: static compilation environment\n env: Environment\n\n #: mapping of named args to position\n argmap: Dict[str, pgast.Param]\n\n #: whether compiling in singleton expression mode\n singleton_mode: bool\n\n #: whether compiling a trigger\n trigger_mode: bool\n\n #: the top-level SQL statement\n toplevel_stmt: pgast.Query\n\n #: Record of DML CTEs generated for the corresponding IR DML.\n #: CTEs generated for DML-containing FOR statements are keyed\n #: by their iterator set.\n dml_stmts: Dict[Union[irast.MutatingStmt, irast.Set],\n pgast.CommonTableExpr]\n\n #: SQL statement corresponding to the IR statement\n #: currently being compiled.\n stmt: pgast.SelectStmt\n\n #: Current SQL subquery\n rel: pgast.SelectStmt\n\n #: SQL query hierarchy\n rel_hierarchy: Dict[pgast.Query, pgast.Query]\n\n #: CTEs representing decoded parameters\n param_ctes: Dict[str, pgast.CommonTableExpr]\n\n #: CTEs representing schema types, when rewritten based on access policy\n type_ctes: Dict[FullRewriteKey, pgast.CommonTableExpr]\n\n #: A set of type CTEs currently being generated\n pending_type_ctes: Set[RewriteKey]\n\n #: The logical parent of the current query in the\n #: query hierarchy\n parent_rel: Optional[pgast.Query]\n\n #: Query to become current in the next SUBSTMT switch.\n pending_query: Optional[pgast.SelectStmt]\n\n #: Sets currently being materialized\n materializing: FrozenSet[irast.Stmt]\n\n #: Whether the expression currently being processed is\n #: directly exposed to the output of the statement.\n expr_exposed: Optional[bool]\n\n #: A hack that indicates a tuple element that should be treated as\n #: exposed. This enables us to treat 'bar' in (foo, bar).1 as exposed,\n #: which eta-expansion and some casts rely on.\n expr_exposed_tuple_cheat: Optional[irast.TupleElement]\n\n #: Expression to use to force SQL expression volatility in this context\n #: (Delayed with a lambda to avoid inserting it when not used.)\n volatility_ref: Tuple[\n Callable[[pgast.SelectStmt, CompilerContextLevel],\n Optional[pgast.BaseExpr]], ...]\n\n # Current path_id we are INSERTing, so that we can avoid creating\n # a bogus volatility ref to it...\n current_insert_path_id: Optional[irast.PathId]\n\n #: Paths, for which semi-join is banned in this context.\n disable_semi_join: FrozenSet[irast.PathId]\n\n #: Paths, which need to be explicitly wrapped into SQL\n #: optionality scaffolding.\n force_optional: FrozenSet[irast.PathId]\n\n #: Paths that can be ignored when they appear as the source of a\n # computable. This is key to optimizing away free object sources in\n # group by aggregates.\n skippable_sources: FrozenSet[irast.PathId]\n\n #: Specifies that references to a specific Set must be narrowed\n #: by only selecting instances of type specified by the mapping value.\n intersection_narrowing: Dict[irast.Set, irast.Set]\n\n #: Which SQL query holds the SQL scope for the given PathId\n path_scope: ChainMap[irast.PathId, Optional[pgast.SelectStmt]]\n\n #: Relevant IR scope for this context.\n scope_tree: irast.ScopeTreeNode\n\n #: A stack of dml statements currently being compiled. 
Used for\n #: figuring out what to record in type_rel_overlays.\n dml_stmt_stack: List[irast.MutatingLikeStmt]\n\n #: Relations used to \"overlay\" the main table for\n #: the type. Mostly used with DML statements.\n rel_overlays: RelOverlays\n\n #: Mapping from path ids to \"external\" rels given by a particular relation\n external_rels: Mapping[\n irast.PathId,\n Tuple[pgast.BaseRelation | pgast.CommonTableExpr, Tuple[str, ...]]\n ]\n\n #: The CTE and some metadata of any enclosing iterator-like\n #: construct (which includes iterators, insert/update, and INSERT\n #: ELSE select clauses) currently being compiled.\n enclosing_cte_iterator: Optional[pgast.IteratorCTE]\n\n #: Sets to force shape compilation on, because the values are\n #: needed by DML.\n shapes_needed_by_dml: Set[irast.Set]\n\n def __init__(\n self,\n prevlevel: Optional[CompilerContextLevel],\n mode: ContextSwitchMode,\n *,\n env: Optional[Environment] = None,\n scope_tree: Optional[irast.ScopeTreeNode] = None,\n ) -> None:\n if prevlevel is None:\n assert env is not None\n assert scope_tree is not None\n\n self.env = env\n self.argmap = collections.OrderedDict()\n\n self.singleton_mode = False\n\n self.toplevel_stmt = NO_STMT\n self.stmt = NO_STMT\n self.rel = NO_STMT\n self.rel_hierarchy = {}\n self.param_ctes = {}\n self.type_ctes = {}\n self.pending_type_ctes = set()\n self.dml_stmts = {}\n self.parent_rel = None\n self.pending_query = None\n self.materializing = frozenset()\n\n self.expr_exposed = None\n self.expr_exposed_tuple_cheat = None\n self.volatility_ref = ()\n self.current_insert_path_id = None\n\n self.disable_semi_join = frozenset()\n self.force_optional = frozenset()\n self.skippable_sources = frozenset()\n self.intersection_narrowing = {}\n\n self.path_scope = collections.ChainMap()\n self.scope_tree = scope_tree\n self.dml_stmt_stack = []\n self.rel_overlays = RelOverlays()\n\n self.external_rels = {}\n self.enclosing_cte_iterator = None\n self.shapes_needed_by_dml = set()\n\n self.trigger_mode = False\n\n else:\n self.env = prevlevel.env\n self.argmap = prevlevel.argmap\n\n self.singleton_mode = prevlevel.singleton_mode\n\n self.toplevel_stmt = prevlevel.toplevel_stmt\n self.stmt = prevlevel.stmt\n self.rel = prevlevel.rel\n self.rel_hierarchy = prevlevel.rel_hierarchy\n self.param_ctes = prevlevel.param_ctes\n self.type_ctes = prevlevel.type_ctes\n self.pending_type_ctes = prevlevel.pending_type_ctes\n self.dml_stmts = prevlevel.dml_stmts\n self.parent_rel = prevlevel.parent_rel\n self.pending_query = prevlevel.pending_query\n self.materializing = prevlevel.materializing\n\n self.expr_exposed = prevlevel.expr_exposed\n self.expr_exposed_tuple_cheat = prevlevel.expr_exposed_tuple_cheat\n self.volatility_ref = prevlevel.volatility_ref\n self.current_insert_path_id = prevlevel.current_insert_path_id\n\n self.disable_semi_join = prevlevel.disable_semi_join\n self.force_optional = prevlevel.force_optional\n self.skippable_sources = prevlevel.skippable_sources\n self.intersection_narrowing = prevlevel.intersection_narrowing\n\n self.path_scope = prevlevel.path_scope\n self.scope_tree = prevlevel.scope_tree\n self.dml_stmt_stack = prevlevel.dml_stmt_stack\n self.rel_overlays = prevlevel.rel_overlays\n self.enclosing_cte_iterator = prevlevel.enclosing_cte_iterator\n self.shapes_needed_by_dml = prevlevel.shapes_needed_by_dml\n self.external_rels = prevlevel.external_rels\n\n self.trigger_mode = prevlevel.trigger_mode\n\n if mode is ContextSwitchMode.SUBSTMT:\n if self.pending_query is not None:\n self.rel = 
self.pending_query\n else:\n self.rel = pgast.SelectStmt()\n if prevlevel.parent_rel is not None:\n parent_rel = prevlevel.parent_rel\n else:\n parent_rel = prevlevel.rel\n self.rel_hierarchy[self.rel] = parent_rel\n\n self.stmt = self.rel\n self.pending_query = None\n self.parent_rel = None\n\n elif mode is ContextSwitchMode.SUBREL:\n self.rel = pgast.SelectStmt()\n if prevlevel.parent_rel is not None:\n parent_rel = prevlevel.parent_rel\n else:\n parent_rel = prevlevel.rel\n self.rel_hierarchy[self.rel] = parent_rel\n self.pending_query = None\n self.parent_rel = None\n\n elif mode is ContextSwitchMode.NEWREL:\n self.rel = pgast.SelectStmt()\n self.pending_query = None\n self.parent_rel = None\n self.path_scope = collections.ChainMap()\n self.rel_hierarchy = {}\n self.scope_tree = prevlevel.scope_tree.root\n\n self.disable_semi_join = frozenset()\n self.force_optional = frozenset()\n self.intersection_narrowing = {}\n self.pending_type_ctes = set(prevlevel.pending_type_ctes)\n\n elif mode == ContextSwitchMode.NEWSCOPE:\n self.path_scope = prevlevel.path_scope.new_child()\n\n def subrel(\n self,\n ) -> compiler.CompilerContextManager[CompilerContextLevel]:\n return self.new(ContextSwitchMode.SUBREL)\n\n def newrel(\n self,\n ) -> compiler.CompilerContextManager[CompilerContextLevel]:\n return self.new(ContextSwitchMode.NEWREL)\n\n def substmt(\n self,\n ) -> compiler.CompilerContextManager[CompilerContextLevel]:\n return self.new(ContextSwitchMode.SUBSTMT)\n\n def newscope(\n self,\n ) -> compiler.CompilerContextManager[CompilerContextLevel]:\n return self.new(ContextSwitchMode.NEWSCOPE)\n\n def up_hierarchy(\n self,\n n: int, q: Optional[pgast.Query]=None\n ) -> Optional[pgast.Query]:\n # mostly intended as a debugging helper\n q = q or self.rel\n for _ in range(n):\n if q:\n q = self.rel_hierarchy.get(q)\n return q\n\n\nclass CompilerContext(compiler.CompilerContext[CompilerContextLevel]):\n ContextLevelClass = CompilerContextLevel\n default_mode = ContextSwitchMode.TRANSPARENT\n\n\nRewriteKey = Tuple[uuid.UUID, bool]\nFullRewriteKey = Tuple[\n uuid.UUID, bool, Optional[frozenset['irast.MutatingLikeStmt']]]\n\n\nclass Environment:\n \"\"\"Static compilation environment.\"\"\"\n\n aliases: aliases.AliasGenerator\n output_format: Optional[OutputFormat]\n named_param_prefix: Optional[tuple[str, ...]]\n ptrref_source_visibility: Dict[irast.BasePointerRef, bool]\n expected_cardinality_one: bool\n ignore_object_shapes: bool\n explicit_top_cast: Optional[irast.TypeRef]\n singleton_mode: bool\n query_params: List[irast.Param]\n type_rewrites: Dict[RewriteKey, irast.Set]\n scope_tree_nodes: Dict[int, irast.ScopeTreeNode]\n external_rvars: Mapping[Tuple[irast.PathId, str], pgast.PathRangeVar]\n materialized_views: Dict[uuid.UUID, irast.Set]\n backend_runtime_params: pgparams.BackendRuntimeParams\n\n #: A list of CTEs that implement constraint validation at the\n #: query level.\n check_ctes: List[pgast.CommonTableExpr]\n\n def __init__(\n self,\n *,\n output_format: Optional[OutputFormat],\n named_param_prefix: Optional[tuple[str, ...]],\n expected_cardinality_one: bool,\n ignore_object_shapes: bool,\n singleton_mode: bool,\n expand_inhviews: bool,\n explicit_top_cast: Optional[irast.TypeRef],\n query_params: List[irast.Param],\n type_rewrites: Dict[RewriteKey, irast.Set],\n scope_tree_nodes: Dict[int, irast.ScopeTreeNode],\n external_rvars: Optional[\n Mapping[Tuple[irast.PathId, str], pgast.PathRangeVar]\n ] = None,\n backend_runtime_params: pgparams.BackendRuntimeParams,\n ) -> None:\n 
self.aliases = aliases.AliasGenerator()\n self.output_format = output_format\n self.named_param_prefix = named_param_prefix\n self.ptrref_source_visibility = {}\n self.expected_cardinality_one = expected_cardinality_one\n self.ignore_object_shapes = ignore_object_shapes\n self.singleton_mode = singleton_mode\n self.expand_inhviews = expand_inhviews\n self.explicit_top_cast = explicit_top_cast\n self.query_params = query_params\n self.type_rewrites = type_rewrites\n self.scope_tree_nodes = scope_tree_nodes\n self.external_rvars = external_rvars or {}\n self.materialized_views = {}\n self.check_ctes = []\n self.backend_runtime_params = backend_runtime_params\n\n\n# XXX: this context hack is necessary until pathctx is converted\n# to use context levels instead of using env directly.\[email protected]\ndef output_format(\n ctx: CompilerContextLevel,\n output_format: OutputFormat,\n) -> Generator[None, None, None]:\n original_output_format = ctx.env.output_format\n original_ignore_object_shapes = ctx.env.ignore_object_shapes\n ctx.env.output_format = output_format\n ctx.env.ignore_object_shapes = False\n try:\n yield\n finally:\n ctx.env.output_format = original_output_format\n ctx.env.ignore_object_shapes = original_ignore_object_shapes\n", "path": "edb/pgsql/compiler/context.py" } ]
[ { "content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\n\"\"\"IR compiler context.\"\"\"\n\nfrom __future__ import annotations\nfrom typing import *\n\nimport collections\nimport contextlib\nimport dataclasses\nimport enum\nimport uuid\n\nimport immutables as immu\n\nfrom edb.common import compiler\n\nfrom edb.pgsql import ast as pgast\nfrom edb.pgsql import params as pgparams\n\nfrom . import aliases\n\nif TYPE_CHECKING:\n from edb.ir import ast as irast\n\n\nclass ContextSwitchMode(enum.Enum):\n TRANSPARENT = enum.auto()\n SUBREL = enum.auto()\n NEWREL = enum.auto()\n SUBSTMT = enum.auto()\n NEWSCOPE = enum.auto()\n\n\nclass ShapeFormat(enum.Enum):\n SERIALIZED = enum.auto()\n FLAT = enum.auto()\n\n\nclass OutputFormat(enum.Enum):\n #: Result data output in PostgreSQL format.\n NATIVE = enum.auto()\n #: Result data output as a single JSON string.\n JSON = enum.auto()\n #: Result data output as a single PostgreSQL JSONB type value.\n JSONB = enum.auto()\n #: Result data output as a JSON string for each element in returned set.\n JSON_ELEMENTS = enum.auto()\n #: None mode: query result not returned, cardinality of result set\n #: is returned instead.\n NONE = enum.auto()\n #: Like NATIVE, but objects without an explicit shape are serialized\n #: as UUIDs.\n NATIVE_INTERNAL = enum.auto()\n\n\nNO_STMT = pgast.SelectStmt()\n\n\nOverlayEntry = tuple[\n str,\n Union[pgast.BaseRelation, pgast.CommonTableExpr],\n 'irast.PathId',\n]\n\n\[email protected](kw_only=True)\nclass RelOverlays:\n \"\"\"Container for relation overlays.\n\n These track \"overlays\" that can be registered for different types,\n in the context of DML.\n\n Consider the query:\n with X := (\n insert Person {\n name := \"Sully\",\n notes := assert_distinct({\n (insert Note {name := \"1\"}),\n (select Note filter .name = \"2\"),\n }),\n }),\n select X { name, notes: {name} };\n\n When we go to select X, we find the source of that set without any\n trouble (it's the result of the actual insert statement, more or\n less; in any case, it's in a CTE that we then include).\n\n Handling the notes are trickier, though:\n * The links aren't in the link table yet, but only in a CTE.\n (In similar update cases, with things like +=, they might be mixed\n between both.)\n * Some of the actual Note objects aren't in the table yet, just an insert\n CTE. But some *are*, so we need to union them.\n\n We solve these problems using overlays:\n * Whenever we do DML (or reference WITH-bound DML),\n we register overlays describing the changes done\n to *all of the enclosing DML*. 
So here, the Note insert's overlays\n get registered both for the Note insert and for the Person insert.\n * When we try to compile a root set or pointer, we see if it is connected\n to a DML statement, and if so we apply the overlays.\n\n The overlay itself is simply a sequence of operations on relations\n and CTEs that mix in the new data. In the obvious insert cases,\n these consist of unioning the new data in.\n\n This system works decently well but is also a little broken: I\n think that both the \"all of the enclosing DML\" and the \"see if it\n is connected to a DML statement\" have dangers; see Issue #3030.\n\n In relctx, see range_for_material_objtype, range_for_ptrref, and\n range_from_queryset (which those two call) for details on how\n overlays are applied.\n Overlays are added to with relctx.add_type_rel_overlay\n and relctx.add_ptr_rel_overlay.\n\n\n ===== NOTE ON MUTABILITY:\n In typical use, the overlays are mutable: nested DML adds overlays\n that are then consumed by code in enclosing contexts.\n\n In some places, however, we need to temporarily customize the\n overlay environment (during policy and trigger compilation, for\n example).\n\n The original version of overlays were implemented as a dict of\n dicts of lists. Doing temporary customizations required doing\n at least some copying. Doing a full deep copy always felt excessive\n but doing anything short of that left me constantly terrified.\n\n So instead we represent the overlays as a mutable object that\n contains immutable maps. When we add overlays, we update the maps\n and then reassign their values.\n\n When we want to do a temporary adjustment, we can cheaply make a\n fresh RelOverlays object and then modify that without touching the\n original.\n \"\"\"\n\n #: Relations used to \"overlay\" the main table for\n #: the type. Mostly used with DML statements.\n type: immu.Map[\n Optional[irast.MutatingLikeStmt],\n immu.Map[\n uuid.UUID,\n tuple[OverlayEntry, ...],\n ],\n ] = immu.Map()\n\n #: Relations used to \"overlay\" the main table for\n #: the pointer. 
Mostly used with DML statements.\n ptr: immu.Map[\n Optional[irast.MutatingLikeStmt],\n immu.Map[\n Tuple[uuid.UUID, str],\n Tuple[\n Tuple[\n str,\n Union[pgast.BaseRelation, pgast.CommonTableExpr],\n irast.PathId,\n ], ...\n ],\n ],\n ] = immu.Map()\n\n def copy(self) -> RelOverlays:\n return RelOverlays(type=self.type, ptr=self.ptr)\n\n\nclass CompilerContextLevel(compiler.ContextLevel):\n #: static compilation environment\n env: Environment\n\n #: mapping of named args to position\n argmap: Dict[str, pgast.Param]\n\n #: whether compiling in singleton expression mode\n singleton_mode: bool\n\n #: whether compiling a trigger\n trigger_mode: bool\n\n #: the top-level SQL statement\n toplevel_stmt: pgast.Query\n\n #: Record of DML CTEs generated for the corresponding IR DML.\n #: CTEs generated for DML-containing FOR statements are keyed\n #: by their iterator set.\n dml_stmts: Dict[Union[irast.MutatingStmt, irast.Set],\n pgast.CommonTableExpr]\n\n #: SQL statement corresponding to the IR statement\n #: currently being compiled.\n stmt: pgast.SelectStmt\n\n #: Current SQL subquery\n rel: pgast.SelectStmt\n\n #: SQL query hierarchy\n rel_hierarchy: Dict[pgast.Query, pgast.Query]\n\n #: CTEs representing decoded parameters\n param_ctes: Dict[str, pgast.CommonTableExpr]\n\n #: CTEs representing schema types, when rewritten based on access policy\n type_ctes: Dict[FullRewriteKey, pgast.CommonTableExpr]\n\n #: A set of type CTEs currently being generated\n pending_type_ctes: Set[RewriteKey]\n\n #: The logical parent of the current query in the\n #: query hierarchy\n parent_rel: Optional[pgast.Query]\n\n #: Query to become current in the next SUBSTMT switch.\n pending_query: Optional[pgast.SelectStmt]\n\n #: Sets currently being materialized\n materializing: FrozenSet[irast.Stmt]\n\n #: Whether the expression currently being processed is\n #: directly exposed to the output of the statement.\n expr_exposed: Optional[bool]\n\n #: A hack that indicates a tuple element that should be treated as\n #: exposed. This enables us to treat 'bar' in (foo, bar).1 as exposed,\n #: which eta-expansion and some casts rely on.\n expr_exposed_tuple_cheat: Optional[irast.TupleElement]\n\n #: Expression to use to force SQL expression volatility in this context\n #: (Delayed with a lambda to avoid inserting it when not used.)\n volatility_ref: Tuple[\n Callable[[pgast.SelectStmt, CompilerContextLevel],\n Optional[pgast.BaseExpr]], ...]\n\n # Current path_id we are INSERTing, so that we can avoid creating\n # a bogus volatility ref to it...\n current_insert_path_id: Optional[irast.PathId]\n\n #: Paths, for which semi-join is banned in this context.\n disable_semi_join: FrozenSet[irast.PathId]\n\n #: Paths, which need to be explicitly wrapped into SQL\n #: optionality scaffolding.\n force_optional: FrozenSet[irast.PathId]\n\n #: Paths that can be ignored when they appear as the source of a\n # computable. This is key to optimizing away free object sources in\n # group by aggregates.\n skippable_sources: FrozenSet[irast.PathId]\n\n #: Specifies that references to a specific Set must be narrowed\n #: by only selecting instances of type specified by the mapping value.\n intersection_narrowing: Dict[irast.Set, irast.Set]\n\n #: Which SQL query holds the SQL scope for the given PathId\n path_scope: ChainMap[irast.PathId, Optional[pgast.SelectStmt]]\n\n #: Relevant IR scope for this context.\n scope_tree: irast.ScopeTreeNode\n\n #: A stack of dml statements currently being compiled. 
Used for\n #: figuring out what to record in type_rel_overlays.\n dml_stmt_stack: List[irast.MutatingLikeStmt]\n\n #: Relations used to \"overlay\" the main table for\n #: the type. Mostly used with DML statements.\n rel_overlays: RelOverlays\n\n #: Mapping from path ids to \"external\" rels given by a particular relation\n external_rels: Mapping[\n irast.PathId,\n Tuple[pgast.BaseRelation | pgast.CommonTableExpr, Tuple[str, ...]]\n ]\n\n #: The CTE and some metadata of any enclosing iterator-like\n #: construct (which includes iterators, insert/update, and INSERT\n #: ELSE select clauses) currently being compiled.\n enclosing_cte_iterator: Optional[pgast.IteratorCTE]\n\n #: Sets to force shape compilation on, because the values are\n #: needed by DML.\n shapes_needed_by_dml: Set[irast.Set]\n\n def __init__(\n self,\n prevlevel: Optional[CompilerContextLevel],\n mode: ContextSwitchMode,\n *,\n env: Optional[Environment] = None,\n scope_tree: Optional[irast.ScopeTreeNode] = None,\n ) -> None:\n if prevlevel is None:\n assert env is not None\n assert scope_tree is not None\n\n self.env = env\n self.argmap = collections.OrderedDict()\n\n self.singleton_mode = False\n\n self.toplevel_stmt = NO_STMT\n self.stmt = NO_STMT\n self.rel = NO_STMT\n self.rel_hierarchy = {}\n self.param_ctes = {}\n self.type_ctes = {}\n self.pending_type_ctes = set()\n self.dml_stmts = {}\n self.parent_rel = None\n self.pending_query = None\n self.materializing = frozenset()\n\n self.expr_exposed = None\n self.expr_exposed_tuple_cheat = None\n self.volatility_ref = ()\n self.current_insert_path_id = None\n\n self.disable_semi_join = frozenset()\n self.force_optional = frozenset()\n self.skippable_sources = frozenset()\n self.intersection_narrowing = {}\n\n self.path_scope = collections.ChainMap()\n self.scope_tree = scope_tree\n self.dml_stmt_stack = []\n self.rel_overlays = RelOverlays()\n\n self.external_rels = {}\n self.enclosing_cte_iterator = None\n self.shapes_needed_by_dml = set()\n\n self.trigger_mode = False\n\n else:\n self.env = prevlevel.env\n self.argmap = prevlevel.argmap\n\n self.singleton_mode = prevlevel.singleton_mode\n\n self.toplevel_stmt = prevlevel.toplevel_stmt\n self.stmt = prevlevel.stmt\n self.rel = prevlevel.rel\n self.rel_hierarchy = prevlevel.rel_hierarchy\n self.param_ctes = prevlevel.param_ctes\n self.type_ctes = prevlevel.type_ctes\n self.pending_type_ctes = prevlevel.pending_type_ctes\n self.dml_stmts = prevlevel.dml_stmts\n self.parent_rel = prevlevel.parent_rel\n self.pending_query = prevlevel.pending_query\n self.materializing = prevlevel.materializing\n\n self.expr_exposed = prevlevel.expr_exposed\n self.expr_exposed_tuple_cheat = prevlevel.expr_exposed_tuple_cheat\n self.volatility_ref = prevlevel.volatility_ref\n self.current_insert_path_id = prevlevel.current_insert_path_id\n\n self.disable_semi_join = prevlevel.disable_semi_join\n self.force_optional = prevlevel.force_optional\n self.skippable_sources = prevlevel.skippable_sources\n self.intersection_narrowing = prevlevel.intersection_narrowing\n\n self.path_scope = prevlevel.path_scope\n self.scope_tree = prevlevel.scope_tree\n self.dml_stmt_stack = prevlevel.dml_stmt_stack\n self.rel_overlays = prevlevel.rel_overlays\n self.enclosing_cte_iterator = prevlevel.enclosing_cte_iterator\n self.shapes_needed_by_dml = prevlevel.shapes_needed_by_dml\n self.external_rels = prevlevel.external_rels\n\n self.trigger_mode = prevlevel.trigger_mode\n\n if mode is ContextSwitchMode.SUBSTMT:\n if self.pending_query is not None:\n self.rel = 
self.pending_query\n else:\n self.rel = pgast.SelectStmt()\n if prevlevel.parent_rel is not None:\n parent_rel = prevlevel.parent_rel\n else:\n parent_rel = prevlevel.rel\n self.rel_hierarchy[self.rel] = parent_rel\n\n self.stmt = self.rel\n self.pending_query = None\n self.parent_rel = None\n\n elif mode is ContextSwitchMode.SUBREL:\n self.rel = pgast.SelectStmt()\n if prevlevel.parent_rel is not None:\n parent_rel = prevlevel.parent_rel\n else:\n parent_rel = prevlevel.rel\n self.rel_hierarchy[self.rel] = parent_rel\n self.pending_query = None\n self.parent_rel = None\n\n elif mode is ContextSwitchMode.NEWREL:\n self.rel = pgast.SelectStmt()\n self.pending_query = None\n self.parent_rel = None\n self.path_scope = collections.ChainMap()\n self.rel_hierarchy = {}\n self.scope_tree = prevlevel.scope_tree.root\n self.volatility_ref = ()\n\n self.disable_semi_join = frozenset()\n self.force_optional = frozenset()\n self.intersection_narrowing = {}\n self.pending_type_ctes = set(prevlevel.pending_type_ctes)\n\n elif mode == ContextSwitchMode.NEWSCOPE:\n self.path_scope = prevlevel.path_scope.new_child()\n\n def subrel(\n self,\n ) -> compiler.CompilerContextManager[CompilerContextLevel]:\n return self.new(ContextSwitchMode.SUBREL)\n\n def newrel(\n self,\n ) -> compiler.CompilerContextManager[CompilerContextLevel]:\n return self.new(ContextSwitchMode.NEWREL)\n\n def substmt(\n self,\n ) -> compiler.CompilerContextManager[CompilerContextLevel]:\n return self.new(ContextSwitchMode.SUBSTMT)\n\n def newscope(\n self,\n ) -> compiler.CompilerContextManager[CompilerContextLevel]:\n return self.new(ContextSwitchMode.NEWSCOPE)\n\n def up_hierarchy(\n self,\n n: int, q: Optional[pgast.Query]=None\n ) -> Optional[pgast.Query]:\n # mostly intended as a debugging helper\n q = q or self.rel\n for _ in range(n):\n if q:\n q = self.rel_hierarchy.get(q)\n return q\n\n\nclass CompilerContext(compiler.CompilerContext[CompilerContextLevel]):\n ContextLevelClass = CompilerContextLevel\n default_mode = ContextSwitchMode.TRANSPARENT\n\n\nRewriteKey = Tuple[uuid.UUID, bool]\nFullRewriteKey = Tuple[\n uuid.UUID, bool, Optional[frozenset['irast.MutatingLikeStmt']]]\n\n\nclass Environment:\n \"\"\"Static compilation environment.\"\"\"\n\n aliases: aliases.AliasGenerator\n output_format: Optional[OutputFormat]\n named_param_prefix: Optional[tuple[str, ...]]\n ptrref_source_visibility: Dict[irast.BasePointerRef, bool]\n expected_cardinality_one: bool\n ignore_object_shapes: bool\n explicit_top_cast: Optional[irast.TypeRef]\n singleton_mode: bool\n query_params: List[irast.Param]\n type_rewrites: Dict[RewriteKey, irast.Set]\n scope_tree_nodes: Dict[int, irast.ScopeTreeNode]\n external_rvars: Mapping[Tuple[irast.PathId, str], pgast.PathRangeVar]\n materialized_views: Dict[uuid.UUID, irast.Set]\n backend_runtime_params: pgparams.BackendRuntimeParams\n\n #: A list of CTEs that implement constraint validation at the\n #: query level.\n check_ctes: List[pgast.CommonTableExpr]\n\n def __init__(\n self,\n *,\n output_format: Optional[OutputFormat],\n named_param_prefix: Optional[tuple[str, ...]],\n expected_cardinality_one: bool,\n ignore_object_shapes: bool,\n singleton_mode: bool,\n expand_inhviews: bool,\n explicit_top_cast: Optional[irast.TypeRef],\n query_params: List[irast.Param],\n type_rewrites: Dict[RewriteKey, irast.Set],\n scope_tree_nodes: Dict[int, irast.ScopeTreeNode],\n external_rvars: Optional[\n Mapping[Tuple[irast.PathId, str], pgast.PathRangeVar]\n ] = None,\n backend_runtime_params: 
pgparams.BackendRuntimeParams,\n ) -> None:\n self.aliases = aliases.AliasGenerator()\n self.output_format = output_format\n self.named_param_prefix = named_param_prefix\n self.ptrref_source_visibility = {}\n self.expected_cardinality_one = expected_cardinality_one\n self.ignore_object_shapes = ignore_object_shapes\n self.singleton_mode = singleton_mode\n self.expand_inhviews = expand_inhviews\n self.explicit_top_cast = explicit_top_cast\n self.query_params = query_params\n self.type_rewrites = type_rewrites\n self.scope_tree_nodes = scope_tree_nodes\n self.external_rvars = external_rvars or {}\n self.materialized_views = {}\n self.check_ctes = []\n self.backend_runtime_params = backend_runtime_params\n\n\n# XXX: this context hack is necessary until pathctx is converted\n# to use context levels instead of using env directly.\[email protected]\ndef output_format(\n ctx: CompilerContextLevel,\n output_format: OutputFormat,\n) -> Generator[None, None, None]:\n original_output_format = ctx.env.output_format\n original_ignore_object_shapes = ctx.env.ignore_object_shapes\n ctx.env.output_format = output_format\n ctx.env.ignore_object_shapes = False\n try:\n yield\n finally:\n ctx.env.output_format = original_output_format\n ctx.env.ignore_object_shapes = original_ignore_object_shapes\n", "path": "edb/pgsql/compiler/context.py" } ]
diff --git a/edb/pgsql/compiler/context.py b/edb/pgsql/compiler/context.py index 13319ab80ee..39ba9294d3b 100644 --- a/edb/pgsql/compiler/context.py +++ b/edb/pgsql/compiler/context.py @@ -420,6 +420,7 @@ def __init__( self.path_scope = collections.ChainMap() self.rel_hierarchy = {} self.scope_tree = prevlevel.scope_tree.root + self.volatility_ref = () self.disable_semi_join = frozenset() self.force_optional = frozenset() diff --git a/tests/test_edgeql_insert.py b/tests/test_edgeql_insert.py index 8652cd70c45..ac9afd95a45 100644 --- a/tests/test_edgeql_insert.py +++ b/tests/test_edgeql_insert.py @@ -1218,6 +1218,57 @@ async def test_edgeql_insert_conflict_policy_02(self): "violates exclusivity constraint"): await self.con.execute(Q) + async def test_edgeql_insert_policy_cast(self): + # Test for #6305, where a cast in a global used in an access policy + # was causing a stray volatility ref to show up in the wrong CTE + await self.con.execute(''' + create global sub_id -> uuid; + create global sub := <Subordinate>(global sub_id); + alter type Note { + create access policy asdf allow all using ( + (.subject in global sub) ?? false + ) + }; + ''') + + sub = await self.con.query_single(''' + insert Subordinate { name := "asdf" }; + ''') + + async with self.assertRaisesRegexTx( + edgedb.AccessPolicyError, + "violation on insert of default::Note"): + await self.con.execute(''' + insert Person { notes := (insert Note { name := "" }) }; + ''') + + async with self.assertRaisesRegexTx( + edgedb.AccessPolicyError, + "violation on insert of default::Note"): + await self.con.execute(''' + insert Person { + notes := (insert Note { + name := "", + subject := assert_single( + (select Subordinate filter .name = 'asdf')) + }) + }; + ''') + + await self.con.execute(''' + set global sub_id := <uuid>$0 + ''', sub.id) + + await self.con.execute(''' + insert Person { + notes := (insert Note { + name := "", + subject := assert_single( + (select Subordinate filter .name = 'asdf')) + }) + }; + ''') + async def test_edgeql_insert_for_01(self): await self.con.execute(r''' FOR x IN {3, 5, 7, 2}
InternalServerError: missing FROM-clause entry for table "ins_contents~3" when access policies are applied <!-- Please search existing issues to avoid creating duplicates. --> The `InternalServerError: missing FROM-clause entry for table "ins_contents~3"` error sometimes occurs in deeply nested queries *only when access policies are applied*. <!-- For the EdgeDB Version: run `edgedb query 'select sys::get_version_as_str()'` from your project directory (or run `select sys::get_version_as_str();` in the EdgeDB interactive shell). For the EdgeDB CLI Version: Run `edgedb --version` from anywhere --> - EdgeDB Version: `3.4+301ba34` - EdgeDB CLI Version: `3.5.0+907ff37` - OS Version: `Ubuntu 23.04` Please note that the query and schema have been simplified. ## 1. Setup Set `global current_accounts_array` to contain `bffeb170-6d5d-11ee-bcd6-ad19361485ad` ```eql insert Account { id := <uuid>"bffeb170-6d5d-11ee-bcd6-ad19361485ad", address := "0x9D2B268D6e51074c466b823Fd3A3b794cA750CB0" } ``` ## 2. Observe problem ### 2.1 Enable access policies and run problematic query Expect `InternalServerError: missing FROM-clause entry for table "ins_contents~3"` Please note that these queries are generated by the JS client, but the error occurs in the UI as well ```eql INSERT default::Policy { account := ( WITH __scope_0_defaultAccount := DETACHED default::Account SELECT __scope_0_defaultAccount { id } FILTER (__scope_0_defaultAccount.address = "0x9D2B268D6e51074c466b823Fd3A3b794cA750CB0") ), key := <default::uint16>1, stateHistory := ( INSERT default::PolicyState { proposal := ( INSERT default::TransactionProposal { hash := "0x8ab322da38c5c044c903b2de5bba9d8f93fee416a7fd5047bd516ae1a9aa5eee", account := ( WITH __scope_1_defaultAccount := DETACHED default::Account SELECT __scope_1_defaultAccount { id } FILTER (__scope_1_defaultAccount.address = "0x9D2B268D6e51074c466b823Fd3A3b794cA750CB0") ) } ), } ) } ``` ### 2.2 Disable access policies and re-run problematic query Expect query to succeed ## 3. Workaround using WITH Moving the `INSERT default::TransactionProposal` into a `with` block fixes the issue. 
### 3.1 Enable access policies and run query Expect query to succeed ```eql WITH nestedProposal := ( INSERT default::TransactionProposal { hash := "0x8ab322da38c5c044c903b2de5bba9d8f93fee416a7fd5047bd516ae1a9aa5ee5", account := ( WITH __scope_1_defaultAccount := DETACHED default::Account SELECT __scope_1_defaultAccount { id } FILTER (__scope_1_defaultAccount.address = "0x9D2B268D6e51074c466b823Fd3A3b794cA750CB0") ), nonce := <default::uint64>5n } ) INSERT default::Policy { account := ( WITH __scope_0_defaultAccount := DETACHED default::Account SELECT __scope_0_defaultAccount { id } FILTER (__scope_0_defaultAccount.address = "0x9D2B268D6e51074c466b823Fd3A3b794cA750CB0") ), key := <default::uint16>5, stateHistory := ( INSERT default::PolicyState { proposal := nestedProposal, } ) } ``` <!-- If the issue is about a query error, please also provide your schema --> --- ## Schema ``` module default { global current_accounts_array: array<uuid>; global current_accounts_set := array_unpack(global current_accounts_array); global current_accounts := <Account>(global current_accounts_set); type Account { required address: str { constraint exclusive; } access policy members_select_insert_update allow select, insert, update using (.id in global current_accounts_set); } type Policy { required account: Account; required key: int32; required multi stateHistory: PolicyState { constraint exclusive; on source delete delete target; on target delete allow; } constraint exclusive on ((.account, .key)); access policy members_select_insert_update allow select, insert, update using (.account in global current_accounts); access policy can_be_deleted_when_inactive allow delete using (not .isActive); } type PolicyState { link policy := .<stateHistory[is Policy]; proposal: TransactionProposal { on source delete delete target; on target delete delete source; } } type TransactionProposal { required hash: str { constraint exclusive; } required account: Account; required nonce: int32; constraint exclusive on ((.account, .nonce)); index on (.nonce); access policy members_only allow all using (.account in global current_accounts); } } ```
ivy-llc__ivy-23796
[ { "content": "# global\nimport ivy\nimport ivy.functional.frontends.tensorflow as tf_frontend\nfrom ivy.functional.frontends.tensorflow import check_tensorflow_casting\nfrom ivy.functional.frontends.tensorflow.func_wrapper import (\n to_ivy_arrays_and_back,\n map_raw_ops_alias,\n to_ivy_dtype,\n)\n\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.utils.exceptions import IvyNotImplementedException\n\n\nAcos = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.acos))\nAcosh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.acosh))\nAddN = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add_n))\nAddV2 = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add))\nArgMax = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\"2.13.0 and below\": (\"complex\",)},\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.math.argmax, kwargs_to_update={\"dimension\": \"axis\"}\n )\n )\n)\nArgMin = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\"2.13.0 and below\": (\"complex\",)},\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.math.argmin, kwargs_to_update={\"dimension\": \"axis\"}\n )\n )\n)\nAsin = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.asin))\nAtan = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.atan))\nAtan2 = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\"2.13.0 and below\": \"float16\"},\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.atan2))\n)\nConcatV2 = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.concat))\nCos = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cos))\nCosh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cosh))\nCumprod = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cumprod))\nCumsum = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cumsum))\nDigamma = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.digamma))\nDiv = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.divide))\nEinsum = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\n \"bfloat16\",\n \"complex128 \",\n \"complex64\",\n \"float64\",\n \"float32\",\n \"float16\",\n \"int64\",\n \"int32\",\n ),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.general_functions.einsum))\n)\nIdentity = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.identity)\n)\nIdentityN = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.identity_n)\n)\nIgamma = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\n \"float64\",\n \"float32\",\n \"half\",\n ),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.igamma))\n)\nLeakyRelu = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\"bfloat16\", \"float16\", \"float32\", \"float64\"),\n },\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.nn.leaky_relu,\n )\n )\n)\nLessEqual = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\",),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.less_equal))\n)\nLog1p = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.log1p))\nLogSoftmax = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\n \"bfloat16\",\n \"float32\",\n \"float64\",\n ),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.log_softmax))\n)\nLogicalOr = 
to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.logical_or))\nMatrixDeterminant = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.linalg.det))\nMax = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\",),\n },\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.math.reduce_max,\n kwargs_to_update={\n \"input\": \"input_tensor\",\n \"keep_dims\": \"keepdims\",\n },\n )\n )\n)\nMaximum = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\",),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.maximum))\n)\nMean = to_ivy_arrays_and_back(\n map_raw_ops_alias(\n tf_frontend.math.reduce_mean,\n kwargs_to_update={\n \"input\": \"input_tensor\",\n \"keep_dims\": \"keepdims\",\n },\n )\n)\nMin = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\",),\n },\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.math.reduce_min,\n kwargs_to_update={\n \"input\": \"input_tensor\",\n \"keep_dims\": \"keepdims\",\n },\n )\n )\n)\nMul = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.multiply))\nNeg = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.negative))\nPow = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.pow))\nRealDiv = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\n \"complex\",\n \"bfloat16\",\n \"float16\",\n \"float64\",\n \"float32\",\n ),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.general_functions.realdiv))\n)\nReciprocal = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.reciprocal))\nRelu = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\", \"float16\"),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.nn.relu))\n)\nRelu6 = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\", \"float16\"),\n },\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.nn.relu6,\n )\n )\n)\nReshape = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.reshape)\n)\nRoll = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.roll))\nShapeN = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.shape_n)\n)\nSigmoid = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.keras.activations.sigmoid)\n)\nSin = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.sin))\nSize = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.general_functions.size))\nSoftmax = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"float16\",),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.nn.softmax))\n)\nSplit = to_ivy_arrays_and_back(\n map_raw_ops_alias(\n tf_frontend.split, kwargs_to_update={\"num_split\": \"num_or_size_splits\"}\n )\n)\nSquaredDifference = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\n \"complex\",\n \"bfloat16\",\n \"float16\",\n \"float64\",\n \"float32\",\n \"int32\",\n \"int64\",\n ),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.squared_difference))\n)\nSqueeze = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.squeeze)\n)\nTan = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.tan))\nTanh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.tanh))\nTile = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.general_functions.tile))\nXlogy = 
to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.xlogy))\nZeta = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\"float32\", \"float64\"),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.zeta))\n)\n\n\n# --- Helpers --- #\n# --------------- #\n\n\ndef _tf_to_ivy_ivy_arguments_for_conv(\n padding, ex_pading, strides, dilations, data_format\n):\n if data_format.find(\"C\") == 1:\n strides = strides[2:]\n dilations = dilations[2:]\n data_format = \"channel_first\"\n pad_index = [4, 8]\n else:\n strides = strides[1:-1]\n dilations = dilations[1:-1]\n data_format = \"channel_last\"\n pad_index = [2, 6]\n if padding == \"EXPLICIT\":\n padding = [\n (ex_pading[i], ex_pading[i + 1])\n for i in range(pad_index[0], pad_index[1], 2)\n ]\n return padding, strides, dilations, data_format\n\n\n# --- Main --- #\n# ------------ #\n\n\n@to_ivy_arrays_and_back\ndef AccumulateNV2(inputs, shape, name=\"AccumulateNV2\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef Angle(\n *,\n input,\n Tout=ivy.float32,\n name=\"Angle\",\n):\n Tout = ivy.as_ivy_dtype(Tout) if Tout is not None else ivy.float32\n return ivy.astype(ivy.angle(input), Tout)\n\n\n@with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\n \"float16\",\n \"bool\",\n \"bfloat16\",\n )\n },\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef ApproximateEqual(\n *,\n x,\n y,\n tolerance=1e-05,\n name=\"ApproximateEqual\",\n):\n x, y = check_tensorflow_casting(x, y)\n return ivy.abs(x - y) < tolerance\n\n\n@to_ivy_arrays_and_back\ndef Atanh(*, x, name=\"Atanh\"):\n return ivy.atanh(x)\n\n\n@to_ivy_arrays_and_back\ndef BandedTriangularSolve(\n matrix,\n rhs,\n lower=True,\n adjoint=False,\n name=\"BandedTriangularSolve\",\n):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef BatchMatMul(x, y, adj_x=False, adj_y=False, name=\"BatchMatMul\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef BatchMatMulV2(x, y, adj_x=False, adj_y=False, name=\"BatchMatMulV2\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef BatchMatMulV3(x, y, Tout=ivy.Dtype, adj_x=False, adj_y=False, name=\"BatchMatMulV3\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef BitwiseAnd(*, x, y, name=\"BitwiseAnd\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.bitwise_and(x, y)\n\n\n@to_ivy_arrays_and_back\ndef BitwiseOr(*, x, y, name=\"BitwiseOr\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.bitwise_or(x, y)\n\n\n@to_ivy_arrays_and_back\ndef BitwiseXor(*, x, y, name=\"BitwiseXor\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.bitwise_xor(x, y)\n\n\n@to_ivy_arrays_and_back\ndef BroadcastTo(*, input, shape, name=\"BroadcastTo\"):\n return ivy.broadcast_to(input, shape=shape)\n\n\n@to_ivy_arrays_and_back\ndef Ceil(*, x, name=None):\n return ivy.ceil(x)\n\n\n@to_ivy_arrays_and_back\ndef Cholesky(*, input, name=\"Cholesky\"):\n return ivy.astype(ivy.cholesky(input), input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef Complex(real, imag, Tout=ivy.complex64, name=\"Complex\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef Concat(*, concat_dim, values, name=\"Concat\"):\n return ivy.concat(values, axis=concat_dim)\n\n\n@to_ivy_arrays_and_back\ndef Conv2D(\n *,\n input,\n filter,\n strides,\n padding,\n use_cudnn_on_gpu,\n explicit_paddings,\n data_format=\"NHWC\",\n dilations=[1, 1, 1, 1],\n name=\"Conv2D\",\n):\n padding, strides, dilations, 
data_format = _tf_to_ivy_ivy_arguments_for_conv(\n padding, explicit_paddings, strides, dilations, data_format\n )\n return ivy.conv_general_dilated(\n input,\n filter,\n strides,\n padding,\n data_format=data_format,\n dilations=dilations,\n dims=2,\n )\n\n\n@to_ivy_arrays_and_back\ndef Conv3D(\n *,\n input,\n filter,\n strides,\n padding,\n data_format=\"NDHWC\",\n dilations=[1, 1, 1, 1, 1],\n name=\"Conv3D\",\n):\n # ivy.backends.tensorflow expects strides and dilations to be\n # a single integer value or a list of 3 values whereas the raw op\n # expects a list of 5 values\n if data_format == \"NDHWC\":\n strides = strides[1:-1]\n dilations = dilations[1:-1]\n elif data_format == \"NCDHW\":\n strides = strides[2:]\n dilations = dilations[2:]\n\n return tf_frontend.nn.conv3d(\n input,\n filter,\n strides,\n padding,\n data_format=data_format,\n dilations=dilations,\n name=name,\n )\n\n\n@to_ivy_arrays_and_back\ndef Cross(*, a, b, name=\"Cross\"):\n a, b = check_tensorflow_casting(a, b)\n return ivy.cross(a, b)\n\n\n@to_ivy_arrays_and_back\ndef CumulativeLogsumexp(\n x, axis, exclusive=False, reverse=False, name=\"CumulativeLogsumexp\"\n):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef DebugGradientIdentity(input, name=\"DebugGradientIdentity\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef Diag(*, diagonal, name=\"Diag\"):\n return ivy.astype(ivy.diag(diagonal), diagonal.dtype)\n\n\n@with_supported_dtypes(\n {\"2.13.0 and below\": (\"bfloat16\", \"float16\", \"float32\", \"float64\")},\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef Elu(features, name=None):\n zeros = ivy.zeros_like(features, dtype=ivy.dtype(features))\n ones = ivy.ones_like(features, dtype=ivy.dtype(features))\n ret_val = ivy.where(\n # if x > 0 => x; else e^x - 1\n features > zeros,\n features,\n ivy.subtract(ivy.exp(features), ones),\n )\n return ret_val\n\n\n@to_ivy_arrays_and_back\ndef Equal(*, x, y, incompatible_shape_error=True, name=\"Equal\"):\n x, y = check_tensorflow_casting(x, y)\n if incompatible_shape_error:\n return ivy.equal(x, y)\n\n try:\n return ivy.equal(x, y)\n except (ivy.utils.exceptions.IvyError, ivy.utils.exceptions.IvyBackendException):\n return ivy.array(False)\n\n\n@to_ivy_arrays_and_back\ndef EuclideanNorm(*, input, axis, keep_dims=False, name=\"EuclideanNorm\"):\n return ivy.astype(\n ivy.vector_norm(input, axis=axis, keepdims=keep_dims), input.dtype\n )\n\n\n@to_ivy_arrays_and_back\ndef Exp(*, x, name=\"Exp\"):\n return ivy.exp(x)\n\n\n@to_ivy_arrays_and_back\ndef Expm1(*, x, name=\"Expm1\"):\n return ivy.expm1(x)\n\n\n@to_ivy_arrays_and_back\ndef FFT(*, input, name=\"FFT\"):\n return ivy.astype(ivy.fft(input, -1), input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef FFT2D(*, input, name=\"FFT2D\"):\n return ivy.astype(ivy.fft2(input, dim=(-2, -1)), input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef Fill(*, dims, value, name=\"Full\"):\n return ivy.full(dims, value)\n\n\n@to_ivy_arrays_and_back\ndef Floor(*, x, name=\"Floor\"):\n return ivy.floor(x)\n\n\n@to_ivy_arrays_and_back\ndef FloorDiv(*, x, y, name=\"FloorDiv\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.floor_divide(x, y)\n\n\n@to_ivy_arrays_and_back\ndef FloorMod(*, x, y, name=\"FloorMod\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.remainder(x, y)\n\n\n@to_ivy_arrays_and_back\ndef Gather(*, params, indices, validate_indices=None, name=\"Gather\"):\n return ivy.gather(params, indices, axis=0, batch_dims=0)\n\n\n@to_ivy_arrays_and_back\ndef Greater(*, x, 
y, name=\"Greater\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.greater(x, y)\n\n\n@to_ivy_arrays_and_back\ndef GreaterEqual(*, x, y, name=\"GreaterEqual\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.greater_equal(x, y)\n\n\n@to_ivy_arrays_and_back\ndef Imag(\n *,\n input,\n Tout=ivy.float32,\n name=\"Imag\",\n):\n Tout = ivy.as_ivy_dtype(Tout) if Tout is not None else ivy.float32\n return ivy.astype(ivy.imag(input), Tout)\n\n\n@to_ivy_arrays_and_back\ndef Inv(*, x, name=\"Inv\"):\n return ivy.astype(ivy.reciprocal(x), x.dtype)\n\n\n@to_ivy_arrays_and_back\ndef InvGrad(*, y, dy, name=\"InvGrad\"):\n return ivy.multiply(ivy.negative(dy), ivy.multiply(y, y))\n\n\n@to_ivy_arrays_and_back\ndef Invert(*, x, name=\"Invert\"):\n return ivy.bitwise_invert(x)\n\n\n@to_ivy_arrays_and_back\ndef LeftShift(*, x, y, name=\"LeftShift\"):\n return ivy.bitwise_left_shift(x, y)\n\n\n@to_ivy_arrays_and_back\ndef Less(*, x, y, name=\"Less\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.less(x, y)\n\n\n@to_ivy_arrays_and_back\ndef LinSpace(*, start, stop, num, name=None):\n return ivy.linspace(start, stop, num)\n\n\n@to_ivy_arrays_and_back\ndef Log(*, x, name=\"Log\"):\n return ivy.log(x)\n\n\n@to_ivy_arrays_and_back\ndef LogicalNot(*, x, name=\"LogicalNot\"):\n return ivy.logical_not(x)\n\n\n@to_ivy_arrays_and_back\ndef MatMul(*, a, b, transpose_a=False, transpose_b=False, name=\"MatMul\"):\n a, b = check_tensorflow_casting(a, b)\n return ivy.matmul(a, b, transpose_a=transpose_a, transpose_b=transpose_b)\n\n\n@to_ivy_arrays_and_back\ndef MatrixInverse(*, input, adjoint=False, name=\"MatrixInverse\"):\n return ivy.inv(input, adjoint=adjoint)\n\n\n@to_ivy_arrays_and_back\ndef Minimum(*, x, y, name=\"Minimum\"):\n return ivy.minimum(x, y)\n\n\n@to_ivy_arrays_and_back\ndef NotEqual(*, x, y, incompatible_shape_error=True, name=\"NotEqual\"):\n x, y = check_tensorflow_casting(x, y)\n if incompatible_shape_error:\n return ivy.not_equal(x, y)\n\n try:\n return ivy.not_equal(x, y)\n except (ivy.utils.exceptions.IvyError, ivy.utils.exceptions.IvyBackendException):\n return ivy.array(True)\n\n\n@to_ivy_arrays_and_back\ndef NthElement(*, input, n, reverse=False, name=\"NthElement\"):\n return ivy.astype(ivy.sort(input, descending=reverse)[..., n], input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef OnesLike(*, x, name=\"OnesLike\"):\n return ivy.ones_like(x)\n\n\n@to_ivy_arrays_and_back\ndef Pack(*, values, axis=0, name=\"Pack\"):\n return ivy.stack(values, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef Pad(*, input, paddings, name=\"Pad\"):\n return ivy.constant_pad(input, paddings.to_list())\n\n\n@to_ivy_arrays_and_back\ndef PadV2(*, input, paddings, constant_values, name=\"PadV2\"):\n return ivy.constant_pad(input, paddings.to_list(), value=constant_values)\n\n\n@to_ivy_arrays_and_back\ndef Prod(*, input, axis, keep_dims=False, name=\"Prod\"):\n return ivy.astype(ivy.prod(input, axis=axis, keepdims=keep_dims), input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef Real(input, Tout=ivy.float32, name=\"Real\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef Reverse(*, tensor, dims, name=\"Reverse\"):\n ret = tensor\n for dim in enumerate(dims):\n if dim[1]:\n ret = ivy.flip(ret, axis=dim[0])\n return ret\n\n\n@to_ivy_arrays_and_back\ndef RightShift(*, x, y, name=\"RightShift\"):\n return ivy.bitwise_right_shift(x, y)\n\n\n@to_ivy_arrays_and_back\ndef Round(*, x, name=\"Round\"):\n return ivy.round(x)\n\n\n@to_ivy_arrays_and_back\ndef Rsqrt(*, x, name=\"Rsqrt\"):\n return 
ivy.sqrt(ivy.reciprocal(x))\n\n\n@to_ivy_arrays_and_back\ndef Shape(*, input, output_type=ivy.int32, name=\"Shape\"):\n output_type = to_ivy_dtype(output_type)\n return ivy.astype(ivy.shape(input, as_array=True), output_type, copy=False)\n\n\n@with_unsupported_dtypes(\n {\"2.13.0 and below\": (\"unsigned\",)},\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef Sign(*, x, name=\"Sign\"):\n return ivy.sign(x, np_variant=False)\n\n\n@to_ivy_arrays_and_back\ndef Sinh(*, x, name=\"Sinh\"):\n return ivy.sinh(x)\n\n\n@to_ivy_arrays_and_back\ndef Softplus(*, features, name=\"Softplus\"):\n return ivy.softplus(features)\n\n\n# Softsign\n@to_ivy_arrays_and_back\ndef Softsign(*, features, name=\"Softsign\"):\n return ivy.softsign(features)\n\n\n@to_ivy_arrays_and_back\ndef SplitV(*, value, size_splits, axis, num_split, name=\"SplitV\"):\n return ivy.split(value, num_or_size_splits=size_splits, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef Sqrt(*, x, name=\"Sqrt\"):\n return ivy.sqrt(x)\n\n\n@to_ivy_arrays_and_back\ndef Square(*, x, name=\"Square\"):\n return ivy.square(x)\n\n\n@to_ivy_arrays_and_back\ndef Sum(*, input, axis, keep_dims=False, name=\"Sum\"):\n return ivy.astype(ivy.sum(input, axis=axis, keepdims=keep_dims), input.dtype)\n\n\n@with_supported_dtypes(\n {\"2.13.0 and below\": (\"float64\", \"float128\", \"halfcomplex64\", \"complex128\")},\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef Svd(*, input, full_matrices=False, compute_uv=True, name=None):\n return ivy.svd(input, compute_uv=compute_uv, full_matrices=full_matrices)\n\n\n@to_ivy_arrays_and_back\ndef TanhGrad(*, y, dy, name=\"TanhGrad\"):\n return ivy.multiply(dy, ivy.subtract(1, ivy.multiply(y, y)))\n\n\n@to_ivy_arrays_and_back\ndef Transpose(*, x, perm, name=\"Transpose\"):\n ret = ivy.permute_dims(x, axes=perm)\n return ret\n\n\n@to_ivy_arrays_and_back\ndef TruncateDiv(*, x, y, name=\"TruncateDiv\"):\n return ivy.astype(ivy.trunc_divide(x, y), x.dtype)\n\n\n@with_unsupported_dtypes({\"2.13.0 and below\": (\"float16\", \"bfloat16\")}, \"tensorflow\")\n@to_ivy_arrays_and_back\ndef Unpack(*, value, num, axis=0, name=\"Unpack\"):\n return ivy.unstack(value, axis=axis)[:num]\n\n\n@to_ivy_arrays_and_back\ndef Xdivy(*, x, y, name=\"Xdivy\"):\n if (x == 0).all():\n return 0.0\n return ivy.divide(x, y)\n\n\n@with_unsupported_dtypes({\"2.13.0 and below\": (\"bfloat16\",)}, \"tensorflow\")\n@to_ivy_arrays_and_back\ndef Xlog1py(*, x, y, name=\"Xlog1py\"):\n if (x == 0).all():\n return 0.0\n return ivy.multiply(x, ivy.log1p(y))\n\n\n@to_ivy_arrays_and_back\ndef ZerosLike(*, x, name=\"ZerosLike\"):\n return ivy.zeros_like(x)\n\n\nAdd = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add))\nSlice = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.slice))\nSub = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.subtract))\n", "path": "ivy/functional/frontends/tensorflow/raw_ops.py" } ]
[ { "content": "# global\nimport ivy\nimport ivy.functional.frontends.tensorflow as tf_frontend\nfrom ivy.functional.frontends.tensorflow import check_tensorflow_casting\nfrom ivy.functional.frontends.tensorflow.func_wrapper import (\n to_ivy_arrays_and_back,\n map_raw_ops_alias,\n to_ivy_dtype,\n)\n\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.utils.exceptions import IvyNotImplementedException\n\n\nAcos = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.acos))\nAcosh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.acosh))\nAddN = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add_n))\nAddV2 = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add))\nArgMax = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\"2.13.0 and below\": (\"complex\",)},\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.math.argmax, kwargs_to_update={\"dimension\": \"axis\"}\n )\n )\n)\nArgMin = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\"2.13.0 and below\": (\"complex\",)},\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.math.argmin, kwargs_to_update={\"dimension\": \"axis\"}\n )\n )\n)\nAsin = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.asin))\nAtan = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.atan))\nAtan2 = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\"2.13.0 and below\": \"float16\"},\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.atan2))\n)\nConcatV2 = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.concat))\nCos = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cos))\nCosh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cosh))\nCumprod = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cumprod))\nCumsum = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cumsum))\nDigamma = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.digamma))\nDiv = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.divide))\nEinsum = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\n \"bfloat16\",\n \"complex128 \",\n \"complex64\",\n \"float64\",\n \"float32\",\n \"float16\",\n \"int64\",\n \"int32\",\n ),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.general_functions.einsum))\n)\nIdentity = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.identity)\n)\nIdentityN = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.identity_n)\n)\nIgamma = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\n \"float64\",\n \"float32\",\n \"half\",\n ),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.igamma))\n)\nLeakyRelu = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\"bfloat16\", \"float16\", \"float32\", \"float64\"),\n },\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.nn.leaky_relu,\n )\n )\n)\nLessEqual = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\",),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.less_equal))\n)\nLog1p = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.log1p))\nLogSoftmax = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\n \"bfloat16\",\n \"float32\",\n \"float64\",\n ),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.log_softmax))\n)\nLogicalOr = 
to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.logical_or))\nMatrixDeterminant = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.linalg.det))\nMax = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\",),\n },\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.math.reduce_max,\n kwargs_to_update={\n \"input\": \"input_tensor\",\n \"keep_dims\": \"keepdims\",\n },\n )\n )\n)\nMaximum = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\",),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.maximum))\n)\nMean = to_ivy_arrays_and_back(\n map_raw_ops_alias(\n tf_frontend.math.reduce_mean,\n kwargs_to_update={\n \"input\": \"input_tensor\",\n \"keep_dims\": \"keepdims\",\n },\n )\n)\nMin = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\",),\n },\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.math.reduce_min,\n kwargs_to_update={\n \"input\": \"input_tensor\",\n \"keep_dims\": \"keepdims\",\n },\n )\n )\n)\nMod = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.mod))\nMul = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.multiply))\nNeg = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.negative))\nPow = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.pow))\nRealDiv = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\n \"complex\",\n \"bfloat16\",\n \"float16\",\n \"float64\",\n \"float32\",\n ),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.general_functions.realdiv))\n)\nReciprocal = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.reciprocal))\nRelu = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\", \"float16\"),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.nn.relu))\n)\nRelu6 = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\", \"float16\"),\n },\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.nn.relu6,\n )\n )\n)\nReshape = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.reshape)\n)\nRoll = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.roll))\nShapeN = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.shape_n)\n)\nSigmoid = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.keras.activations.sigmoid)\n)\nSin = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.sin))\nSize = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.general_functions.size))\nSoftmax = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"float16\",),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.nn.softmax))\n)\nSplit = to_ivy_arrays_and_back(\n map_raw_ops_alias(\n tf_frontend.split, kwargs_to_update={\"num_split\": \"num_or_size_splits\"}\n )\n)\nSquaredDifference = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\n \"complex\",\n \"bfloat16\",\n \"float16\",\n \"float64\",\n \"float32\",\n \"int32\",\n \"int64\",\n ),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.squared_difference))\n)\nSqueeze = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.squeeze)\n)\nTan = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.tan))\nTanh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.tanh))\nTile = 
to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.general_functions.tile))\nXlogy = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.xlogy))\nZeta = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\"float32\", \"float64\"),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.zeta))\n)\n\n\n# --- Helpers --- #\n# --------------- #\n\n\ndef _tf_to_ivy_ivy_arguments_for_conv(\n padding, ex_pading, strides, dilations, data_format\n):\n if data_format.find(\"C\") == 1:\n strides = strides[2:]\n dilations = dilations[2:]\n data_format = \"channel_first\"\n pad_index = [4, 8]\n else:\n strides = strides[1:-1]\n dilations = dilations[1:-1]\n data_format = \"channel_last\"\n pad_index = [2, 6]\n if padding == \"EXPLICIT\":\n padding = [\n (ex_pading[i], ex_pading[i + 1])\n for i in range(pad_index[0], pad_index[1], 2)\n ]\n return padding, strides, dilations, data_format\n\n\n# --- Main --- #\n# ------------ #\n\n\n@to_ivy_arrays_and_back\ndef AccumulateNV2(inputs, shape, name=\"AccumulateNV2\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef Angle(\n *,\n input,\n Tout=ivy.float32,\n name=\"Angle\",\n):\n Tout = ivy.as_ivy_dtype(Tout) if Tout is not None else ivy.float32\n return ivy.astype(ivy.angle(input), Tout)\n\n\n@with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\n \"float16\",\n \"bool\",\n \"bfloat16\",\n )\n },\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef ApproximateEqual(\n *,\n x,\n y,\n tolerance=1e-05,\n name=\"ApproximateEqual\",\n):\n x, y = check_tensorflow_casting(x, y)\n return ivy.abs(x - y) < tolerance\n\n\n@to_ivy_arrays_and_back\ndef Atanh(*, x, name=\"Atanh\"):\n return ivy.atanh(x)\n\n\n@to_ivy_arrays_and_back\ndef BandedTriangularSolve(\n matrix,\n rhs,\n lower=True,\n adjoint=False,\n name=\"BandedTriangularSolve\",\n):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef BatchMatMul(x, y, adj_x=False, adj_y=False, name=\"BatchMatMul\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef BatchMatMulV2(x, y, adj_x=False, adj_y=False, name=\"BatchMatMulV2\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef BatchMatMulV3(x, y, Tout=ivy.Dtype, adj_x=False, adj_y=False, name=\"BatchMatMulV3\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef BitwiseAnd(*, x, y, name=\"BitwiseAnd\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.bitwise_and(x, y)\n\n\n@to_ivy_arrays_and_back\ndef BitwiseOr(*, x, y, name=\"BitwiseOr\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.bitwise_or(x, y)\n\n\n@to_ivy_arrays_and_back\ndef BitwiseXor(*, x, y, name=\"BitwiseXor\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.bitwise_xor(x, y)\n\n\n@to_ivy_arrays_and_back\ndef BroadcastTo(*, input, shape, name=\"BroadcastTo\"):\n return ivy.broadcast_to(input, shape=shape)\n\n\n@to_ivy_arrays_and_back\ndef Ceil(*, x, name=None):\n return ivy.ceil(x)\n\n\n@to_ivy_arrays_and_back\ndef Cholesky(*, input, name=\"Cholesky\"):\n return ivy.astype(ivy.cholesky(input), input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef Complex(real, imag, Tout=ivy.complex64, name=\"Complex\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef Concat(*, concat_dim, values, name=\"Concat\"):\n return ivy.concat(values, axis=concat_dim)\n\n\n@to_ivy_arrays_and_back\ndef Conv2D(\n *,\n input,\n filter,\n strides,\n padding,\n use_cudnn_on_gpu,\n explicit_paddings,\n 
data_format=\"NHWC\",\n dilations=[1, 1, 1, 1],\n name=\"Conv2D\",\n):\n padding, strides, dilations, data_format = _tf_to_ivy_ivy_arguments_for_conv(\n padding, explicit_paddings, strides, dilations, data_format\n )\n return ivy.conv_general_dilated(\n input,\n filter,\n strides,\n padding,\n data_format=data_format,\n dilations=dilations,\n dims=2,\n )\n\n\n@to_ivy_arrays_and_back\ndef Conv3D(\n *,\n input,\n filter,\n strides,\n padding,\n data_format=\"NDHWC\",\n dilations=[1, 1, 1, 1, 1],\n name=\"Conv3D\",\n):\n # ivy.backends.tensorflow expects strides and dilations to be\n # a single integer value or a list of 3 values whereas the raw op\n # expects a list of 5 values\n if data_format == \"NDHWC\":\n strides = strides[1:-1]\n dilations = dilations[1:-1]\n elif data_format == \"NCDHW\":\n strides = strides[2:]\n dilations = dilations[2:]\n\n return tf_frontend.nn.conv3d(\n input,\n filter,\n strides,\n padding,\n data_format=data_format,\n dilations=dilations,\n name=name,\n )\n\n\n@to_ivy_arrays_and_back\ndef Cross(*, a, b, name=\"Cross\"):\n a, b = check_tensorflow_casting(a, b)\n return ivy.cross(a, b)\n\n\n@to_ivy_arrays_and_back\ndef CumulativeLogsumexp(\n x, axis, exclusive=False, reverse=False, name=\"CumulativeLogsumexp\"\n):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef DebugGradientIdentity(input, name=\"DebugGradientIdentity\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef Diag(*, diagonal, name=\"Diag\"):\n return ivy.astype(ivy.diag(diagonal), diagonal.dtype)\n\n\n@with_supported_dtypes(\n {\"2.13.0 and below\": (\"bfloat16\", \"float16\", \"float32\", \"float64\")},\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef Elu(features, name=None):\n zeros = ivy.zeros_like(features, dtype=ivy.dtype(features))\n ones = ivy.ones_like(features, dtype=ivy.dtype(features))\n ret_val = ivy.where(\n # if x > 0 => x; else e^x - 1\n features > zeros,\n features,\n ivy.subtract(ivy.exp(features), ones),\n )\n return ret_val\n\n\n@to_ivy_arrays_and_back\ndef Equal(*, x, y, incompatible_shape_error=True, name=\"Equal\"):\n x, y = check_tensorflow_casting(x, y)\n if incompatible_shape_error:\n return ivy.equal(x, y)\n\n try:\n return ivy.equal(x, y)\n except (ivy.utils.exceptions.IvyError, ivy.utils.exceptions.IvyBackendException):\n return ivy.array(False)\n\n\n@to_ivy_arrays_and_back\ndef EuclideanNorm(*, input, axis, keep_dims=False, name=\"EuclideanNorm\"):\n return ivy.astype(\n ivy.vector_norm(input, axis=axis, keepdims=keep_dims), input.dtype\n )\n\n\n@to_ivy_arrays_and_back\ndef Exp(*, x, name=\"Exp\"):\n return ivy.exp(x)\n\n\n@to_ivy_arrays_and_back\ndef Expm1(*, x, name=\"Expm1\"):\n return ivy.expm1(x)\n\n\n@to_ivy_arrays_and_back\ndef FFT(*, input, name=\"FFT\"):\n return ivy.astype(ivy.fft(input, -1), input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef FFT2D(*, input, name=\"FFT2D\"):\n return ivy.astype(ivy.fft2(input, dim=(-2, -1)), input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef Fill(*, dims, value, name=\"Full\"):\n return ivy.full(dims, value)\n\n\n@to_ivy_arrays_and_back\ndef Floor(*, x, name=\"Floor\"):\n return ivy.floor(x)\n\n\n@to_ivy_arrays_and_back\ndef FloorDiv(*, x, y, name=\"FloorDiv\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.floor_divide(x, y)\n\n\n@to_ivy_arrays_and_back\ndef FloorMod(*, x, y, name=\"FloorMod\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.remainder(x, y)\n\n\n@to_ivy_arrays_and_back\ndef Gather(*, params, indices, validate_indices=None, name=\"Gather\"):\n 
return ivy.gather(params, indices, axis=0, batch_dims=0)\n\n\n@to_ivy_arrays_and_back\ndef Greater(*, x, y, name=\"Greater\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.greater(x, y)\n\n\n@to_ivy_arrays_and_back\ndef GreaterEqual(*, x, y, name=\"GreaterEqual\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.greater_equal(x, y)\n\n\n@to_ivy_arrays_and_back\ndef Imag(\n *,\n input,\n Tout=ivy.float32,\n name=\"Imag\",\n):\n Tout = ivy.as_ivy_dtype(Tout) if Tout is not None else ivy.float32\n return ivy.astype(ivy.imag(input), Tout)\n\n\n@to_ivy_arrays_and_back\ndef Inv(*, x, name=\"Inv\"):\n return ivy.astype(ivy.reciprocal(x), x.dtype)\n\n\n@to_ivy_arrays_and_back\ndef InvGrad(*, y, dy, name=\"InvGrad\"):\n return ivy.multiply(ivy.negative(dy), ivy.multiply(y, y))\n\n\n@to_ivy_arrays_and_back\ndef Invert(*, x, name=\"Invert\"):\n return ivy.bitwise_invert(x)\n\n\n@to_ivy_arrays_and_back\ndef LeftShift(*, x, y, name=\"LeftShift\"):\n return ivy.bitwise_left_shift(x, y)\n\n\n@to_ivy_arrays_and_back\ndef Less(*, x, y, name=\"Less\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.less(x, y)\n\n\n@to_ivy_arrays_and_back\ndef LinSpace(*, start, stop, num, name=None):\n return ivy.linspace(start, stop, num)\n\n\n@to_ivy_arrays_and_back\ndef Log(*, x, name=\"Log\"):\n return ivy.log(x)\n\n\n@to_ivy_arrays_and_back\ndef LogicalNot(*, x, name=\"LogicalNot\"):\n return ivy.logical_not(x)\n\n\n@to_ivy_arrays_and_back\ndef MatMul(*, a, b, transpose_a=False, transpose_b=False, name=\"MatMul\"):\n a, b = check_tensorflow_casting(a, b)\n return ivy.matmul(a, b, transpose_a=transpose_a, transpose_b=transpose_b)\n\n\n@to_ivy_arrays_and_back\ndef MatrixInverse(*, input, adjoint=False, name=\"MatrixInverse\"):\n return ivy.inv(input, adjoint=adjoint)\n\n\n@to_ivy_arrays_and_back\ndef Minimum(*, x, y, name=\"Minimum\"):\n return ivy.minimum(x, y)\n\n\n@to_ivy_arrays_and_back\ndef NotEqual(*, x, y, incompatible_shape_error=True, name=\"NotEqual\"):\n x, y = check_tensorflow_casting(x, y)\n if incompatible_shape_error:\n return ivy.not_equal(x, y)\n\n try:\n return ivy.not_equal(x, y)\n except (ivy.utils.exceptions.IvyError, ivy.utils.exceptions.IvyBackendException):\n return ivy.array(True)\n\n\n@to_ivy_arrays_and_back\ndef NthElement(*, input, n, reverse=False, name=\"NthElement\"):\n return ivy.astype(ivy.sort(input, descending=reverse)[..., n], input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef OnesLike(*, x, name=\"OnesLike\"):\n return ivy.ones_like(x)\n\n\n@to_ivy_arrays_and_back\ndef Pack(*, values, axis=0, name=\"Pack\"):\n return ivy.stack(values, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef Pad(*, input, paddings, name=\"Pad\"):\n return ivy.constant_pad(input, paddings.to_list())\n\n\n@to_ivy_arrays_and_back\ndef PadV2(*, input, paddings, constant_values, name=\"PadV2\"):\n return ivy.constant_pad(input, paddings.to_list(), value=constant_values)\n\n\n@to_ivy_arrays_and_back\ndef Prod(*, input, axis, keep_dims=False, name=\"Prod\"):\n return ivy.astype(ivy.prod(input, axis=axis, keepdims=keep_dims), input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef Real(input, Tout=ivy.float32, name=\"Real\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef Reverse(*, tensor, dims, name=\"Reverse\"):\n ret = tensor\n for dim in enumerate(dims):\n if dim[1]:\n ret = ivy.flip(ret, axis=dim[0])\n return ret\n\n\n@to_ivy_arrays_and_back\ndef RightShift(*, x, y, name=\"RightShift\"):\n return ivy.bitwise_right_shift(x, y)\n\n\n@to_ivy_arrays_and_back\ndef Round(*, x, 
name=\"Round\"):\n return ivy.round(x)\n\n\n@to_ivy_arrays_and_back\ndef Rsqrt(*, x, name=\"Rsqrt\"):\n return ivy.sqrt(ivy.reciprocal(x))\n\n\n@to_ivy_arrays_and_back\ndef Shape(*, input, output_type=ivy.int32, name=\"Shape\"):\n output_type = to_ivy_dtype(output_type)\n return ivy.astype(ivy.shape(input, as_array=True), output_type, copy=False)\n\n\n@with_unsupported_dtypes(\n {\"2.13.0 and below\": (\"unsigned\",)},\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef Sign(*, x, name=\"Sign\"):\n return ivy.sign(x, np_variant=False)\n\n\n@to_ivy_arrays_and_back\ndef Sinh(*, x, name=\"Sinh\"):\n return ivy.sinh(x)\n\n\n@to_ivy_arrays_and_back\ndef Softplus(*, features, name=\"Softplus\"):\n return ivy.softplus(features)\n\n\n# Softsign\n@to_ivy_arrays_and_back\ndef Softsign(*, features, name=\"Softsign\"):\n return ivy.softsign(features)\n\n\n@to_ivy_arrays_and_back\ndef SplitV(*, value, size_splits, axis, num_split, name=\"SplitV\"):\n return ivy.split(value, num_or_size_splits=size_splits, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef Sqrt(*, x, name=\"Sqrt\"):\n return ivy.sqrt(x)\n\n\n@to_ivy_arrays_and_back\ndef Square(*, x, name=\"Square\"):\n return ivy.square(x)\n\n\n@to_ivy_arrays_and_back\ndef Sum(*, input, axis, keep_dims=False, name=\"Sum\"):\n return ivy.astype(ivy.sum(input, axis=axis, keepdims=keep_dims), input.dtype)\n\n\n@with_supported_dtypes(\n {\"2.13.0 and below\": (\"float64\", \"float128\", \"halfcomplex64\", \"complex128\")},\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef Svd(*, input, full_matrices=False, compute_uv=True, name=None):\n return ivy.svd(input, compute_uv=compute_uv, full_matrices=full_matrices)\n\n\n@to_ivy_arrays_and_back\ndef TanhGrad(*, y, dy, name=\"TanhGrad\"):\n return ivy.multiply(dy, ivy.subtract(1, ivy.multiply(y, y)))\n\n\n@to_ivy_arrays_and_back\ndef Transpose(*, x, perm, name=\"Transpose\"):\n ret = ivy.permute_dims(x, axes=perm)\n return ret\n\n\n@to_ivy_arrays_and_back\ndef TruncateDiv(*, x, y, name=\"TruncateDiv\"):\n return ivy.astype(ivy.trunc_divide(x, y), x.dtype)\n\n\n@with_unsupported_dtypes({\"2.13.0 and below\": (\"float16\", \"bfloat16\")}, \"tensorflow\")\n@to_ivy_arrays_and_back\ndef Unpack(*, value, num, axis=0, name=\"Unpack\"):\n return ivy.unstack(value, axis=axis)[:num]\n\n\n@to_ivy_arrays_and_back\ndef Xdivy(*, x, y, name=\"Xdivy\"):\n if (x == 0).all():\n return 0.0\n return ivy.divide(x, y)\n\n\n@with_unsupported_dtypes({\"2.13.0 and below\": (\"bfloat16\",)}, \"tensorflow\")\n@to_ivy_arrays_and_back\ndef Xlog1py(*, x, y, name=\"Xlog1py\"):\n if (x == 0).all():\n return 0.0\n return ivy.multiply(x, ivy.log1p(y))\n\n\n@to_ivy_arrays_and_back\ndef ZerosLike(*, x, name=\"ZerosLike\"):\n return ivy.zeros_like(x)\n\n\nAdd = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add))\nSlice = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.slice))\nSub = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.subtract))\n", "path": "ivy/functional/frontends/tensorflow/raw_ops.py" } ]
diff --git a/ivy/functional/frontends/tensorflow/raw_ops.py b/ivy/functional/frontends/tensorflow/raw_ops.py index 354afa54bdb71..5dbd860c52d63 100644 --- a/ivy/functional/frontends/tensorflow/raw_ops.py +++ b/ivy/functional/frontends/tensorflow/raw_ops.py @@ -170,6 +170,7 @@ ) ) ) +Mod = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.mod)) Mul = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.multiply)) Neg = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.negative)) Pow = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.pow)) diff --git a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_raw_ops.py b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_raw_ops.py index eccdd054aa54a..b2cf3eb575b81 100644 --- a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_raw_ops.py +++ b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_raw_ops.py @@ -2911,6 +2911,38 @@ def test_tensorflow_Minimum( # NOQA ) +# Mod +@handle_frontend_test( + fn_tree="tensorflow.raw_ops.Mod", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("float"), + num_arrays=2, + shared_dtype=True, + ), + test_with_out=st.just(False), +) +def test_tensorflow_Mod( # NOQA + *, + dtype_and_x, + frontend, + test_flags, + fn_tree, + backend_fw, + on_device, +): + input_dtype, xs = dtype_and_x + helpers.test_frontend_function( + input_dtypes=input_dtype, + backend_to_test=backend_fw, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + x=xs[0], + y=xs[1], + ) + + @handle_frontend_test( fn_tree="tensorflow.raw_ops.Mul", dtype_and_x=helpers.dtype_and_values(
mod
open-telemetry__opentelemetry-python-contrib-172
[ { "content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n\nUsage\n-----\n\nThe OpenTelemetry ``jinja2`` integration traces templates loading, compilation\nand rendering.\n\nUsage\n-----\n\n.. code-block:: python\n\n from jinja2 import Environment, FileSystemLoader\n from opentelemetry.instrumentation.jinja2 import Jinja2Instrumentor\n from opentelemetry import trace\n from opentelemetry.trace import TracerProvider\n\n trace.set_tracer_provider(TracerProvider())\n\n Jinja2Instrumentor().instrument()\n\n env = Environment(loader=FileSystemLoader(\"templates\"))\n template = env.get_template(\"mytemplate.html\")\n\nAPI\n---\n\"\"\"\n# pylint: disable=no-value-for-parameter\n\nimport logging\n\nimport jinja2\nfrom wrapt import ObjectProxy\nfrom wrapt import wrap_function_wrapper as _wrap\n\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.jinja2.version import __version__\nfrom opentelemetry.instrumentation.utils import unwrap\nfrom opentelemetry.trace import SpanKind, get_tracer\nfrom opentelemetry.trace.status import Status, StatusCode\n\nlogger = logging.getLogger(__name__)\n\nATTRIBUTE_JINJA2_TEMPLATE_NAME = \"jinja2.template_name\"\nATTRIBUTE_JINJA2_TEMPLATE_PATH = \"jinja2.template_path\"\nDEFAULT_TEMPLATE_NAME = \"<memory>\"\n\n\ndef _with_tracer_wrapper(func):\n \"\"\"Helper for providing tracer for wrapper functions.\n \"\"\"\n\n def _with_tracer(tracer):\n def wrapper(wrapped, instance, args, kwargs):\n return func(tracer, wrapped, instance, args, kwargs)\n\n return wrapper\n\n return _with_tracer\n\n\n@_with_tracer_wrapper\ndef _wrap_render(tracer, wrapped, instance, args, kwargs):\n \"\"\"Wrap `Template.render()` or `Template.generate()`\n \"\"\"\n with tracer.start_as_current_span(\n \"jinja2.render\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = instance.name or DEFAULT_TEMPLATE_NAME\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)\n return wrapped(*args, **kwargs)\n\n\n@_with_tracer_wrapper\ndef _wrap_compile(tracer, wrapped, _, args, kwargs):\n with tracer.start_as_current_span(\n \"jinja2.compile\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = (\n args[1]\n if len(args) > 1\n else kwargs.get(\"name\", DEFAULT_TEMPLATE_NAME)\n )\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)\n return wrapped(*args, **kwargs)\n\n\n@_with_tracer_wrapper\ndef _wrap_load_template(tracer, wrapped, _, args, kwargs):\n with tracer.start_as_current_span(\n \"jinja2.load\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = kwargs.get(\"name\", args[0])\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)\n template = None\n try:\n template = wrapped(*args, **kwargs)\n return template\n finally:\n if template and span.is_recording():\n span.set_attribute(\n ATTRIBUTE_JINJA2_TEMPLATE_PATH, template.filename\n )\n\n\nclass 
Jinja2Instrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for jinja2\n\n See `BaseInstrumentor`\n \"\"\"\n\n def _instrument(self, **kwargs):\n tracer_provider = kwargs.get(\"tracer_provider\")\n tracer = get_tracer(__name__, __version__, tracer_provider)\n\n _wrap(jinja2, \"environment.Template.render\", _wrap_render(tracer))\n _wrap(jinja2, \"environment.Template.generate\", _wrap_render(tracer))\n _wrap(jinja2, \"environment.Environment.compile\", _wrap_compile(tracer))\n _wrap(\n jinja2,\n \"environment.Environment._load_template\",\n _wrap_load_template(tracer),\n )\n\n def _uninstrument(self, **kwargs):\n unwrap(jinja2.Template, \"render\")\n unwrap(jinja2.Template, \"generate\")\n unwrap(jinja2.Environment, \"compile\")\n unwrap(jinja2.Environment, \"_load_template\")\n", "path": "instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py" } ]
[ { "content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n\nUsage\n-----\n\nThe OpenTelemetry ``jinja2`` integration traces templates loading, compilation\nand rendering.\n\nUsage\n-----\n\n.. code-block:: python\n\n from jinja2 import Environment, FileSystemLoader\n from opentelemetry.instrumentation.jinja2 import Jinja2Instrumentor\n from opentelemetry import trace\n from opentelemetry.trace import TracerProvider\n\n trace.set_tracer_provider(TracerProvider())\n\n Jinja2Instrumentor().instrument()\n\n env = Environment(loader=FileSystemLoader(\"templates\"))\n template = env.get_template(\"mytemplate.html\")\n\nAPI\n---\n\"\"\"\n# pylint: disable=no-value-for-parameter\n\nimport logging\n\nimport jinja2\nfrom wrapt import ObjectProxy\nfrom wrapt import wrap_function_wrapper as _wrap\n\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.jinja2.version import __version__\nfrom opentelemetry.instrumentation.utils import unwrap\nfrom opentelemetry.trace import SpanKind, get_tracer\n\nlogger = logging.getLogger(__name__)\n\nATTRIBUTE_JINJA2_TEMPLATE_NAME = \"jinja2.template_name\"\nATTRIBUTE_JINJA2_TEMPLATE_PATH = \"jinja2.template_path\"\nDEFAULT_TEMPLATE_NAME = \"<memory>\"\n\n\ndef _with_tracer_wrapper(func):\n \"\"\"Helper for providing tracer for wrapper functions.\n \"\"\"\n\n def _with_tracer(tracer):\n def wrapper(wrapped, instance, args, kwargs):\n return func(tracer, wrapped, instance, args, kwargs)\n\n return wrapper\n\n return _with_tracer\n\n\n@_with_tracer_wrapper\ndef _wrap_render(tracer, wrapped, instance, args, kwargs):\n \"\"\"Wrap `Template.render()` or `Template.generate()`\n \"\"\"\n with tracer.start_as_current_span(\n \"jinja2.render\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = instance.name or DEFAULT_TEMPLATE_NAME\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)\n return wrapped(*args, **kwargs)\n\n\n@_with_tracer_wrapper\ndef _wrap_compile(tracer, wrapped, _, args, kwargs):\n with tracer.start_as_current_span(\n \"jinja2.compile\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = (\n args[1]\n if len(args) > 1\n else kwargs.get(\"name\", DEFAULT_TEMPLATE_NAME)\n )\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)\n return wrapped(*args, **kwargs)\n\n\n@_with_tracer_wrapper\ndef _wrap_load_template(tracer, wrapped, _, args, kwargs):\n with tracer.start_as_current_span(\n \"jinja2.load\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = kwargs.get(\"name\", args[0])\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)\n template = None\n try:\n template = wrapped(*args, **kwargs)\n return template\n finally:\n if template and span.is_recording():\n span.set_attribute(\n ATTRIBUTE_JINJA2_TEMPLATE_PATH, template.filename\n )\n\n\nclass Jinja2Instrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for 
jinja2\n\n See `BaseInstrumentor`\n \"\"\"\n\n def _instrument(self, **kwargs):\n tracer_provider = kwargs.get(\"tracer_provider\")\n tracer = get_tracer(__name__, __version__, tracer_provider)\n\n _wrap(jinja2, \"environment.Template.render\", _wrap_render(tracer))\n _wrap(jinja2, \"environment.Template.generate\", _wrap_render(tracer))\n _wrap(jinja2, \"environment.Environment.compile\", _wrap_compile(tracer))\n _wrap(\n jinja2,\n \"environment.Environment._load_template\",\n _wrap_load_template(tracer),\n )\n\n def _uninstrument(self, **kwargs):\n unwrap(jinja2.Template, \"render\")\n unwrap(jinja2.Template, \"generate\")\n unwrap(jinja2.Environment, \"compile\")\n unwrap(jinja2.Environment, \"_load_template\")\n", "path": "instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py" } ]
diff --git a/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py b/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py index 63f23ae79b..3419559319 100644 --- a/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py @@ -52,7 +52,6 @@ from opentelemetry.instrumentation.jinja2.version import __version__ from opentelemetry.instrumentation.utils import unwrap from opentelemetry.trace import SpanKind, get_tracer -from opentelemetry.trace.status import Status, StatusCode logger = logging.getLogger(__name__)
Remove unused import: as per comment https://github.com/open-telemetry/opentelemetry-python-contrib/pull/107#discussion_r516262746, there appears to be an unused import in the jinja2 instrumentation.
carpentries__amy-932
[ { "content": "import re\n\nfrom django import forms\nfrom django.core.validators import RegexValidator\nfrom django.forms import (\n HiddenInput, CheckboxSelectMultiple, TextInput, modelformset_factory,\n RadioSelect,\n)\n\nfrom captcha.fields import ReCaptchaField\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Div, HTML, Submit\nfrom crispy_forms.bootstrap import FormActions\nfrom django_countries import Countries\nfrom django_countries.fields import CountryField\nfrom selectable import forms as selectable\n\nfrom workshops.models import (\n Award, Event, Lesson, Person, Task, Airport, Organization,\n EventRequest, ProfileUpdateRequest, TodoItem, Membership,\n Sponsorship, InvoiceRequest, EventSubmission, Language,\n TrainingRequest,\n DCSelfOrganizedEventRequest,\n)\nfrom workshops import lookups\n\n\nAUTOCOMPLETE_HELP_TEXT = (\n \"Autocomplete field; type characters to view available options, \"\n \"then select desired item from list.\"\n)\n\n\nclass BootstrapHelper(FormHelper):\n \"\"\"Layout and behavior for crispy-displayed forms.\"\"\"\n form_class = 'form-horizontal'\n label_class = 'col-lg-2'\n field_class = 'col-lg-8'\n html5_required = True\n\n def __init__(self, form=None):\n super().__init__(form)\n\n self.attrs['role'] = 'form'\n self.inputs.append(Submit('submit', 'Submit'))\n\n\nclass BootstrapHelperGet(BootstrapHelper):\n \"\"\"Force form to use GET instead of default POST.\"\"\"\n form_method = 'get'\n\n\nclass BootstrapHelperWithAdd(BootstrapHelper):\n \"\"\"Change form's 'Submit' to 'Add'.\"\"\"\n\n def __init__(self, form=None):\n super().__init__(form)\n\n self.inputs[-1] = Submit('submit', 'Add')\n\n\nclass BootstrapHelperFilter(FormHelper):\n \"\"\"A differently shaped forms (more space-efficient) for use in sidebar as\n filter forms.\"\"\"\n form_method = 'get'\n\n def __init__(self, form=None):\n super().__init__(form)\n self.attrs['role'] = 'form'\n self.inputs.append(Submit('', 'Submit'))\n\n\nclass BootstrapHelperWiderLabels(BootstrapHelper):\n \"\"\"SWCEventRequestForm and DCEventRequestForm have long labels, so this\n helper is used to address that issue.\"\"\"\n label_class = 'col-lg-3'\n field_class = 'col-lg-7'\n\n\nclass BootstrapHelperFormsetInline(BootstrapHelper):\n \"\"\"For use in inline formsets.\"\"\"\n template = 'bootstrap/table_inline_formset.html'\n\n\nbootstrap_helper = BootstrapHelper()\nbootstrap_helper_get = BootstrapHelperGet()\nbootstrap_helper_with_add = BootstrapHelperWithAdd()\nbootstrap_helper_filter = BootstrapHelperFilter()\nbootstrap_helper_wider_labels = BootstrapHelperWiderLabels()\nbootstrap_helper_inline_formsets = BootstrapHelperFormsetInline()\n\n\nclass WorkshopStaffForm(forms.Form):\n '''Represent instructor matching form.'''\n\n latitude = forms.FloatField(label='Latitude',\n min_value=-90.0,\n max_value=90.0,\n required=False)\n longitude = forms.FloatField(label='Longitude',\n min_value=-180.0,\n max_value=180.0,\n required=False)\n airport = selectable.AutoCompleteSelectField(\n lookup_class=lookups.AirportLookup,\n label='Airport',\n required=False,\n widget=selectable.AutoComboboxSelectWidget(\n lookup_class=lookups.AirportLookup,\n ),\n )\n languages = selectable.AutoCompleteSelectMultipleField(\n lookup_class=lookups.LanguageLookup,\n label='Languages',\n required=False,\n widget=selectable.AutoComboboxSelectMultipleWidget,\n )\n\n country = forms.MultipleChoiceField(choices=[])\n\n lessons = forms.ModelMultipleChoiceField(queryset=Lesson.objects.all(),\n 
widget=CheckboxSelectMultiple(),\n required=False)\n\n INSTRUCTOR_BADGE_CHOICES = (\n ('swc-instructor', 'Software Carpentry Instructor'),\n ('dc-instructor', 'Data Carpentry Instructor'),\n )\n instructor_badges = forms.MultipleChoiceField(\n choices=INSTRUCTOR_BADGE_CHOICES,\n widget=CheckboxSelectMultiple(),\n required=False,\n )\n\n GENDER_CHOICES = ((None, '---------'), ) + Person.GENDER_CHOICES\n gender = forms.ChoiceField(choices=GENDER_CHOICES, required=False)\n\n was_helper = forms.BooleanField(\n required=False, label='Was helper at least once before')\n was_organizer = forms.BooleanField(\n required=False, label='Was organizer at least once before')\n is_in_progress_trainee = forms.BooleanField(\n required=False, label='Is an in-progress instructor trainee')\n\n def __init__(self, *args, **kwargs):\n '''Build form layout dynamically.'''\n super().__init__(*args, **kwargs)\n\n # dynamically build choices for country field\n only = Airport.objects.distinct().exclude(country='') \\\n .exclude(country=None) \\\n .values_list('country', flat=True)\n countries = Countries()\n countries.only = only\n\n choices = list(countries)\n self.fields['country'] = forms.MultipleChoiceField(choices=choices,\n required=False)\n\n self.helper = FormHelper(self)\n self.helper.form_class = 'form-inline'\n self.helper.form_method = 'get'\n self.helper.layout = Layout(\n Div(\n Div(HTML('Location close to'), css_class='panel-heading'),\n Div('airport', css_class='panel-body'),\n Div(HTML('<b>OR</b>'), css_class='panel-footer'),\n Div('country', css_class='panel-body'),\n Div(HTML('<b>OR</b>'), css_class='panel-footer'),\n Div('latitude', 'longitude', css_class='panel-body'),\n css_class='panel panel-default ',\n ),\n 'instructor_badges',\n 'was_helper',\n 'was_organizer',\n 'is_in_progress_trainee',\n 'languages',\n 'gender',\n 'lessons',\n FormActions(\n Submit('submit', 'Submit'),\n ),\n )\n\n def clean(self):\n cleaned_data = super().clean()\n lat = bool(cleaned_data.get('latitude'))\n lng = bool(cleaned_data.get('longitude'))\n airport = bool(cleaned_data.get('airport'))\n country = bool(cleaned_data.get('country'))\n latlng = lat and lng\n\n # if searching by coordinates, then there must be both lat & lng\n # present\n if lat ^ lng:\n raise forms.ValidationError(\n 'Must specify both latitude and longitude if searching by '\n 'coordinates')\n\n # User must search by airport, or country, or coordinates, or none\n # of them. 
Sum of boolean elements must be equal 0 (if general search)\n # or 1 (if searching by airport OR country OR lat/lng).\n if sum([airport, country, latlng]) not in [0, 1]:\n raise forms.ValidationError(\n 'Must specify an airport OR a country, OR use coordinates, OR '\n 'none of them.')\n return cleaned_data\n\n\nclass PersonBulkAddForm(forms.Form):\n '''Represent CSV upload form for bulk adding people.'''\n\n file = forms.FileField()\n\n\nclass SearchForm(forms.Form):\n '''Represent general searching form.'''\n\n term = forms.CharField(label='term',\n max_length=100)\n in_organizations = forms.BooleanField(label='in organizations',\n required=False,\n initial=True)\n in_events = forms.BooleanField(label='in events',\n required=False,\n initial=True)\n in_persons = forms.BooleanField(label='in persons',\n required=False,\n initial=True)\n in_airports = forms.BooleanField(label='in airports',\n required=False,\n initial=True)\n\n\nclass DebriefForm(forms.Form):\n '''Represent general debrief form.'''\n begin_date = forms.DateField(\n label='Begin date as YYYY-MM-DD',\n input_formats=['%Y-%m-%d', ]\n )\n end_date = forms.DateField(\n label='End date as YYYY-MD-DD',\n input_formats=['%Y-%m-%d', ]\n )\n\n\nclass EventForm(forms.ModelForm):\n host = selectable.AutoCompleteSelectField(\n lookup_class=lookups.OrganizationLookup,\n label='Host',\n required=True,\n help_text=Event._meta.get_field('host').help_text,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n administrator = selectable.AutoCompleteSelectField(\n lookup_class=lookups.OrganizationLookup,\n label='Administrator',\n required=False,\n help_text=Event._meta.get_field('administrator').help_text,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n assigned_to = selectable.AutoCompleteSelectField(\n lookup_class=lookups.AdminLookup,\n label='Assigned to',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n language = selectable.AutoCompleteSelectField(\n lookup_class=lookups.LanguageLookup,\n label='Language',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n country = CountryField().formfield(\n required=False,\n help_text=Event._meta.get_field('country').help_text,\n )\n\n admin_fee = forms.DecimalField(min_value=0, decimal_places=2,\n required=False, widget=TextInput)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['slug'].widget.attrs['placeholder'] = 'YYYY-MM-DD-location'\n self.fields['start'].widget.attrs['placeholder'] = 'YYYY-MM-DD'\n self.fields['end'].widget.attrs['placeholder'] = 'YYYY-MM-DD'\n\n self.helper = BootstrapHelper(self)\n\n idx_start = self.helper['country'].slice[0][0][0]\n idx_end = self.helper['longitude'].slice[0][0][0]\n # wrap all venue fields within <div class='panel-body'>\n self.helper[idx_start:idx_end + 1] \\\n .wrap_together(Div, css_class='panel-body')\n # wrap <div class='panel-body'> within <div class='panel panel-…'>\n self.helper[idx_start].wrap_together(Div,\n css_class='panel panel-default')\n # add <div class='panel-heading'>Loc. 
details</div> inside \"div.panel\"\n self.helper.layout[idx_start].insert(0, Div(HTML('Location details'),\n css_class='panel-heading'))\n\n id_learners_pre = self.helper['learners_pre'].slice[0][0][0]\n id_learners_longterm = self.helper['learners_longterm'].slice[0][0][0]\n # wrap all survey fields within <div class='panel-body'>\n self.helper[id_learners_pre:id_learners_longterm + 1] \\\n .wrap_together(Div, css_class='panel-body')\n # wrap <div class='panel-body'> within <div class='panel panel-…'>\n self.helper[id_learners_pre].wrap_together(\n Div, css_class='panel panel-default')\n # add <div class='panel-heading'>Venue details</div> inside \"div.panel\"\n self.helper.layout[id_learners_pre].insert(\n 0, Div(HTML('Survey results'), css_class='panel-heading'))\n\n def clean_slug(self):\n # Ensure slug is in \"YYYY-MM-DD-location\" format\n data = self.cleaned_data['slug']\n match = re.match('(\\d{4}|x{4})-(\\d{2}|x{2})-(\\d{2}|x{2})-.+', data)\n if not match:\n raise forms.ValidationError('Slug must be in \"YYYY-MM-DD-location\"'\n ' format, where \"YYYY\", \"MM\", \"DD\" can'\n ' be unspecified (ie. \"xx\").')\n return data\n\n def clean_end(self):\n \"\"\"Ensure end >= start.\"\"\"\n start = self.cleaned_data['start']\n end = self.cleaned_data['end']\n\n if start and end and end < start:\n raise forms.ValidationError('Must not be earlier than start date.')\n return end\n\n class Meta:\n model = Event\n # reorder fields, don't display 'deleted' field\n fields = ('slug', 'completed', 'start', 'end', 'host', 'administrator',\n 'assigned_to', 'tags', 'url', 'language', 'reg_key',\n 'admin_fee', 'invoice_status', 'attendance', 'contact',\n 'notes', 'country', 'venue', 'address', 'latitude',\n 'longitude', 'learners_pre', 'learners_post',\n 'instructors_pre', 'instructors_post', 'learners_longterm')\n # WARNING: don't change put any fields between 'country' and\n # 'longitude' that don't relate to the venue of the event\n\n widgets = {\n 'attendance': TextInput,\n 'latitude': TextInput,\n 'longitude': TextInput,\n 'invoice_status': RadioSelect,\n }\n\n class Media:\n # thanks to this, {{ form.media }} in the template will generate\n # a <link href=\"\"> (for CSS files) or <script src=\"\"> (for JS files)\n js = (\n 'import_from_url.js', 'update_from_url.js',\n 'online_country.js',\n )\n\n\nclass TaskForm(forms.ModelForm):\n\n person = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Task\n fields = '__all__'\n widgets = {'event': HiddenInput}\n\n\nclass TaskFullForm(TaskForm):\n\n event = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Task\n fields = '__all__'\n\n\nclass PersonForm(forms.ModelForm):\n\n airport = selectable.AutoCompleteSelectField(\n lookup_class=lookups.AirportLookup,\n label='Airport',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n languages = selectable.AutoCompleteSelectMultipleField(\n lookup_class=lookups.LanguageLookup,\n label='Languages',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectMultipleWidget,\n )\n\n class Meta:\n model = Person\n # don't display the 'password', 'user_permissions',\n # 'groups' or 'is_superuser' 
fields\n # + reorder fields\n fields = ['username', 'personal', 'middle', 'family', 'may_contact',\n 'email', 'gender', 'airport', 'affiliation', 'github',\n 'twitter', 'url', 'occupation', 'orcid', 'notes', 'lessons',\n 'domains', 'languages']\n\n\nclass PersonCreateForm(PersonForm):\n class Meta(PersonForm.Meta):\n # remove 'username' field as it's being populated after form save\n # in the `views.PersonCreate.form_valid`\n fields = PersonForm.Meta.fields.copy()\n fields.remove('username')\n\n\nclass PersonPermissionsForm(forms.ModelForm):\n class Meta:\n model = Person\n # only display administration-related fields: groups, permissions,\n # being a superuser or being active (== ability to log in)\n fields = [\n 'is_active',\n 'is_superuser',\n 'user_permissions',\n 'groups',\n ]\n\n\nclass PersonsSelectionForm(forms.Form):\n\n person_a = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person From',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n person_b = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person To',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n\nclass PersonsMergeForm(forms.Form):\n TWO = (\n ('obj_a', 'Use A'),\n ('obj_b', 'Use B'),\n )\n THREE = TWO + (('combine', 'Combine'), )\n DEFAULT = 'obj_a'\n\n person_a = forms.ModelChoiceField(queryset=Person.objects.all(),\n widget=forms.HiddenInput)\n\n person_b = forms.ModelChoiceField(queryset=Person.objects.all(),\n widget=forms.HiddenInput)\n\n id = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n username = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n personal = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n middle = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n family = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n email = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n may_contact = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n gender = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n airport = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n github = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n twitter = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n url = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n notes = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n affiliation = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n occupation = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n orcid = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n award_set = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n qualification_set = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n label='Lessons',\n )\n domains = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n languages = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n task_set = forms.ChoiceField(\n 
choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n is_active = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n\n\nclass BadgeAwardForm(forms.ModelForm):\n\n person = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n event = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n awarded_by = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Awarded by',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Award\n fields = '__all__'\n widgets = {'badge': HiddenInput}\n\n\nclass PersonAwardForm(forms.ModelForm):\n\n event = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n awarded_by = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Awarded by',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Award\n fields = '__all__'\n widgets = {'person': HiddenInput}\n\n\nclass PersonTaskForm(forms.ModelForm):\n\n event = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Task\n fields = '__all__'\n widgets = {'person': HiddenInput}\n\n\nclass OrganizationForm(forms.ModelForm):\n domain = forms.CharField(\n max_length=Organization._meta.get_field('domain').max_length,\n validators=[\n RegexValidator(\n '[^\\w\\.-]+', inverse_match=True,\n message='Please enter only the domain (such as \"math.esu.edu\")'\n ' without a leading \"http://\" or a trailing \"/\".')\n ],\n )\n\n class Meta:\n model = Organization\n fields = ['domain', 'fullname', 'country', 'notes']\n\n\nclass MembershipForm(forms.ModelForm):\n class Meta:\n model = Membership\n fields = '__all__'\n widgets = {'host': HiddenInput, }\n\n\nclass SponsorshipForm(forms.ModelForm):\n organization = selectable.AutoCompleteSelectField(\n lookup_class=lookups.OrganizationLookup,\n label='Organization',\n required=True,\n help_text=Sponsorship._meta.get_field('organization').help_text,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n contact = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Contact',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Sponsorship\n fields = '__all__'\n widgets = {'event': HiddenInput, }\n\n\nclass SWCEventRequestForm(forms.ModelForm):\n captcha = ReCaptchaField()\n workshop_type = forms.CharField(initial='swc', widget=forms.HiddenInput())\n understand_admin_fee = forms.BooleanField(\n required=True,\n initial=False,\n label='I understand the Software Carpentry Foundation\\'s '\n 'administration fee.',\n help_text='<a href=\"http://software-carpentry.org/blog/2015/07/changes'\n '-to-admin-fee.html\" target=\"_blank\">Look up administration '\n 'fees</a>.',\n )\n language = selectable.AutoCompleteSelectField(\n 
lookup_class=lookups.LanguageLookup,\n label='Language',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = EventRequest\n exclude = ('active', 'created_at', 'last_updated_at', 'assigned_to',\n 'data_types', 'data_types_other',\n 'attendee_data_analysis_level', 'fee_waiver_request')\n widgets = {\n 'approx_attendees': forms.RadioSelect(),\n 'attendee_domains': forms.CheckboxSelectMultiple(),\n 'attendee_academic_levels': forms.CheckboxSelectMultiple(),\n 'attendee_computing_levels': forms.CheckboxSelectMultiple(),\n 'travel_reimbursement': forms.RadioSelect(),\n 'admin_fee_payment': forms.RadioSelect(),\n }\n\n\nclass DCEventRequestForm(SWCEventRequestForm):\n workshop_type = forms.CharField(initial='dc', widget=forms.HiddenInput())\n understand_admin_fee = forms.BooleanField(\n required=True,\n initial=False,\n label='I understand the Data Carpentry\\'s administration fee.',\n help_text='There is a per-workshop fee for Data Carpentry to cover '\n 'administrative and core development costs. The per-workshop fee is '\n 'currently $2500. We work to find local instructors when possible, but'\n ' the host institute will also need to pay for instructors travel and'\n ' lodging if they need to travel. Therefore overall workshop costs are'\n ' $2500 - $6000.',\n )\n\n class Meta(SWCEventRequestForm.Meta):\n exclude = ('active', 'created_at', 'last_updated_at', 'assigned_to',\n 'admin_fee_payment', 'attendee_computing_levels')\n widgets = {\n 'approx_attendees': forms.RadioSelect(),\n 'attendee_domains': forms.CheckboxSelectMultiple(),\n 'data_types': forms.RadioSelect(),\n 'attendee_academic_levels': forms.CheckboxSelectMultiple(),\n 'attendee_data_analysis_level': forms.CheckboxSelectMultiple(),\n 'travel_reimbursement': forms.RadioSelect(),\n }\n\n\nclass EventSubmitFormNoCaptcha(forms.ModelForm):\n class Meta:\n model = EventSubmission\n exclude = ('active', 'created_at', 'last_updated_at', 'assigned_to')\n\n\nclass EventSubmitForm(EventSubmitFormNoCaptcha):\n captcha = ReCaptchaField()\n\n\nclass DCSelfOrganizedEventRequestFormNoCaptcha(forms.ModelForm):\n # the easiest way to make these fields required without rewriting their\n # verbose names or help texts\n handle_registration = DCSelfOrganizedEventRequest._meta \\\n .get_field('handle_registration').formfield(required=True)\n distribute_surveys = DCSelfOrganizedEventRequest._meta \\\n .get_field('distribute_surveys').formfield(required=True)\n follow_code_of_conduct = DCSelfOrganizedEventRequest._meta \\\n .get_field('follow_code_of_conduct').formfield(required=True)\n\n class Meta:\n model = DCSelfOrganizedEventRequest\n exclude = ('created_at', 'last_updated_at', 'assigned_to')\n widgets = {\n 'instructor_status': forms.RadioSelect(),\n 'is_partner': forms.RadioSelect(),\n 'domains': forms.CheckboxSelectMultiple(),\n 'topics': forms.CheckboxSelectMultiple(),\n 'attendee_academic_levels': forms.CheckboxSelectMultiple(),\n 'attendee_data_analysis_level': forms.CheckboxSelectMultiple(),\n 'payment': forms.RadioSelect(),\n }\n\n\nclass DCSelfOrganizedEventRequestForm(\n DCSelfOrganizedEventRequestFormNoCaptcha):\n captcha = ReCaptchaField()\n\n class Meta(DCSelfOrganizedEventRequestFormNoCaptcha.Meta):\n exclude = ('active', 'created_at', 'last_updated_at', 'assigned_to')\n\n\nclass ProfileUpdateRequestFormNoCaptcha(forms.ModelForm):\n languages = selectable.AutoCompleteSelectMultipleField(\n lookup_class=lookups.LanguageLookup,\n label='Languages you can teach 
in',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectMultipleWidget,\n )\n\n class Meta:\n model = ProfileUpdateRequest\n exclude = ('active', 'created_at', 'last_updated_at')\n widgets = {\n 'domains': forms.CheckboxSelectMultiple(),\n 'lessons': forms.CheckboxSelectMultiple(),\n 'occupation': forms.RadioSelect(),\n 'gender': forms.RadioSelect(),\n }\n\n def clean_twitter(self):\n \"\"\"Remove '@'s from the beginning of the Twitter handle.\"\"\"\n twitter_handle = self.cleaned_data['twitter']\n return re.sub('^@+', '', twitter_handle)\n\n\nclass ProfileUpdateRequestForm(ProfileUpdateRequestFormNoCaptcha):\n captcha = ReCaptchaField()\n\n\nclass PersonLookupForm(forms.Form):\n person = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n\nclass AdminLookupForm(forms.Form):\n person = selectable.AutoCompleteSelectField(\n lookup_class=lookups.AdminLookup,\n label='Administrator',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n\nclass SimpleTodoForm(forms.ModelForm):\n class Meta:\n model = TodoItem\n fields = ('title', 'due', 'additional', 'completed', 'event')\n widgets = {'event': HiddenInput, }\n\n# `extra`: number of forms populated via `initial` parameter; it's hardcoded in\n# `views.todos_add`\nTodoFormSet = modelformset_factory(TodoItem, form=SimpleTodoForm, extra=10)\n\n\nclass EventsSelectionForm(forms.Form):\n event_a = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event A',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n event_b = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event B',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n\nclass EventsMergeForm(forms.Form):\n TWO = (\n ('obj_a', 'Use A'),\n ('obj_b', 'Use B'),\n )\n THREE = TWO + (('combine', 'Combine'), )\n DEFAULT = 'obj_a'\n\n event_a = forms.ModelChoiceField(queryset=Event.objects.all(),\n widget=forms.HiddenInput)\n\n event_b = forms.ModelChoiceField(queryset=Event.objects.all(),\n widget=forms.HiddenInput)\n\n id = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n slug = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n completed = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n assigned_to = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n start = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n end = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n host = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n administrator = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n tags = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n url = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n language = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n reg_key = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n admin_fee = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n 
)\n invoice_status = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n attendance = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n contact = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n country = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n venue = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n address = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n latitude = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n longitude = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n learners_pre = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n learners_post = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n instructors_pre = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n instructors_post = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n learners_longterm = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n notes = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n task_set = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n todoitem_set = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n\n\nclass InvoiceRequestForm(forms.ModelForm):\n class Meta:\n model = InvoiceRequest\n fields = (\n 'organization', 'reason', 'reason_other', 'date', 'event',\n 'event_location', 'item_id', 'postal_number', 'contact_name',\n 'contact_email', 'contact_phone', 'full_address', 'amount',\n 'currency', 'currency_other', 'breakdown', 'vendor_form_required',\n 'vendor_form_link', 'form_W9', 'receipts_sent',\n 'shared_receipts_link', 'notes',\n )\n widgets = {\n 'reason': RadioSelect,\n 'currency': RadioSelect,\n 'vendor_form_required': RadioSelect,\n 'receipts_sent': RadioSelect,\n }\n\n\nclass InvoiceRequestUpdateForm(forms.ModelForm):\n class Meta:\n model = InvoiceRequest\n fields = (\n 'status', 'sent_date', 'paid_date', 'notes'\n )\n\n\nclass TrainingRequestForm(forms.ModelForm):\n agreed_to_code_of_conduct = forms.BooleanField(\n required=True,\n initial=False,\n label='*I agree to abide by Software and Data Carpentry\\'s Code of Conduct',\n help_text='The Code of Conduct can be found at '\n '<a href=\"http://software-carpentry.org/conduct/\" target=\"_blank\">'\n 'http://software-carpentry.org/conduct/</a>'\n 'and <a href=\"http://datacarpentry.org/code-of-conduct/\" target=\"_blank\">'\n 'http://datacarpentry.org/code-of-conduct/</a>',\n )\n agreed_to_complete_training = forms.BooleanField(\n required=True,\n initial=False,\n label='*I agree to complete this training within three months of the Training Course',\n help_text='The completion steps are described at '\n '<a href=\"http://swcarpentry.github.io/instructor-training/checkout/\" target=\"_blank\">'\n 'http://swcarpentry.github.io/instructor-training/checkout/</a> '\n 'and take a total of approximately 8-10 hours.',\n )\n agreed_to_teach_workshops = forms.BooleanField(\n required=True,\n initial=False,\n label='*I agree to teach a Software Carpentry or Data Carpentry '\n 'workshop within 12 months of this Training Course',\n )\n captcha = ReCaptchaField()\n\n class Meta:\n model = TrainingRequest\n fields 
= (\n 'group_name',\n 'personal',\n 'family',\n 'email',\n 'github',\n 'occupation',\n 'occupation_other',\n 'affiliation',\n 'location',\n 'country',\n 'domains',\n 'domains_other',\n 'gender',\n 'gender_other',\n 'previous_involvement',\n 'previous_training',\n 'previous_training_other',\n 'previous_training_explanation',\n 'previous_experience',\n 'previous_experience_other',\n 'previous_experience_explanation',\n 'programming_language_usage_frequency',\n 'reason',\n 'teaching_frequency_expectation',\n 'teaching_frequency_expectation_other',\n 'max_travelling_frequency',\n 'max_travelling_frequency_other',\n 'additional_skills',\n 'comment',\n )\n widgets = {\n 'occupation': forms.RadioSelect(),\n 'domains': forms.CheckboxSelectMultiple(),\n 'gender': forms.RadioSelect(),\n 'previous_involvement': forms.CheckboxSelectMultiple(),\n 'previous_training': forms.RadioSelect(),\n 'previous_experience': forms.RadioSelect(),\n 'programming_language_usage_frequency': forms.RadioSelect(),\n 'teaching_frequency_expectation': forms.RadioSelect(),\n 'max_travelling_frequency': forms.RadioSelect(),\n }\n\n\nclass AutoUpdateProfileForm(forms.ModelForm):\n username = forms.CharField(disabled=True, required=False)\n github = forms.CharField(\n disabled=True, required=False,\n help_text='If you want to change your github username, please email '\n 'us at <a href=\"mailto:[email protected]\">'\n '[email protected]</a>.')\n\n languages = selectable.AutoCompleteSelectMultipleField(\n lookup_class=lookups.LanguageLookup,\n label='Languages',\n required=False,\n widget=selectable.AutoComboboxSelectMultipleWidget,\n )\n\n class Meta:\n model = Person\n fields = [\n 'personal',\n 'middle',\n 'family',\n 'email',\n 'gender',\n 'may_contact',\n 'airport',\n 'github',\n 'twitter',\n 'url',\n 'username',\n 'affiliation',\n 'domains',\n 'lessons',\n 'languages',\n ]\n readonly_fields = (\n 'username',\n 'github',\n )\n widgets = {\n 'occupation': forms.RadioSelect(),\n 'gender': forms.RadioSelect(),\n 'domains': forms.CheckboxSelectMultiple(),\n 'lessons': forms.CheckboxSelectMultiple(),\n }\n", "path": "workshops/forms.py" } ]
[ { "content": "import re\n\nfrom django import forms\nfrom django.core.validators import RegexValidator\nfrom django.forms import (\n HiddenInput, CheckboxSelectMultiple, TextInput, modelformset_factory,\n RadioSelect,\n)\n\nfrom captcha.fields import ReCaptchaField\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Div, HTML, Submit\nfrom crispy_forms.bootstrap import FormActions\nfrom django_countries import Countries\nfrom django_countries.fields import CountryField\nfrom selectable import forms as selectable\n\nfrom workshops.models import (\n Award, Event, Lesson, Person, Task, Airport, Organization,\n EventRequest, ProfileUpdateRequest, TodoItem, Membership,\n Sponsorship, InvoiceRequest, EventSubmission, Language,\n TrainingRequest,\n DCSelfOrganizedEventRequest,\n)\nfrom workshops import lookups\n\n\nAUTOCOMPLETE_HELP_TEXT = (\n \"Autocomplete field; type characters to view available options, \"\n \"then select desired item from list.\"\n)\n\n\nclass BootstrapHelper(FormHelper):\n \"\"\"Layout and behavior for crispy-displayed forms.\"\"\"\n form_class = 'form-horizontal'\n label_class = 'col-lg-2'\n field_class = 'col-lg-8'\n html5_required = True\n\n def __init__(self, form=None):\n super().__init__(form)\n\n self.attrs['role'] = 'form'\n self.inputs.append(Submit('submit', 'Submit'))\n\n\nclass BootstrapHelperGet(BootstrapHelper):\n \"\"\"Force form to use GET instead of default POST.\"\"\"\n form_method = 'get'\n\n\nclass BootstrapHelperWithAdd(BootstrapHelper):\n \"\"\"Change form's 'Submit' to 'Add'.\"\"\"\n\n def __init__(self, form=None):\n super().__init__(form)\n\n self.inputs[-1] = Submit('submit', 'Add')\n\n\nclass BootstrapHelperFilter(FormHelper):\n \"\"\"A differently shaped forms (more space-efficient) for use in sidebar as\n filter forms.\"\"\"\n form_method = 'get'\n\n def __init__(self, form=None):\n super().__init__(form)\n self.attrs['role'] = 'form'\n self.inputs.append(Submit('', 'Submit'))\n\n\nclass BootstrapHelperWiderLabels(BootstrapHelper):\n \"\"\"SWCEventRequestForm and DCEventRequestForm have long labels, so this\n helper is used to address that issue.\"\"\"\n label_class = 'col-lg-3'\n field_class = 'col-lg-7'\n\n\nclass BootstrapHelperFormsetInline(BootstrapHelper):\n \"\"\"For use in inline formsets.\"\"\"\n template = 'bootstrap/table_inline_formset.html'\n\n\nbootstrap_helper = BootstrapHelper()\nbootstrap_helper_get = BootstrapHelperGet()\nbootstrap_helper_with_add = BootstrapHelperWithAdd()\nbootstrap_helper_filter = BootstrapHelperFilter()\nbootstrap_helper_wider_labels = BootstrapHelperWiderLabels()\nbootstrap_helper_inline_formsets = BootstrapHelperFormsetInline()\n\n\nclass WorkshopStaffForm(forms.Form):\n '''Represent instructor matching form.'''\n\n latitude = forms.FloatField(label='Latitude',\n min_value=-90.0,\n max_value=90.0,\n required=False)\n longitude = forms.FloatField(label='Longitude',\n min_value=-180.0,\n max_value=180.0,\n required=False)\n airport = selectable.AutoCompleteSelectField(\n lookup_class=lookups.AirportLookup,\n label='Airport',\n required=False,\n widget=selectable.AutoComboboxSelectWidget(\n lookup_class=lookups.AirportLookup,\n ),\n )\n languages = selectable.AutoCompleteSelectMultipleField(\n lookup_class=lookups.LanguageLookup,\n label='Languages',\n required=False,\n widget=selectable.AutoComboboxSelectMultipleWidget,\n )\n\n country = forms.MultipleChoiceField(choices=[])\n\n lessons = forms.ModelMultipleChoiceField(queryset=Lesson.objects.all(),\n 
widget=CheckboxSelectMultiple(),\n required=False)\n\n INSTRUCTOR_BADGE_CHOICES = (\n ('swc-instructor', 'Software Carpentry Instructor'),\n ('dc-instructor', 'Data Carpentry Instructor'),\n )\n instructor_badges = forms.MultipleChoiceField(\n choices=INSTRUCTOR_BADGE_CHOICES,\n widget=CheckboxSelectMultiple(),\n required=False,\n )\n\n GENDER_CHOICES = ((None, '---------'), ) + Person.GENDER_CHOICES\n gender = forms.ChoiceField(choices=GENDER_CHOICES, required=False)\n\n was_helper = forms.BooleanField(\n required=False, label='Was helper at least once before')\n was_organizer = forms.BooleanField(\n required=False, label='Was organizer at least once before')\n is_in_progress_trainee = forms.BooleanField(\n required=False, label='Is an in-progress instructor trainee')\n\n def __init__(self, *args, **kwargs):\n '''Build form layout dynamically.'''\n super().__init__(*args, **kwargs)\n\n # dynamically build choices for country field\n only = Airport.objects.distinct().exclude(country='') \\\n .exclude(country=None) \\\n .values_list('country', flat=True)\n countries = Countries()\n countries.only = only\n\n choices = list(countries)\n self.fields['country'] = forms.MultipleChoiceField(choices=choices,\n required=False)\n\n self.helper = FormHelper(self)\n self.helper.form_class = 'form-inline'\n self.helper.form_method = 'get'\n self.helper.layout = Layout(\n Div(\n Div(HTML('Location close to'), css_class='panel-heading'),\n Div('airport', css_class='panel-body'),\n Div(HTML('<b>OR</b>'), css_class='panel-footer'),\n Div('country', css_class='panel-body'),\n Div(HTML('<b>OR</b>'), css_class='panel-footer'),\n Div('latitude', 'longitude', css_class='panel-body'),\n css_class='panel panel-default ',\n ),\n 'instructor_badges',\n 'was_helper',\n 'was_organizer',\n 'is_in_progress_trainee',\n 'languages',\n 'gender',\n 'lessons',\n FormActions(\n Submit('submit', 'Submit'),\n ),\n )\n\n def clean(self):\n cleaned_data = super().clean()\n lat = bool(cleaned_data.get('latitude'))\n lng = bool(cleaned_data.get('longitude'))\n airport = bool(cleaned_data.get('airport'))\n country = bool(cleaned_data.get('country'))\n latlng = lat and lng\n\n # if searching by coordinates, then there must be both lat & lng\n # present\n if lat ^ lng:\n raise forms.ValidationError(\n 'Must specify both latitude and longitude if searching by '\n 'coordinates')\n\n # User must search by airport, or country, or coordinates, or none\n # of them. 
Sum of boolean elements must be equal 0 (if general search)\n # or 1 (if searching by airport OR country OR lat/lng).\n if sum([airport, country, latlng]) not in [0, 1]:\n raise forms.ValidationError(\n 'Must specify an airport OR a country, OR use coordinates, OR '\n 'none of them.')\n return cleaned_data\n\n\nclass PersonBulkAddForm(forms.Form):\n '''Represent CSV upload form for bulk adding people.'''\n\n file = forms.FileField()\n\n\nclass SearchForm(forms.Form):\n '''Represent general searching form.'''\n\n term = forms.CharField(label='term',\n max_length=100)\n in_organizations = forms.BooleanField(label='in organizations',\n required=False,\n initial=True)\n in_events = forms.BooleanField(label='in events',\n required=False,\n initial=True)\n in_persons = forms.BooleanField(label='in persons',\n required=False,\n initial=True)\n in_airports = forms.BooleanField(label='in airports',\n required=False,\n initial=True)\n\n\nclass DebriefForm(forms.Form):\n '''Represent general debrief form.'''\n begin_date = forms.DateField(\n label='Begin date as YYYY-MM-DD',\n input_formats=['%Y-%m-%d', ]\n )\n end_date = forms.DateField(\n label='End date as YYYY-MD-DD',\n input_formats=['%Y-%m-%d', ]\n )\n\n\nclass EventForm(forms.ModelForm):\n host = selectable.AutoCompleteSelectField(\n lookup_class=lookups.OrganizationLookup,\n label='Host',\n required=True,\n help_text=Event._meta.get_field('host').help_text,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n administrator = selectable.AutoCompleteSelectField(\n lookup_class=lookups.OrganizationLookup,\n label='Administrator',\n required=False,\n help_text=Event._meta.get_field('administrator').help_text,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n assigned_to = selectable.AutoCompleteSelectField(\n lookup_class=lookups.AdminLookup,\n label='Assigned to',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n language = selectable.AutoCompleteSelectField(\n lookup_class=lookups.LanguageLookup,\n label='Language',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n country = CountryField().formfield(\n required=False,\n help_text=Event._meta.get_field('country').help_text,\n )\n\n admin_fee = forms.DecimalField(min_value=0, decimal_places=2,\n required=False, widget=TextInput)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['slug'].widget.attrs['placeholder'] = 'YYYY-MM-DD-location'\n self.fields['start'].widget.attrs['placeholder'] = 'YYYY-MM-DD'\n self.fields['end'].widget.attrs['placeholder'] = 'YYYY-MM-DD'\n\n self.helper = BootstrapHelper(self)\n\n idx_start = self.helper['country'].slice[0][0][0]\n idx_end = self.helper['longitude'].slice[0][0][0]\n # wrap all venue fields within <div class='panel-body'>\n self.helper[idx_start:idx_end + 1] \\\n .wrap_together(Div, css_class='panel-body')\n # wrap <div class='panel-body'> within <div class='panel panel-…'>\n self.helper[idx_start].wrap_together(Div,\n css_class='panel panel-default')\n # add <div class='panel-heading'>Loc. 
details</div> inside \"div.panel\"\n self.helper.layout[idx_start].insert(0, Div(HTML('Location details'),\n css_class='panel-heading'))\n\n id_learners_pre = self.helper['learners_pre'].slice[0][0][0]\n id_learners_longterm = self.helper['learners_longterm'].slice[0][0][0]\n # wrap all survey fields within <div class='panel-body'>\n self.helper[id_learners_pre:id_learners_longterm + 1] \\\n .wrap_together(Div, css_class='panel-body')\n # wrap <div class='panel-body'> within <div class='panel panel-…'>\n self.helper[id_learners_pre].wrap_together(\n Div, css_class='panel panel-default')\n # add <div class='panel-heading'>Venue details</div> inside \"div.panel\"\n self.helper.layout[id_learners_pre].insert(\n 0, Div(HTML('Survey results'), css_class='panel-heading'))\n\n def clean_slug(self):\n # Ensure slug is in \"YYYY-MM-DD-location\" format\n data = self.cleaned_data['slug']\n match = re.match('(\\d{4}|x{4})-(\\d{2}|x{2})-(\\d{2}|x{2})-.+', data)\n if not match:\n raise forms.ValidationError('Slug must be in \"YYYY-MM-DD-location\"'\n ' format, where \"YYYY\", \"MM\", \"DD\" can'\n ' be unspecified (ie. \"xx\").')\n return data\n\n def clean_end(self):\n \"\"\"Ensure end >= start.\"\"\"\n start = self.cleaned_data['start']\n end = self.cleaned_data['end']\n\n if start and end and end < start:\n raise forms.ValidationError('Must not be earlier than start date.')\n return end\n\n class Meta:\n model = Event\n # reorder fields, don't display 'deleted' field\n fields = ('slug', 'completed', 'start', 'end', 'host', 'administrator',\n 'assigned_to', 'tags', 'url', 'language', 'reg_key',\n 'admin_fee', 'invoice_status', 'attendance', 'contact',\n 'notes', 'country', 'venue', 'address', 'latitude',\n 'longitude', 'learners_pre', 'learners_post',\n 'instructors_pre', 'instructors_post', 'learners_longterm')\n # WARNING: don't change put any fields between 'country' and\n # 'longitude' that don't relate to the venue of the event\n\n widgets = {\n 'attendance': TextInput,\n 'latitude': TextInput,\n 'longitude': TextInput,\n 'invoice_status': RadioSelect,\n }\n\n class Media:\n # thanks to this, {{ form.media }} in the template will generate\n # a <link href=\"\"> (for CSS files) or <script src=\"\"> (for JS files)\n js = (\n 'date_yyyymmdd.js',\n 'import_from_url.js', 'update_from_url.js',\n 'online_country.js',\n )\n\n\nclass TaskForm(forms.ModelForm):\n\n person = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Task\n fields = '__all__'\n widgets = {'event': HiddenInput}\n\n\nclass TaskFullForm(TaskForm):\n\n event = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Task\n fields = '__all__'\n\n\nclass PersonForm(forms.ModelForm):\n\n airport = selectable.AutoCompleteSelectField(\n lookup_class=lookups.AirportLookup,\n label='Airport',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n languages = selectable.AutoCompleteSelectMultipleField(\n lookup_class=lookups.LanguageLookup,\n label='Languages',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectMultipleWidget,\n )\n\n class Meta:\n model = Person\n # don't display the 'password', 'user_permissions',\n # 'groups' 
or 'is_superuser' fields\n # + reorder fields\n fields = ['username', 'personal', 'middle', 'family', 'may_contact',\n 'email', 'gender', 'airport', 'affiliation', 'github',\n 'twitter', 'url', 'occupation', 'orcid', 'notes', 'lessons',\n 'domains', 'languages']\n\n\nclass PersonCreateForm(PersonForm):\n class Meta(PersonForm.Meta):\n # remove 'username' field as it's being populated after form save\n # in the `views.PersonCreate.form_valid`\n fields = PersonForm.Meta.fields.copy()\n fields.remove('username')\n\n\nclass PersonPermissionsForm(forms.ModelForm):\n class Meta:\n model = Person\n # only display administration-related fields: groups, permissions,\n # being a superuser or being active (== ability to log in)\n fields = [\n 'is_active',\n 'is_superuser',\n 'user_permissions',\n 'groups',\n ]\n\n\nclass PersonsSelectionForm(forms.Form):\n\n person_a = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person From',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n person_b = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person To',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n\nclass PersonsMergeForm(forms.Form):\n TWO = (\n ('obj_a', 'Use A'),\n ('obj_b', 'Use B'),\n )\n THREE = TWO + (('combine', 'Combine'), )\n DEFAULT = 'obj_a'\n\n person_a = forms.ModelChoiceField(queryset=Person.objects.all(),\n widget=forms.HiddenInput)\n\n person_b = forms.ModelChoiceField(queryset=Person.objects.all(),\n widget=forms.HiddenInput)\n\n id = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n username = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n personal = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n middle = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n family = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n email = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n may_contact = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n gender = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n airport = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n github = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n twitter = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n url = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n notes = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n affiliation = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n occupation = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n orcid = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n award_set = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n qualification_set = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n label='Lessons',\n )\n domains = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n languages = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n task_set = 
forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n is_active = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n\n\nclass BadgeAwardForm(forms.ModelForm):\n\n person = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n event = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n awarded_by = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Awarded by',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Award\n fields = '__all__'\n widgets = {'badge': HiddenInput}\n\n\nclass PersonAwardForm(forms.ModelForm):\n\n event = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n awarded_by = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Awarded by',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Award\n fields = '__all__'\n widgets = {'person': HiddenInput}\n\n\nclass PersonTaskForm(forms.ModelForm):\n\n event = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Task\n fields = '__all__'\n widgets = {'person': HiddenInput}\n\n\nclass OrganizationForm(forms.ModelForm):\n domain = forms.CharField(\n max_length=Organization._meta.get_field('domain').max_length,\n validators=[\n RegexValidator(\n '[^\\w\\.-]+', inverse_match=True,\n message='Please enter only the domain (such as \"math.esu.edu\")'\n ' without a leading \"http://\" or a trailing \"/\".')\n ],\n )\n\n class Meta:\n model = Organization\n fields = ['domain', 'fullname', 'country', 'notes']\n\n\nclass MembershipForm(forms.ModelForm):\n class Meta:\n model = Membership\n fields = '__all__'\n widgets = {'host': HiddenInput, }\n\n\nclass SponsorshipForm(forms.ModelForm):\n organization = selectable.AutoCompleteSelectField(\n lookup_class=lookups.OrganizationLookup,\n label='Organization',\n required=True,\n help_text=Sponsorship._meta.get_field('organization').help_text,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n contact = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Contact',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Sponsorship\n fields = '__all__'\n widgets = {'event': HiddenInput, }\n\n\nclass SWCEventRequestForm(forms.ModelForm):\n captcha = ReCaptchaField()\n workshop_type = forms.CharField(initial='swc', widget=forms.HiddenInput())\n understand_admin_fee = forms.BooleanField(\n required=True,\n initial=False,\n label='I understand the Software Carpentry Foundation\\'s '\n 'administration fee.',\n help_text='<a href=\"http://software-carpentry.org/blog/2015/07/changes'\n '-to-admin-fee.html\" target=\"_blank\">Look up administration '\n 'fees</a>.',\n )\n language = selectable.AutoCompleteSelectField(\n 
lookup_class=lookups.LanguageLookup,\n label='Language',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = EventRequest\n exclude = ('active', 'created_at', 'last_updated_at', 'assigned_to',\n 'data_types', 'data_types_other',\n 'attendee_data_analysis_level', 'fee_waiver_request')\n widgets = {\n 'approx_attendees': forms.RadioSelect(),\n 'attendee_domains': forms.CheckboxSelectMultiple(),\n 'attendee_academic_levels': forms.CheckboxSelectMultiple(),\n 'attendee_computing_levels': forms.CheckboxSelectMultiple(),\n 'travel_reimbursement': forms.RadioSelect(),\n 'admin_fee_payment': forms.RadioSelect(),\n }\n\n\nclass DCEventRequestForm(SWCEventRequestForm):\n workshop_type = forms.CharField(initial='dc', widget=forms.HiddenInput())\n understand_admin_fee = forms.BooleanField(\n required=True,\n initial=False,\n label='I understand the Data Carpentry\\'s administration fee.',\n help_text='There is a per-workshop fee for Data Carpentry to cover '\n 'administrative and core development costs. The per-workshop fee is '\n 'currently $2500. We work to find local instructors when possible, but'\n ' the host institute will also need to pay for instructors travel and'\n ' lodging if they need to travel. Therefore overall workshop costs are'\n ' $2500 - $6000.',\n )\n\n class Meta(SWCEventRequestForm.Meta):\n exclude = ('active', 'created_at', 'last_updated_at', 'assigned_to',\n 'admin_fee_payment', 'attendee_computing_levels')\n widgets = {\n 'approx_attendees': forms.RadioSelect(),\n 'attendee_domains': forms.CheckboxSelectMultiple(),\n 'data_types': forms.RadioSelect(),\n 'attendee_academic_levels': forms.CheckboxSelectMultiple(),\n 'attendee_data_analysis_level': forms.CheckboxSelectMultiple(),\n 'travel_reimbursement': forms.RadioSelect(),\n }\n\n\nclass EventSubmitFormNoCaptcha(forms.ModelForm):\n class Meta:\n model = EventSubmission\n exclude = ('active', 'created_at', 'last_updated_at', 'assigned_to')\n\n\nclass EventSubmitForm(EventSubmitFormNoCaptcha):\n captcha = ReCaptchaField()\n\n\nclass DCSelfOrganizedEventRequestFormNoCaptcha(forms.ModelForm):\n # the easiest way to make these fields required without rewriting their\n # verbose names or help texts\n handle_registration = DCSelfOrganizedEventRequest._meta \\\n .get_field('handle_registration').formfield(required=True)\n distribute_surveys = DCSelfOrganizedEventRequest._meta \\\n .get_field('distribute_surveys').formfield(required=True)\n follow_code_of_conduct = DCSelfOrganizedEventRequest._meta \\\n .get_field('follow_code_of_conduct').formfield(required=True)\n\n class Meta:\n model = DCSelfOrganizedEventRequest\n exclude = ('created_at', 'last_updated_at', 'assigned_to')\n widgets = {\n 'instructor_status': forms.RadioSelect(),\n 'is_partner': forms.RadioSelect(),\n 'domains': forms.CheckboxSelectMultiple(),\n 'topics': forms.CheckboxSelectMultiple(),\n 'attendee_academic_levels': forms.CheckboxSelectMultiple(),\n 'attendee_data_analysis_level': forms.CheckboxSelectMultiple(),\n 'payment': forms.RadioSelect(),\n }\n\n\nclass DCSelfOrganizedEventRequestForm(\n DCSelfOrganizedEventRequestFormNoCaptcha):\n captcha = ReCaptchaField()\n\n class Meta(DCSelfOrganizedEventRequestFormNoCaptcha.Meta):\n exclude = ('active', 'created_at', 'last_updated_at', 'assigned_to')\n\n\nclass ProfileUpdateRequestFormNoCaptcha(forms.ModelForm):\n languages = selectable.AutoCompleteSelectMultipleField(\n lookup_class=lookups.LanguageLookup,\n label='Languages you can teach 
in',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectMultipleWidget,\n )\n\n class Meta:\n model = ProfileUpdateRequest\n exclude = ('active', 'created_at', 'last_updated_at')\n widgets = {\n 'domains': forms.CheckboxSelectMultiple(),\n 'lessons': forms.CheckboxSelectMultiple(),\n 'occupation': forms.RadioSelect(),\n 'gender': forms.RadioSelect(),\n }\n\n def clean_twitter(self):\n \"\"\"Remove '@'s from the beginning of the Twitter handle.\"\"\"\n twitter_handle = self.cleaned_data['twitter']\n return re.sub('^@+', '', twitter_handle)\n\n\nclass ProfileUpdateRequestForm(ProfileUpdateRequestFormNoCaptcha):\n captcha = ReCaptchaField()\n\n\nclass PersonLookupForm(forms.Form):\n person = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n\nclass AdminLookupForm(forms.Form):\n person = selectable.AutoCompleteSelectField(\n lookup_class=lookups.AdminLookup,\n label='Administrator',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n\nclass SimpleTodoForm(forms.ModelForm):\n class Meta:\n model = TodoItem\n fields = ('title', 'due', 'additional', 'completed', 'event')\n widgets = {'event': HiddenInput, }\n\n# `extra`: number of forms populated via `initial` parameter; it's hardcoded in\n# `views.todos_add`\nTodoFormSet = modelformset_factory(TodoItem, form=SimpleTodoForm, extra=10)\n\n\nclass EventsSelectionForm(forms.Form):\n event_a = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event A',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n event_b = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event B',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n\nclass EventsMergeForm(forms.Form):\n TWO = (\n ('obj_a', 'Use A'),\n ('obj_b', 'Use B'),\n )\n THREE = TWO + (('combine', 'Combine'), )\n DEFAULT = 'obj_a'\n\n event_a = forms.ModelChoiceField(queryset=Event.objects.all(),\n widget=forms.HiddenInput)\n\n event_b = forms.ModelChoiceField(queryset=Event.objects.all(),\n widget=forms.HiddenInput)\n\n id = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n slug = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n completed = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n assigned_to = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n start = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n end = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n host = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n administrator = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n tags = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n url = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n language = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n reg_key = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n admin_fee = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n 
)\n invoice_status = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n attendance = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n contact = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n country = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n venue = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n address = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n latitude = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n longitude = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n learners_pre = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n learners_post = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n instructors_pre = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n instructors_post = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n learners_longterm = forms.ChoiceField(\n choices=TWO, initial=DEFAULT, widget=forms.RadioSelect,\n )\n notes = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n task_set = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n todoitem_set = forms.ChoiceField(\n choices=THREE, initial=DEFAULT, widget=forms.RadioSelect,\n )\n\n\nclass InvoiceRequestForm(forms.ModelForm):\n class Meta:\n model = InvoiceRequest\n fields = (\n 'organization', 'reason', 'reason_other', 'date', 'event',\n 'event_location', 'item_id', 'postal_number', 'contact_name',\n 'contact_email', 'contact_phone', 'full_address', 'amount',\n 'currency', 'currency_other', 'breakdown', 'vendor_form_required',\n 'vendor_form_link', 'form_W9', 'receipts_sent',\n 'shared_receipts_link', 'notes',\n )\n widgets = {\n 'reason': RadioSelect,\n 'currency': RadioSelect,\n 'vendor_form_required': RadioSelect,\n 'receipts_sent': RadioSelect,\n }\n\n\nclass InvoiceRequestUpdateForm(forms.ModelForm):\n class Meta:\n model = InvoiceRequest\n fields = (\n 'status', 'sent_date', 'paid_date', 'notes'\n )\n\n\nclass TrainingRequestForm(forms.ModelForm):\n agreed_to_code_of_conduct = forms.BooleanField(\n required=True,\n initial=False,\n label='*I agree to abide by Software and Data Carpentry\\'s Code of Conduct',\n help_text='The Code of Conduct can be found at '\n '<a href=\"http://software-carpentry.org/conduct/\" target=\"_blank\">'\n 'http://software-carpentry.org/conduct/</a>'\n 'and <a href=\"http://datacarpentry.org/code-of-conduct/\" target=\"_blank\">'\n 'http://datacarpentry.org/code-of-conduct/</a>',\n )\n agreed_to_complete_training = forms.BooleanField(\n required=True,\n initial=False,\n label='*I agree to complete this training within three months of the Training Course',\n help_text='The completion steps are described at '\n '<a href=\"http://swcarpentry.github.io/instructor-training/checkout/\" target=\"_blank\">'\n 'http://swcarpentry.github.io/instructor-training/checkout/</a> '\n 'and take a total of approximately 8-10 hours.',\n )\n agreed_to_teach_workshops = forms.BooleanField(\n required=True,\n initial=False,\n label='*I agree to teach a Software Carpentry or Data Carpentry '\n 'workshop within 12 months of this Training Course',\n )\n captcha = ReCaptchaField()\n\n class Meta:\n model = TrainingRequest\n fields 
= (\n 'group_name',\n 'personal',\n 'family',\n 'email',\n 'github',\n 'occupation',\n 'occupation_other',\n 'affiliation',\n 'location',\n 'country',\n 'domains',\n 'domains_other',\n 'gender',\n 'gender_other',\n 'previous_involvement',\n 'previous_training',\n 'previous_training_other',\n 'previous_training_explanation',\n 'previous_experience',\n 'previous_experience_other',\n 'previous_experience_explanation',\n 'programming_language_usage_frequency',\n 'reason',\n 'teaching_frequency_expectation',\n 'teaching_frequency_expectation_other',\n 'max_travelling_frequency',\n 'max_travelling_frequency_other',\n 'additional_skills',\n 'comment',\n )\n widgets = {\n 'occupation': forms.RadioSelect(),\n 'domains': forms.CheckboxSelectMultiple(),\n 'gender': forms.RadioSelect(),\n 'previous_involvement': forms.CheckboxSelectMultiple(),\n 'previous_training': forms.RadioSelect(),\n 'previous_experience': forms.RadioSelect(),\n 'programming_language_usage_frequency': forms.RadioSelect(),\n 'teaching_frequency_expectation': forms.RadioSelect(),\n 'max_travelling_frequency': forms.RadioSelect(),\n }\n\n\nclass AutoUpdateProfileForm(forms.ModelForm):\n username = forms.CharField(disabled=True, required=False)\n github = forms.CharField(\n disabled=True, required=False,\n help_text='If you want to change your github username, please email '\n 'us at <a href=\"mailto:[email protected]\">'\n '[email protected]</a>.')\n\n languages = selectable.AutoCompleteSelectMultipleField(\n lookup_class=lookups.LanguageLookup,\n label='Languages',\n required=False,\n widget=selectable.AutoComboboxSelectMultipleWidget,\n )\n\n class Meta:\n model = Person\n fields = [\n 'personal',\n 'middle',\n 'family',\n 'email',\n 'gender',\n 'may_contact',\n 'airport',\n 'github',\n 'twitter',\n 'url',\n 'username',\n 'affiliation',\n 'domains',\n 'lessons',\n 'languages',\n ]\n readonly_fields = (\n 'username',\n 'github',\n )\n widgets = {\n 'occupation': forms.RadioSelect(),\n 'gender': forms.RadioSelect(),\n 'domains': forms.CheckboxSelectMultiple(),\n 'lessons': forms.CheckboxSelectMultiple(),\n }\n", "path": "workshops/forms.py" } ]
diff --git a/workshops/forms.py b/workshops/forms.py
index 362aab25c..3e0653d42 100644
--- a/workshops/forms.py
+++ b/workshops/forms.py
@@ -359,6 +359,7 @@ class Media:
         # thanks to this, {{ form.media }} in the template will generate
         # a <link href=""> (for CSS files) or <script src=""> (for JS files)
         js = (
+            'date_yyyymmdd.js',
             'import_from_url.js', 'update_from_url.js',
             'online_country.js',
         )
diff --git a/workshops/static/date_yyyymmdd.js b/workshops/static/date_yyyymmdd.js
new file mode 100644
index 000000000..c5e28e827
--- /dev/null
+++ b/workshops/static/date_yyyymmdd.js
@@ -0,0 +1,9 @@
+Date.prototype.yyyymmdd = function() {
+    // adapted from http://stackoverflow.com/a/3067896
+    var yyyy = this.getFullYear();
+    var mm_aux = this.getMonth() + 1; // getMonth() is zero-based
+    var mm = mm_aux < 10 ? "0" + mm_aux : mm_aux;
+    var dd_aux = this.getDate();
+    var dd = dd_aux < 10 ? "0" + dd_aux : dd_aux;
+    return "".concat(yyyy, "-", mm, "-", dd);
+};
diff --git a/workshops/static/update_from_url.js b/workshops/static/update_from_url.js
index e4aaace33..ff32ca0f2 100644
--- a/workshops/static/update_from_url.js
+++ b/workshops/static/update_from_url.js
@@ -106,9 +106,7 @@ $('#update_url_form').submit(function(e) {
     }
     // append notes
     var today = new Date();
-    var today_str = "\n\n---------\nUPDATE " +
-        today.getFullYear() + "-" + today.getMonth() + "-" + today.getDay() +
-        ":\n";
+    var today_str = "\n\n---------\nUPDATE " + today.yyyymmdd() + ":\n";
     $("#id_event-notes").val(
         $("#id_event-notes").val() + today_str +
         "INSTRUCTORS: " + data.instructors.join(", ") + "\n\n" +
Date(?) stamp when updating record from URL

When updating an event from URL, the list of instructors/helpers is recorded in the notes field with a heading that looks like a date but isn't. For example, an update I made today has the heading "UPDATE 2016-5-4:". What is this heading supposed to be? Can we make it the real date of the update (i.e., "UPDATE 2016-06-16:")?
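For context on how that malformed heading arises: in JavaScript, `Date.getMonth()` is zero-based and `Date.getDay()` returns the day of the week rather than the day of the month, so concatenating `getFullYear() + "-" + getMonth() + "-" + getDay()` on 16 June 2016 (a Thursday) yields "2016-5-4". The patch above addresses this with the `Date.prototype.yyyymmdd` helper; the snippet below is only an illustrative sketch of the same idea, and the standalone `formatYyyyMmDd` function is hypothetical, not part of the repository.

// Sketch only: reproduces the reported "2016-5-4" heading and the corrected output.
var d = new Date(2016, 5, 16);   // months are zero-based, so 5 means June; this is 2016-06-16, a Thursday

// What the old notes heading effectively computed:
var buggy = d.getFullYear() + "-" + d.getMonth() + "-" + d.getDay();
console.log(buggy);              // "2016-5-4" -> zero-based month plus day-of-week (Thursday == 4)

// Hypothetical helper mirroring the logic of the patched date_yyyymmdd.js:
function formatYyyyMmDd(date) {
  var mm = date.getMonth() + 1;  // correct the zero-based month
  var dd = date.getDate();       // day of the month, not getDay()
  return [
    date.getFullYear(),
    (mm < 10 ? "0" : "") + mm,
    (dd < 10 ? "0" : "") + dd
  ].join("-");
}

console.log(formatYyyyMmDd(d));  // "2016-06-16"

The zero-padding step also matters: without it the heading would read "2016-6-16" instead of the ISO-style "2016-06-16" requested in the issue.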
pex-tool__pex-577
[ { "content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '1.4.7'\n\n# Versions 34.0.0 through 35.0.2 (last pre-36.0.0) de-vendored dependencies which causes problems\n# for pex code so we exclude that range.\nSETUPTOOLS_REQUIREMENT = 'setuptools>=20.3,<41,!=34.*,!=35.*'\n\nWHEEL_REQUIREMENT = 'wheel>=0.26.0,<0.32'\n", "path": "pex/version.py" } ]
[ { "content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '1.4.8'\n\n# Versions 34.0.0 through 35.0.2 (last pre-36.0.0) de-vendored dependencies which causes problems\n# for pex code so we exclude that range.\nSETUPTOOLS_REQUIREMENT = 'setuptools>=20.3,<41,!=34.*,!=35.*'\n\nWHEEL_REQUIREMENT = 'wheel>=0.26.0,<0.32'\n", "path": "pex/version.py" } ]
diff --git a/CHANGES.rst b/CHANGES.rst
index e28be8891..eac2ee2dc 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,6 +1,40 @@
 Release Notes
 =============
 
+1.4.8
+-----
+
+This release adds support for `-c` and `-m` pexfile runtime options that emulate the behavior of the
+same arguments to `python` as well a fix for handling the non-standard platform reported by
+setuptools for Apple system interpreters in addition to several other bug fixes.
+
+* Fix PEXBuilder.clone. (#575)
+  `PR #575 <https://github.com/pantsbuild/pex/pull/575>`_
+
+* Fix PEXEnvironment platform determination. (#568)
+  `PR #568 <https://github.com/pantsbuild/pex/pull/568>`_
+
+* Apply more pinning to jupyter in IT. (#573)
+  `PR #573 <https://github.com/pantsbuild/pex/pull/573>`_
+
+* Minimize interpreter bootstrapping in tests. (#571)
+  `PR #571 <https://github.com/pantsbuild/pex/pull/571>`_
+
+* Introduce 3.7 to CI and release. (#567)
+  `PR #567 <https://github.com/pantsbuild/pex/pull/567>`_
+
+* Add OSX shards. (#565)
+  `PR #565 <https://github.com/pantsbuild/pex/pull/565>`_
+
+* Add support for `-m` and `-c` in interpreter mode. (#563)
+  `PR #563 <https://github.com/pantsbuild/pex/pull/563>`_
+
+* Ignore concurrent-rename failures. (#558)
+  `PR #558 <https://github.com/pantsbuild/pex/pull/558>`_
+
+* Fixup test_jupyter_appnope_env_markers. (#562)
+  `PR #562 <https://github.com/pantsbuild/pex/pull/562>`_
+
 1.4.7
 -----
 
diff --git a/pex/version.py b/pex/version.py
index a6493f470..d1c4d491f 100644
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,7 +1,7 @@
 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
 # Licensed under the Apache License, Version 2.0 (see LICENSE).
 
-__version__ = '1.4.7'
+__version__ = '1.4.8'
 
 # Versions 34.0.0 through 35.0.2 (last pre-36.0.0) de-vendored dependencies which causes problems
 # for pex code so we exclude that range.
Release 1.4.8

User facing issues on the docket:

+ non-zip_safe pex extraction has a race on renaming #557
+ Execute pex archive at runtime with -m parameter #547
+ OSX's python 2.7.10 interpreter reports a bad local platform, bdists fail to resolve #523
+ `pex.resolver.resolve` does the wrong thing when given an `interpreter` and no `platform` #511
+ [Errno 13] Permission denied when executing with bdist_pex #570
sopel-irc__sopel-1527
[ { "content": "#!/usr/bin/env python2.7\n# coding=utf-8\n\"\"\"\nSopel - An IRC Bot\nCopyright 2008, Sean B. Palmer, inamidst.com\nCopyright © 2012-2014, Elad Alfassa <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport argparse\nimport os\nimport platform\nimport signal\nimport sys\nimport time\nimport traceback\n\nfrom sopel import bot, logger, tools, __version__\nfrom sopel.config import (\n Config,\n _create_config,\n ConfigurationError,\n ConfigurationNotFound,\n DEFAULT_HOMEDIR,\n _wizard\n)\nfrom . import utils\n\nif sys.version_info < (2, 7):\n tools.stderr('Error: Requires Python 2.7 or later. Try python2.7 sopel')\n sys.exit(1)\nif sys.version_info.major == 2:\n tools.stderr('Warning: Python 2.x is near end of life. Sopel support at that point is TBD.')\nif sys.version_info.major == 3 and sys.version_info.minor < 3:\n tools.stderr('Error: When running on Python 3, Python 3.3 is required.')\n sys.exit(1)\n\nERR_CODE = 1\n\"\"\"Error code: program exited with an error\"\"\"\nERR_CODE_NO_RESTART = 2\n\"\"\"Error code: program exited with an error and should not be restarted\n\nThis error code is used to prevent systemd from restarting the bot when it\nencounters such an error case.\n\"\"\"\n\n\ndef run(config, pid_file, daemon=False):\n delay = 20\n # Inject ca_certs from config to web for SSL validation of web requests\n if not config.core.ca_certs:\n tools.stderr(\n 'Could not open CA certificates file. SSL will not work properly!')\n\n def signal_handler(sig, frame):\n if sig == signal.SIGUSR1 or sig == signal.SIGTERM or sig == signal.SIGINT:\n tools.stderr('Got quit signal, shutting down.')\n p.quit('Closing')\n elif sig == signal.SIGUSR2 or sig == signal.SIGILL:\n tools.stderr('Got restart signal.')\n p.restart('Restarting')\n\n while True:\n try:\n p = bot.Sopel(config, daemon=daemon)\n if hasattr(signal, 'SIGUSR1'):\n signal.signal(signal.SIGUSR1, signal_handler)\n if hasattr(signal, 'SIGTERM'):\n signal.signal(signal.SIGTERM, signal_handler)\n if hasattr(signal, 'SIGINT'):\n signal.signal(signal.SIGINT, signal_handler)\n if hasattr(signal, 'SIGUSR2'):\n signal.signal(signal.SIGUSR2, signal_handler)\n if hasattr(signal, 'SIGILL'):\n signal.signal(signal.SIGILL, signal_handler)\n logger.setup_logging(p)\n p.run(config.core.host, int(config.core.port))\n except KeyboardInterrupt:\n break\n except Exception: # TODO: Be specific\n trace = traceback.format_exc()\n try:\n tools.stderr(trace)\n except Exception: # TODO: Be specific\n pass\n logfile = open(os.path.join(config.core.logdir, 'exceptions.log'), 'a')\n logfile.write('Critical exception in core')\n logfile.write(trace)\n logfile.write('----------------------------------------\\n\\n')\n logfile.close()\n # TODO: This should be handled by command_start\n # All we should need here is a return value, but replacing the\n # os._exit() call below (at the end) broke ^C.\n # This one is much harder to test, so until that one's sorted it\n # isn't worth the risk of trying to remove this one.\n os.unlink(pid_file)\n os._exit(1)\n\n if not isinstance(delay, int):\n break\n if p.wantsrestart:\n return -1\n if p.hasquit:\n break\n tools.stderr(\n 'Warning: Disconnected. Reconnecting in %s seconds...' 
% delay)\n time.sleep(delay)\n # TODO: This should be handled by command_start\n # All we should need here is a return value, but making this\n # a return makes Sopel hang on ^C after it says \"Closed!\"\n os.unlink(pid_file)\n os._exit(0)\n\n\ndef add_legacy_options(parser):\n parser.add_argument(\"-d\", '--fork', action=\"store_true\",\n dest=\"daemonize\", help=\"Daemonize Sopel\")\n parser.add_argument(\"-q\", '--quit', action=\"store_true\", dest=\"quit\",\n help=(\n \"Gracefully quit Sopel \"\n \"(deprecated, and will be removed in Sopel 8; \"\n \"use `sopel stop` instead)\"))\n parser.add_argument(\"-k\", '--kill', action=\"store_true\", dest=\"kill\",\n help=(\n \"Kill Sopel \"\n \"(deprecated, and will be removed in Sopel 8; \"\n \"use `sopel stop --kill` instead)\"))\n parser.add_argument(\"-r\", '--restart', action=\"store_true\", dest=\"restart\",\n help=(\n \"Restart Sopel \"\n \"(deprecated, and will be removed in Sopel 8; \"\n \"use `sopel restart` instead)\"))\n parser.add_argument(\"-l\", '--list', action=\"store_true\",\n dest=\"list_configs\",\n help=\"List all config files found\")\n parser.add_argument('--quiet', action=\"store_true\", dest=\"quiet\",\n help=\"Suppress all output\")\n parser.add_argument('-w', '--configure-all', action='store_true',\n dest='wizard',\n help=(\n \"Run the configuration wizard \"\n \"(deprecated, and will be removed in Sopel 8; \"\n \"use `sopel configure` instead)\"))\n parser.add_argument('--configure-modules', action='store_true',\n dest='mod_wizard',\n help=(\n \"Run the configuration wizard, but only for the \"\n \"module configuration options \"\n \"(deprecated, and will be removed in Sopel 8; \"\n \"use `sopel configure --modules` instead)\"))\n parser.add_argument('-v', action=\"store_true\",\n dest='version_legacy',\n help=(\n \"Show version number and exit \"\n \"(deprecated, and will be removed in Sopel 8; \"\n \"use -V/--version instead)\"))\n parser.add_argument('-V', '--version', action='store_true',\n dest='version',\n help='Show version number and exit')\n\n\ndef build_parser():\n \"\"\"Build an ``argparse.ArgumentParser`` for the bot\"\"\"\n parser = argparse.ArgumentParser(description='Sopel IRC Bot',\n usage='%(prog)s [options]')\n add_legacy_options(parser)\n utils.add_common_arguments(parser)\n\n subparsers = parser.add_subparsers(\n title='sub-commands',\n description='List of Sopel\\'s sub-commands',\n dest='action',\n metavar='{start,configure,stop,restart}')\n\n # manage `legacy` sub-command\n parser_legacy = subparsers.add_parser('legacy')\n add_legacy_options(parser_legacy)\n utils.add_common_arguments(parser_legacy)\n\n # manage `start` sub-command\n parser_start = subparsers.add_parser(\n 'start',\n description='Start a Sopel instance',\n help='Start a Sopel instance')\n parser_start.add_argument(\n '-d', '--fork',\n dest='daemonize',\n action='store_true',\n default=False,\n help='Run Sopel as a daemon (fork)')\n parser_start.add_argument(\n '--quiet',\n action=\"store_true\",\n dest=\"quiet\",\n help=\"Suppress all output\")\n utils.add_common_arguments(parser_start)\n\n # manage `configure` sub-command\n parser_configure = subparsers.add_parser(\n 'configure', help='Sopel\\'s Wizard tool')\n parser_configure.add_argument(\n '--modules',\n action='store_true',\n default=False,\n dest='modules')\n utils.add_common_arguments(parser_configure)\n\n # manage `stop` sub-command\n parser_stop = subparsers.add_parser(\n 'stop',\n description='Stop a running Sopel instance',\n help='Stop a running Sopel instance')\n 
parser_stop.add_argument(\n '-k', '--kill',\n action='store_true',\n default=False,\n help='Kill Sopel without a graceful quit')\n parser_stop.add_argument(\n '--quiet',\n action=\"store_true\",\n dest=\"quiet\",\n help=\"Suppress all output\")\n utils.add_common_arguments(parser_stop)\n\n # manage `restart` sub-command\n parser_restart = subparsers.add_parser(\n 'restart',\n description='Restart a running Sopel instance',\n help='Restart a running Sopel instance')\n parser_restart.add_argument(\n '--quiet',\n action=\"store_true\",\n dest=\"quiet\",\n help=\"Suppress all output\")\n utils.add_common_arguments(parser_restart)\n\n return parser\n\n\ndef check_not_root():\n \"\"\"Check if root is running the bot.\n\n It raises a ``RuntimeError`` if the user has root privileges on Linux or\n if it is the ``Administrator`` account on Windows.\n \"\"\"\n opersystem = platform.system()\n if opersystem in [\"Linux\", \"Darwin\"]:\n # Linux/Mac\n if os.getuid() == 0 or os.geteuid() == 0:\n raise RuntimeError('Error: Do not run Sopel with root privileges.')\n elif opersystem in [\"Windows\"]:\n # Windows\n if os.environ.get(\"USERNAME\") == \"Administrator\":\n raise RuntimeError('Error: Do not run Sopel as Administrator.')\n else:\n tools.stderr(\n \"Warning: %s is an uncommon operating system platform. \"\n \"Sopel should still work, but please contact Sopel's developers \"\n \"if you experience issues.\"\n % opersystem)\n\n\ndef print_version():\n \"\"\"Print Python version and Sopel version on stdout.\"\"\"\n py_ver = '%s.%s.%s' % (sys.version_info.major,\n sys.version_info.minor,\n sys.version_info.micro)\n print('Sopel %s (running on Python %s)' % (__version__, py_ver))\n print('https://sopel.chat/')\n\n\ndef print_config():\n \"\"\"Print list of available configurations from default homedir.\"\"\"\n configs = utils.enumerate_configs(DEFAULT_HOMEDIR)\n print('Config files in %s:' % DEFAULT_HOMEDIR)\n config = None\n for config in configs:\n print('\\t%s' % config)\n if not config:\n print('\\tNone found')\n\n print('-------------------------')\n\n\ndef get_configuration(options):\n \"\"\"Get or create a configuration object from ``options``.\n\n :param options: argument parser's options\n :type options: ``argparse.Namespace``\n :return: a configuration object\n :rtype: :class:`sopel.config.Config`\n\n This may raise a :exc:`sopel.config.ConfigurationError` if the\n configuration file is invalid.\n\n .. 
seealso::\n\n The configuration file is loaded by\n :func:`~sopel.cli.run.utils.load_settings` or created using the\n configuration wizard.\n\n \"\"\"\n try:\n bot_config = utils.load_settings(options)\n except ConfigurationNotFound as error:\n print(\n \"Welcome to Sopel!\\n\"\n \"I can't seem to find the configuration file, \"\n \"so let's generate it!\\n\")\n\n config_path = error.filename\n if not config_path.endswith('.cfg'):\n config_path = config_path + '.cfg'\n\n config_path = _create_config(config_path)\n # try to reload it now that it's created\n bot_config = Config(config_path)\n\n bot_config._is_daemonized = options.daemonize\n return bot_config\n\n\ndef get_pid_filename(options, pid_dir):\n \"\"\"Get the pid file name in ``pid_dir`` from the given ``options``.\n\n :param options: command line options\n :param str pid_dir: path to the pid directory\n :return: absolute filename of the pid file\n\n By default, it's ``sopel.pid``, but if a configuration filename is given\n in the ``options``, its basename is used to generate the filename, as:\n ``sopel-{basename}.pid`` instead.\n \"\"\"\n name = 'sopel.pid'\n if options.config:\n basename = os.path.basename(options.config)\n if basename.endswith('.cfg'):\n basename = basename[:-4]\n name = 'sopel-%s.pid' % basename\n\n return os.path.abspath(os.path.join(pid_dir, name))\n\n\ndef get_running_pid(filename):\n \"\"\"Retrieve the PID number from the given ``filename``.\n\n :param str filename: path to file to read the PID from\n :return: the PID number of a Sopel instance if running, ``None`` otherwise\n :rtype: integer\n\n This function tries to retrieve a PID number from the given ``filename``,\n as an integer, and returns ``None`` if the file is not found or if the\n content is not an integer.\n \"\"\"\n if not os.path.isfile(filename):\n return\n\n with open(filename, 'r') as pid_file:\n try:\n return int(pid_file.read())\n except ValueError:\n pass\n\n\ndef command_start(opts):\n \"\"\"Start a Sopel instance\"\"\"\n # Step One: Get the configuration file and prepare to run\n try:\n config_module = get_configuration(opts)\n except ConfigurationError as e:\n tools.stderr(e)\n return ERR_CODE_NO_RESTART\n\n if config_module.core.not_configured:\n tools.stderr('Bot is not configured, can\\'t start')\n return ERR_CODE_NO_RESTART\n\n # Step Two: Manage logfile, stdout and stderr\n utils.redirect_outputs(config_module, opts.quiet)\n\n # Step Three: Handle process-lifecycle options and manage the PID file\n pid_dir = config_module.core.pid_dir\n pid_file_path = get_pid_filename(opts, pid_dir)\n pid = get_running_pid(pid_file_path)\n\n if pid is not None and tools.check_pid(pid):\n tools.stderr('There\\'s already a Sopel instance running '\n 'with this config file.')\n tools.stderr('Try using either the `sopel stop` '\n 'or the `sopel restart` command.')\n return ERR_CODE\n\n if opts.daemonize:\n child_pid = os.fork()\n if child_pid is not 0:\n return\n\n with open(pid_file_path, 'w') as pid_file:\n pid_file.write(str(os.getpid()))\n\n # Step Four: Run Sopel\n ret = run(config_module, pid_file_path)\n\n # Step Five: Shutdown Clean-Up\n os.unlink(pid_file_path)\n\n if ret == -1:\n # Restart\n os.execv(sys.executable, ['python'] + sys.argv)\n else:\n # Quit\n return ret\n\n\ndef command_configure(opts):\n \"\"\"Sopel Configuration Wizard\"\"\"\n if getattr(opts, 'modules', False):\n _wizard('mod', opts.config)\n else:\n _wizard('all', opts.config)\n\n\ndef command_stop(opts):\n \"\"\"Stop a running Sopel instance\"\"\"\n # Get 
Configuration\n try:\n settings = utils.load_settings(opts)\n except ConfigurationNotFound as error:\n tools.stderr('Configuration \"%s\" not found' % error.filename)\n return ERR_CODE\n\n if settings.core.not_configured:\n tools.stderr('Sopel is not configured, can\\'t stop')\n return ERR_CODE\n\n # Redirect Outputs\n utils.redirect_outputs(settings, opts.quiet)\n\n # Get Sopel's PID\n filename = get_pid_filename(opts, settings.core.pid_dir)\n pid = get_running_pid(filename)\n\n if pid is None or not tools.check_pid(pid):\n tools.stderr('Sopel is not running!')\n return ERR_CODE\n\n # Stop Sopel\n if opts.kill:\n tools.stderr('Killing the Sopel')\n os.kill(pid, signal.SIGKILL)\n return\n\n tools.stderr('Signaling Sopel to stop gracefully')\n if hasattr(signal, 'SIGUSR1'):\n os.kill(pid, signal.SIGUSR1)\n else:\n # Windows will not generate SIGTERM itself\n # https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal\n os.kill(pid, signal.SIGTERM)\n\n\ndef command_restart(opts):\n \"\"\"Restart a running Sopel instance\"\"\"\n # Get Configuration\n try:\n settings = utils.load_settings(opts)\n except ConfigurationNotFound as error:\n tools.stderr('Configuration \"%s\" not found' % error.filename)\n return ERR_CODE\n\n if settings.core.not_configured:\n tools.stderr('Sopel is not configured, can\\'t stop')\n return ERR_CODE\n\n # Redirect Outputs\n utils.redirect_outputs(settings, opts.quiet)\n\n # Get Sopel's PID\n filename = get_pid_filename(opts, settings.core.pid_dir)\n pid = get_running_pid(filename)\n\n if pid is None or not tools.check_pid(pid):\n tools.stderr('Sopel is not running!')\n return ERR_CODE\n\n tools.stderr('Asking Sopel to restart')\n if hasattr(signal, 'SIGUSR2'):\n os.kill(pid, signal.SIGUSR2)\n else:\n # Windows will not generate SIGILL itself\n # https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal\n os.kill(pid, signal.SIGILL)\n\n\ndef command_legacy(opts):\n \"\"\"Legacy Sopel run script\n\n The ``legacy`` command manages the old-style ``sopel`` command line tool.\n Most of its features are replaced by the following commands:\n\n * ``sopel start`` replaces the default behavior (run the bot)\n * ``sopel stop`` replaces the ``--quit/--kill`` options\n * ``sopel restart`` replaces the ``--restart`` option\n * ``sopel configure`` replaces the\n ``-w/--configure-all/--configure-modules`` options\n\n The ``-v`` option for \"version\" is deprecated, ``-V/--version`` should be\n used instead.\n\n .. seealso::\n\n The github issue `#1471`__ tracks various changes requested for future\n versions of Sopel, some of them related to this legacy command.\n\n .. 
__: https://github.com/sopel-irc/sopel/issues/1471\n\n \"\"\"\n # Step One: Handle \"No config needed\" options\n if opts.version:\n print_version()\n return\n elif opts.version_legacy:\n tools.stderr(\n 'WARNING: option -v is deprecated; '\n 'use `sopel -V/--version` instead')\n print_version()\n return\n\n if opts.wizard:\n tools.stderr(\n 'WARNING: option -w/--configure-all is deprecated; '\n 'use `sopel configure` instead')\n _wizard('all', opts.config)\n return\n\n if opts.mod_wizard:\n tools.stderr(\n 'WARNING: option --configure-modules is deprecated; '\n 'use `sopel configure --modules` instead')\n _wizard('mod', opts.config)\n return\n\n if opts.list_configs:\n print_config()\n return\n\n # Step Two: Get the configuration file and prepare to run\n try:\n config_module = get_configuration(opts)\n except ConfigurationError as e:\n tools.stderr(e)\n return ERR_CODE_NO_RESTART\n\n if config_module.core.not_configured:\n tools.stderr('Bot is not configured, can\\'t start')\n return ERR_CODE_NO_RESTART\n\n # Step Three: Manage logfile, stdout and stderr\n utils.redirect_outputs(config_module, opts.quiet)\n\n # Step Four: Handle process-lifecycle options and manage the PID file\n pid_dir = config_module.core.pid_dir\n pid_file_path = get_pid_filename(opts, pid_dir)\n old_pid = get_running_pid(pid_file_path)\n\n if old_pid is not None and tools.check_pid(old_pid):\n if not opts.quit and not opts.kill and not opts.restart:\n tools.stderr(\n 'There\\'s already a Sopel instance running with this config file')\n tools.stderr(\n 'Try using either the `sopel stop` command or the `sopel restart` command')\n return ERR_CODE\n elif opts.kill:\n tools.stderr(\n 'WARNING: option -k/--kill is deprecated; '\n 'use `sopel stop --kill` instead')\n tools.stderr('Killing the Sopel')\n os.kill(old_pid, signal.SIGKILL)\n return\n elif opts.quit:\n tools.stderr(\n 'WARNING: options -q/--quit is deprecated; '\n 'use `sopel stop` instead')\n tools.stderr('Signaling Sopel to stop gracefully')\n if hasattr(signal, 'SIGUSR1'):\n os.kill(old_pid, signal.SIGUSR1)\n else:\n # Windows will not generate SIGTERM itself\n # https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal\n os.kill(old_pid, signal.SIGTERM)\n return\n elif opts.restart:\n tools.stderr(\n 'WARNING: options --restart is deprecated; '\n 'use `sopel restart` instead')\n tools.stderr('Asking Sopel to restart')\n if hasattr(signal, 'SIGUSR2'):\n os.kill(old_pid, signal.SIGUSR2)\n else:\n # Windows will not generate SIGILL itself\n # https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal\n os.kill(old_pid, signal.SIGILL)\n return\n elif opts.kill or opts.quit or opts.restart:\n tools.stderr('Sopel is not running!')\n return ERR_CODE\n\n if opts.daemonize:\n child_pid = os.fork()\n if child_pid is not 0:\n return\n with open(pid_file_path, 'w') as pid_file:\n pid_file.write(str(os.getpid()))\n\n # Step Five: Initialize and run Sopel\n ret = run(config_module, pid_file_path)\n os.unlink(pid_file_path)\n if ret == -1:\n os.execv(sys.executable, ['python'] + sys.argv)\n else:\n return ret\n\n\ndef main(argv=None):\n \"\"\"Sopel run script entry point\"\"\"\n try:\n # Step One: Parse The Command Line\n parser = build_parser()\n\n # make sure to have an action first (`legacy` by default)\n # TODO: `start` should be the default in Sopel 8\n argv = argv or sys.argv[1:]\n if not argv:\n argv = ['legacy']\n elif argv[0].startswith('-') and argv[0] not in ['-h', '--help']:\n argv = ['legacy'] + argv\n\n opts = 
parser.parse_args(argv)\n\n # Step Two: \"Do not run as root\" checks\n try:\n check_not_root()\n except RuntimeError as err:\n tools.stderr('%s' % err)\n return ERR_CODE\n\n # Step Three: Handle command\n action = getattr(opts, 'action', 'legacy')\n command = {\n 'legacy': command_legacy,\n 'start': command_start,\n 'configure': command_configure,\n 'stop': command_stop,\n 'restart': command_restart,\n }.get(action)\n return command(opts)\n except KeyboardInterrupt:\n print(\"\\n\\nInterrupted\")\n return ERR_CODE\n\n\nif __name__ == '__main__':\n sys.exit(main())\n", "path": "sopel/cli/run.py" } ]
[ { "content": "#!/usr/bin/env python2.7\n# coding=utf-8\n\"\"\"\nSopel - An IRC Bot\nCopyright 2008, Sean B. Palmer, inamidst.com\nCopyright © 2012-2014, Elad Alfassa <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport sys\n\nfrom sopel import tools\n\nif sys.version_info < (2, 7):\n tools.stderr('Error: Requires Python 2.7 or later. Try python2.7 sopel')\n sys.exit(1)\nif sys.version_info.major == 2:\n tools.stderr('Warning: Python 2.x is near end of life. Sopel support at that point is TBD.')\nif sys.version_info.major == 3 and sys.version_info.minor < 3:\n tools.stderr('Error: When running on Python 3, Python 3.3 is required.')\n sys.exit(1)\n\nimport argparse\nimport os\nimport platform\nimport signal\nimport time\nimport traceback\n\nfrom sopel import bot, logger, __version__\nfrom sopel.config import (\n Config,\n _create_config,\n ConfigurationError,\n ConfigurationNotFound,\n DEFAULT_HOMEDIR,\n _wizard\n)\nfrom . import utils\n\n\nERR_CODE = 1\n\"\"\"Error code: program exited with an error\"\"\"\nERR_CODE_NO_RESTART = 2\n\"\"\"Error code: program exited with an error and should not be restarted\n\nThis error code is used to prevent systemd from restarting the bot when it\nencounters such an error case.\n\"\"\"\n\n\ndef run(config, pid_file, daemon=False):\n delay = 20\n # Inject ca_certs from config to web for SSL validation of web requests\n if not config.core.ca_certs:\n tools.stderr(\n 'Could not open CA certificates file. SSL will not work properly!')\n\n def signal_handler(sig, frame):\n if sig == signal.SIGUSR1 or sig == signal.SIGTERM or sig == signal.SIGINT:\n tools.stderr('Got quit signal, shutting down.')\n p.quit('Closing')\n elif sig == signal.SIGUSR2 or sig == signal.SIGILL:\n tools.stderr('Got restart signal.')\n p.restart('Restarting')\n\n # Define empty variable `p` for bot\n p = None\n while True:\n if p and p.hasquit: # Check if `hasquit` was set for bot during disconnected phase\n break\n try:\n p = bot.Sopel(config, daemon=daemon)\n if hasattr(signal, 'SIGUSR1'):\n signal.signal(signal.SIGUSR1, signal_handler)\n if hasattr(signal, 'SIGTERM'):\n signal.signal(signal.SIGTERM, signal_handler)\n if hasattr(signal, 'SIGINT'):\n signal.signal(signal.SIGINT, signal_handler)\n if hasattr(signal, 'SIGUSR2'):\n signal.signal(signal.SIGUSR2, signal_handler)\n if hasattr(signal, 'SIGILL'):\n signal.signal(signal.SIGILL, signal_handler)\n logger.setup_logging(p)\n p.run(config.core.host, int(config.core.port))\n except KeyboardInterrupt:\n break\n except Exception: # TODO: Be specific\n trace = traceback.format_exc()\n try:\n tools.stderr(trace)\n except Exception: # TODO: Be specific\n pass\n logfile = open(os.path.join(config.core.logdir, 'exceptions.log'), 'a')\n logfile.write('Critical exception in core')\n logfile.write(trace)\n logfile.write('----------------------------------------\\n\\n')\n logfile.close()\n # TODO: This should be handled by command_start\n # All we should need here is a return value, but replacing the\n # os._exit() call below (at the end) broke ^C.\n # This one is much harder to test, so until that one's sorted it\n # isn't worth the risk of trying to remove this one.\n os.unlink(pid_file)\n os._exit(1)\n\n if not isinstance(delay, int):\n break\n if p.wantsrestart:\n return -1\n if p.hasquit:\n break\n tools.stderr(\n 'Warning: Disconnected. Reconnecting in %s seconds...' 
% delay)\n time.sleep(delay)\n # TODO: This should be handled by command_start\n # All we should need here is a return value, but making this\n # a return makes Sopel hang on ^C after it says \"Closed!\"\n os.unlink(pid_file)\n os._exit(0)\n\n\ndef add_legacy_options(parser):\n parser.add_argument(\"-d\", '--fork', action=\"store_true\",\n dest=\"daemonize\", help=\"Daemonize Sopel\")\n parser.add_argument(\"-q\", '--quit', action=\"store_true\", dest=\"quit\",\n help=(\n \"Gracefully quit Sopel \"\n \"(deprecated, and will be removed in Sopel 8; \"\n \"use `sopel stop` instead)\"))\n parser.add_argument(\"-k\", '--kill', action=\"store_true\", dest=\"kill\",\n help=(\n \"Kill Sopel \"\n \"(deprecated, and will be removed in Sopel 8; \"\n \"use `sopel stop --kill` instead)\"))\n parser.add_argument(\"-r\", '--restart', action=\"store_true\", dest=\"restart\",\n help=(\n \"Restart Sopel \"\n \"(deprecated, and will be removed in Sopel 8; \"\n \"use `sopel restart` instead)\"))\n parser.add_argument(\"-l\", '--list', action=\"store_true\",\n dest=\"list_configs\",\n help=\"List all config files found\")\n parser.add_argument('--quiet', action=\"store_true\", dest=\"quiet\",\n help=\"Suppress all output\")\n parser.add_argument('-w', '--configure-all', action='store_true',\n dest='wizard',\n help=(\n \"Run the configuration wizard \"\n \"(deprecated, and will be removed in Sopel 8; \"\n \"use `sopel configure` instead)\"))\n parser.add_argument('--configure-modules', action='store_true',\n dest='mod_wizard',\n help=(\n \"Run the configuration wizard, but only for the \"\n \"module configuration options \"\n \"(deprecated, and will be removed in Sopel 8; \"\n \"use `sopel configure --modules` instead)\"))\n parser.add_argument('-v', action=\"store_true\",\n dest='version_legacy',\n help=(\n \"Show version number and exit \"\n \"(deprecated, and will be removed in Sopel 8; \"\n \"use -V/--version instead)\"))\n parser.add_argument('-V', '--version', action='store_true',\n dest='version',\n help='Show version number and exit')\n\n\ndef build_parser():\n \"\"\"Build an ``argparse.ArgumentParser`` for the bot\"\"\"\n parser = argparse.ArgumentParser(description='Sopel IRC Bot',\n usage='%(prog)s [options]')\n add_legacy_options(parser)\n utils.add_common_arguments(parser)\n\n subparsers = parser.add_subparsers(\n title='sub-commands',\n description='List of Sopel\\'s sub-commands',\n dest='action',\n metavar='{start,configure,stop,restart}')\n\n # manage `legacy` sub-command\n parser_legacy = subparsers.add_parser('legacy')\n add_legacy_options(parser_legacy)\n utils.add_common_arguments(parser_legacy)\n\n # manage `start` sub-command\n parser_start = subparsers.add_parser(\n 'start',\n description='Start a Sopel instance',\n help='Start a Sopel instance')\n parser_start.add_argument(\n '-d', '--fork',\n dest='daemonize',\n action='store_true',\n default=False,\n help='Run Sopel as a daemon (fork)')\n parser_start.add_argument(\n '--quiet',\n action=\"store_true\",\n dest=\"quiet\",\n help=\"Suppress all output\")\n utils.add_common_arguments(parser_start)\n\n # manage `configure` sub-command\n parser_configure = subparsers.add_parser(\n 'configure', help='Sopel\\'s Wizard tool')\n parser_configure.add_argument(\n '--modules',\n action='store_true',\n default=False,\n dest='modules')\n utils.add_common_arguments(parser_configure)\n\n # manage `stop` sub-command\n parser_stop = subparsers.add_parser(\n 'stop',\n description='Stop a running Sopel instance',\n help='Stop a running Sopel instance')\n 
parser_stop.add_argument(\n '-k', '--kill',\n action='store_true',\n default=False,\n help='Kill Sopel without a graceful quit')\n parser_stop.add_argument(\n '--quiet',\n action=\"store_true\",\n dest=\"quiet\",\n help=\"Suppress all output\")\n utils.add_common_arguments(parser_stop)\n\n # manage `restart` sub-command\n parser_restart = subparsers.add_parser(\n 'restart',\n description='Restart a running Sopel instance',\n help='Restart a running Sopel instance')\n parser_restart.add_argument(\n '--quiet',\n action=\"store_true\",\n dest=\"quiet\",\n help=\"Suppress all output\")\n utils.add_common_arguments(parser_restart)\n\n return parser\n\n\ndef check_not_root():\n \"\"\"Check if root is running the bot.\n\n It raises a ``RuntimeError`` if the user has root privileges on Linux or\n if it is the ``Administrator`` account on Windows.\n \"\"\"\n opersystem = platform.system()\n if opersystem in [\"Linux\", \"Darwin\"]:\n # Linux/Mac\n if os.getuid() == 0 or os.geteuid() == 0:\n raise RuntimeError('Error: Do not run Sopel with root privileges.')\n elif opersystem in [\"Windows\"]:\n # Windows\n if os.environ.get(\"USERNAME\") == \"Administrator\":\n raise RuntimeError('Error: Do not run Sopel as Administrator.')\n else:\n tools.stderr(\n \"Warning: %s is an uncommon operating system platform. \"\n \"Sopel should still work, but please contact Sopel's developers \"\n \"if you experience issues.\"\n % opersystem)\n\n\ndef print_version():\n \"\"\"Print Python version and Sopel version on stdout.\"\"\"\n py_ver = '%s.%s.%s' % (sys.version_info.major,\n sys.version_info.minor,\n sys.version_info.micro)\n print('Sopel %s (running on Python %s)' % (__version__, py_ver))\n print('https://sopel.chat/')\n\n\ndef print_config():\n \"\"\"Print list of available configurations from default homedir.\"\"\"\n configs = utils.enumerate_configs(DEFAULT_HOMEDIR)\n print('Config files in %s:' % DEFAULT_HOMEDIR)\n config = None\n for config in configs:\n print('\\t%s' % config)\n if not config:\n print('\\tNone found')\n\n print('-------------------------')\n\n\ndef get_configuration(options):\n \"\"\"Get or create a configuration object from ``options``.\n\n :param options: argument parser's options\n :type options: ``argparse.Namespace``\n :return: a configuration object\n :rtype: :class:`sopel.config.Config`\n\n This may raise a :exc:`sopel.config.ConfigurationError` if the\n configuration file is invalid.\n\n .. 
seealso::\n\n The configuration file is loaded by\n :func:`~sopel.cli.run.utils.load_settings` or created using the\n configuration wizard.\n\n \"\"\"\n try:\n bot_config = utils.load_settings(options)\n except ConfigurationNotFound as error:\n print(\n \"Welcome to Sopel!\\n\"\n \"I can't seem to find the configuration file, \"\n \"so let's generate it!\\n\")\n\n config_path = error.filename\n if not config_path.endswith('.cfg'):\n config_path = config_path + '.cfg'\n\n config_path = _create_config(config_path)\n # try to reload it now that it's created\n bot_config = Config(config_path)\n\n bot_config._is_daemonized = options.daemonize\n return bot_config\n\n\ndef get_pid_filename(options, pid_dir):\n \"\"\"Get the pid file name in ``pid_dir`` from the given ``options``.\n\n :param options: command line options\n :param str pid_dir: path to the pid directory\n :return: absolute filename of the pid file\n\n By default, it's ``sopel.pid``, but if a configuration filename is given\n in the ``options``, its basename is used to generate the filename, as:\n ``sopel-{basename}.pid`` instead.\n \"\"\"\n name = 'sopel.pid'\n if options.config:\n basename = os.path.basename(options.config)\n if basename.endswith('.cfg'):\n basename = basename[:-4]\n name = 'sopel-%s.pid' % basename\n\n return os.path.abspath(os.path.join(pid_dir, name))\n\n\ndef get_running_pid(filename):\n \"\"\"Retrieve the PID number from the given ``filename``.\n\n :param str filename: path to file to read the PID from\n :return: the PID number of a Sopel instance if running, ``None`` otherwise\n :rtype: integer\n\n This function tries to retrieve a PID number from the given ``filename``,\n as an integer, and returns ``None`` if the file is not found or if the\n content is not an integer.\n \"\"\"\n if not os.path.isfile(filename):\n return\n\n with open(filename, 'r') as pid_file:\n try:\n return int(pid_file.read())\n except ValueError:\n pass\n\n\ndef command_start(opts):\n \"\"\"Start a Sopel instance\"\"\"\n # Step One: Get the configuration file and prepare to run\n try:\n config_module = get_configuration(opts)\n except ConfigurationError as e:\n tools.stderr(e)\n return ERR_CODE_NO_RESTART\n\n if config_module.core.not_configured:\n tools.stderr('Bot is not configured, can\\'t start')\n return ERR_CODE_NO_RESTART\n\n # Step Two: Manage logfile, stdout and stderr\n utils.redirect_outputs(config_module, opts.quiet)\n\n # Step Three: Handle process-lifecycle options and manage the PID file\n pid_dir = config_module.core.pid_dir\n pid_file_path = get_pid_filename(opts, pid_dir)\n pid = get_running_pid(pid_file_path)\n\n if pid is not None and tools.check_pid(pid):\n tools.stderr('There\\'s already a Sopel instance running '\n 'with this config file.')\n tools.stderr('Try using either the `sopel stop` '\n 'or the `sopel restart` command.')\n return ERR_CODE\n\n if opts.daemonize:\n child_pid = os.fork()\n if child_pid is not 0:\n return\n\n with open(pid_file_path, 'w') as pid_file:\n pid_file.write(str(os.getpid()))\n\n # Step Four: Run Sopel\n ret = run(config_module, pid_file_path)\n\n # Step Five: Shutdown Clean-Up\n os.unlink(pid_file_path)\n\n if ret == -1:\n # Restart\n os.execv(sys.executable, ['python'] + sys.argv)\n else:\n # Quit\n return ret\n\n\ndef command_configure(opts):\n \"\"\"Sopel Configuration Wizard\"\"\"\n if getattr(opts, 'modules', False):\n _wizard('mod', opts.config)\n else:\n _wizard('all', opts.config)\n\n\ndef command_stop(opts):\n \"\"\"Stop a running Sopel instance\"\"\"\n # Get 
Configuration\n try:\n settings = utils.load_settings(opts)\n except ConfigurationNotFound as error:\n tools.stderr('Configuration \"%s\" not found' % error.filename)\n return ERR_CODE\n\n if settings.core.not_configured:\n tools.stderr('Sopel is not configured, can\\'t stop')\n return ERR_CODE\n\n # Redirect Outputs\n utils.redirect_outputs(settings, opts.quiet)\n\n # Get Sopel's PID\n filename = get_pid_filename(opts, settings.core.pid_dir)\n pid = get_running_pid(filename)\n\n if pid is None or not tools.check_pid(pid):\n tools.stderr('Sopel is not running!')\n return ERR_CODE\n\n # Stop Sopel\n if opts.kill:\n tools.stderr('Killing the Sopel')\n os.kill(pid, signal.SIGKILL)\n return\n\n tools.stderr('Signaling Sopel to stop gracefully')\n if hasattr(signal, 'SIGUSR1'):\n os.kill(pid, signal.SIGUSR1)\n else:\n # Windows will not generate SIGTERM itself\n # https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal\n os.kill(pid, signal.SIGTERM)\n\n\ndef command_restart(opts):\n \"\"\"Restart a running Sopel instance\"\"\"\n # Get Configuration\n try:\n settings = utils.load_settings(opts)\n except ConfigurationNotFound as error:\n tools.stderr('Configuration \"%s\" not found' % error.filename)\n return ERR_CODE\n\n if settings.core.not_configured:\n tools.stderr('Sopel is not configured, can\\'t stop')\n return ERR_CODE\n\n # Redirect Outputs\n utils.redirect_outputs(settings, opts.quiet)\n\n # Get Sopel's PID\n filename = get_pid_filename(opts, settings.core.pid_dir)\n pid = get_running_pid(filename)\n\n if pid is None or not tools.check_pid(pid):\n tools.stderr('Sopel is not running!')\n return ERR_CODE\n\n tools.stderr('Asking Sopel to restart')\n if hasattr(signal, 'SIGUSR2'):\n os.kill(pid, signal.SIGUSR2)\n else:\n # Windows will not generate SIGILL itself\n # https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal\n os.kill(pid, signal.SIGILL)\n\n\ndef command_legacy(opts):\n \"\"\"Legacy Sopel run script\n\n The ``legacy`` command manages the old-style ``sopel`` command line tool.\n Most of its features are replaced by the following commands:\n\n * ``sopel start`` replaces the default behavior (run the bot)\n * ``sopel stop`` replaces the ``--quit/--kill`` options\n * ``sopel restart`` replaces the ``--restart`` option\n * ``sopel configure`` replaces the\n ``-w/--configure-all/--configure-modules`` options\n\n The ``-v`` option for \"version\" is deprecated, ``-V/--version`` should be\n used instead.\n\n .. seealso::\n\n The github issue `#1471`__ tracks various changes requested for future\n versions of Sopel, some of them related to this legacy command.\n\n .. 
__: https://github.com/sopel-irc/sopel/issues/1471\n\n \"\"\"\n # Step One: Handle \"No config needed\" options\n if opts.version:\n print_version()\n return\n elif opts.version_legacy:\n tools.stderr(\n 'WARNING: option -v is deprecated; '\n 'use `sopel -V/--version` instead')\n print_version()\n return\n\n if opts.wizard:\n tools.stderr(\n 'WARNING: option -w/--configure-all is deprecated; '\n 'use `sopel configure` instead')\n _wizard('all', opts.config)\n return\n\n if opts.mod_wizard:\n tools.stderr(\n 'WARNING: option --configure-modules is deprecated; '\n 'use `sopel configure --modules` instead')\n _wizard('mod', opts.config)\n return\n\n if opts.list_configs:\n print_config()\n return\n\n # Step Two: Get the configuration file and prepare to run\n try:\n config_module = get_configuration(opts)\n except ConfigurationError as e:\n tools.stderr(e)\n return ERR_CODE_NO_RESTART\n\n if config_module.core.not_configured:\n tools.stderr('Bot is not configured, can\\'t start')\n return ERR_CODE_NO_RESTART\n\n # Step Three: Manage logfile, stdout and stderr\n utils.redirect_outputs(config_module, opts.quiet)\n\n # Step Four: Handle process-lifecycle options and manage the PID file\n pid_dir = config_module.core.pid_dir\n pid_file_path = get_pid_filename(opts, pid_dir)\n old_pid = get_running_pid(pid_file_path)\n\n if old_pid is not None and tools.check_pid(old_pid):\n if not opts.quit and not opts.kill and not opts.restart:\n tools.stderr(\n 'There\\'s already a Sopel instance running with this config file')\n tools.stderr(\n 'Try using either the `sopel stop` command or the `sopel restart` command')\n return ERR_CODE\n elif opts.kill:\n tools.stderr(\n 'WARNING: option -k/--kill is deprecated; '\n 'use `sopel stop --kill` instead')\n tools.stderr('Killing the Sopel')\n os.kill(old_pid, signal.SIGKILL)\n return\n elif opts.quit:\n tools.stderr(\n 'WARNING: options -q/--quit is deprecated; '\n 'use `sopel stop` instead')\n tools.stderr('Signaling Sopel to stop gracefully')\n if hasattr(signal, 'SIGUSR1'):\n os.kill(old_pid, signal.SIGUSR1)\n else:\n # Windows will not generate SIGTERM itself\n # https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal\n os.kill(old_pid, signal.SIGTERM)\n return\n elif opts.restart:\n tools.stderr(\n 'WARNING: options --restart is deprecated; '\n 'use `sopel restart` instead')\n tools.stderr('Asking Sopel to restart')\n if hasattr(signal, 'SIGUSR2'):\n os.kill(old_pid, signal.SIGUSR2)\n else:\n # Windows will not generate SIGILL itself\n # https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal\n os.kill(old_pid, signal.SIGILL)\n return\n elif opts.kill or opts.quit or opts.restart:\n tools.stderr('Sopel is not running!')\n return ERR_CODE\n\n if opts.daemonize:\n child_pid = os.fork()\n if child_pid is not 0:\n return\n with open(pid_file_path, 'w') as pid_file:\n pid_file.write(str(os.getpid()))\n\n # Step Five: Initialize and run Sopel\n ret = run(config_module, pid_file_path)\n os.unlink(pid_file_path)\n if ret == -1:\n os.execv(sys.executable, ['python'] + sys.argv)\n else:\n return ret\n\n\ndef main(argv=None):\n \"\"\"Sopel run script entry point\"\"\"\n try:\n # Step One: Parse The Command Line\n parser = build_parser()\n\n # make sure to have an action first (`legacy` by default)\n # TODO: `start` should be the default in Sopel 8\n argv = argv or sys.argv[1:]\n if not argv:\n argv = ['legacy']\n elif argv[0].startswith('-') and argv[0] not in ['-h', '--help']:\n argv = ['legacy'] + argv\n\n opts = 
parser.parse_args(argv)\n\n # Step Two: \"Do not run as root\" checks\n try:\n check_not_root()\n except RuntimeError as err:\n tools.stderr('%s' % err)\n return ERR_CODE\n\n # Step Three: Handle command\n action = getattr(opts, 'action', 'legacy')\n command = {\n 'legacy': command_legacy,\n 'start': command_start,\n 'configure': command_configure,\n 'stop': command_stop,\n 'restart': command_restart,\n }.get(action)\n return command(opts)\n except KeyboardInterrupt:\n print(\"\\n\\nInterrupted\")\n return ERR_CODE\n\n\nif __name__ == '__main__':\n sys.exit(main())\n", "path": "sopel/cli/run.py" } ]
diff --git a/sopel/cli/run.py b/sopel/cli/run.py index 867d0c8f4f..364b1922de 100755 --- a/sopel/cli/run.py +++ b/sopel/cli/run.py @@ -67,7 +67,11 @@ def signal_handler(sig, frame): tools.stderr('Got restart signal.') p.restart('Restarting') + # Define empty variable `p` for bot + p = None while True: + if p and p.hasquit: # Check if `hasquit` was set for bot during disconnected phase + break try: p = bot.Sopel(config, daemon=daemon) if hasattr(signal, 'SIGUSR1'):
Broken interrupt handling when connection is inactive For the record, I'm testing on macOS today because that's what I have available. However, the same behavior occurs with identical or similar tracebacks on my Ubuntu machine at home, so I don't think it's related to the fact that I'm using Homebrew Python or anything. It's probably related to Sopel's sometimes-wonky exception handling. Here's what I did: 1. Run `sopel` 2. Press Ctrl-C after "Connecting to &lt;server&gt;..." 3. Run `sopel` again 4. Press Ctrl-C while Sopel is waiting to reconnect after an expected SSL failure Interrupting the connection phase resulted in an `AttributeError`, probably as expected (since quitting tries to send something to the socket, and the socket object doesn't exist before connecting finishes): ``` Connecting to irc.network.net:6667... ^CGot quit signal, shutting down. Traceback (most recent call last): File "/Users/dgw/github/sopel/sopel/__init__.py", line 91, in run p.run(config.core.host, int(config.core.port)) File "/Users/dgw/github/sopel/sopel/irc.py", line 167, in run self.initiate_connect(host, port) File "/Users/dgw/github/sopel/sopel/irc.py", line 177, in initiate_connect source_address=source_address)) File "/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socket.py", line 707, in create_connection for res in getaddrinfo(host, port, 0, SOCK_STREAM): File "/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socket.py", line 748, in getaddrinfo for res in _socket.getaddrinfo(host, port, family, type, proto, flags): File "/Users/dgw/github/sopel/sopel/__init__.py", line 73, in signal_handler p.quit('Closing') File "/Users/dgw/github/sopel/sopel/irc.py", line 199, in quit self.write(['QUIT'], message) File "/Users/dgw/github/sopel/sopel/bot.py", line 166, in write irc.Bot.write(self, args, text=text) File "/Users/dgw/github/sopel/sopel/irc.py", line 161, in write self.send(temp.encode('utf-8')) File "/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/asyncore.py", line 360, in send result = self.socket.send(data) AttributeError: 'NoneType' object has no attribute 'send' ``` Interrupting the reconnect delay yields a completely different traceback, and additionally requires pressing Ctrl-C twice: ``` Warning: Disconnected. Reconnecting in 20 seconds... ^CGot quit signal, shutting down. Traceback (most recent call last): File "./sopel.py", line 7, in <module> sys.exit(run_script.main()) File "/Users/dgw/github/sopel/sopel/run_script.py", line 351, in main ret = run(config_module, pid_file_path) File "/Users/dgw/github/sopel/sopel/__init__.py", line 120, in run time.sleep(delay) File "/Users/dgw/github/sopel/sopel/__init__.py", line 73, in signal_handler p.quit('Closing') File "/Users/dgw/github/sopel/sopel/irc.py", line 199, in quit self.write(['QUIT'], message) File "/Users/dgw/github/sopel/sopel/bot.py", line 166, in write irc.Bot.write(self, args, text=text) File "/Users/dgw/github/sopel/sopel/irc.py", line 161, in write self.send(temp.encode('utf-8')) File "/Users/dgw/github/sopel/sopel/irc.py", line 327, in _ssl_send result = self.socket.send(data) OSError: [Errno 9] Bad file descriptor ^CGot quit signal, shutting down. 
Exception ignored in: <module 'threading' from '/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py'> Traceback (most recent call last): File "/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py", line 1273, in _shutdown t.join() File "/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py", line 1032, in join self._wait_for_tstate_lock() File "/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py", line 1048, in _wait_for_tstate_lock elif lock.acquire(block, timeout): File "/Users/dgw/github/sopel/sopel/__init__.py", line 73, in signal_handler p.quit('Closing') File "/Users/dgw/github/sopel/sopel/irc.py", line 199, in quit self.write(['QUIT'], message) File "/Users/dgw/github/sopel/sopel/bot.py", line 166, in write irc.Bot.write(self, args, text=text) File "/Users/dgw/github/sopel/sopel/irc.py", line 161, in write self.send(temp.encode('utf-8')) File "/Users/dgw/github/sopel/sopel/irc.py", line 327, in _ssl_send result = self.socket.send(data) OSError: [Errno 9] Bad file descriptor ``` I've run into this second issue relatively often while testing things in the last few months, since I often intentionally interrupt Sopel during states other than "Connected to network and running normally". The exception itself isn't a big deal (though it would be nice not to spit it out). I consider needing to press Ctrl-C twice to be the main bug here. That simply shouldn't be required. Interrupting Sopel after the "Loading modules..." line (but before it starts to connect) yields a clean exit, though. That's kind of weird, and I haven't had time to look into why. Since I know we have a few people digging around in Sopel's internals and refactoring things now (and doing damn good work, too!), I'm hoping one of them will look into this at some point. 😹 This isn't a high-severity bug, but I do want to get it fixed if possible, ideally in the next year or two. That need to press Ctrl-C twice carries over into, for example, `sopel --quit`. Sopel should _never_ need to be told to quit twice, unless something is catastrophically wrong—and waiting to reconnect is a totally normal situation that shouldn't break things the way it does now.
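The fix in the diff above initializes `p = None` before the reconnect loop and checks `p.hasquit` at the top of each iteration, so a quit requested while the bot is disconnected stops the loop instead of triggering another reconnect. Below is a minimal sketch of that pattern, independent of Sopel; the `_FakeBot` class and `run_forever` function are illustrative stand-ins, not Sopel APIs.

```python
import signal
import time


class _FakeBot:
    """Illustrative stand-in for the real bot object."""

    def __init__(self):
        self.hasquit = False

    def quit(self, message):
        # The real bot also tries to send QUIT over the socket; here we only
        # record that a shutdown was requested.
        self.hasquit = True

    def run(self):
        # Simulate a connection attempt that immediately drops.
        raise ConnectionError('disconnected')


def run_forever(delay=2):
    # Define the bot variable before the loop so both the signal handler and
    # the reconnect check can see it, even before the first connection.
    p = None

    def signal_handler(sig, frame):
        if p is not None:
            p.quit('Closing')

    signal.signal(signal.SIGINT, signal_handler)

    while True:
        # Check whether a quit was requested during the disconnected phase
        # (e.g. Ctrl-C while waiting to reconnect) before trying again.
        if p and p.hasquit:
            break
        p = _FakeBot()
        try:
            p.run()
        except ConnectionError:
            pass
        if p.hasquit:
            break
        print('Disconnected. Reconnecting in %s seconds...' % delay)
        time.sleep(delay)
    print('Stopped cleanly.')


if __name__ == '__main__':
    run_forever()
```

With this arrangement, Ctrl-C during the reconnect delay runs the handler, which marks `hasquit`; the loop-top check then exits cleanly on the next iteration instead of attempting another connection, which is the behavior the issue above asks for.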
pallets__click-2714
[ { "content": "from pallets_sphinx_themes import get_version\nfrom pallets_sphinx_themes import ProjectLink\n\n# Project --------------------------------------------------------------\n\nproject = \"Click\"\ncopyright = \"2014 Pallets\"\nauthor = \"Pallets\"\nrelease, version = get_version(\"Click\")\n\n# General --------------------------------------------------------------\n\ndefault_role = \"code\"\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.extlinks\",\n \"sphinx.ext.intersphinx\",\n \"sphinx_tabs.tabs\",\n \"sphinxcontrib.log_cabinet\",\n \"pallets_sphinx_themes\",\n]\nautodoc_member_order = \"bysource\"\nautodoc_typehints = \"description\"\nautodoc_preserve_defaults = True\nextlinks = {\n \"issue\": (\"https://github.com/pallets/flask/issues/%s\", \"#%s\"),\n \"pr\": (\"https://github.com/pallets/flask/pull/%s\", \"#%s\"),\n}\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n}\n\n# HTML -----------------------------------------------------------------\n\nhtml_theme = \"click\"\nhtml_theme_options = {\"index_sidebar_logo\": False}\nhtml_context = {\n \"project_links\": [\n ProjectLink(\"Donate\", \"https://palletsprojects.com/donate\"),\n ProjectLink(\"PyPI Releases\", \"https://pypi.org/project/click/\"),\n ProjectLink(\"Source Code\", \"https://github.com/pallets/click/\"),\n ProjectLink(\"Issue Tracker\", \"https://github.com/pallets/click/issues/\"),\n ProjectLink(\"Chat\", \"https://discord.gg/pallets\"),\n ]\n}\nhtml_sidebars = {\n \"index\": [\"project.html\", \"localtoc.html\", \"searchbox.html\", \"ethicalads.html\"],\n \"**\": [\"localtoc.html\", \"relations.html\", \"searchbox.html\", \"ethicalads.html\"],\n}\nsinglehtml_sidebars = {\"index\": [\"project.html\", \"localtoc.html\", \"ethicalads.html\"]}\nhtml_static_path = [\"_static\"]\nhtml_favicon = \"_static/click-icon.png\"\nhtml_logo = \"_static/click-logo-sidebar.png\"\nhtml_title = f\"Click Documentation ({version})\"\nhtml_show_sourcelink = False\n", "path": "docs/conf.py" } ]
[ { "content": "from pallets_sphinx_themes import get_version\nfrom pallets_sphinx_themes import ProjectLink\n\n# Project --------------------------------------------------------------\n\nproject = \"Click\"\ncopyright = \"2014 Pallets\"\nauthor = \"Pallets\"\nrelease, version = get_version(\"Click\")\n\n# General --------------------------------------------------------------\n\ndefault_role = \"code\"\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.extlinks\",\n \"sphinx.ext.intersphinx\",\n \"sphinx_tabs.tabs\",\n \"sphinxcontrib.log_cabinet\",\n \"pallets_sphinx_themes\",\n]\nautodoc_member_order = \"bysource\"\nautodoc_typehints = \"description\"\nautodoc_preserve_defaults = True\nextlinks = {\n \"issue\": (\"https://github.com/pallets/click/issues/%s\", \"#%s\"),\n \"pr\": (\"https://github.com/pallets/click/pull/%s\", \"#%s\"),\n}\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n}\n\n# HTML -----------------------------------------------------------------\n\nhtml_theme = \"click\"\nhtml_theme_options = {\"index_sidebar_logo\": False}\nhtml_context = {\n \"project_links\": [\n ProjectLink(\"Donate\", \"https://palletsprojects.com/donate\"),\n ProjectLink(\"PyPI Releases\", \"https://pypi.org/project/click/\"),\n ProjectLink(\"Source Code\", \"https://github.com/pallets/click/\"),\n ProjectLink(\"Issue Tracker\", \"https://github.com/pallets/click/issues/\"),\n ProjectLink(\"Chat\", \"https://discord.gg/pallets\"),\n ]\n}\nhtml_sidebars = {\n \"index\": [\"project.html\", \"localtoc.html\", \"searchbox.html\", \"ethicalads.html\"],\n \"**\": [\"localtoc.html\", \"relations.html\", \"searchbox.html\", \"ethicalads.html\"],\n}\nsinglehtml_sidebars = {\"index\": [\"project.html\", \"localtoc.html\", \"ethicalads.html\"]}\nhtml_static_path = [\"_static\"]\nhtml_favicon = \"_static/click-icon.png\"\nhtml_logo = \"_static/click-logo-sidebar.png\"\nhtml_title = f\"Click Documentation ({version})\"\nhtml_show_sourcelink = False\n", "path": "docs/conf.py" } ]
diff --git a/docs/conf.py b/docs/conf.py index c579af52d..db253ea2c 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -23,8 +23,8 @@ autodoc_typehints = "description" autodoc_preserve_defaults = True extlinks = { - "issue": ("https://github.com/pallets/flask/issues/%s", "#%s"), - "pr": ("https://github.com/pallets/flask/pull/%s", "#%s"), + "issue": ("https://github.com/pallets/click/issues/%s", "#%s"), + "pr": ("https://github.com/pallets/click/pull/%s", "#%s"), } intersphinx_mapping = { "python": ("https://docs.python.org/3/", None),
Docs wrongly links PRs and Issues to flask ![image](https://github.com/pallets/click/assets/13086194/cc68ea55-7c69-4e24-a2dd-f296d54a9c61) Environment: - Python version: N/A - Click version: N/A
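The one-line fix in the diff above only changes which repository Sphinx's `sphinx.ext.extlinks` extension points at. For reference, a minimal `conf.py` fragment showing how such an entry is declared and what the shorthand roles expand to, with the values taken from the corrected configuration:

```python
# Minimal Sphinx conf.py fragment for the extlinks extension: each entry maps
# a custom role name to a (URL pattern, caption) pair, each containing one %s.
extensions = ["sphinx.ext.extlinks"]

extlinks = {
    # In reST, :issue:`2714` renders as "#2714" linking to the click issue tracker.
    "issue": ("https://github.com/pallets/click/issues/%s", "#%s"),
    # In reST, :pr:`2714` renders as "#2714" linking to click pull requests.
    "pr": ("https://github.com/pallets/click/pull/%s", "#%s"),
}
```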
python__mypy-3593
[ { "content": "\"\"\"Plugin system for extending mypy.\"\"\"\n\nfrom abc import abstractmethod\nfrom typing import Callable, List, Tuple, Optional, NamedTuple, TypeVar\n\nfrom mypy.nodes import Expression, StrExpr, IntExpr, UnaryExpr, Context\nfrom mypy.types import (\n Type, Instance, CallableType, TypedDictType, UnionType, NoneTyp, FunctionLike, TypeVarType,\n AnyType, TypeList, UnboundType\n)\nfrom mypy.messages import MessageBuilder\nfrom mypy.options import Options\n\n\nclass AnalyzerPluginInterface:\n \"\"\"Interface for accessing semantic analyzer functionality in plugins.\"\"\"\n\n @abstractmethod\n def fail(self, msg: str, ctx: Context) -> None:\n raise NotImplementedError\n\n @abstractmethod\n def named_type(self, name: str, args: List[Type]) -> Instance:\n raise NotImplementedError\n\n @abstractmethod\n def analyze_type(self, typ: Type) -> Type:\n raise NotImplementedError\n\n @abstractmethod\n def analyze_callable_args(self, arglist: TypeList) -> Optional[Tuple[List[Type],\n List[int],\n List[Optional[str]]]]:\n raise NotImplementedError\n\n\n# A context for a hook that semantically analyzes an unbound type.\nAnalyzeTypeContext = NamedTuple(\n 'AnalyzeTypeContext', [\n ('type', UnboundType), # Type to analyze\n ('context', Context),\n ('api', AnalyzerPluginInterface)])\n\n\nclass CheckerPluginInterface:\n \"\"\"Interface for accessing type checker functionality in plugins.\"\"\"\n\n msg = None # type: MessageBuilder\n\n @abstractmethod\n def named_generic_type(self, name: str, args: List[Type]) -> Instance:\n raise NotImplementedError\n\n\n# A context for a function hook that infers the return type of a function with\n# a special signature.\n#\n# A no-op callback would just return the inferred return type, but a useful\n# callback at least sometimes can infer a more precise type.\nFunctionContext = NamedTuple(\n 'FunctionContext', [\n ('arg_types', List[List[Type]]), # List of actual caller types for each formal argument\n ('default_return_type', Type), # Return type inferred from signature\n ('args', List[List[Expression]]), # Actual expressions for each formal argument\n ('context', Context),\n ('api', CheckerPluginInterface)])\n\n# A context for a method signature hook that infers a better signature for a\n# method. Note that argument types aren't available yet. 
If you need them,\n# you have to use a method hook instead.\nMethodSigContext = NamedTuple(\n 'MethodSigContext', [\n ('type', Type), # Base object type for method call\n ('args', List[List[Expression]]), # Actual expressions for each formal argument\n ('default_signature', CallableType), # Original signature of the method\n ('context', Context),\n ('api', CheckerPluginInterface)])\n\n# A context for a method hook that infers the return type of a method with a\n# special signature.\n#\n# This is very similar to FunctionContext (only differences are documented).\nMethodContext = NamedTuple(\n 'MethodContext', [\n ('type', Type), # Base object type for method call\n ('arg_types', List[List[Type]]),\n ('default_return_type', Type),\n ('args', List[List[Expression]]),\n ('context', Context),\n ('api', CheckerPluginInterface)])\n\n# A context for an attribute type hook that infers the type of an attribute.\nAttributeContext = NamedTuple(\n 'AttributeContext', [\n ('type', Type), # Type of object with attribute\n ('default_attr_type', Type), # Original attribute type\n ('context', Context),\n ('api', CheckerPluginInterface)])\n\n\nclass Plugin:\n \"\"\"Base class of all type checker plugins.\n\n This defines a no-op plugin. Subclasses can override some methods to\n provide some actual functionality.\n\n All get_ methods are treated as pure functions (you should assume that\n results might be cached).\n\n Look at the comments of various *Context objects for descriptions of\n various hooks.\n \"\"\"\n\n def __init__(self, options: Options) -> None:\n self.options = options\n self.python_version = options.python_version\n\n def get_type_analyze_hook(self, fullname: str\n ) -> Optional[Callable[[AnalyzeTypeContext], Type]]:\n return None\n\n def get_function_hook(self, fullname: str\n ) -> Optional[Callable[[FunctionContext], Type]]:\n return None\n\n def get_method_signature_hook(self, fullname: str\n ) -> Optional[Callable[[MethodSigContext], CallableType]]:\n return None\n\n def get_method_hook(self, fullname: str\n ) -> Optional[Callable[[MethodContext], Type]]:\n return None\n\n def get_attribute_hook(self, fullname: str\n ) -> Optional[Callable[[AttributeContext], Type]]:\n return None\n\n # TODO: metaclass / class decorator hook\n\n\nT = TypeVar('T')\n\n\nclass ChainedPlugin(Plugin):\n \"\"\"A plugin that represents a sequence of chained plugins.\n\n Each lookup method returns the hook for the first plugin that\n reports a match.\n\n This class should not be subclassed -- use Plugin as the base class\n for all plugins.\n \"\"\"\n\n # TODO: Support caching of lookup results (through a LRU cache, for example).\n\n def __init__(self, options: Options, plugins: List[Plugin]) -> None:\n \"\"\"Initialize chained plugin.\n\n Assume that the child plugins aren't mutated (results may be cached).\n \"\"\"\n super().__init__(options)\n self._plugins = plugins\n\n def get_type_analyze_hook(self, fullname: str\n ) -> Optional[Callable[[AnalyzeTypeContext], Type]]:\n return self._find_hook(lambda plugin: plugin.get_type_analyze_hook(fullname))\n\n def get_function_hook(self, fullname: str\n ) -> Optional[Callable[[FunctionContext], Type]]:\n return self._find_hook(lambda plugin: plugin.get_function_hook(fullname))\n\n def get_method_signature_hook(self, fullname: str\n ) -> Optional[Callable[[MethodSigContext], CallableType]]:\n return self._find_hook(lambda plugin: plugin.get_method_signature_hook(fullname))\n\n def get_method_hook(self, fullname: str\n ) -> Optional[Callable[[MethodContext], Type]]:\n 
return self._find_hook(lambda plugin: plugin.get_method_hook(fullname))\n\n def get_attribute_hook(self, fullname: str\n ) -> Optional[Callable[[AttributeContext], Type]]:\n return self._find_hook(lambda plugin: plugin.get_attribute_hook(fullname))\n\n def _find_hook(self, lookup: Callable[[Plugin], T]) -> Optional[T]:\n for plugin in self._plugins:\n hook = lookup(plugin)\n if hook:\n return hook\n return None\n\n\nclass DefaultPlugin(Plugin):\n \"\"\"Type checker plugin that is enabled by default.\"\"\"\n\n def get_function_hook(self, fullname: str\n ) -> Optional[Callable[[FunctionContext], Type]]:\n if fullname == 'contextlib.contextmanager':\n return contextmanager_callback\n elif fullname == 'builtins.open' and self.python_version[0] == 3:\n return open_callback\n return None\n\n def get_method_signature_hook(self, fullname: str\n ) -> Optional[Callable[[MethodSigContext], CallableType]]:\n if fullname == 'typing.Mapping.get':\n return typed_dict_get_signature_callback\n return None\n\n def get_method_hook(self, fullname: str\n ) -> Optional[Callable[[MethodContext], Type]]:\n if fullname == 'typing.Mapping.get':\n return typed_dict_get_callback\n elif fullname == 'builtins.int.__pow__':\n return int_pow_callback\n return None\n\n\ndef open_callback(ctx: FunctionContext) -> Type:\n \"\"\"Infer a better return type for 'open'.\n\n Infer TextIO or BinaryIO as the return value if the mode argument is not\n given or is a literal.\n \"\"\"\n mode = None\n if not ctx.arg_types or len(ctx.arg_types[1]) != 1:\n mode = 'r'\n elif isinstance(ctx.args[1][0], StrExpr):\n mode = ctx.args[1][0].value\n if mode is not None:\n assert isinstance(ctx.default_return_type, Instance)\n if 'b' in mode:\n return ctx.api.named_generic_type('typing.BinaryIO', [])\n else:\n return ctx.api.named_generic_type('typing.TextIO', [])\n return ctx.default_return_type\n\n\ndef contextmanager_callback(ctx: FunctionContext) -> Type:\n \"\"\"Infer a better return type for 'contextlib.contextmanager'.\"\"\"\n # Be defensive, just in case.\n if ctx.arg_types and len(ctx.arg_types[0]) == 1:\n arg_type = ctx.arg_types[0][0]\n if (isinstance(arg_type, CallableType)\n and isinstance(ctx.default_return_type, CallableType)):\n # The stub signature doesn't preserve information about arguments so\n # add them back here.\n return ctx.default_return_type.copy_modified(\n arg_types=arg_type.arg_types,\n arg_kinds=arg_type.arg_kinds,\n arg_names=arg_type.arg_names)\n return ctx.default_return_type\n\n\ndef typed_dict_get_signature_callback(ctx: MethodSigContext) -> CallableType:\n \"\"\"Try to infer a better signature type for TypedDict.get.\n\n This is used to get better type context for the second argument that\n depends on a TypedDict value type.\n \"\"\"\n signature = ctx.default_signature\n if (isinstance(ctx.type, TypedDictType)\n and len(ctx.args) == 2\n and len(ctx.args[0]) == 1\n and isinstance(ctx.args[0][0], StrExpr)\n and len(signature.arg_types) == 2\n and len(signature.variables) == 1):\n key = ctx.args[0][0].value\n value_type = ctx.type.items.get(key)\n if value_type:\n # Tweak the signature to include the value type as context. 
It's\n # only needed for type inference since there's a union with a type\n # variable that accepts everything.\n tv = TypeVarType(signature.variables[0])\n return signature.copy_modified(\n arg_types=[signature.arg_types[0],\n UnionType.make_simplified_union([value_type, tv])])\n return signature\n\n\ndef typed_dict_get_callback(ctx: MethodContext) -> Type:\n \"\"\"Infer a precise return type for TypedDict.get with literal first argument.\"\"\"\n if (isinstance(ctx.type, TypedDictType)\n and len(ctx.arg_types) >= 1\n and len(ctx.arg_types[0]) == 1):\n if isinstance(ctx.args[0][0], StrExpr):\n key = ctx.args[0][0].value\n value_type = ctx.type.items.get(key)\n if value_type:\n if len(ctx.arg_types) == 1:\n return UnionType.make_simplified_union([value_type, NoneTyp()])\n elif len(ctx.arg_types) == 2 and len(ctx.arg_types[1]) == 1:\n return UnionType.make_simplified_union([value_type, ctx.arg_types[1][0]])\n else:\n ctx.api.msg.typeddict_item_name_not_found(ctx.type, key, ctx.context)\n return AnyType()\n return ctx.default_return_type\n\n\ndef int_pow_callback(ctx: MethodContext) -> Type:\n \"\"\"Infer a more precise return type for int.__pow__.\"\"\"\n if (len(ctx.arg_types) == 1\n and len(ctx.arg_types[0]) == 1):\n arg = ctx.args[0][0]\n if isinstance(arg, IntExpr):\n exponent = arg.value\n elif isinstance(arg, UnaryExpr) and arg.op == '-' and isinstance(arg.expr, IntExpr):\n exponent = -arg.expr.value\n else:\n # Right operand not an int literal or a negated literal -- give up.\n return ctx.default_return_type\n if exponent >= 0:\n return ctx.api.named_generic_type('builtins.int', [])\n else:\n return ctx.api.named_generic_type('builtins.float', [])\n return ctx.default_return_type\n", "path": "mypy/plugin.py" } ]
[ { "content": "\"\"\"Plugin system for extending mypy.\"\"\"\n\nfrom abc import abstractmethod\nfrom typing import Callable, List, Tuple, Optional, NamedTuple, TypeVar\n\nfrom mypy.nodes import Expression, StrExpr, IntExpr, UnaryExpr, Context\nfrom mypy.types import (\n Type, Instance, CallableType, TypedDictType, UnionType, NoneTyp, FunctionLike, TypeVarType,\n AnyType, TypeList, UnboundType\n)\nfrom mypy.messages import MessageBuilder\nfrom mypy.options import Options\n\n\nclass AnalyzerPluginInterface:\n \"\"\"Interface for accessing semantic analyzer functionality in plugins.\"\"\"\n\n @abstractmethod\n def fail(self, msg: str, ctx: Context) -> None:\n raise NotImplementedError\n\n @abstractmethod\n def named_type(self, name: str, args: List[Type]) -> Instance:\n raise NotImplementedError\n\n @abstractmethod\n def analyze_type(self, typ: Type) -> Type:\n raise NotImplementedError\n\n @abstractmethod\n def analyze_callable_args(self, arglist: TypeList) -> Optional[Tuple[List[Type],\n List[int],\n List[Optional[str]]]]:\n raise NotImplementedError\n\n\n# A context for a hook that semantically analyzes an unbound type.\nAnalyzeTypeContext = NamedTuple(\n 'AnalyzeTypeContext', [\n ('type', UnboundType), # Type to analyze\n ('context', Context),\n ('api', AnalyzerPluginInterface)])\n\n\nclass CheckerPluginInterface:\n \"\"\"Interface for accessing type checker functionality in plugins.\"\"\"\n\n msg = None # type: MessageBuilder\n\n @abstractmethod\n def named_generic_type(self, name: str, args: List[Type]) -> Instance:\n raise NotImplementedError\n\n\n# A context for a function hook that infers the return type of a function with\n# a special signature.\n#\n# A no-op callback would just return the inferred return type, but a useful\n# callback at least sometimes can infer a more precise type.\nFunctionContext = NamedTuple(\n 'FunctionContext', [\n ('arg_types', List[List[Type]]), # List of actual caller types for each formal argument\n ('default_return_type', Type), # Return type inferred from signature\n ('args', List[List[Expression]]), # Actual expressions for each formal argument\n ('context', Context),\n ('api', CheckerPluginInterface)])\n\n# A context for a method signature hook that infers a better signature for a\n# method. Note that argument types aren't available yet. 
If you need them,\n# you have to use a method hook instead.\nMethodSigContext = NamedTuple(\n 'MethodSigContext', [\n ('type', Type), # Base object type for method call\n ('args', List[List[Expression]]), # Actual expressions for each formal argument\n ('default_signature', CallableType), # Original signature of the method\n ('context', Context),\n ('api', CheckerPluginInterface)])\n\n# A context for a method hook that infers the return type of a method with a\n# special signature.\n#\n# This is very similar to FunctionContext (only differences are documented).\nMethodContext = NamedTuple(\n 'MethodContext', [\n ('type', Type), # Base object type for method call\n ('arg_types', List[List[Type]]),\n ('default_return_type', Type),\n ('args', List[List[Expression]]),\n ('context', Context),\n ('api', CheckerPluginInterface)])\n\n# A context for an attribute type hook that infers the type of an attribute.\nAttributeContext = NamedTuple(\n 'AttributeContext', [\n ('type', Type), # Type of object with attribute\n ('default_attr_type', Type), # Original attribute type\n ('context', Context),\n ('api', CheckerPluginInterface)])\n\n\nclass Plugin:\n \"\"\"Base class of all type checker plugins.\n\n This defines a no-op plugin. Subclasses can override some methods to\n provide some actual functionality.\n\n All get_ methods are treated as pure functions (you should assume that\n results might be cached).\n\n Look at the comments of various *Context objects for descriptions of\n various hooks.\n \"\"\"\n\n def __init__(self, options: Options) -> None:\n self.options = options\n self.python_version = options.python_version\n\n def get_type_analyze_hook(self, fullname: str\n ) -> Optional[Callable[[AnalyzeTypeContext], Type]]:\n return None\n\n def get_function_hook(self, fullname: str\n ) -> Optional[Callable[[FunctionContext], Type]]:\n return None\n\n def get_method_signature_hook(self, fullname: str\n ) -> Optional[Callable[[MethodSigContext], CallableType]]:\n return None\n\n def get_method_hook(self, fullname: str\n ) -> Optional[Callable[[MethodContext], Type]]:\n return None\n\n def get_attribute_hook(self, fullname: str\n ) -> Optional[Callable[[AttributeContext], Type]]:\n return None\n\n # TODO: metaclass / class decorator hook\n\n\nT = TypeVar('T')\n\n\nclass ChainedPlugin(Plugin):\n \"\"\"A plugin that represents a sequence of chained plugins.\n\n Each lookup method returns the hook for the first plugin that\n reports a match.\n\n This class should not be subclassed -- use Plugin as the base class\n for all plugins.\n \"\"\"\n\n # TODO: Support caching of lookup results (through a LRU cache, for example).\n\n def __init__(self, options: Options, plugins: List[Plugin]) -> None:\n \"\"\"Initialize chained plugin.\n\n Assume that the child plugins aren't mutated (results may be cached).\n \"\"\"\n super().__init__(options)\n self._plugins = plugins\n\n def get_type_analyze_hook(self, fullname: str\n ) -> Optional[Callable[[AnalyzeTypeContext], Type]]:\n return self._find_hook(lambda plugin: plugin.get_type_analyze_hook(fullname))\n\n def get_function_hook(self, fullname: str\n ) -> Optional[Callable[[FunctionContext], Type]]:\n return self._find_hook(lambda plugin: plugin.get_function_hook(fullname))\n\n def get_method_signature_hook(self, fullname: str\n ) -> Optional[Callable[[MethodSigContext], CallableType]]:\n return self._find_hook(lambda plugin: plugin.get_method_signature_hook(fullname))\n\n def get_method_hook(self, fullname: str\n ) -> Optional[Callable[[MethodContext], Type]]:\n 
return self._find_hook(lambda plugin: plugin.get_method_hook(fullname))\n\n def get_attribute_hook(self, fullname: str\n ) -> Optional[Callable[[AttributeContext], Type]]:\n return self._find_hook(lambda plugin: plugin.get_attribute_hook(fullname))\n\n def _find_hook(self, lookup: Callable[[Plugin], T]) -> Optional[T]:\n for plugin in self._plugins:\n hook = lookup(plugin)\n if hook:\n return hook\n return None\n\n\nclass DefaultPlugin(Plugin):\n \"\"\"Type checker plugin that is enabled by default.\"\"\"\n\n def get_function_hook(self, fullname: str\n ) -> Optional[Callable[[FunctionContext], Type]]:\n if fullname == 'contextlib.contextmanager':\n return contextmanager_callback\n elif fullname == 'builtins.open' and self.python_version[0] == 3:\n return open_callback\n return None\n\n def get_method_signature_hook(self, fullname: str\n ) -> Optional[Callable[[MethodSigContext], CallableType]]:\n if fullname == 'typing.Mapping.get':\n return typed_dict_get_signature_callback\n return None\n\n def get_method_hook(self, fullname: str\n ) -> Optional[Callable[[MethodContext], Type]]:\n if fullname == 'typing.Mapping.get':\n return typed_dict_get_callback\n elif fullname == 'builtins.int.__pow__':\n return int_pow_callback\n return None\n\n\ndef open_callback(ctx: FunctionContext) -> Type:\n \"\"\"Infer a better return type for 'open'.\n\n Infer TextIO or BinaryIO as the return value if the mode argument is not\n given or is a literal.\n \"\"\"\n mode = None\n if not ctx.arg_types or len(ctx.arg_types[1]) != 1:\n mode = 'r'\n elif isinstance(ctx.args[1][0], StrExpr):\n mode = ctx.args[1][0].value\n if mode is not None:\n assert isinstance(ctx.default_return_type, Instance)\n if 'b' in mode:\n return ctx.api.named_generic_type('typing.BinaryIO', [])\n else:\n return ctx.api.named_generic_type('typing.TextIO', [])\n return ctx.default_return_type\n\n\ndef contextmanager_callback(ctx: FunctionContext) -> Type:\n \"\"\"Infer a better return type for 'contextlib.contextmanager'.\"\"\"\n # Be defensive, just in case.\n if ctx.arg_types and len(ctx.arg_types[0]) == 1:\n arg_type = ctx.arg_types[0][0]\n if (isinstance(arg_type, CallableType)\n and isinstance(ctx.default_return_type, CallableType)):\n # The stub signature doesn't preserve information about arguments so\n # add them back here.\n return ctx.default_return_type.copy_modified(\n arg_types=arg_type.arg_types,\n arg_kinds=arg_type.arg_kinds,\n arg_names=arg_type.arg_names,\n variables=arg_type.variables,\n is_ellipsis_args=arg_type.is_ellipsis_args)\n return ctx.default_return_type\n\n\ndef typed_dict_get_signature_callback(ctx: MethodSigContext) -> CallableType:\n \"\"\"Try to infer a better signature type for TypedDict.get.\n\n This is used to get better type context for the second argument that\n depends on a TypedDict value type.\n \"\"\"\n signature = ctx.default_signature\n if (isinstance(ctx.type, TypedDictType)\n and len(ctx.args) == 2\n and len(ctx.args[0]) == 1\n and isinstance(ctx.args[0][0], StrExpr)\n and len(signature.arg_types) == 2\n and len(signature.variables) == 1):\n key = ctx.args[0][0].value\n value_type = ctx.type.items.get(key)\n if value_type:\n # Tweak the signature to include the value type as context. 
It's\n # only needed for type inference since there's a union with a type\n # variable that accepts everything.\n tv = TypeVarType(signature.variables[0])\n return signature.copy_modified(\n arg_types=[signature.arg_types[0],\n UnionType.make_simplified_union([value_type, tv])])\n return signature\n\n\ndef typed_dict_get_callback(ctx: MethodContext) -> Type:\n \"\"\"Infer a precise return type for TypedDict.get with literal first argument.\"\"\"\n if (isinstance(ctx.type, TypedDictType)\n and len(ctx.arg_types) >= 1\n and len(ctx.arg_types[0]) == 1):\n if isinstance(ctx.args[0][0], StrExpr):\n key = ctx.args[0][0].value\n value_type = ctx.type.items.get(key)\n if value_type:\n if len(ctx.arg_types) == 1:\n return UnionType.make_simplified_union([value_type, NoneTyp()])\n elif len(ctx.arg_types) == 2 and len(ctx.arg_types[1]) == 1:\n return UnionType.make_simplified_union([value_type, ctx.arg_types[1][0]])\n else:\n ctx.api.msg.typeddict_item_name_not_found(ctx.type, key, ctx.context)\n return AnyType()\n return ctx.default_return_type\n\n\ndef int_pow_callback(ctx: MethodContext) -> Type:\n \"\"\"Infer a more precise return type for int.__pow__.\"\"\"\n if (len(ctx.arg_types) == 1\n and len(ctx.arg_types[0]) == 1):\n arg = ctx.args[0][0]\n if isinstance(arg, IntExpr):\n exponent = arg.value\n elif isinstance(arg, UnaryExpr) and arg.op == '-' and isinstance(arg.expr, IntExpr):\n exponent = -arg.expr.value\n else:\n # Right operand not an int literal or a negated literal -- give up.\n return ctx.default_return_type\n if exponent >= 0:\n return ctx.api.named_generic_type('builtins.int', [])\n else:\n return ctx.api.named_generic_type('builtins.float', [])\n return ctx.default_return_type\n", "path": "mypy/plugin.py" } ]
diff --git a/mypy/plugin.py b/mypy/plugin.py index f94790a06e96..97cc635e9ab4 100644 --- a/mypy/plugin.py +++ b/mypy/plugin.py @@ -247,7 +247,9 @@ def contextmanager_callback(ctx: FunctionContext) -> Type: return ctx.default_return_type.copy_modified( arg_types=arg_type.arg_types, arg_kinds=arg_type.arg_kinds, - arg_names=arg_type.arg_names) + arg_names=arg_type.arg_names, + variables=arg_type.variables, + is_ellipsis_args=arg_type.is_ellipsis_args) return ctx.default_return_type diff --git a/mypy/test/testcheck.py b/mypy/test/testcheck.py index 39a2d3a3a308..3aae19f1b40f 100644 --- a/mypy/test/testcheck.py +++ b/mypy/test/testcheck.py @@ -77,6 +77,7 @@ 'check-enum.test', 'check-incomplete-fixture.test', 'check-custom-plugin.test', + 'check-default-plugin.test', ] diff --git a/test-data/unit/check-default-plugin.test b/test-data/unit/check-default-plugin.test new file mode 100644 index 000000000000..c6a5677159e2 --- /dev/null +++ b/test-data/unit/check-default-plugin.test @@ -0,0 +1,33 @@ +-- Test cases for the default plugin +-- +-- Note that we have additional test cases in pythoneval.test (that use real typeshed stubs). + + +[case testContextManagerWithGenericFunction] +from contextlib import contextmanager +from typing import TypeVar, Iterator + +T = TypeVar('T') + +@contextmanager +def yield_id(item: T) -> Iterator[T]: + yield item + +reveal_type(yield_id) # E: Revealed type is 'def [T] (item: T`-1) -> contextlib.GeneratorContextManager[T`-1]' + +with yield_id(1) as x: + reveal_type(x) # E: Revealed type is 'builtins.int*' + +f = yield_id +def g(x, y): pass +f = g # E: Incompatible types in assignment (expression has type Callable[[Any, Any], Any], variable has type Callable[[T], GeneratorContextManager[T]]) +[typing fixtures/typing-full.pyi] + +[case testContextManagerWithUnspecifiedArguments] +from contextlib import contextmanager +from typing import Callable, Iterator + +c: Callable[..., Iterator[int]] +reveal_type(c) # E: Revealed type is 'def (*Any, **Any) -> typing.Iterator[builtins.int]' +reveal_type(contextmanager(c)) # E: Revealed type is 'def (*Any, **Any) -> contextlib.GeneratorContextManager[builtins.int*]' +[typing fixtures/typing-full.pyi] diff --git a/test-data/unit/fixtures/typing-full.pyi b/test-data/unit/fixtures/typing-full.pyi index 463b117db48d..1a8ba9acaf5d 100644 --- a/test-data/unit/fixtures/typing-full.pyi +++ b/test-data/unit/fixtures/typing-full.pyi @@ -111,4 +111,8 @@ class Mapping(Generic[T, U]): class MutableMapping(Generic[T, U]): pass +class ContextManager(Generic[T]): + def __enter__(self) -> T: ... + def __exit__(self, exc_type, exc_value, traceback): ... + TYPE_CHECKING = 1 diff --git a/test-data/unit/lib-stub/contextlib.pyi b/test-data/unit/lib-stub/contextlib.pyi new file mode 100644 index 000000000000..fa4760c71054 --- /dev/null +++ b/test-data/unit/lib-stub/contextlib.pyi @@ -0,0 +1,10 @@ +from typing import Generic, TypeVar, Callable, Iterator +from typing import ContextManager as ContextManager + +_T = TypeVar('_T') + +class GeneratorContextManager(ContextManager[_T], Generic[_T]): + def __call__(self, func: Callable[..., _T]) -> Callable[..., _T]: ... + +def contextmanager(func: Callable[..., Iterator[_T]]) -> Callable[..., GeneratorContextManager[_T]]: + ...
Typing for @contextmanager doesn't play well with generic functions ``` from contextlib import contextmanager from typing import TypeVar, Iterator _T = TypeVar('_T') @contextmanager def yield_id(item): # type: (_T) -> Iterator[_T] yield item with yield_id(1): pass ``` ... results in... `example.py:11: error: Argument 1 to "yield_id" has incompatible type "int"; expected "_T"`
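To make the fix in the `mypy/plugin.py` hunk above easier to follow, here is a hedged, condensed sketch of the `contextmanager_callback` hook after the patch. The `copy_modified(...)` call and its keyword arguments come straight from the diff; the surrounding imports and `isinstance` guards are reconstructed from the diff context and may differ slightly from the real module, so treat this as an illustration of why forwarding `variables` and `is_ellipsis_args` preserves the wrapped function's type variables, not as a verbatim excerpt.

```python
# Sketch (not a verbatim excerpt) of the patched default-plugin callback.
# Forwarding `variables` keeps the decorated function generic, so
# `yield_id(1)` infers GeneratorContextManager[int] instead of failing
# to solve the type variable `_T`.
from mypy.plugin import FunctionContext
from mypy.types import CallableType, Type


def contextmanager_callback(ctx: FunctionContext) -> Type:
    """Infer a better return type for 'contextlib.contextmanager'."""
    if ctx.arg_types and len(ctx.arg_types[0]) == 1:
        arg_type = ctx.arg_types[0][0]
        if (isinstance(arg_type, CallableType)
                and isinstance(ctx.default_return_type, CallableType)):
            return ctx.default_return_type.copy_modified(
                arg_types=arg_type.arg_types,
                arg_kinds=arg_type.arg_kinds,
                arg_names=arg_type.arg_names,
                # The two arguments below are what the patch adds; without
                # them the type variables and `...`-argument form of the
                # wrapped callable are dropped from the inferred signature.
                variables=arg_type.variables,
                is_ellipsis_args=arg_type.is_ellipsis_args)
    return ctx.default_return_type
```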
koxudaxi__datamodel-code-generator-1669
[ { "content": "import re\nfrom abc import ABC, abstractmethod\nfrom enum import Enum, auto\nfrom functools import lru_cache\nfrom itertools import chain\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n ClassVar,\n Dict,\n FrozenSet,\n Iterable,\n Iterator,\n List,\n Optional,\n Pattern,\n Sequence,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n)\n\nimport pydantic\nfrom packaging import version\nfrom pydantic import (\n StrictBool,\n StrictInt,\n StrictStr,\n create_model,\n)\n\nfrom datamodel_code_generator.format import PythonVersion\nfrom datamodel_code_generator.imports import (\n IMPORT_ABC_MAPPING,\n IMPORT_ABC_SEQUENCE,\n IMPORT_ABC_SET,\n IMPORT_DICT,\n IMPORT_FROZEN_SET,\n IMPORT_LIST,\n IMPORT_LITERAL,\n IMPORT_LITERAL_BACKPORT,\n IMPORT_MAPPING,\n IMPORT_OPTIONAL,\n IMPORT_SEQUENCE,\n IMPORT_SET,\n IMPORT_UNION,\n Import,\n)\nfrom datamodel_code_generator.reference import Reference, _BaseModel\nfrom datamodel_code_generator.util import (\n PYDANTIC_V2,\n ConfigDict,\n Protocol,\n runtime_checkable,\n)\n\nif PYDANTIC_V2:\n from pydantic import GetCoreSchemaHandler\n from pydantic_core import core_schema\n\nT = TypeVar('T')\n\nOPTIONAL = 'Optional'\nOPTIONAL_PREFIX = f'{OPTIONAL}['\n\nUNION = 'Union'\nUNION_PREFIX = f'{UNION}['\nUNION_DELIMITER = ', '\nUNION_PATTERN: Pattern[str] = re.compile(r'\\s*,\\s*')\nUNION_OPERATOR_DELIMITER = ' | '\nUNION_OPERATOR_PATTERN: Pattern[str] = re.compile(r'\\s*\\|\\s*')\nNONE = 'None'\nANY = 'Any'\nLITERAL = 'Literal'\nSEQUENCE = 'Sequence'\nFROZEN_SET = 'FrozenSet'\nMAPPING = 'Mapping'\nDICT = 'Dict'\nSET = 'Set'\nLIST = 'List'\nSTANDARD_DICT = 'dict'\nSTANDARD_LIST = 'list'\nSTANDARD_SET = 'set'\nSTR = 'str'\n\nNOT_REQUIRED = 'NotRequired'\nNOT_REQUIRED_PREFIX = f'{NOT_REQUIRED}['\n\n\nclass StrictTypes(Enum):\n str = 'str'\n bytes = 'bytes'\n int = 'int'\n float = 'float'\n bool = 'bool'\n\n\nclass UnionIntFloat:\n def __init__(self, value: Union[int, float]) -> None:\n self.value: Union[int, float] = value\n\n def __int__(self) -> int:\n return int(self.value)\n\n def __float__(self) -> float:\n return float(self.value)\n\n def __str__(self) -> str:\n return str(self.value)\n\n @classmethod\n def __get_validators__(cls) -> Iterator[Callable[[Any], Any]]:\n yield cls.validate\n\n @classmethod\n def __get_pydantic_core_schema__(\n cls, _source_type: Any, _handler: 'GetCoreSchemaHandler'\n ) -> 'core_schema.CoreSchema':\n from_int_schema = core_schema.chain_schema(\n [\n core_schema.union_schema(\n [core_schema.int_schema(), core_schema.float_schema()]\n ),\n core_schema.no_info_plain_validator_function(cls.validate),\n ]\n )\n\n return core_schema.json_or_python_schema(\n json_schema=core_schema.no_info_plain_validator_function(cls.validate),\n python_schema=core_schema.union_schema(\n [\n # check if it's an instance first before doing any further work\n core_schema.is_instance_schema(UnionIntFloat),\n from_int_schema,\n ]\n ),\n serialization=core_schema.plain_serializer_function_ser_schema(\n lambda instance: instance.value\n ),\n )\n\n @classmethod\n def validate(cls, v: Any) -> 'UnionIntFloat':\n if isinstance(v, UnionIntFloat):\n return v\n elif not isinstance(v, (int, float)): # pragma: no cover\n try:\n int(v)\n return cls(v)\n except (TypeError, ValueError):\n pass\n try:\n float(v)\n return cls(v)\n except (TypeError, ValueError):\n pass\n\n raise TypeError(f'{v} is not int or float')\n return cls(v)\n\n\ndef chain_as_tuple(*iterables: Iterable[T]) -> Tuple[T, ...]:\n return tuple(chain(*iterables))\n\n\n@lru_cache()\ndef 
_remove_none_from_type(\n type_: str, split_pattern: Pattern[str], delimiter: str\n) -> List[str]:\n types: List[str] = []\n split_type: str = ''\n inner_count: int = 0\n for part in re.split(split_pattern, type_):\n if part == NONE:\n continue\n inner_count += part.count('[') - part.count(']')\n if split_type:\n split_type += delimiter\n if inner_count == 0:\n if split_type:\n types.append(f'{split_type}{part}')\n else:\n types.append(part)\n split_type = ''\n continue\n else:\n split_type += part\n return types\n\n\ndef _remove_none_from_union(type_: str, use_union_operator: bool) -> str:\n if use_union_operator:\n if not re.match(r'^\\w+ | ', type_):\n return type_\n return UNION_OPERATOR_DELIMITER.join(\n _remove_none_from_type(\n type_, UNION_OPERATOR_PATTERN, UNION_OPERATOR_DELIMITER\n )\n )\n\n if not type_.startswith(UNION_PREFIX):\n return type_\n inner_types = _remove_none_from_type(\n type_[len(UNION_PREFIX) :][:-1], UNION_PATTERN, UNION_DELIMITER\n )\n\n if len(inner_types) == 1:\n return inner_types[0]\n return f'{UNION_PREFIX}{UNION_DELIMITER.join(inner_types)}]'\n\n\n@lru_cache()\ndef get_optional_type(type_: str, use_union_operator: bool) -> str:\n type_ = _remove_none_from_union(type_, use_union_operator)\n\n if not type_ or type_ == NONE:\n return NONE\n if use_union_operator:\n return f'{type_} | {NONE}'\n return f'{OPTIONAL_PREFIX}{type_}]'\n\n\n@runtime_checkable\nclass Modular(Protocol):\n @property\n def module_name(self) -> str:\n raise NotImplementedError\n\n\n@runtime_checkable\nclass Nullable(Protocol):\n @property\n def nullable(self) -> bool:\n raise NotImplementedError\n\n\nclass DataType(_BaseModel):\n if PYDANTIC_V2:\n # TODO[pydantic]: The following keys were removed: `copy_on_model_validation`.\n # Check https://docs.pydantic.dev/dev-v2/migration/#changes-to-config for more information.\n model_config = ConfigDict(\n extra='forbid',\n revalidate_instances='never',\n )\n else:\n if not TYPE_CHECKING:\n\n @classmethod\n def model_rebuild(cls) -> None:\n cls.update_forward_refs()\n\n class Config:\n extra = 'forbid'\n copy_on_model_validation = (\n False\n if version.parse(pydantic.VERSION) < version.parse('1.9.2')\n else 'none'\n )\n\n type: Optional[str] = None\n reference: Optional[Reference] = None\n data_types: List['DataType'] = []\n is_func: bool = False\n kwargs: Optional[Dict[str, Any]] = None\n import_: Optional[Import] = None\n python_version: PythonVersion = PythonVersion.PY_37\n is_optional: bool = False\n is_dict: bool = False\n is_list: bool = False\n is_set: bool = False\n is_custom_type: bool = False\n literals: List[Union[StrictBool, StrictInt, StrictStr]] = []\n use_standard_collections: bool = False\n use_generic_container: bool = False\n use_union_operator: bool = False\n alias: Optional[str] = None\n parent: Optional[Any] = None\n children: List[Any] = []\n strict: bool = False\n dict_key: Optional['DataType'] = None\n\n _exclude_fields: ClassVar[Set[str]] = {'parent', 'children'}\n _pass_fields: ClassVar[Set[str]] = {'parent', 'children', 'data_types', 'reference'}\n\n @classmethod\n def from_import(\n cls: Type['DataTypeT'],\n import_: Import,\n *,\n is_optional: bool = False,\n is_dict: bool = False,\n is_list: bool = False,\n is_set: bool = False,\n is_custom_type: bool = False,\n strict: bool = False,\n kwargs: Optional[Dict[str, Any]] = None,\n ) -> 'DataTypeT':\n return cls(\n type=import_.import_,\n import_=import_,\n is_optional=is_optional,\n is_dict=is_dict,\n is_list=is_list,\n is_set=is_set,\n is_func=True if kwargs else 
False,\n is_custom_type=is_custom_type,\n strict=strict,\n kwargs=kwargs,\n )\n\n @property\n def unresolved_types(self) -> FrozenSet[str]:\n return frozenset(\n {\n t.reference.path\n for data_types in self.data_types\n for t in data_types.all_data_types\n if t.reference\n }\n | ({self.reference.path} if self.reference else set())\n )\n\n def replace_reference(self, reference: Optional[Reference]) -> None:\n if not self.reference: # pragma: no cover\n raise Exception(\n f\"`{self.__class__.__name__}.replace_reference()` can't be called\"\n f' when `reference` field is empty.'\n )\n self_id = id(self)\n self.reference.children = [\n c for c in self.reference.children if id(c) != self_id\n ]\n self.reference = reference\n if reference:\n reference.children.append(self)\n\n def remove_reference(self) -> None:\n self.replace_reference(None)\n\n @property\n def module_name(self) -> Optional[str]:\n if self.reference and isinstance(self.reference.source, Modular):\n return self.reference.source.module_name\n return None # pragma: no cover\n\n @property\n def full_name(self) -> str:\n module_name = self.module_name\n if module_name:\n return f'{module_name}.{self.reference.short_name}' # type: ignore\n return self.reference.short_name # type: ignore\n\n @property\n def all_data_types(self) -> Iterator['DataType']:\n for data_type in self.data_types:\n yield from data_type.all_data_types\n yield self\n\n @property\n def all_imports(self) -> Iterator[Import]:\n for data_type in self.data_types:\n yield from data_type.all_imports\n yield from self.imports\n\n @property\n def imports(self) -> Iterator[Import]:\n if self.import_:\n yield self.import_\n imports: Tuple[Tuple[bool, Import], ...] = (\n (self.is_optional and not self.use_union_operator, IMPORT_OPTIONAL),\n (len(self.data_types) > 1 and not self.use_union_operator, IMPORT_UNION),\n )\n if any(self.literals):\n import_literal = (\n IMPORT_LITERAL\n if self.python_version.has_literal_type\n else IMPORT_LITERAL_BACKPORT\n )\n imports = (\n *imports,\n (any(self.literals), import_literal),\n )\n\n if self.use_generic_container:\n if self.use_standard_collections:\n imports = (\n *imports,\n (self.is_list, IMPORT_ABC_SEQUENCE),\n (self.is_set, IMPORT_ABC_SET),\n (self.is_dict, IMPORT_ABC_MAPPING),\n )\n else:\n imports = (\n *imports,\n (self.is_list, IMPORT_SEQUENCE),\n (self.is_set, IMPORT_FROZEN_SET),\n (self.is_dict, IMPORT_MAPPING),\n )\n elif not self.use_standard_collections:\n imports = (\n *imports,\n (self.is_list, IMPORT_LIST),\n (self.is_set, IMPORT_SET),\n (self.is_dict, IMPORT_DICT),\n )\n for field, import_ in imports:\n if field and import_ != self.import_:\n yield import_\n\n if self.dict_key:\n yield from self.dict_key.imports\n\n def __init__(self, **values: Any) -> None:\n if not TYPE_CHECKING:\n super().__init__(**values)\n\n for type_ in self.data_types:\n if type_.type == ANY and type_.is_optional:\n if any(t for t in self.data_types if t.type != ANY): # pragma: no cover\n self.is_optional = True\n self.data_types = [\n t\n for t in self.data_types\n if not (t.type == ANY and t.is_optional)\n ]\n break # pragma: no cover\n\n for data_type in self.data_types:\n if data_type.reference or data_type.data_types:\n data_type.parent = self\n\n if self.reference:\n self.reference.children.append(self)\n\n @property\n def type_hint(self) -> str:\n type_: Optional[str] = self.alias or self.type\n if not type_:\n if self.is_union:\n data_types: List[str] = []\n for data_type in self.data_types:\n data_type_type = 
data_type.type_hint\n if data_type_type in data_types: # pragma: no cover\n continue\n data_types.append(data_type_type)\n if NONE in data_types:\n data_types = [d for d in data_types if d != NONE]\n self.is_optional = True\n if len(data_types) == 1:\n type_ = data_types[0]\n else:\n if self.use_union_operator:\n type_ = UNION_OPERATOR_DELIMITER.join(data_types)\n else:\n type_ = f'{UNION_PREFIX}{UNION_DELIMITER.join(data_types)}]'\n elif len(self.data_types) == 1:\n type_ = self.data_types[0].type_hint\n elif self.literals:\n type_ = f\"{LITERAL}[{', '.join(repr(literal) for literal in self.literals)}]\"\n else:\n if self.reference:\n type_ = self.reference.short_name\n else:\n # TODO support strict Any\n # type_ = 'Any'\n type_ = ''\n if self.reference:\n source = self.reference.source\n if isinstance(source, Nullable) and source.nullable:\n self.is_optional = True\n if self.reference and self.python_version == PythonVersion.PY_36:\n type_ = f\"'{type_}'\"\n if self.is_list:\n if self.use_generic_container:\n list_ = SEQUENCE\n elif self.use_standard_collections:\n list_ = STANDARD_LIST\n else:\n list_ = LIST\n type_ = f'{list_}[{type_}]' if type_ else list_\n elif self.is_set:\n if self.use_generic_container:\n set_ = FROZEN_SET\n elif self.use_standard_collections:\n set_ = STANDARD_SET\n else:\n set_ = SET\n type_ = f'{set_}[{type_}]' if type_ else set_\n elif self.is_dict:\n if self.use_generic_container:\n dict_ = MAPPING\n elif self.use_standard_collections:\n dict_ = STANDARD_DICT\n else:\n dict_ = DICT\n if self.dict_key or type_:\n key = self.dict_key.type_hint if self.dict_key else STR\n type_ = f'{dict_}[{key}, {type_ or ANY}]'\n else: # pragma: no cover\n type_ = dict_\n if self.is_optional and type_ != ANY:\n return get_optional_type(type_, self.use_union_operator)\n elif self.is_func:\n if self.kwargs:\n kwargs: str = ', '.join(f'{k}={v}' for k, v in self.kwargs.items())\n return f'{type_}({kwargs})'\n return f'{type_}()'\n return type_\n\n @property\n def is_union(self) -> bool:\n return len(self.data_types) > 1\n\n\nDataType.model_rebuild()\n\nDataTypeT = TypeVar('DataTypeT', bound=DataType)\n\n\nclass EmptyDataType(DataType):\n pass\n\n\nclass Types(Enum):\n integer = auto()\n int32 = auto()\n int64 = auto()\n number = auto()\n float = auto()\n double = auto()\n decimal = auto()\n time = auto()\n string = auto()\n byte = auto()\n binary = auto()\n date = auto()\n date_time = auto()\n password = auto()\n email = auto()\n uuid = auto()\n uuid1 = auto()\n uuid2 = auto()\n uuid3 = auto()\n uuid4 = auto()\n uuid5 = auto()\n uri = auto()\n hostname = auto()\n ipv4 = auto()\n ipv4_network = auto()\n ipv6 = auto()\n ipv6_network = auto()\n boolean = auto()\n object = auto()\n null = auto()\n array = auto()\n any = auto()\n\n\nclass DataTypeManager(ABC):\n def __init__(\n self,\n python_version: PythonVersion = PythonVersion.PY_37,\n use_standard_collections: bool = False,\n use_generic_container_types: bool = False,\n strict_types: Optional[Sequence[StrictTypes]] = None,\n use_non_positive_negative_number_constrained_types: bool = False,\n use_union_operator: bool = False,\n ) -> None:\n self.python_version = python_version\n self.use_standard_collections: bool = use_standard_collections\n self.use_generic_container_types: bool = use_generic_container_types\n self.strict_types: Sequence[StrictTypes] = strict_types or ()\n self.use_non_positive_negative_number_constrained_types: bool = (\n use_non_positive_negative_number_constrained_types\n )\n self.use_union_operator: bool = 
use_union_operator\n\n if (\n use_generic_container_types and python_version == PythonVersion.PY_36\n ): # pragma: no cover\n raise Exception(\n 'use_generic_container_types can not be used with target_python_version 3.6.\\n'\n ' The version will be not supported in a future version'\n )\n\n if TYPE_CHECKING:\n self.data_type: Type[DataType]\n else:\n self.data_type: Type[DataType] = create_model(\n 'ContextDataType',\n python_version=(PythonVersion, python_version),\n use_standard_collections=(bool, use_standard_collections),\n use_generic_container=(bool, use_generic_container_types),\n use_union_operator=(bool, use_union_operator),\n __base__=DataType,\n )\n\n @abstractmethod\n def get_data_type(self, types: Types, **kwargs: Any) -> DataType:\n raise NotImplementedError\n\n def get_data_type_from_full_path(\n self, full_path: str, is_custom_type: bool\n ) -> DataType:\n return self.data_type.from_import(\n Import.from_full_path(full_path), is_custom_type=is_custom_type\n )\n\n def get_data_type_from_value(self, value: Any) -> DataType:\n type_: Optional[Types] = None\n if isinstance(value, str):\n type_ = Types.string\n elif isinstance(value, bool):\n type_ = Types.boolean\n elif isinstance(value, int):\n type_ = Types.integer\n elif isinstance(value, float):\n type_ = Types.float\n elif isinstance(value, dict):\n return self.data_type.from_import(IMPORT_DICT)\n elif isinstance(value, list):\n return self.data_type.from_import(IMPORT_LIST)\n else:\n type_ = Types.any\n return self.get_data_type(type_)\n", "path": "datamodel_code_generator/types.py" } ]
[ { "content": "import re\nfrom abc import ABC, abstractmethod\nfrom enum import Enum, auto\nfrom functools import lru_cache\nfrom itertools import chain\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n ClassVar,\n Dict,\n FrozenSet,\n Iterable,\n Iterator,\n List,\n Optional,\n Pattern,\n Sequence,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n)\n\nimport pydantic\nfrom packaging import version\nfrom pydantic import (\n StrictBool,\n StrictInt,\n StrictStr,\n create_model,\n)\n\nfrom datamodel_code_generator.format import PythonVersion\nfrom datamodel_code_generator.imports import (\n IMPORT_ABC_MAPPING,\n IMPORT_ABC_SEQUENCE,\n IMPORT_ABC_SET,\n IMPORT_DICT,\n IMPORT_FROZEN_SET,\n IMPORT_LIST,\n IMPORT_LITERAL,\n IMPORT_LITERAL_BACKPORT,\n IMPORT_MAPPING,\n IMPORT_OPTIONAL,\n IMPORT_SEQUENCE,\n IMPORT_SET,\n IMPORT_UNION,\n Import,\n)\nfrom datamodel_code_generator.reference import Reference, _BaseModel\nfrom datamodel_code_generator.util import (\n PYDANTIC_V2,\n ConfigDict,\n Protocol,\n runtime_checkable,\n)\n\nif PYDANTIC_V2:\n from pydantic import GetCoreSchemaHandler\n from pydantic_core import core_schema\n\nT = TypeVar('T')\n\nOPTIONAL = 'Optional'\nOPTIONAL_PREFIX = f'{OPTIONAL}['\n\nUNION = 'Union'\nUNION_PREFIX = f'{UNION}['\nUNION_DELIMITER = ', '\nUNION_PATTERN: Pattern[str] = re.compile(r'\\s*,\\s*')\nUNION_OPERATOR_DELIMITER = ' | '\nUNION_OPERATOR_PATTERN: Pattern[str] = re.compile(r'\\s*\\|\\s*')\nNONE = 'None'\nANY = 'Any'\nLITERAL = 'Literal'\nSEQUENCE = 'Sequence'\nFROZEN_SET = 'FrozenSet'\nMAPPING = 'Mapping'\nDICT = 'Dict'\nSET = 'Set'\nLIST = 'List'\nSTANDARD_DICT = 'dict'\nSTANDARD_LIST = 'list'\nSTANDARD_SET = 'set'\nSTR = 'str'\n\nNOT_REQUIRED = 'NotRequired'\nNOT_REQUIRED_PREFIX = f'{NOT_REQUIRED}['\n\n\nclass StrictTypes(Enum):\n str = 'str'\n bytes = 'bytes'\n int = 'int'\n float = 'float'\n bool = 'bool'\n\n\nclass UnionIntFloat:\n def __init__(self, value: Union[int, float]) -> None:\n self.value: Union[int, float] = value\n\n def __int__(self) -> int:\n return int(self.value)\n\n def __float__(self) -> float:\n return float(self.value)\n\n def __str__(self) -> str:\n return str(self.value)\n\n @classmethod\n def __get_validators__(cls) -> Iterator[Callable[[Any], Any]]:\n yield cls.validate\n\n @classmethod\n def __get_pydantic_core_schema__(\n cls, _source_type: Any, _handler: 'GetCoreSchemaHandler'\n ) -> 'core_schema.CoreSchema':\n from_int_schema = core_schema.chain_schema(\n [\n core_schema.union_schema(\n [core_schema.int_schema(), core_schema.float_schema()]\n ),\n core_schema.no_info_plain_validator_function(cls.validate),\n ]\n )\n\n return core_schema.json_or_python_schema(\n json_schema=from_int_schema,\n python_schema=core_schema.union_schema(\n [\n # check if it's an instance first before doing any further work\n core_schema.is_instance_schema(UnionIntFloat),\n from_int_schema,\n ]\n ),\n serialization=core_schema.plain_serializer_function_ser_schema(\n lambda instance: instance.value\n ),\n )\n\n @classmethod\n def validate(cls, v: Any) -> 'UnionIntFloat':\n if isinstance(v, UnionIntFloat):\n return v\n elif not isinstance(v, (int, float)): # pragma: no cover\n try:\n int(v)\n return cls(v)\n except (TypeError, ValueError):\n pass\n try:\n float(v)\n return cls(v)\n except (TypeError, ValueError):\n pass\n\n raise TypeError(f'{v} is not int or float')\n return cls(v)\n\n\ndef chain_as_tuple(*iterables: Iterable[T]) -> Tuple[T, ...]:\n return tuple(chain(*iterables))\n\n\n@lru_cache()\ndef _remove_none_from_type(\n type_: str, 
split_pattern: Pattern[str], delimiter: str\n) -> List[str]:\n types: List[str] = []\n split_type: str = ''\n inner_count: int = 0\n for part in re.split(split_pattern, type_):\n if part == NONE:\n continue\n inner_count += part.count('[') - part.count(']')\n if split_type:\n split_type += delimiter\n if inner_count == 0:\n if split_type:\n types.append(f'{split_type}{part}')\n else:\n types.append(part)\n split_type = ''\n continue\n else:\n split_type += part\n return types\n\n\ndef _remove_none_from_union(type_: str, use_union_operator: bool) -> str:\n if use_union_operator:\n if not re.match(r'^\\w+ | ', type_):\n return type_\n return UNION_OPERATOR_DELIMITER.join(\n _remove_none_from_type(\n type_, UNION_OPERATOR_PATTERN, UNION_OPERATOR_DELIMITER\n )\n )\n\n if not type_.startswith(UNION_PREFIX):\n return type_\n inner_types = _remove_none_from_type(\n type_[len(UNION_PREFIX) :][:-1], UNION_PATTERN, UNION_DELIMITER\n )\n\n if len(inner_types) == 1:\n return inner_types[0]\n return f'{UNION_PREFIX}{UNION_DELIMITER.join(inner_types)}]'\n\n\n@lru_cache()\ndef get_optional_type(type_: str, use_union_operator: bool) -> str:\n type_ = _remove_none_from_union(type_, use_union_operator)\n\n if not type_ or type_ == NONE:\n return NONE\n if use_union_operator:\n return f'{type_} | {NONE}'\n return f'{OPTIONAL_PREFIX}{type_}]'\n\n\n@runtime_checkable\nclass Modular(Protocol):\n @property\n def module_name(self) -> str:\n raise NotImplementedError\n\n\n@runtime_checkable\nclass Nullable(Protocol):\n @property\n def nullable(self) -> bool:\n raise NotImplementedError\n\n\nclass DataType(_BaseModel):\n if PYDANTIC_V2:\n # TODO[pydantic]: The following keys were removed: `copy_on_model_validation`.\n # Check https://docs.pydantic.dev/dev-v2/migration/#changes-to-config for more information.\n model_config = ConfigDict(\n extra='forbid',\n revalidate_instances='never',\n )\n else:\n if not TYPE_CHECKING:\n\n @classmethod\n def model_rebuild(cls) -> None:\n cls.update_forward_refs()\n\n class Config:\n extra = 'forbid'\n copy_on_model_validation = (\n False\n if version.parse(pydantic.VERSION) < version.parse('1.9.2')\n else 'none'\n )\n\n type: Optional[str] = None\n reference: Optional[Reference] = None\n data_types: List['DataType'] = []\n is_func: bool = False\n kwargs: Optional[Dict[str, Any]] = None\n import_: Optional[Import] = None\n python_version: PythonVersion = PythonVersion.PY_37\n is_optional: bool = False\n is_dict: bool = False\n is_list: bool = False\n is_set: bool = False\n is_custom_type: bool = False\n literals: List[Union[StrictBool, StrictInt, StrictStr]] = []\n use_standard_collections: bool = False\n use_generic_container: bool = False\n use_union_operator: bool = False\n alias: Optional[str] = None\n parent: Optional[Any] = None\n children: List[Any] = []\n strict: bool = False\n dict_key: Optional['DataType'] = None\n\n _exclude_fields: ClassVar[Set[str]] = {'parent', 'children'}\n _pass_fields: ClassVar[Set[str]] = {'parent', 'children', 'data_types', 'reference'}\n\n @classmethod\n def from_import(\n cls: Type['DataTypeT'],\n import_: Import,\n *,\n is_optional: bool = False,\n is_dict: bool = False,\n is_list: bool = False,\n is_set: bool = False,\n is_custom_type: bool = False,\n strict: bool = False,\n kwargs: Optional[Dict[str, Any]] = None,\n ) -> 'DataTypeT':\n return cls(\n type=import_.import_,\n import_=import_,\n is_optional=is_optional,\n is_dict=is_dict,\n is_list=is_list,\n is_set=is_set,\n is_func=True if kwargs else False,\n is_custom_type=is_custom_type,\n 
strict=strict,\n kwargs=kwargs,\n )\n\n @property\n def unresolved_types(self) -> FrozenSet[str]:\n return frozenset(\n {\n t.reference.path\n for data_types in self.data_types\n for t in data_types.all_data_types\n if t.reference\n }\n | ({self.reference.path} if self.reference else set())\n )\n\n def replace_reference(self, reference: Optional[Reference]) -> None:\n if not self.reference: # pragma: no cover\n raise Exception(\n f\"`{self.__class__.__name__}.replace_reference()` can't be called\"\n f' when `reference` field is empty.'\n )\n self_id = id(self)\n self.reference.children = [\n c for c in self.reference.children if id(c) != self_id\n ]\n self.reference = reference\n if reference:\n reference.children.append(self)\n\n def remove_reference(self) -> None:\n self.replace_reference(None)\n\n @property\n def module_name(self) -> Optional[str]:\n if self.reference and isinstance(self.reference.source, Modular):\n return self.reference.source.module_name\n return None # pragma: no cover\n\n @property\n def full_name(self) -> str:\n module_name = self.module_name\n if module_name:\n return f'{module_name}.{self.reference.short_name}' # type: ignore\n return self.reference.short_name # type: ignore\n\n @property\n def all_data_types(self) -> Iterator['DataType']:\n for data_type in self.data_types:\n yield from data_type.all_data_types\n yield self\n\n @property\n def all_imports(self) -> Iterator[Import]:\n for data_type in self.data_types:\n yield from data_type.all_imports\n yield from self.imports\n\n @property\n def imports(self) -> Iterator[Import]:\n if self.import_:\n yield self.import_\n imports: Tuple[Tuple[bool, Import], ...] = (\n (self.is_optional and not self.use_union_operator, IMPORT_OPTIONAL),\n (len(self.data_types) > 1 and not self.use_union_operator, IMPORT_UNION),\n )\n if any(self.literals):\n import_literal = (\n IMPORT_LITERAL\n if self.python_version.has_literal_type\n else IMPORT_LITERAL_BACKPORT\n )\n imports = (\n *imports,\n (any(self.literals), import_literal),\n )\n\n if self.use_generic_container:\n if self.use_standard_collections:\n imports = (\n *imports,\n (self.is_list, IMPORT_ABC_SEQUENCE),\n (self.is_set, IMPORT_ABC_SET),\n (self.is_dict, IMPORT_ABC_MAPPING),\n )\n else:\n imports = (\n *imports,\n (self.is_list, IMPORT_SEQUENCE),\n (self.is_set, IMPORT_FROZEN_SET),\n (self.is_dict, IMPORT_MAPPING),\n )\n elif not self.use_standard_collections:\n imports = (\n *imports,\n (self.is_list, IMPORT_LIST),\n (self.is_set, IMPORT_SET),\n (self.is_dict, IMPORT_DICT),\n )\n for field, import_ in imports:\n if field and import_ != self.import_:\n yield import_\n\n if self.dict_key:\n yield from self.dict_key.imports\n\n def __init__(self, **values: Any) -> None:\n if not TYPE_CHECKING:\n super().__init__(**values)\n\n for type_ in self.data_types:\n if type_.type == ANY and type_.is_optional:\n if any(t for t in self.data_types if t.type != ANY): # pragma: no cover\n self.is_optional = True\n self.data_types = [\n t\n for t in self.data_types\n if not (t.type == ANY and t.is_optional)\n ]\n break # pragma: no cover\n\n for data_type in self.data_types:\n if data_type.reference or data_type.data_types:\n data_type.parent = self\n\n if self.reference:\n self.reference.children.append(self)\n\n @property\n def type_hint(self) -> str:\n type_: Optional[str] = self.alias or self.type\n if not type_:\n if self.is_union:\n data_types: List[str] = []\n for data_type in self.data_types:\n data_type_type = data_type.type_hint\n if data_type_type in data_types: # 
pragma: no cover\n continue\n data_types.append(data_type_type)\n if NONE in data_types:\n data_types = [d for d in data_types if d != NONE]\n self.is_optional = True\n if len(data_types) == 1:\n type_ = data_types[0]\n else:\n if self.use_union_operator:\n type_ = UNION_OPERATOR_DELIMITER.join(data_types)\n else:\n type_ = f'{UNION_PREFIX}{UNION_DELIMITER.join(data_types)}]'\n elif len(self.data_types) == 1:\n type_ = self.data_types[0].type_hint\n elif self.literals:\n type_ = f\"{LITERAL}[{', '.join(repr(literal) for literal in self.literals)}]\"\n else:\n if self.reference:\n type_ = self.reference.short_name\n else:\n # TODO support strict Any\n # type_ = 'Any'\n type_ = ''\n if self.reference:\n source = self.reference.source\n if isinstance(source, Nullable) and source.nullable:\n self.is_optional = True\n if self.reference and self.python_version == PythonVersion.PY_36:\n type_ = f\"'{type_}'\"\n if self.is_list:\n if self.use_generic_container:\n list_ = SEQUENCE\n elif self.use_standard_collections:\n list_ = STANDARD_LIST\n else:\n list_ = LIST\n type_ = f'{list_}[{type_}]' if type_ else list_\n elif self.is_set:\n if self.use_generic_container:\n set_ = FROZEN_SET\n elif self.use_standard_collections:\n set_ = STANDARD_SET\n else:\n set_ = SET\n type_ = f'{set_}[{type_}]' if type_ else set_\n elif self.is_dict:\n if self.use_generic_container:\n dict_ = MAPPING\n elif self.use_standard_collections:\n dict_ = STANDARD_DICT\n else:\n dict_ = DICT\n if self.dict_key or type_:\n key = self.dict_key.type_hint if self.dict_key else STR\n type_ = f'{dict_}[{key}, {type_ or ANY}]'\n else: # pragma: no cover\n type_ = dict_\n if self.is_optional and type_ != ANY:\n return get_optional_type(type_, self.use_union_operator)\n elif self.is_func:\n if self.kwargs:\n kwargs: str = ', '.join(f'{k}={v}' for k, v in self.kwargs.items())\n return f'{type_}({kwargs})'\n return f'{type_}()'\n return type_\n\n @property\n def is_union(self) -> bool:\n return len(self.data_types) > 1\n\n\nDataType.model_rebuild()\n\nDataTypeT = TypeVar('DataTypeT', bound=DataType)\n\n\nclass EmptyDataType(DataType):\n pass\n\n\nclass Types(Enum):\n integer = auto()\n int32 = auto()\n int64 = auto()\n number = auto()\n float = auto()\n double = auto()\n decimal = auto()\n time = auto()\n string = auto()\n byte = auto()\n binary = auto()\n date = auto()\n date_time = auto()\n password = auto()\n email = auto()\n uuid = auto()\n uuid1 = auto()\n uuid2 = auto()\n uuid3 = auto()\n uuid4 = auto()\n uuid5 = auto()\n uri = auto()\n hostname = auto()\n ipv4 = auto()\n ipv4_network = auto()\n ipv6 = auto()\n ipv6_network = auto()\n boolean = auto()\n object = auto()\n null = auto()\n array = auto()\n any = auto()\n\n\nclass DataTypeManager(ABC):\n def __init__(\n self,\n python_version: PythonVersion = PythonVersion.PY_37,\n use_standard_collections: bool = False,\n use_generic_container_types: bool = False,\n strict_types: Optional[Sequence[StrictTypes]] = None,\n use_non_positive_negative_number_constrained_types: bool = False,\n use_union_operator: bool = False,\n ) -> None:\n self.python_version = python_version\n self.use_standard_collections: bool = use_standard_collections\n self.use_generic_container_types: bool = use_generic_container_types\n self.strict_types: Sequence[StrictTypes] = strict_types or ()\n self.use_non_positive_negative_number_constrained_types: bool = (\n use_non_positive_negative_number_constrained_types\n )\n self.use_union_operator: bool = use_union_operator\n\n if (\n use_generic_container_types 
and python_version == PythonVersion.PY_36\n ): # pragma: no cover\n raise Exception(\n 'use_generic_container_types can not be used with target_python_version 3.6.\\n'\n ' The version will be not supported in a future version'\n )\n\n if TYPE_CHECKING:\n self.data_type: Type[DataType]\n else:\n self.data_type: Type[DataType] = create_model(\n 'ContextDataType',\n python_version=(PythonVersion, python_version),\n use_standard_collections=(bool, use_standard_collections),\n use_generic_container=(bool, use_generic_container_types),\n use_union_operator=(bool, use_union_operator),\n __base__=DataType,\n )\n\n @abstractmethod\n def get_data_type(self, types: Types, **kwargs: Any) -> DataType:\n raise NotImplementedError\n\n def get_data_type_from_full_path(\n self, full_path: str, is_custom_type: bool\n ) -> DataType:\n return self.data_type.from_import(\n Import.from_full_path(full_path), is_custom_type=is_custom_type\n )\n\n def get_data_type_from_value(self, value: Any) -> DataType:\n type_: Optional[Types] = None\n if isinstance(value, str):\n type_ = Types.string\n elif isinstance(value, bool):\n type_ = Types.boolean\n elif isinstance(value, int):\n type_ = Types.integer\n elif isinstance(value, float):\n type_ = Types.float\n elif isinstance(value, dict):\n return self.data_type.from_import(IMPORT_DICT)\n elif isinstance(value, list):\n return self.data_type.from_import(IMPORT_LIST)\n else:\n type_ = Types.any\n return self.get_data_type(type_)\n", "path": "datamodel_code_generator/types.py" } ]
diff --git a/datamodel_code_generator/types.py b/datamodel_code_generator/types.py index 04671484d..2d16919e1 100644 --- a/datamodel_code_generator/types.py +++ b/datamodel_code_generator/types.py @@ -129,7 +129,7 @@ def __get_pydantic_core_schema__( ) return core_schema.json_or_python_schema( - json_schema=core_schema.no_info_plain_validator_function(cls.validate), + json_schema=from_int_schema, python_schema=core_schema.union_schema( [ # check if it's an instance first before doing any further work
Impossible to get the json schema of a json schema object **Describe the bug** ```python from datamodel_code_generator.parser.jsonschema import JsonSchemaObject if __name__ == "__main__": print(JsonSchemaObject.model_json_schema()) ``` Raises ``` pydantic.errors.PydanticInvalidForJsonSchema: Cannot generate a JsonSchema for core_schema.PlainValidatorFunctionSchema ({'type': 'no-info', 'function': <bound method UnionIntFloat.validate of <class 'datamodel_code_generator.types.UnionIntFloat'>>}) ``` **To Reproduce** See code above **Expected behavior** The json schema of a json schema object. **Version:** - OS: Linux 6.2.0 - Python version: 3.11.4 - datamodel-code-generator version: 0.22.1
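As a concrete illustration of the one-line `json_or_python_schema` change above, here is a hedged, self-contained sketch against pydantic v2's `pydantic_core` API. The class and model names (`UnionIntFloatLike`, `Model`) are invented for the example; the point is that putting the int/float chain schema, rather than a bare `no_info_plain_validator_function`, on the JSON side gives the schema generator something it can translate, so `model_json_schema()` no longer raises `PydanticInvalidForJsonSchema`.

```python
# Minimal sketch (illustrative names) of the core-schema layout after the fix.
from typing import Any, Union

from pydantic import BaseModel, GetCoreSchemaHandler
from pydantic_core import core_schema


class UnionIntFloatLike:
    def __init__(self, value: Union[int, float]) -> None:
        self.value = value

    @classmethod
    def _validate(cls, v: Any) -> 'UnionIntFloatLike':
        return v if isinstance(v, UnionIntFloatLike) else cls(v)

    @classmethod
    def __get_pydantic_core_schema__(
        cls, _source: Any, _handler: GetCoreSchemaHandler
    ) -> core_schema.CoreSchema:
        from_number = core_schema.chain_schema([
            core_schema.union_schema(
                [core_schema.int_schema(), core_schema.float_schema()]),
            core_schema.no_info_plain_validator_function(cls._validate),
        ])
        return core_schema.json_or_python_schema(
            # The fix: a schema-translatable chain on the JSON side instead
            # of a bare plain-validator function.
            json_schema=from_number,
            python_schema=core_schema.union_schema(
                [core_schema.is_instance_schema(cls), from_number]),
            serialization=core_schema.plain_serializer_function_ser_schema(
                lambda inst: inst.value),
        )


class Model(BaseModel):
    maximum: UnionIntFloatLike


if __name__ == '__main__':
    # With the chain schema on the JSON side this prints a schema
    # (anyOf integer/number for `maximum`) instead of raising
    # PydanticInvalidForJsonSchema.
    print(Model.model_json_schema())
```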
keras-team__keras-13342
[ { "content": "\"\"\"Training-related utilities.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport inspect\nimport collections\nimport copy\nimport numpy as np\nimport six\nimport warnings\nfrom collections import OrderedDict\n\nfrom .. import backend as K\nfrom .. import losses\nfrom .. import metrics as metrics_module\nfrom ..utils import Sequence\nfrom ..utils import generic_utils\nfrom ..utils import losses_utils\n\n\ndef standardize_single_array(x):\n if x is None:\n return None\n elif K.is_tensor(x):\n shape = K.int_shape(x)\n if shape is None or shape[0] is None:\n raise ValueError(\n 'When feeding symbolic tensors to a model, we expect the '\n 'tensors to have a static batch size. '\n 'Got tensor with shape: %s' % str(shape))\n return x\n elif x.ndim == 1:\n x = np.expand_dims(x, 1)\n return x\n\n\ndef standardize_input_data(data,\n names,\n shapes=None,\n check_batch_axis=True,\n exception_prefix=''):\n \"\"\"Normalizes inputs and targets provided by users.\n\n Users may pass data as a list of arrays, dictionary of arrays,\n or as a single array. We normalize this to an ordered list of\n arrays (same order as `names`), while checking that the provided\n arrays have shapes that match the network's expectations.\n\n # Arguments\n data: User-provided input data (polymorphic).\n names: List of expected array names.\n shapes: Optional list of expected array shapes.\n check_batch_axis: Boolean; whether to check that\n the batch axis of the arrays matches the expected\n value found in `shapes`.\n exception_prefix: String prefix used for exception formatting.\n\n # Returns\n List of standardized input arrays (one array per model input).\n\n # Raises\n ValueError: in case of improperly formatted user-provided data.\n \"\"\"\n if not names:\n if data is not None and hasattr(data, '__len__') and len(data):\n raise ValueError('Error when checking model ' +\n exception_prefix + ': '\n 'expected no data, but got:', data)\n return []\n if data is None:\n return [None for _ in range(len(names))]\n\n if isinstance(data, dict):\n try:\n data = [\n data[x].values\n if data[x].__class__.__name__ == 'DataFrame' else data[x]\n for x in names\n ]\n except KeyError as e:\n raise ValueError('No data provided for \"' + e.args[0] +\n '\". Need data '\n 'for each key in: ' + str(names))\n elif isinstance(data, list):\n if isinstance(data[0], list):\n data = [np.asarray(d) for d in data]\n elif len(names) == 1 and isinstance(data[0], (float, int)):\n data = [np.asarray(data)]\n else:\n data = [\n x.values if x.__class__.__name__ == 'DataFrame'\n else x for x in data\n ]\n else:\n data = data.values if data.__class__.__name__ == 'DataFrame' else data\n data = [data]\n data = [standardize_single_array(x) for x in data]\n\n if len(data) != len(names):\n if data and hasattr(data[0], 'shape'):\n raise ValueError(\n 'Error when checking model ' + exception_prefix +\n ': the list of Numpy arrays that you are passing to '\n 'your model is not the size the model expected. '\n 'Expected to see ' + str(len(names)) + ' array(s), '\n 'but instead got the following list of ' +\n str(len(data)) + ' arrays: ' + str(data)[:200] + '...')\n elif len(names) > 1:\n raise ValueError(\n 'Error when checking model ' + exception_prefix +\n ': you are passing a list as input to your model, '\n 'but the model expects a list of ' + str(len(names)) +\n ' Numpy arrays instead. 
'\n 'The list you passed was: ' + str(data)[:200])\n elif len(data) == 1 and not hasattr(data[0], 'shape'):\n raise TypeError('Error when checking model ' + exception_prefix +\n ': data should be a Numpy array, or list/dict of '\n 'Numpy arrays. Found: ' + str(data)[:200] + '...')\n elif len(names) == 1:\n data = [np.asarray(data)]\n\n # Check shapes compatibility.\n if shapes:\n for i in range(len(names)):\n if shapes[i] is not None and not K.is_tensor(data[i]):\n data_shape = data[i].shape\n shape = shapes[i]\n if data[i].ndim != len(shape):\n raise ValueError(\n 'Error when checking ' + exception_prefix +\n ': expected ' + names[i] + ' to have ' +\n str(len(shape)) + ' dimensions, but got array '\n 'with shape ' + str(data_shape))\n if not check_batch_axis:\n data_shape = data_shape[1:]\n shape = shape[1:]\n for dim, ref_dim in zip(data_shape, shape):\n if ref_dim != dim and ref_dim:\n raise ValueError(\n 'Error when checking ' + exception_prefix +\n ': expected ' + names[i] + ' to have shape ' +\n str(shape) + ' but got array with shape ' +\n str(data_shape))\n return data\n\n\ndef standardize_sample_or_class_weights(x_weight,\n output_names,\n weight_type):\n \"\"\"Maps `sample_weight` or `class_weight` to model outputs.\n\n # Arguments\n x_weight: User-provided `sample_weight` or `class_weight` argument.\n output_names: List of output names (strings) in the model.\n weight_type: A string used purely for exception printing.\n\n # Returns\n A list of `sample_weight` or `class_weight` where there are exactly\n one element per model output.\n\n # Raises\n ValueError: In case of invalid user-provided argument.\n \"\"\"\n if x_weight is None or len(x_weight) == 0:\n return [None for _ in output_names]\n if len(output_names) == 1:\n if isinstance(x_weight, list) and len(x_weight) == 1:\n return x_weight\n if isinstance(x_weight, dict) and output_names[0] in x_weight:\n return [x_weight[output_names[0]]]\n else:\n return [x_weight]\n if isinstance(x_weight, list):\n if len(x_weight) != len(output_names):\n raise ValueError('Provided `' + weight_type + '` was a list of ' +\n str(len(x_weight)) +\n ' elements, but the model has ' +\n str(len(output_names)) + ' outputs. '\n 'You should provide one `' + weight_type + '`'\n 'array per model output.')\n return x_weight\n if isinstance(x_weight, dict):\n x_weights = []\n for name in output_names:\n x_weights.append(x_weight.get(name))\n return x_weights\n else:\n raise TypeError('The model has multiple outputs, so `' +\n weight_type + '` '\n 'should be either a list or a dict. 
'\n 'Provided `' + weight_type +\n '` type not understood: ' +\n str(x_weight))\n\n\ndef standardize_class_weights(class_weight, output_names):\n return standardize_sample_or_class_weights(class_weight,\n output_names,\n 'class_weight')\n\n\ndef standardize_sample_weights(sample_weight, output_names):\n return standardize_sample_or_class_weights(sample_weight,\n output_names,\n 'sample_weight')\n\n\ndef check_array_length_consistency(inputs, targets, weights=None):\n \"\"\"Checks if batch axes are the same for Numpy arrays.\n\n # Arguments\n inputs: list of Numpy arrays of inputs.\n targets: list of Numpy arrays of targets.\n weights: list of Numpy arrays of sample weights.\n\n # Raises\n ValueError: in case of incorrectly formatted data.\n \"\"\"\n def set_of_lengths(x):\n # return a set with the variation between\n # different shapes, with None => 0\n if x is None:\n return {0}\n else:\n return set([0 if y is None else int(y.shape[0]) for y in x])\n\n set_x = set_of_lengths(inputs)\n set_y = set_of_lengths(targets)\n set_w = set_of_lengths(weights)\n if len(set_x) > 1:\n raise ValueError('All input arrays (x) should have '\n 'the same number of samples. Got array shapes: ' +\n str([x.shape for x in inputs]))\n if len(set_y) > 1:\n raise ValueError('All target arrays (y) should have '\n 'the same number of samples. Got array shapes: ' +\n str([y.shape for y in targets]))\n if set_x and set_y and list(set_x)[0] != list(set_y)[0]:\n raise ValueError('Input arrays should have '\n 'the same number of samples as target arrays. '\n 'Found ' + str(list(set_x)[0]) + ' input samples '\n 'and ' + str(list(set_y)[0]) + ' target samples.')\n if len(set_w) > 1:\n raise ValueError('All sample_weight arrays should have '\n 'the same number of samples. Got array shapes: ' +\n str([w.shape for w in weights]))\n if set_y and set_w and list(set_y)[0] != list(set_w)[0]:\n raise ValueError('Sample_weight arrays should have '\n 'the same number of samples as target arrays. Got ' +\n str(list(set_y)[0]) + ' input samples and ' +\n str(list(set_w)[0]) + ' target samples.')\n\n\ndef check_loss_and_target_compatibility(targets, loss_fns, output_shapes):\n \"\"\"Does validation on the compatibility of targets and loss functions.\n\n This helps prevent users from using loss functions incorrectly. This check\n is purely for UX purposes.\n\n # Arguments\n targets: list of Numpy arrays of targets.\n loss_fns: list of loss functions.\n output_shapes: list of shapes of model outputs.\n\n # Raises\n ValueError: if a loss function or target array\n is incompatible with an output.\n \"\"\"\n key_loss_fns = {\n losses.mean_squared_error, losses.binary_crossentropy,\n losses.categorical_crossentropy\n }\n key_loss_classes = (losses.MeanSquaredError, losses.BinaryCrossentropy,\n losses.CategoricalCrossentropy)\n for y, loss, shape in zip(targets, loss_fns, output_shapes):\n if y is None or loss is None:\n continue\n if losses.is_categorical_crossentropy(loss):\n if y.shape[-1] == 1:\n raise ValueError(\n 'You are passing a target array of shape ' + str(y.shape) +\n ' while using as loss `categorical_crossentropy`. '\n '`categorical_crossentropy` expects '\n 'targets to be binary matrices (1s and 0s) '\n 'of shape (samples, classes). 
'\n 'If your targets are integer classes, '\n 'you can convert them to the expected format via:\\n'\n '```\\n'\n 'from keras.utils import to_categorical\\n'\n 'y_binary = to_categorical(y_int)\\n'\n '```\\n'\n '\\n'\n 'Alternatively, you can use the loss function '\n '`sparse_categorical_crossentropy` instead, '\n 'which does expect integer targets.')\n is_loss_wrapper = isinstance(loss, losses.LossFunctionWrapper)\n if (isinstance(loss, key_loss_classes) or (is_loss_wrapper and\n (loss.fn in key_loss_fns))):\n for target_dim, out_dim in zip(y.shape[1:], shape[1:]):\n if out_dim is not None and target_dim != out_dim:\n loss_name = loss.name\n if loss_name is None:\n loss_type = loss.fn if is_loss_wrapper else type(loss)\n loss_name = loss_type.__name__\n raise ValueError(\n 'A target array with shape ' + str(y.shape) +\n ' was passed for an output of shape ' + str(shape) +\n ' while using as loss `' + loss_name + '`. '\n 'This loss expects targets to have the same shape '\n 'as the output.')\n\n\ndef check_generator_arguments(y=None, sample_weight=None,\n validation_split=None):\n \"\"\"Validates arguments passed when using a generator.\"\"\"\n if y is not None:\n raise ValueError('`y` argument is not supported when data is'\n 'a generator or Sequence instance. Instead pass targets'\n ' as the second element of the generator.')\n if sample_weight is not None:\n raise ValueError('`sample_weight` argument is not supported when data is'\n 'a generator or Sequence instance. Instead pass sample'\n ' weights as the third element of the generator.')\n if validation_split:\n raise ValueError('If your data is in the form of a Python generator, '\n 'you cannot use `validation_split`.')\n\n\ndef batch_shuffle(index_array, batch_size):\n \"\"\"Shuffles an array in a batch-wise fashion.\n\n Useful for shuffling HDF5 arrays\n (where one cannot access arbitrary indices).\n\n # Arguments\n index_array: array of indices to be shuffled.\n batch_size: integer.\n\n # Returns\n The `index_array` array, shuffled in a batch-wise fashion.\n \"\"\"\n batch_count = int(len(index_array) / batch_size)\n # to reshape we need to be cleanly divisible by batch size\n # we stash extra items and reappend them after shuffling\n last_batch = index_array[batch_count * batch_size:]\n index_array = index_array[:batch_count * batch_size]\n index_array = index_array.reshape((batch_count, batch_size))\n np.random.shuffle(index_array)\n index_array = index_array.flatten()\n return np.append(index_array, last_batch)\n\n\ndef make_batches(size, batch_size):\n \"\"\"Returns a list of batch indices (tuples of indices).\n\n # Arguments\n size: Integer, total size of the data to slice into batches.\n batch_size: Integer, batch size.\n\n # Returns\n A list of tuples of array indices.\n \"\"\"\n num_batches = (size + batch_size - 1) // batch_size # round up\n return [(i * batch_size, min(size, (i + 1) * batch_size))\n for i in range(num_batches)]\n\n\ndef weighted_masked_objective(fn):\n \"\"\"Adds support for masking and sample-weighting to an objective function.\n\n It transforms an objective function `fn(y_true, y_pred)`\n into a sample-weighted, cost-masked objective function\n `fn(y_true, y_pred, weights, mask)`.\n\n # Arguments\n fn: The objective function to wrap,\n with signature `fn(y_true, y_pred)`.\n\n # Returns\n A function with signature `fn(y_true, y_pred, weights, mask)`.\n \"\"\"\n if fn is None:\n return None\n\n def weighted(y_true, y_pred, weights, mask=None):\n \"\"\"Wrapper function.\n\n # Arguments\n y_true: `y_true` 
argument of `fn`.\n y_pred: `y_pred` argument of `fn`.\n weights: Weights tensor.\n mask: Mask tensor.\n\n # Returns\n Scalar tensor.\n \"\"\"\n # score_array has ndim >= 2\n score_array = fn(y_true, y_pred)\n if mask is not None:\n # Cast the mask to floatX to avoid float64 upcasting in Theano\n mask = K.cast(mask, K.floatx())\n # mask should have the same shape as score_array\n score_array *= mask\n # the loss per batch should be proportional\n # to the number of unmasked samples.\n score_array /= K.mean(mask) + K.epsilon()\n\n # apply sample weighting\n if weights is not None:\n # reduce score_array to same ndim as weight array\n ndim = K.ndim(score_array)\n weight_ndim = K.ndim(weights)\n score_array = K.mean(score_array,\n axis=list(range(weight_ndim, ndim)))\n score_array *= weights\n score_array /= K.mean(K.cast(K.not_equal(weights, 0), K.floatx()))\n return K.mean(score_array)\n return weighted\n\n\ndef standardize_weights(y,\n sample_weight=None,\n class_weight=None,\n sample_weight_mode=None):\n \"\"\"Performs sample weight validation and standardization.\n\n Everything gets normalized to a single sample-wise (or timestep-wise)\n weight array. If both `sample_weights` and `class_weights` are provided,\n the weights are multiplied together.\n\n # Arguments\n y: Numpy array of model targets to be weighted.\n sample_weight: User-provided `sample_weight` argument.\n class_weight: User-provided `class_weight` argument.\n sample_weight_mode: One of `None` or `\"temporal\"`.\n `\"temporal\"` indicated that we expect 2D weight data\n that will be applied to the last 2 dimensions of\n the targets (i.e. we are weighting timesteps, not samples).\n\n # Returns\n A Numpy array of target weights, one entry per sample to weight.\n\n # Raises\n ValueError: In case of invalid user-provided arguments.\n \"\"\"\n if sample_weight_mode is not None:\n if sample_weight_mode != 'temporal':\n raise ValueError('\"sample_weight_mode '\n 'should be None or \"temporal\". '\n 'Found: ' + str(sample_weight_mode))\n if len(y.shape) < 3:\n raise ValueError('Found a sample_weight array for '\n 'an input with shape ' +\n str(y.shape) + '. '\n 'Timestep-wise sample weighting (use of '\n 'sample_weight_mode=\"temporal\") is restricted to '\n 'outputs that are at least 3D, i.e. that have '\n 'a time dimension.')\n if sample_weight is not None and len(sample_weight.shape) != 2:\n raise ValueError('Found a sample_weight array with shape ' +\n str(sample_weight.shape) + '. '\n 'In order to use timestep-wise sample weighting, '\n 'you should pass a 2D sample_weight array.')\n else:\n if sample_weight is not None and len(sample_weight.shape) != 1:\n raise ValueError('Found a sample_weight array with shape ' +\n str(sample_weight.shape) + '. '\n 'In order to use timestep-wise sample weights, '\n 'you should specify '\n 'sample_weight_mode=\"temporal\" '\n 'in compile(). If you just mean to use '\n 'sample-wise weights, make sure your '\n 'sample_weight array is 1D.')\n\n if sample_weight is not None:\n if len(sample_weight.shape) > len(y.shape):\n raise ValueError('Found a sample_weight with shape' +\n str(sample_weight.shape) + '.'\n 'Expected sample_weight with rank '\n 'less than or equal to ' + str(len(y.shape)))\n\n if y.shape[:sample_weight.ndim] != sample_weight.shape:\n raise ValueError('Found a sample_weight array with shape ' +\n str(sample_weight.shape) +\n ' for an input with shape ' +\n str(y.shape) + '. 
'\n 'sample_weight cannot be broadcast.')\n\n class_sample_weight = None\n if isinstance(class_weight, dict):\n if len(y.shape) > 2:\n raise ValueError('`class_weight` not supported for '\n '3+ dimensional targets.')\n if len(y.shape) == 2:\n if y.shape[1] > 1:\n y_classes = np.argmax(y, axis=1)\n elif y.shape[1] == 1:\n y_classes = np.reshape(y, y.shape[0])\n else:\n y_classes = y\n\n class_sample_weight = np.asarray(\n [class_weight[cls] for cls in y_classes if cls in class_weight])\n\n if len(class_sample_weight) != len(y_classes):\n # subtract the sets to pick all missing classes\n existing_classes = set(y_classes)\n existing_class_weight = set(class_weight.keys())\n raise ValueError('`class_weight` must contain '\n 'all classes in the data.'\n ' The classes %s exist in the data but not in '\n '`class_weight`.'\n % (existing_classes - existing_class_weight))\n\n if sample_weight is not None and class_sample_weight is not None:\n return sample_weight * class_sample_weight\n if sample_weight is not None:\n return sample_weight\n if class_sample_weight is not None:\n return class_sample_weight\n\n # Everything has weight 1 by default.\n if sample_weight_mode is None:\n return np.ones((y.shape[0],), dtype=K.floatx())\n else:\n return np.ones((y.shape[0], y.shape[1]), dtype=K.floatx())\n\n\ndef check_num_samples(ins,\n batch_size=None,\n steps=None,\n steps_name='steps'):\n \"\"\"Checks the number of samples provided for training and evaluation.\n\n The number of samples is not defined when running with `steps`,\n in which case the number of samples is set to `None`.\n\n # Arguments\n ins: List of tensors to be fed to the Keras function.\n batch_size: Integer batch size or `None` if not defined.\n steps: Total number of steps (batches of samples)\n before declaring `predict_loop` finished.\n Ignored with the default value of `None`.\n steps_name: The public API's parameter name for `steps`.\n\n # Raises\n ValueError: when `steps` is `None` and the attribute `ins.shape`\n does not exist. Also raises ValueError when `steps` is not `None`\n and `batch_size` is not `None` because they are mutually\n exclusive.\n\n # Returns\n When `steps` is `None`, returns the number of samples to be\n processed based on the size of the first dimension of the\n first input Numpy array. When `steps` is not `None` and\n `batch_size` is `None`, returns `None`.\n\n # Raises\n ValueError: In case of invalid arguments.\n \"\"\"\n if steps is not None and batch_size is not None:\n raise ValueError(\n 'If ' + steps_name + ' is set, the `batch_size` must be None.')\n\n if not ins or any(K.is_tensor(x) for x in ins):\n if steps is None:\n raise ValueError(\n 'If your data is in the form of symbolic tensors, '\n 'you should specify the `' + steps_name + '` argument '\n '(instead of the `batch_size` argument, '\n 'because symbolic tensors are expected to produce '\n 'batches of input data).')\n return None\n\n if hasattr(ins[0], 'shape'):\n return int(ins[0].shape[0])\n return None # Edge case where ins == [static_learning_phase]\n\n\ndef iter_sequence_infinite(seq):\n \"\"\"Iterate indefinitely over a Sequence.\n\n # Arguments\n seq: Sequence object\n\n # Returns\n Generator yielding batches.\n \"\"\"\n while True:\n for item in seq:\n yield item\n\n\ndef is_sequence(seq):\n \"\"\"Determine if an object follows the Sequence API.\n\n # Arguments\n seq: a possible Sequence object\n\n # Returns\n boolean, whether the object follows the Sequence API.\n \"\"\"\n # TODO Dref360: Decide which pattern to follow. 
First needs a new TF Version.\n return (getattr(seq, 'use_sequence_api', False)\n or set(dir(Sequence())).issubset(set(dir(seq) + ['use_sequence_api'])))\n\n\ndef is_generator_or_sequence(x):\n \"\"\"Check if `x` is a Keras generator type.\"\"\"\n return inspect.isgenerator(x) or is_sequence(x)\n\n\ndef should_run_validation(validation_freq, epoch):\n \"\"\"Checks if validation should be run this epoch.\n\n # Arguments\n validation_freq: Integer or list. If an integer, specifies how many training\n epochs to run before a new validation run is performed. If a list,\n specifies the epochs on which to run validation.\n epoch: Integer, the number of the training epoch just completed.\n\n # Returns\n Bool, True if validation should be run.\n\n # Raises\n ValueError: if `validation_freq` is an Integer and less than 1, or if\n it is neither an Integer nor a Sequence.\n \"\"\"\n # `epoch` is 0-indexed internally but 1-indexed in the public API.\n one_indexed_epoch = epoch + 1\n\n if isinstance(validation_freq, int):\n if validation_freq < 1:\n raise ValueError('`validation_freq` can not be less than 1.')\n return one_indexed_epoch % validation_freq == 0\n\n if not isinstance(validation_freq, collections.Container):\n raise ValueError('`validation_freq` must be an Integer or '\n '`collections.Container` (e.g. list, tuple, etc.)')\n return one_indexed_epoch in validation_freq\n\n\ndef get_static_batch_size(layer):\n \"\"\"Gets the static batch size of a Layer.\n\n # Arguments\n layer: a `Layer` instance.\n\n # Returns\n The static batch size of a Layer.\n \"\"\"\n batch_input_shape, _ = get_input_shape_and_dtype(layer)\n if batch_input_shape is not None:\n return batch_input_shape[0]\n return None\n\n\ndef get_input_shape_and_dtype(layer):\n \"\"\"Retrieves input shape and input dtype of layer if applicable.\n\n # Arguments\n layer: Layer (or model) instance.\n\n # Returns\n Tuple (input_shape, input_dtype). 
Both could be None if the layer\n does not have a defined input shape.\n\n # Raises\n ValueError: in case an empty Sequential or Functional model is passed.\n \"\"\"\n def _is_graph_model(layer):\n return ((hasattr(layer, '_is_graph_network') and layer._is_graph_network) or\n layer.__class__.__name__ == 'Sequential')\n\n # In case of nested models: recover the first layer\n # of the deepest model to infer input shape and dtype.\n # Subclassed Models may not have been built so can't be checked.\n while _is_graph_model(layer):\n if not layer.layers:\n raise ValueError('An empty Model cannot be used as a Layer.')\n layer = layer.layers[0]\n\n if hasattr(layer, '_batch_input_shape'):\n return layer._batch_input_shape, layer.dtype\n return None, None\n\n\ndef get_loss_function(loss):\n \"\"\"Returns the loss corresponding to the loss input in `compile` API.\"\"\"\n if loss is None or isinstance(loss, losses.Loss):\n return loss\n\n # Deserialize loss configuration, if needed.\n if isinstance(loss, collections.Mapping):\n loss = losses.get(loss)\n\n # Custom callable class.\n if callable(loss) and not hasattr(loss, '__name__'):\n return loss\n\n # Wrap loss function with signature `(y_true, y_pred, **kwargs)`\n # in `LossFunctionWrapper` class.\n loss_fn = losses.get(loss)\n\n # For losses which are given as strings/functions in the compile API,\n # we always set the loss reduction type to be `SUM_OVER_BATCH_SIZE`..\n return losses.LossFunctionWrapper(\n loss_fn,\n name=loss_fn.__name__,\n reduction=losses_utils.Reduction.SUM_OVER_BATCH_SIZE)\n\n\ndef get_output_sample_weight_and_mode(skip_target_weighing_indices,\n sample_weight_mode, output_name,\n output_index):\n \"\"\"Returns the sample weight and weight mode for a single output.\"\"\"\n if output_index in skip_target_weighing_indices:\n return None, None\n\n if sample_weight_mode == 'temporal':\n shape = [None, None]\n mode = 'temporal'\n else:\n shape = [None]\n mode = None\n weight = K.placeholder(\n shape=shape,\n name=output_name + '_sample_weights')\n return weight, mode\n\n\ndef prepare_sample_weights(output_names, sample_weight_mode,\n skip_target_weighing_indices):\n \"\"\"Prepares sample weights for the model.\n\n # Arguments\n output_names: List of model output names.\n sample_weight_mode: sample weight mode user input passed from compile API.\n skip_target_weighing_indices: Indices of output for which sample weights\n should be skipped.\n\n # Returns\n A pair of list of sample weights and sample weight modes\n (one for each output).\n\n # Raises\n ValueError: In case of invalid `sample_weight_mode` input.\n \"\"\"\n sample_weights = []\n sample_weight_modes = []\n if isinstance(sample_weight_mode, dict):\n unknown_output = set(sample_weight_mode.keys()) - set(output_names)\n if unknown_output:\n raise ValueError(\n 'Unknown entry in '\n 'sample_weight_mode dictionary: \"' + str(unknown_output) +\n '\". 
Only expected the following keys: ' + str(output_names))\n for i, name in enumerate(output_names):\n if (i not in skip_target_weighing_indices and\n name not in sample_weight_mode):\n raise ValueError(\n 'Output missing from sample_weight_modes dictionary')\n weight, mode = get_output_sample_weight_and_mode(\n skip_target_weighing_indices,\n sample_weight_mode.get(name),\n name,\n i)\n sample_weights.append(weight)\n sample_weight_modes.append(mode)\n elif isinstance(sample_weight_mode, list):\n if len(sample_weight_mode) != len(output_names):\n raise ValueError('When passing a list as sample_weight_mode, '\n 'it should have one entry per model output. '\n 'The model has ' + str(len(output_names)) +\n ' outputs, but you passed ' +\n str(len(sample_weight_mode)) + 'sample_weight_modes')\n for i, name in enumerate(output_names):\n weight, mode = get_output_sample_weight_and_mode(\n skip_target_weighing_indices, sample_weight_mode[i], name, i)\n sample_weights.append(weight)\n sample_weight_modes.append(mode)\n else:\n for i, name in enumerate(output_names):\n weight, mode = get_output_sample_weight_and_mode(\n skip_target_weighing_indices, sample_weight_mode, name, i)\n sample_weights.append(weight)\n sample_weight_modes.append(mode)\n return sample_weights, sample_weight_modes\n\n\ndef prepare_loss_functions(loss, output_names):\n \"\"\"Converts loss to a list of loss functions.\n\n # Arguments\n loss: String (name of objective function), objective function or\n `Loss` instance. If the model has multiple outputs, you can use\n a different loss on each output by passing a dictionary or a\n list of losses. The loss value that will be minimized by the model\n will then be the sum of all individual losses.\n output_names: List of model output names.\n\n # Returns\n A list of loss objective functions.\n\n # Raises:\n ValueError: If loss is a dict with keys not in model output names,\n or if loss is a list with len not equal to model outputs.\n \"\"\"\n if isinstance(loss, collections.Mapping):\n generic_utils.check_for_unexpected_keys('loss', loss, output_names)\n loss_functions = []\n for name in output_names:\n if name not in loss:\n warnings.warn(\n 'Output {0} missing from loss dictionary. We assume '\n 'this was done on purpose. The fit and evaluate APIs will not '\n 'be expecting any data to be passed to {0}.'.format(name))\n loss_functions.append(get_loss_function(loss.get(name, None)))\n elif isinstance(loss, six.string_types):\n loss_functions = [get_loss_function(loss) for _ in output_names]\n elif isinstance(loss, collections.Sequence):\n if len(loss) != len(output_names):\n raise ValueError('When passing a list as loss, it should have one entry '\n 'per model outputs. The model has {} outputs, but you '\n 'passed loss={}'.format(len(output_names), loss))\n loss_functions = [get_loss_function(l) for l in loss]\n else:\n loss_functions = [get_loss_function(loss) for _ in range(len(output_names))]\n\n return loss_functions\n\n\ndef prepare_loss_weights(output_names, loss_weights=None):\n \"\"\"Converts loss weights to a list of loss weights.\n\n # Arguments\n output_names: List of model output names.\n loss_weights: Optional list or dictionary specifying scalar coefficients\n (Python floats) to weight the loss contributions of different model\n outputs. The loss value that will be minimized by the model will then be\n the *weighted sum* of all individual losses, weighted by the\n `loss_weights` coefficients. If a list, it is expected to have a 1:1\n mapping to the model's outputs. 
If a dict, it is expected to map\n output names (strings) to scalar coefficients.\n\n # Returns\n A list of loss weights of python floats.\n\n # Raises\n ValueError: If loss weight is a dict with key not in model output names,\n or if loss is a list with len not equal to model outputs.\n \"\"\"\n if loss_weights is None:\n weights_list = [1.] * len(output_names)\n elif isinstance(loss_weights, collections.Mapping):\n generic_utils.check_for_unexpected_keys('loss_weights', loss_weights,\n output_names)\n weights_list = [loss_weights.get(name, 1.) for name in output_names]\n elif isinstance(loss_weights, list):\n if len(loss_weights) != len(output_names):\n raise ValueError('When passing a list as loss_weights, '\n 'it should have one entry per model output. '\n 'The model has ' + str(len(output_names)) +\n ' outputs, but you passed loss_weights=' +\n str(loss_weights))\n weights_list = loss_weights\n else:\n raise TypeError('Could not interpret loss_weights argument: ' +\n str(loss_weights) + ' - expected a list of dicts.')\n\n return weights_list\n\n\ndef collect_per_output_metric_info(metrics,\n output_names,\n output_shapes,\n loss_fns,\n is_weighted=False):\n \"\"\"Maps metric names and functions to model outputs.\n\n # Arguments\n metrics: a list or a list of lists or a dict of metric functions.\n output_names: a list of the names (strings) of model outputs.\n output_shapes: a list of the shapes (strings) of model outputs.\n loss_fns: a list of the loss functions corresponding to the model outputs.\n is_weighted: Boolean indicating whether the given metrics are weighted.\n\n # Returns\n A list (one entry per model output) of dicts.\n For instance, if the model has 2 outputs, and for the first output\n we want to compute \"binary_accuracy\" and \"binary_crossentropy\",\n and just \"binary_accuracy\" for the second output,\n the list would look like: `[{\n 'acc': binary_accuracy(),\n 'ce': binary_crossentropy(),\n }, {\n 'acc': binary_accuracy(),\n }]`\n\n # Raises\n TypeError: if an incorrect type is passed for the `metrics` argument.\n \"\"\"\n if not metrics:\n return [{} for _ in output_names]\n\n if isinstance(metrics, list):\n any_sub_list = any(isinstance(m, list) for m in metrics)\n if any_sub_list:\n if len(metrics) != len(output_names):\n raise ValueError('When passing a list of lists as `metrics`, '\n 'it should have one entry per model output. '\n 'The model has ' + str(len(output_names)) +\n ' outputs, but you passed metrics=' + str(metrics))\n # User has provided a list of len = len(outputs).\n nested_metrics = [generic_utils.to_list(m) for m in metrics]\n else:\n # If it is a single list we then apply all metrics to all outputs.\n if len(output_names) > 1:\n nested_metrics = []\n for _ in output_names:\n nested_metrics.append(\n [metrics_module.clone_metric(m) for m in metrics])\n else:\n nested_metrics = [metrics]\n elif isinstance(metrics, collections.Mapping):\n generic_utils.check_for_unexpected_keys('metrics', metrics, output_names)\n nested_metrics = []\n for name in output_names:\n output_metrics = generic_utils.to_list(metrics.get(name, []))\n nested_metrics.append(output_metrics)\n else:\n raise TypeError('Type of `metrics` argument not understood. 
'\n 'Expected a list or dictionary, found: ' + str(metrics))\n\n per_output_metrics = []\n for i, metrics in enumerate(nested_metrics):\n metrics_dict = OrderedDict()\n for metric in metrics:\n metric_name = get_metric_name(metric, is_weighted)\n metric_fn = get_metric_function(\n metric, output_shape=output_shapes[i], loss_fn=loss_fns[i])\n\n # If the metric function is not stateful, we create a stateful version.\n if not isinstance(metric_fn, metrics_module.Metric):\n metric_fn = metrics_module.MeanMetricWrapper(\n metric_fn, name=metric_name)\n metrics_dict[metric_name] = metric_fn\n per_output_metrics.append(metrics_dict)\n\n return per_output_metrics\n\n\ndef get_metric_name(metric, weighted=False):\n \"\"\"Returns the name corresponding to the given metric input.\n\n # Arguments\n metric: Metric function name or reference.\n weighted: Boolean indicating if the given metric is weighted.\n\n # Returns\n The metric name.\n \"\"\"\n # We keep the string that the user has set in compile as the metric name.\n if isinstance(metric, six.string_types):\n return metric\n\n metric = metrics_module.get(metric)\n return metric.name if hasattr(metric, 'name') else metric.__name__\n\n\ndef get_metric_function(metric, output_shape=None, loss_fn=None):\n \"\"\"Returns the metric function corresponding to the given metric input.\n\n # Arguments\n metric: Metric function name or reference.\n output_shape: The shape of the output that this metric will be calculated\n for.\n loss_fn: The loss function used.\n\n # Returns\n The metric function.\n \"\"\"\n if metric not in ['accuracy', 'acc', 'crossentropy', 'ce']:\n return metrics_module.get(metric)\n\n is_sparse_categorical_crossentropy = (\n isinstance(loss_fn, losses.SparseCategoricalCrossentropy) or\n (isinstance(loss_fn, losses.LossFunctionWrapper) and\n loss_fn.fn == losses.sparse_categorical_crossentropy))\n\n is_binary_crossentropy = (\n isinstance(loss_fn, losses.BinaryCrossentropy) or\n (isinstance(loss_fn, losses.LossFunctionWrapper) and\n loss_fn.fn == losses.binary_crossentropy))\n\n if metric in ['accuracy', 'acc']:\n if output_shape[-1] == 1 or is_binary_crossentropy:\n return metrics_module.binary_accuracy\n elif is_sparse_categorical_crossentropy:\n return metrics_module.sparse_categorical_accuracy\n # If the output_shape[-1] is not 1, then we know output is `categorical`.\n # We assume it is sparse categorical only if loss is explicitly given\n # as sparse categorical crossentropy loss.\n return metrics_module.categorical_accuracy\n else:\n if output_shape[-1] == 1 or is_binary_crossentropy:\n return metrics_module.binary_crossentropy\n elif is_sparse_categorical_crossentropy:\n return metrics_module.sparse_categorical_crossentropy\n return metrics_module.categorical_crossentropy\n\n\ndef call_metric_function(metric_fn,\n y_true,\n y_pred=None,\n weights=None,\n mask=None):\n \"\"\"Invokes metric function and returns the metric result tensor.\"\"\"\n if mask is not None:\n mask = math_ops.cast(mask, y_pred.dtype)\n if weights is None:\n # Use mask as sample weight.\n weights = mask\n else:\n # Update dimensions of weights to match with mask.\n mask, _, weights = losses_utils.squeeze_or_expand_dimensions(\n mask, sample_weight=weights)\n weights *= mask\n\n if y_pred is not None:\n update_ops = metric_fn.update_state(y_true, y_pred, sample_weight=weights)\n with K.control_dependencies(update_ops): # For TF\n metric_fn.result()\n else:\n # `Mean` metric only takes a single value.\n update_ops = metric_fn.update_state(y_true, 
sample_weight=weights)\n with K.control_dependencies(update_ops): # For TF\n metric_fn.result()\n", "path": "keras/engine/training_utils.py" } ]
[ { "content": "\"\"\"Training-related utilities.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport inspect\nimport collections\nimport copy\nimport numpy as np\nimport six\nimport warnings\nfrom collections import OrderedDict\n\nfrom .. import backend as K\nfrom .. import losses\nfrom .. import metrics as metrics_module\nfrom ..utils import Sequence\nfrom ..utils import generic_utils\nfrom ..utils import losses_utils\n\n\ndef standardize_single_array(x):\n if x is None:\n return None\n elif K.is_tensor(x):\n shape = K.int_shape(x)\n if shape is None or shape[0] is None:\n raise ValueError(\n 'When feeding symbolic tensors to a model, we expect the '\n 'tensors to have a static batch size. '\n 'Got tensor with shape: %s' % str(shape))\n return x\n elif x.ndim == 1:\n x = np.expand_dims(x, 1)\n return x\n\n\ndef standardize_input_data(data,\n names,\n shapes=None,\n check_batch_axis=True,\n exception_prefix=''):\n \"\"\"Normalizes inputs and targets provided by users.\n\n Users may pass data as a list of arrays, dictionary of arrays,\n or as a single array. We normalize this to an ordered list of\n arrays (same order as `names`), while checking that the provided\n arrays have shapes that match the network's expectations.\n\n # Arguments\n data: User-provided input data (polymorphic).\n names: List of expected array names.\n shapes: Optional list of expected array shapes.\n check_batch_axis: Boolean; whether to check that\n the batch axis of the arrays matches the expected\n value found in `shapes`.\n exception_prefix: String prefix used for exception formatting.\n\n # Returns\n List of standardized input arrays (one array per model input).\n\n # Raises\n ValueError: in case of improperly formatted user-provided data.\n \"\"\"\n if not names:\n if data is not None and hasattr(data, '__len__') and len(data):\n raise ValueError('Error when checking model ' +\n exception_prefix + ': '\n 'expected no data, but got:', data)\n return []\n if data is None:\n return [None for _ in range(len(names))]\n\n if isinstance(data, dict):\n try:\n data = [\n data[x].values\n if data[x].__class__.__name__ == 'DataFrame' else data[x]\n for x in names\n ]\n except KeyError as e:\n raise ValueError('No data provided for \"' + e.args[0] +\n '\". Need data '\n 'for each key in: ' + str(names))\n elif isinstance(data, list):\n if isinstance(data[0], list):\n data = [np.asarray(d) for d in data]\n elif len(names) == 1 and isinstance(data[0], (float, int)):\n data = [np.asarray(data)]\n else:\n data = [\n x.values if x.__class__.__name__ == 'DataFrame'\n else x for x in data\n ]\n else:\n data = data.values if data.__class__.__name__ == 'DataFrame' else data\n data = [data]\n data = [standardize_single_array(x) for x in data]\n\n if len(data) != len(names):\n if data and hasattr(data[0], 'shape'):\n raise ValueError(\n 'Error when checking model ' + exception_prefix +\n ': the list of Numpy arrays that you are passing to '\n 'your model is not the size the model expected. '\n 'Expected to see ' + str(len(names)) + ' array(s), '\n 'but instead got the following list of ' +\n str(len(data)) + ' arrays: ' + str(data)[:200] + '...')\n elif len(names) > 1:\n raise ValueError(\n 'Error when checking model ' + exception_prefix +\n ': you are passing a list as input to your model, '\n 'but the model expects a list of ' + str(len(names)) +\n ' Numpy arrays instead. 
'\n 'The list you passed was: ' + str(data)[:200])\n elif len(data) == 1 and not hasattr(data[0], 'shape'):\n raise TypeError('Error when checking model ' + exception_prefix +\n ': data should be a Numpy array, or list/dict of '\n 'Numpy arrays. Found: ' + str(data)[:200] + '...')\n elif len(names) == 1:\n data = [np.asarray(data)]\n\n # Check shapes compatibility.\n if shapes:\n for i in range(len(names)):\n if shapes[i] is not None and not K.is_tensor(data[i]):\n data_shape = data[i].shape\n shape = shapes[i]\n if data[i].ndim != len(shape):\n raise ValueError(\n 'Error when checking ' + exception_prefix +\n ': expected ' + names[i] + ' to have ' +\n str(len(shape)) + ' dimensions, but got array '\n 'with shape ' + str(data_shape))\n if not check_batch_axis:\n data_shape = data_shape[1:]\n shape = shape[1:]\n for dim, ref_dim in zip(data_shape, shape):\n if ref_dim != dim and ref_dim:\n raise ValueError(\n 'Error when checking ' + exception_prefix +\n ': expected ' + names[i] + ' to have shape ' +\n str(shape) + ' but got array with shape ' +\n str(data_shape))\n return data\n\n\ndef standardize_sample_or_class_weights(x_weight,\n output_names,\n weight_type):\n \"\"\"Maps `sample_weight` or `class_weight` to model outputs.\n\n # Arguments\n x_weight: User-provided `sample_weight` or `class_weight` argument.\n output_names: List of output names (strings) in the model.\n weight_type: A string used purely for exception printing.\n\n # Returns\n A list of `sample_weight` or `class_weight` where there are exactly\n one element per model output.\n\n # Raises\n ValueError: In case of invalid user-provided argument.\n \"\"\"\n if x_weight is None or len(x_weight) == 0:\n return [None for _ in output_names]\n if len(output_names) == 1:\n if isinstance(x_weight, list) and len(x_weight) == 1:\n return x_weight\n if isinstance(x_weight, dict) and output_names[0] in x_weight:\n return [x_weight[output_names[0]]]\n else:\n return [x_weight]\n if isinstance(x_weight, list):\n if len(x_weight) != len(output_names):\n raise ValueError('Provided `' + weight_type + '` was a list of ' +\n str(len(x_weight)) +\n ' elements, but the model has ' +\n str(len(output_names)) + ' outputs. '\n 'You should provide one `' + weight_type + '`'\n 'array per model output.')\n return x_weight\n if isinstance(x_weight, dict):\n x_weights = []\n for name in output_names:\n x_weights.append(x_weight.get(name))\n return x_weights\n else:\n raise TypeError('The model has multiple outputs, so `' +\n weight_type + '` '\n 'should be either a list or a dict. 
'\n 'Provided `' + weight_type +\n '` type not understood: ' +\n str(x_weight))\n\n\ndef standardize_class_weights(class_weight, output_names):\n return standardize_sample_or_class_weights(class_weight,\n output_names,\n 'class_weight')\n\n\ndef standardize_sample_weights(sample_weight, output_names):\n return standardize_sample_or_class_weights(sample_weight,\n output_names,\n 'sample_weight')\n\n\ndef check_array_length_consistency(inputs, targets, weights=None):\n \"\"\"Checks if batch axes are the same for Numpy arrays.\n\n # Arguments\n inputs: list of Numpy arrays of inputs.\n targets: list of Numpy arrays of targets.\n weights: list of Numpy arrays of sample weights.\n\n # Raises\n ValueError: in case of incorrectly formatted data.\n \"\"\"\n def set_of_lengths(x):\n # return a set with the variation between\n # different shapes, with None => 0\n if x is None:\n return {0}\n else:\n return set([0 if y is None else int(y.shape[0]) for y in x])\n\n set_x = set_of_lengths(inputs)\n set_y = set_of_lengths(targets)\n set_w = set_of_lengths(weights)\n if len(set_x) > 1:\n raise ValueError('All input arrays (x) should have '\n 'the same number of samples. Got array shapes: ' +\n str([x.shape for x in inputs]))\n if len(set_y) > 1:\n raise ValueError('All target arrays (y) should have '\n 'the same number of samples. Got array shapes: ' +\n str([y.shape for y in targets]))\n if set_x and set_y and list(set_x)[0] != list(set_y)[0]:\n raise ValueError('Input arrays should have '\n 'the same number of samples as target arrays. '\n 'Found ' + str(list(set_x)[0]) + ' input samples '\n 'and ' + str(list(set_y)[0]) + ' target samples.')\n if len(set_w) > 1:\n raise ValueError('All sample_weight arrays should have '\n 'the same number of samples. Got array shapes: ' +\n str([w.shape for w in weights]))\n if set_y and set_w and list(set_y)[0] != list(set_w)[0]:\n raise ValueError('Sample_weight arrays should have '\n 'the same number of samples as target arrays. Got ' +\n str(list(set_y)[0]) + ' input samples and ' +\n str(list(set_w)[0]) + ' target samples.')\n\n\ndef check_loss_and_target_compatibility(targets, loss_fns, output_shapes):\n \"\"\"Does validation on the compatibility of targets and loss functions.\n\n This helps prevent users from using loss functions incorrectly. This check\n is purely for UX purposes.\n\n # Arguments\n targets: list of Numpy arrays of targets.\n loss_fns: list of loss functions.\n output_shapes: list of shapes of model outputs.\n\n # Raises\n ValueError: if a loss function or target array\n is incompatible with an output.\n \"\"\"\n key_loss_fns = {\n losses.mean_squared_error, losses.binary_crossentropy,\n losses.categorical_crossentropy\n }\n key_loss_classes = (losses.MeanSquaredError, losses.BinaryCrossentropy,\n losses.CategoricalCrossentropy)\n for y, loss, shape in zip(targets, loss_fns, output_shapes):\n if y is None or loss is None:\n continue\n if losses.is_categorical_crossentropy(loss):\n if y.shape[-1] == 1:\n raise ValueError(\n 'You are passing a target array of shape ' + str(y.shape) +\n ' while using as loss `categorical_crossentropy`. '\n '`categorical_crossentropy` expects '\n 'targets to be binary matrices (1s and 0s) '\n 'of shape (samples, classes). 
'\n 'If your targets are integer classes, '\n 'you can convert them to the expected format via:\\n'\n '```\\n'\n 'from keras.utils import to_categorical\\n'\n 'y_binary = to_categorical(y_int)\\n'\n '```\\n'\n '\\n'\n 'Alternatively, you can use the loss function '\n '`sparse_categorical_crossentropy` instead, '\n 'which does expect integer targets.')\n is_loss_wrapper = isinstance(loss, losses.LossFunctionWrapper)\n if (isinstance(loss, key_loss_classes) or (is_loss_wrapper and\n (loss.fn in key_loss_fns))):\n for target_dim, out_dim in zip(y.shape[1:], shape[1:]):\n if out_dim is not None and target_dim != out_dim:\n loss_name = loss.name\n if loss_name is None:\n loss_type = loss.fn if is_loss_wrapper else type(loss)\n loss_name = loss_type.__name__\n raise ValueError(\n 'A target array with shape ' + str(y.shape) +\n ' was passed for an output of shape ' + str(shape) +\n ' while using as loss `' + loss_name + '`. '\n 'This loss expects targets to have the same shape '\n 'as the output.')\n\n\ndef check_generator_arguments(y=None, sample_weight=None,\n validation_split=None):\n \"\"\"Validates arguments passed when using a generator.\"\"\"\n if y is not None:\n raise ValueError('`y` argument is not supported when data is'\n 'a generator or Sequence instance. Instead pass targets'\n ' as the second element of the generator.')\n if sample_weight is not None:\n raise ValueError('`sample_weight` argument is not supported when data is'\n 'a generator or Sequence instance. Instead pass sample'\n ' weights as the third element of the generator.')\n if validation_split:\n raise ValueError('If your data is in the form of a Python generator, '\n 'you cannot use `validation_split`.')\n\n\ndef batch_shuffle(index_array, batch_size):\n \"\"\"Shuffles an array in a batch-wise fashion.\n\n Useful for shuffling HDF5 arrays\n (where one cannot access arbitrary indices).\n\n # Arguments\n index_array: array of indices to be shuffled.\n batch_size: integer.\n\n # Returns\n The `index_array` array, shuffled in a batch-wise fashion.\n \"\"\"\n batch_count = int(len(index_array) / batch_size)\n # to reshape we need to be cleanly divisible by batch size\n # we stash extra items and reappend them after shuffling\n last_batch = index_array[batch_count * batch_size:]\n index_array = index_array[:batch_count * batch_size]\n index_array = index_array.reshape((batch_count, batch_size))\n np.random.shuffle(index_array)\n index_array = index_array.flatten()\n return np.append(index_array, last_batch)\n\n\ndef make_batches(size, batch_size):\n \"\"\"Returns a list of batch indices (tuples of indices).\n\n # Arguments\n size: Integer, total size of the data to slice into batches.\n batch_size: Integer, batch size.\n\n # Returns\n A list of tuples of array indices.\n \"\"\"\n num_batches = (size + batch_size - 1) // batch_size # round up\n return [(i * batch_size, min(size, (i + 1) * batch_size))\n for i in range(num_batches)]\n\n\ndef weighted_masked_objective(fn):\n \"\"\"Adds support for masking and sample-weighting to an objective function.\n\n It transforms an objective function `fn(y_true, y_pred)`\n into a sample-weighted, cost-masked objective function\n `fn(y_true, y_pred, weights, mask)`.\n\n # Arguments\n fn: The objective function to wrap,\n with signature `fn(y_true, y_pred)`.\n\n # Returns\n A function with signature `fn(y_true, y_pred, weights, mask)`.\n \"\"\"\n if fn is None:\n return None\n\n def weighted(y_true, y_pred, weights, mask=None):\n \"\"\"Wrapper function.\n\n # Arguments\n y_true: `y_true` 
argument of `fn`.\n y_pred: `y_pred` argument of `fn`.\n weights: Weights tensor.\n mask: Mask tensor.\n\n # Returns\n Scalar tensor.\n \"\"\"\n # score_array has ndim >= 2\n score_array = fn(y_true, y_pred)\n if mask is not None:\n # Cast the mask to floatX to avoid float64 upcasting in Theano\n mask = K.cast(mask, K.floatx())\n # mask should have the same shape as score_array\n score_array *= mask\n # the loss per batch should be proportional\n # to the number of unmasked samples.\n score_array /= K.mean(mask) + K.epsilon()\n\n # apply sample weighting\n if weights is not None:\n # reduce score_array to same ndim as weight array\n ndim = K.ndim(score_array)\n weight_ndim = K.ndim(weights)\n score_array = K.mean(score_array,\n axis=list(range(weight_ndim, ndim)))\n score_array *= weights\n score_array /= K.mean(K.cast(K.not_equal(weights, 0), K.floatx()))\n return K.mean(score_array)\n return weighted\n\n\ndef standardize_weights(y,\n sample_weight=None,\n class_weight=None,\n sample_weight_mode=None):\n \"\"\"Performs sample weight validation and standardization.\n\n Everything gets normalized to a single sample-wise (or timestep-wise)\n weight array. If both `sample_weights` and `class_weights` are provided,\n the weights are multiplied together.\n\n # Arguments\n y: Numpy array of model targets to be weighted.\n sample_weight: User-provided `sample_weight` argument.\n class_weight: User-provided `class_weight` argument.\n sample_weight_mode: One of `None` or `\"temporal\"`.\n `\"temporal\"` indicated that we expect 2D weight data\n that will be applied to the last 2 dimensions of\n the targets (i.e. we are weighting timesteps, not samples).\n\n # Returns\n A Numpy array of target weights, one entry per sample to weight.\n\n # Raises\n ValueError: In case of invalid user-provided arguments.\n \"\"\"\n if sample_weight_mode is not None:\n if sample_weight_mode != 'temporal':\n raise ValueError('\"sample_weight_mode '\n 'should be None or \"temporal\". '\n 'Found: ' + str(sample_weight_mode))\n if len(y.shape) < 3:\n raise ValueError('Found a sample_weight array for '\n 'an input with shape ' +\n str(y.shape) + '. '\n 'Timestep-wise sample weighting (use of '\n 'sample_weight_mode=\"temporal\") is restricted to '\n 'outputs that are at least 3D, i.e. that have '\n 'a time dimension.')\n if sample_weight is not None and len(sample_weight.shape) != 2:\n raise ValueError('Found a sample_weight array with shape ' +\n str(sample_weight.shape) + '. '\n 'In order to use timestep-wise sample weighting, '\n 'you should pass a 2D sample_weight array.')\n else:\n if sample_weight is not None and len(sample_weight.shape) != 1:\n raise ValueError('Found a sample_weight array with shape ' +\n str(sample_weight.shape) + '. '\n 'In order to use timestep-wise sample weights, '\n 'you should specify '\n 'sample_weight_mode=\"temporal\" '\n 'in compile(). If you just mean to use '\n 'sample-wise weights, make sure your '\n 'sample_weight array is 1D.')\n\n if sample_weight is not None:\n if len(sample_weight.shape) > len(y.shape):\n raise ValueError('Found a sample_weight with shape' +\n str(sample_weight.shape) + '.'\n 'Expected sample_weight with rank '\n 'less than or equal to ' + str(len(y.shape)))\n\n if y.shape[:sample_weight.ndim] != sample_weight.shape:\n raise ValueError('Found a sample_weight array with shape ' +\n str(sample_weight.shape) +\n ' for an input with shape ' +\n str(y.shape) + '. 
'\n 'sample_weight cannot be broadcast.')\n\n class_sample_weight = None\n if isinstance(class_weight, dict):\n if len(y.shape) > 2:\n raise ValueError('`class_weight` not supported for '\n '3+ dimensional targets.')\n if len(y.shape) == 2:\n if y.shape[1] > 1:\n y_classes = np.argmax(y, axis=1)\n elif y.shape[1] == 1:\n y_classes = np.reshape(y, y.shape[0])\n else:\n y_classes = y\n\n class_sample_weight = np.asarray(\n [class_weight[cls] for cls in y_classes if cls in class_weight])\n\n if len(class_sample_weight) != len(y_classes):\n # subtract the sets to pick all missing classes\n existing_classes = set(y_classes)\n existing_class_weight = set(class_weight.keys())\n raise ValueError('`class_weight` must contain '\n 'all classes in the data.'\n ' The classes %s exist in the data but not in '\n '`class_weight`.'\n % (existing_classes - existing_class_weight))\n\n if sample_weight is not None and class_sample_weight is not None:\n return sample_weight * class_sample_weight\n if sample_weight is not None:\n return sample_weight\n if class_sample_weight is not None:\n return class_sample_weight\n\n # Everything has weight 1 by default.\n if sample_weight_mode is None:\n return np.ones((y.shape[0],), dtype=K.floatx())\n else:\n return np.ones((y.shape[0], y.shape[1]), dtype=K.floatx())\n\n\ndef check_num_samples(ins,\n batch_size=None,\n steps=None,\n steps_name='steps'):\n \"\"\"Checks the number of samples provided for training and evaluation.\n\n The number of samples is not defined when running with `steps`,\n in which case the number of samples is set to `None`.\n\n # Arguments\n ins: List of tensors to be fed to the Keras function.\n batch_size: Integer batch size or `None` if not defined.\n steps: Total number of steps (batches of samples)\n before declaring `predict_loop` finished.\n Ignored with the default value of `None`.\n steps_name: The public API's parameter name for `steps`.\n\n # Raises\n ValueError: when `steps` is `None` and the attribute `ins.shape`\n does not exist. Also raises ValueError when `steps` is not `None`\n and `batch_size` is not `None` because they are mutually\n exclusive.\n\n # Returns\n When `steps` is `None`, returns the number of samples to be\n processed based on the size of the first dimension of the\n first input Numpy array. When `steps` is not `None` and\n `batch_size` is `None`, returns `None`.\n\n # Raises\n ValueError: In case of invalid arguments.\n \"\"\"\n if steps is not None and batch_size is not None:\n raise ValueError(\n 'If ' + steps_name + ' is set, the `batch_size` must be None.')\n\n if not ins or any(K.is_tensor(x) for x in ins):\n if steps is None:\n raise ValueError(\n 'If your data is in the form of symbolic tensors, '\n 'you should specify the `' + steps_name + '` argument '\n '(instead of the `batch_size` argument, '\n 'because symbolic tensors are expected to produce '\n 'batches of input data).')\n return None\n\n if hasattr(ins[0], 'shape'):\n return int(ins[0].shape[0])\n return None # Edge case where ins == [static_learning_phase]\n\n\ndef iter_sequence_infinite(seq):\n \"\"\"Iterate indefinitely over a Sequence.\n\n # Arguments\n seq: Sequence object\n\n # Returns\n Generator yielding batches.\n \"\"\"\n while True:\n for item in seq:\n yield item\n\n\ndef is_sequence(seq):\n \"\"\"Determine if an object follows the Sequence API.\n\n # Arguments\n seq: a possible Sequence object\n\n # Returns\n boolean, whether the object follows the Sequence API.\n \"\"\"\n # TODO Dref360: Decide which pattern to follow. 
First needs a new TF Version.\n return (getattr(seq, 'use_sequence_api', False)\n or set(dir(Sequence())).issubset(set(dir(seq) + ['use_sequence_api'])))\n\n\ndef is_generator_or_sequence(x):\n \"\"\"Check if `x` is a Keras generator type.\"\"\"\n return inspect.isgenerator(x) or is_sequence(x)\n\n\ndef should_run_validation(validation_freq, epoch):\n \"\"\"Checks if validation should be run this epoch.\n\n # Arguments\n validation_freq: Integer or list. If an integer, specifies how many training\n epochs to run before a new validation run is performed. If a list,\n specifies the epochs on which to run validation.\n epoch: Integer, the number of the training epoch just completed.\n\n # Returns\n Bool, True if validation should be run.\n\n # Raises\n ValueError: if `validation_freq` is an Integer and less than 1, or if\n it is neither an Integer nor a Sequence.\n \"\"\"\n # `epoch` is 0-indexed internally but 1-indexed in the public API.\n one_indexed_epoch = epoch + 1\n\n if isinstance(validation_freq, int):\n if validation_freq < 1:\n raise ValueError('`validation_freq` can not be less than 1.')\n return one_indexed_epoch % validation_freq == 0\n\n if not isinstance(validation_freq, collections.Container):\n raise ValueError('`validation_freq` must be an Integer or '\n '`collections.Container` (e.g. list, tuple, etc.)')\n return one_indexed_epoch in validation_freq\n\n\ndef get_static_batch_size(layer):\n \"\"\"Gets the static batch size of a Layer.\n\n # Arguments\n layer: a `Layer` instance.\n\n # Returns\n The static batch size of a Layer.\n \"\"\"\n batch_input_shape, _ = get_input_shape_and_dtype(layer)\n if batch_input_shape is not None:\n return batch_input_shape[0]\n return None\n\n\ndef get_input_shape_and_dtype(layer):\n \"\"\"Retrieves input shape and input dtype of layer if applicable.\n\n # Arguments\n layer: Layer (or model) instance.\n\n # Returns\n Tuple (input_shape, input_dtype). 
Both could be None if the layer\n does not have a defined input shape.\n\n # Raises\n ValueError: in case an empty Sequential or Functional model is passed.\n \"\"\"\n def _is_graph_model(layer):\n return ((hasattr(layer, '_is_graph_network') and layer._is_graph_network) or\n layer.__class__.__name__ == 'Sequential')\n\n # In case of nested models: recover the first layer\n # of the deepest model to infer input shape and dtype.\n # Subclassed Models may not have been built so can't be checked.\n while _is_graph_model(layer):\n if not layer.layers:\n raise ValueError('An empty Model cannot be used as a Layer.')\n layer = layer.layers[0]\n\n if hasattr(layer, '_batch_input_shape'):\n return layer._batch_input_shape, layer.dtype\n return None, None\n\n\ndef get_loss_function(loss):\n \"\"\"Returns the loss corresponding to the loss input in `compile` API.\"\"\"\n if loss is None or isinstance(loss, losses.Loss):\n return loss\n\n # Deserialize loss configuration, if needed.\n if isinstance(loss, collections.Mapping):\n loss = losses.get(loss)\n\n # Custom callable class.\n if callable(loss) and not hasattr(loss, '__name__'):\n return loss\n\n # Wrap loss function with signature `(y_true, y_pred, **kwargs)`\n # in `LossFunctionWrapper` class.\n loss_fn = losses.get(loss)\n\n # For losses which are given as strings/functions in the compile API,\n # we always set the loss reduction type to be `SUM_OVER_BATCH_SIZE`..\n return losses.LossFunctionWrapper(\n loss_fn,\n name=loss_fn.__name__,\n reduction=losses_utils.Reduction.SUM_OVER_BATCH_SIZE)\n\n\ndef get_output_sample_weight_and_mode(skip_target_weighing_indices,\n sample_weight_mode, output_name,\n output_index):\n \"\"\"Returns the sample weight and weight mode for a single output.\"\"\"\n if output_index in skip_target_weighing_indices:\n return None, None\n\n if sample_weight_mode == 'temporal':\n shape = [None, None]\n mode = 'temporal'\n else:\n shape = [None]\n mode = None\n weight = K.placeholder(\n shape=shape,\n name=output_name + '_sample_weights')\n return weight, mode\n\n\ndef prepare_sample_weights(output_names, sample_weight_mode,\n skip_target_weighing_indices):\n \"\"\"Prepares sample weights for the model.\n\n # Arguments\n output_names: List of model output names.\n sample_weight_mode: sample weight mode user input passed from compile API.\n skip_target_weighing_indices: Indices of output for which sample weights\n should be skipped.\n\n # Returns\n A pair of list of sample weights and sample weight modes\n (one for each output).\n\n # Raises\n ValueError: In case of invalid `sample_weight_mode` input.\n \"\"\"\n sample_weights = []\n sample_weight_modes = []\n if isinstance(sample_weight_mode, dict):\n unknown_output = set(sample_weight_mode.keys()) - set(output_names)\n if unknown_output:\n raise ValueError(\n 'Unknown entry in '\n 'sample_weight_mode dictionary: \"' + str(unknown_output) +\n '\". 
Only expected the following keys: ' + str(output_names))\n for i, name in enumerate(output_names):\n if (i not in skip_target_weighing_indices and\n name not in sample_weight_mode):\n raise ValueError(\n 'Output missing from sample_weight_modes dictionary')\n weight, mode = get_output_sample_weight_and_mode(\n skip_target_weighing_indices,\n sample_weight_mode.get(name),\n name,\n i)\n sample_weights.append(weight)\n sample_weight_modes.append(mode)\n elif isinstance(sample_weight_mode, list):\n if len(sample_weight_mode) != len(output_names):\n raise ValueError('When passing a list as sample_weight_mode, '\n 'it should have one entry per model output. '\n 'The model has ' + str(len(output_names)) +\n ' outputs, but you passed ' +\n str(len(sample_weight_mode)) + 'sample_weight_modes')\n for i, name in enumerate(output_names):\n weight, mode = get_output_sample_weight_and_mode(\n skip_target_weighing_indices, sample_weight_mode[i], name, i)\n sample_weights.append(weight)\n sample_weight_modes.append(mode)\n else:\n for i, name in enumerate(output_names):\n weight, mode = get_output_sample_weight_and_mode(\n skip_target_weighing_indices, sample_weight_mode, name, i)\n sample_weights.append(weight)\n sample_weight_modes.append(mode)\n return sample_weights, sample_weight_modes\n\n\ndef prepare_loss_functions(loss, output_names):\n \"\"\"Converts loss to a list of loss functions.\n\n # Arguments\n loss: String (name of objective function), objective function or\n `Loss` instance. If the model has multiple outputs, you can use\n a different loss on each output by passing a dictionary or a\n list of losses. The loss value that will be minimized by the model\n will then be the sum of all individual losses.\n output_names: List of model output names.\n\n # Returns\n A list of loss objective functions.\n\n # Raises:\n ValueError: If loss is a dict with keys not in model output names,\n or if loss is a list with len not equal to model outputs.\n \"\"\"\n if isinstance(loss, collections.Mapping):\n generic_utils.check_for_unexpected_keys('loss', loss, output_names)\n loss_functions = []\n for name in output_names:\n if name not in loss:\n warnings.warn(\n 'Output {0} missing from loss dictionary. We assume '\n 'this was done on purpose. The fit and evaluate APIs will not '\n 'be expecting any data to be passed to {0}.'.format(name))\n loss_functions.append(get_loss_function(loss.get(name, None)))\n elif isinstance(loss, six.string_types):\n loss_functions = [get_loss_function(loss) for _ in output_names]\n elif isinstance(loss, collections.Sequence):\n if len(loss) != len(output_names):\n raise ValueError('When passing a list as loss, it should have one entry '\n 'per model outputs. The model has {} outputs, but you '\n 'passed loss={}'.format(len(output_names), loss))\n loss_functions = [get_loss_function(l) for l in loss]\n else:\n loss_functions = [get_loss_function(loss) for _ in range(len(output_names))]\n\n return loss_functions\n\n\ndef prepare_loss_weights(output_names, loss_weights=None):\n \"\"\"Converts loss weights to a list of loss weights.\n\n # Arguments\n output_names: List of model output names.\n loss_weights: Optional list or dictionary specifying scalar coefficients\n (Python floats) to weight the loss contributions of different model\n outputs. The loss value that will be minimized by the model will then be\n the *weighted sum* of all individual losses, weighted by the\n `loss_weights` coefficients. If a list, it is expected to have a 1:1\n mapping to the model's outputs. 
If a dict, it is expected to map\n output names (strings) to scalar coefficients.\n\n # Returns\n A list of loss weights of python floats.\n\n # Raises\n ValueError: If loss weight is a dict with key not in model output names,\n or if loss is a list with len not equal to model outputs.\n \"\"\"\n if loss_weights is None:\n weights_list = [1.] * len(output_names)\n elif isinstance(loss_weights, collections.Mapping):\n generic_utils.check_for_unexpected_keys('loss_weights', loss_weights,\n output_names)\n weights_list = [loss_weights.get(name, 1.) for name in output_names]\n elif isinstance(loss_weights, list):\n if len(loss_weights) != len(output_names):\n raise ValueError('When passing a list as loss_weights, '\n 'it should have one entry per model output. '\n 'The model has ' + str(len(output_names)) +\n ' outputs, but you passed loss_weights=' +\n str(loss_weights))\n weights_list = loss_weights\n else:\n raise TypeError('Could not interpret loss_weights argument: ' +\n str(loss_weights) + ' - expected a list of dicts.')\n\n return weights_list\n\n\ndef collect_per_output_metric_info(metrics,\n output_names,\n output_shapes,\n loss_fns,\n is_weighted=False):\n \"\"\"Maps metric names and functions to model outputs.\n\n # Arguments\n metrics: a list or a list of lists or a dict of metric functions.\n output_names: a list of the names (strings) of model outputs.\n output_shapes: a list of the shapes (strings) of model outputs.\n loss_fns: a list of the loss functions corresponding to the model outputs.\n is_weighted: Boolean indicating whether the given metrics are weighted.\n\n # Returns\n A list (one entry per model output) of dicts.\n For instance, if the model has 2 outputs, and for the first output\n we want to compute \"binary_accuracy\" and \"binary_crossentropy\",\n and just \"binary_accuracy\" for the second output,\n the list would look like: `[{\n 'acc': binary_accuracy(),\n 'ce': binary_crossentropy(),\n }, {\n 'acc': binary_accuracy(),\n }]`\n\n # Raises\n TypeError: if an incorrect type is passed for the `metrics` argument.\n \"\"\"\n if not metrics:\n return [{} for _ in output_names]\n\n if isinstance(metrics, list):\n any_sub_list = any(isinstance(m, list) for m in metrics)\n if any_sub_list:\n if len(metrics) != len(output_names):\n raise ValueError('When passing a list of lists as `metrics`, '\n 'it should have one entry per model output. '\n 'The model has ' + str(len(output_names)) +\n ' outputs, but you passed metrics=' + str(metrics))\n # User has provided a list of len = len(outputs).\n nested_metrics = [generic_utils.to_list(m) for m in metrics]\n else:\n # If it is a single list we then apply all metrics to all outputs.\n if len(output_names) > 1:\n nested_metrics = []\n for _ in output_names:\n nested_metrics.append(\n [metrics_module.clone_metric(m) for m in metrics])\n else:\n nested_metrics = [metrics]\n elif isinstance(metrics, collections.Mapping):\n generic_utils.check_for_unexpected_keys('metrics', metrics, output_names)\n nested_metrics = []\n for name in output_names:\n output_metrics = generic_utils.to_list(metrics.get(name, []))\n nested_metrics.append(output_metrics)\n else:\n raise TypeError('Type of `metrics` argument not understood. 
'\n 'Expected a list or dictionary, found: ' + str(metrics))\n\n per_output_metrics = []\n for i, metrics in enumerate(nested_metrics):\n metrics_dict = OrderedDict()\n for metric in metrics:\n metric_name = get_metric_name(metric, is_weighted)\n metric_fn = get_metric_function(\n metric, output_shape=output_shapes[i], loss_fn=loss_fns[i])\n\n # If the metric function is not stateful, we create a stateful version.\n if not isinstance(metric_fn, metrics_module.Metric):\n metric_fn = metrics_module.MeanMetricWrapper(\n metric_fn, name=metric_name)\n metrics_dict[metric_name] = metric_fn\n per_output_metrics.append(metrics_dict)\n\n return per_output_metrics\n\n\ndef get_metric_name(metric, weighted=False):\n \"\"\"Returns the name corresponding to the given metric input.\n\n # Arguments\n metric: Metric function name or reference.\n weighted: Boolean indicating if the given metric is weighted.\n\n # Returns\n The metric name.\n \"\"\"\n # We keep the string that the user has set in compile as the metric name.\n if isinstance(metric, six.string_types):\n return metric\n\n metric = metrics_module.get(metric)\n return metric.name if hasattr(metric, 'name') else metric.__name__\n\n\ndef get_metric_function(metric, output_shape=None, loss_fn=None):\n \"\"\"Returns the metric function corresponding to the given metric input.\n\n # Arguments\n metric: Metric function name or reference.\n output_shape: The shape of the output that this metric will be calculated\n for.\n loss_fn: The loss function used.\n\n # Returns\n The metric function.\n \"\"\"\n if metric not in ['accuracy', 'acc', 'crossentropy', 'ce']:\n return metrics_module.get(metric)\n\n is_sparse_categorical_crossentropy = (\n isinstance(loss_fn, losses.SparseCategoricalCrossentropy) or\n (isinstance(loss_fn, losses.LossFunctionWrapper) and\n loss_fn.fn == losses.sparse_categorical_crossentropy))\n\n is_binary_crossentropy = (\n isinstance(loss_fn, losses.BinaryCrossentropy) or\n (isinstance(loss_fn, losses.LossFunctionWrapper) and\n loss_fn.fn == losses.binary_crossentropy))\n\n if metric in ['accuracy', 'acc']:\n if output_shape[-1] == 1 or is_binary_crossentropy:\n return metrics_module.binary_accuracy\n elif is_sparse_categorical_crossentropy:\n return metrics_module.sparse_categorical_accuracy\n # If the output_shape[-1] is not 1, then we know output is `categorical`.\n # We assume it is sparse categorical only if loss is explicitly given\n # as sparse categorical crossentropy loss.\n return metrics_module.categorical_accuracy\n else:\n if output_shape[-1] == 1 or is_binary_crossentropy:\n return metrics_module.binary_crossentropy\n elif is_sparse_categorical_crossentropy:\n return metrics_module.sparse_categorical_crossentropy\n return metrics_module.categorical_crossentropy\n\n\ndef call_metric_function(metric_fn,\n y_true,\n y_pred=None,\n weights=None,\n mask=None):\n \"\"\"Invokes metric function and returns the metric result tensor.\"\"\"\n if mask is not None:\n mask = K.cast(mask, y_pred.dtype)\n if weights is None:\n # Use mask as sample weight.\n weights = mask\n else:\n # Update dimensions of weights to match with mask.\n mask, _, weights = losses_utils.squeeze_or_expand_dimensions(\n mask, sample_weight=weights)\n weights *= mask\n\n if y_pred is not None:\n update_ops = metric_fn.update_state(y_true, y_pred, sample_weight=weights)\n with K.control_dependencies(update_ops): # For TF\n metric_fn.result()\n else:\n # `Mean` metric only takes a single value.\n update_ops = metric_fn.update_state(y_true, 
sample_weight=weights)\n with K.control_dependencies(update_ops): # For TF\n metric_fn.result()\n", "path": "keras/engine/training_utils.py" } ]
diff --git a/keras/engine/training_utils.py b/keras/engine/training_utils.py index 4908d5c6be4e..ac42229b612d 100644 --- a/keras/engine/training_utils.py +++ b/keras/engine/training_utils.py @@ -1019,7 +1019,7 @@ def call_metric_function(metric_fn, mask=None): """Invokes metric function and returns the metric result tensor.""" if mask is not None: - mask = math_ops.cast(mask, y_pred.dtype) + mask = K.cast(mask, y_pred.dtype) if weights is None: # Use mask as sample weight. weights = mask
NameError: name 'math_ops' is not defined **System information** - Have I written custom code (as opposed to using example directory): - OS Platform and Distribution (e.g., Linux Ubuntu 16.04): - TensorFlow backend (yes / no): - TensorFlow version: 1.14.0 - Keras version: 2.3.0 - Python version: - CUDA/cuDNN version: - GPU model and memory: **Describe the current behavior** File "/usr/local/lib/python3.6/dist-packages/keras/engine/training.py", line 222, in compile masks=masks) File "/usr/local/lib/python3.6/dist-packages/keras/engine/training.py", line 871, in _handle_metrics self._per_output_metrics[i], target, output, output_mask) File "/usr/local/lib/python3.6/dist-packages/keras/engine/training.py", line 842, in _handle_per_output_metrics metric_fn, y_true, y_pred, weights=weights, mask=mask) File "/usr/local/lib/python3.6/dist-packages/keras/engine/training_utils.py", line 1022, in call_metric_function mask = math_ops.cast(mask, y_pred.dtype) NameError: name 'math_ops' is not defined **Describe the expected behavior** **Code to reproduce the issue** **Other info / logs**
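The traceback above points at the `call_metric_function` helper in `keras/engine/training_utils.py`, and the one-line patch in the diff replaces the undefined `math_ops.cast` (a TensorFlow-internal symbol that is never imported in this module) with the backend function `K.cast`. Below is a minimal, standalone sketch of the corrected masking logic; the helper name `mask_to_sample_weights` and the example tensors are hypothetical, and the shape-alignment step done by `losses_utils.squeeze_or_expand_dimensions` in the real code is omitted.

```python
# Hypothetical, simplified sketch of the patched masking logic in
# call_metric_function: cast the mask with the Keras backend (K.cast)
# rather than the undefined TensorFlow-internal `math_ops`.
import numpy as np
from keras import backend as K


def mask_to_sample_weights(mask, weights, y_pred):
    """Fold an optional mask into the sample weights (hypothetical helper)."""
    if mask is None:
        return weights
    mask = K.cast(mask, y_pred.dtype)  # was: math_ops.cast(mask, y_pred.dtype)
    if weights is None:
        return mask                    # use the mask itself as the weight
    return weights * mask              # real code first aligns the shapes


# Usage with constant tensors whose shapes already match.
y_pred = K.constant(np.array([[0.2], [0.9], [0.4]]))
mask = K.constant(np.array([[1.0], [0.0], [1.0]]))
weights = K.constant(np.array([[2.0], [2.0], [2.0]]))
print(K.eval(mask_to_sample_weights(mask, weights, y_pred)))  # masked rows get weight 0
```

Because the module already imports the backend as `K`, the patched call works on any Keras backend, whereas `math_ops` would only have been available by importing TensorFlow's private ops module directly.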
aio-libs-abandoned__aioredis-py-658
[ { "content": "import types\nimport asyncio\nimport socket\nfrom functools import partial\nfrom collections import deque\nfrom contextlib import contextmanager\n\nfrom .util import (\n encode_command,\n wait_ok,\n _NOTSET,\n _set_result,\n _set_exception,\n coerced_keys_dict,\n decode,\n parse_url,\n )\nfrom .parser import Reader\nfrom .stream import open_connection, open_unix_connection\nfrom .errors import (\n ConnectionClosedError,\n ConnectionForcedCloseError,\n RedisError,\n ProtocolError,\n ReplyError,\n WatchVariableError,\n ReadOnlyError,\n MaxClientsError\n )\nfrom .pubsub import Channel\nfrom .abc import AbcChannel\nfrom .abc import AbcConnection\nfrom .log import logger\n\n\n__all__ = ['create_connection', 'RedisConnection']\n\nMAX_CHUNK_SIZE = 65536\n\n_PUBSUB_COMMANDS = (\n 'SUBSCRIBE', b'SUBSCRIBE',\n 'PSUBSCRIBE', b'PSUBSCRIBE',\n 'UNSUBSCRIBE', b'UNSUBSCRIBE',\n 'PUNSUBSCRIBE', b'PUNSUBSCRIBE',\n )\n\n\nasync def create_connection(address, *, db=None, password=None, ssl=None,\n encoding=None, parser=None, loop=None,\n timeout=None, connection_cls=None):\n \"\"\"Creates redis connection.\n\n Opens connection to Redis server specified by address argument.\n Address argument can be one of the following:\n * A tuple representing (host, port) pair for TCP connections;\n * A string representing either Redis URI or unix domain socket path.\n\n SSL argument is passed through to asyncio.create_connection.\n By default SSL/TLS is not used.\n\n By default any timeout is applied at the connection stage, however\n you can set a limitted time used trying to open a connection via\n the `timeout` Kw.\n\n Encoding argument can be used to decode byte-replies to strings.\n By default no decoding is done.\n\n Parser parameter can be used to pass custom Redis protocol parser class.\n By default hiredis.Reader is used (unless it is missing or platform\n is not CPython).\n\n Return value is RedisConnection instance or a connection_cls if it is\n given.\n\n This function is a coroutine.\n \"\"\"\n assert isinstance(address, (tuple, list, str)), \"tuple or str expected\"\n if isinstance(address, str):\n address, options = parse_url(address)\n logger.debug(\"Parsed Redis URI %r\", address)\n db = options.setdefault('db', db)\n password = options.setdefault('password', password)\n encoding = options.setdefault('encoding', encoding)\n timeout = options.setdefault('timeout', timeout)\n if 'ssl' in options:\n assert options['ssl'] or (not options['ssl'] and not ssl), (\n \"Conflicting ssl options are set\", options['ssl'], ssl)\n ssl = ssl or options['ssl']\n\n if timeout is not None and timeout <= 0:\n raise ValueError(\"Timeout has to be None or a number greater than 0\")\n\n if connection_cls:\n assert issubclass(connection_cls, AbcConnection),\\\n \"connection_class does not meet the AbcConnection contract\"\n cls = connection_cls\n else:\n cls = RedisConnection\n\n if loop is None:\n loop = asyncio.get_event_loop()\n\n if isinstance(address, (list, tuple)):\n host, port = address\n logger.debug(\"Creating tcp connection to %r\", address)\n reader, writer = await asyncio.wait_for(open_connection(\n host, port, limit=MAX_CHUNK_SIZE, ssl=ssl, loop=loop),\n timeout, loop=loop)\n sock = writer.transport.get_extra_info('socket')\n if sock is not None:\n sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n address = sock.getpeername()\n address = tuple(address[:2])\n else:\n logger.debug(\"Creating unix connection to %r\", address)\n reader, writer = await asyncio.wait_for(open_unix_connection(\n 
address, ssl=ssl, limit=MAX_CHUNK_SIZE, loop=loop),\n timeout, loop=loop)\n sock = writer.transport.get_extra_info('socket')\n if sock is not None:\n address = sock.getpeername()\n\n conn = cls(reader, writer, encoding=encoding,\n address=address, parser=parser,\n loop=loop)\n\n try:\n if password is not None:\n await conn.auth(password)\n if db is not None:\n await conn.select(db)\n except Exception:\n conn.close()\n await conn.wait_closed()\n raise\n return conn\n\n\nclass RedisConnection(AbcConnection):\n \"\"\"Redis connection.\"\"\"\n\n def __init__(self, reader, writer, *, address, encoding=None,\n parser=None, loop=None):\n if loop is None:\n loop = asyncio.get_event_loop()\n if parser is None:\n parser = Reader\n assert callable(parser), (\n \"Parser argument is not callable\", parser)\n self._reader = reader\n self._writer = writer\n self._address = address\n self._loop = loop\n self._waiters = deque()\n self._reader.set_parser(\n parser(protocolError=ProtocolError, replyError=ReplyError)\n )\n self._reader_task = asyncio.ensure_future(self._read_data(),\n loop=self._loop)\n self._close_msg = None\n self._db = 0\n self._closing = False\n self._closed = False\n self._close_state = asyncio.Event()\n self._reader_task.add_done_callback(lambda x: self._close_state.set())\n self._in_transaction = None\n self._transaction_error = None # XXX: never used?\n self._in_pubsub = 0\n self._pubsub_channels = coerced_keys_dict()\n self._pubsub_patterns = coerced_keys_dict()\n self._encoding = encoding\n self._pipeline_buffer = None\n\n def __repr__(self):\n return '<RedisConnection [db:{}]>'.format(self._db)\n\n async def _read_data(self):\n \"\"\"Response reader task.\"\"\"\n last_error = ConnectionClosedError(\n \"Connection has been closed by server\")\n while not self._reader.at_eof():\n try:\n obj = await self._reader.readobj()\n except asyncio.CancelledError:\n # NOTE: reader can get cancelled from `close()` method only.\n last_error = RuntimeError('this is unexpected')\n break\n except ProtocolError as exc:\n # ProtocolError is fatal\n # so connection must be closed\n if self._in_transaction is not None:\n self._transaction_error = exc\n last_error = exc\n break\n except Exception as exc:\n # NOTE: for QUIT command connection error can be received\n # before response\n last_error = exc\n break\n else:\n if (obj == b'' or obj is None) and self._reader.at_eof():\n logger.debug(\"Connection has been closed by server,\"\n \" response: %r\", obj)\n last_error = ConnectionClosedError(\"Reader at end of file\")\n break\n\n if isinstance(obj, MaxClientsError):\n last_error = obj\n break\n if self._in_pubsub:\n self._process_pubsub(obj)\n else:\n self._process_data(obj)\n self._closing = True\n self._loop.call_soon(self._do_close, last_error)\n\n def _process_data(self, obj):\n \"\"\"Processes command results.\"\"\"\n assert len(self._waiters) > 0, (type(obj), obj)\n waiter, encoding, cb = self._waiters.popleft()\n if isinstance(obj, RedisError):\n if isinstance(obj, ReplyError):\n if obj.args[0].startswith('READONLY'):\n obj = ReadOnlyError(obj.args[0])\n _set_exception(waiter, obj)\n if self._in_transaction is not None:\n self._transaction_error = obj\n else:\n if encoding is not None:\n try:\n obj = decode(obj, encoding)\n except Exception as exc:\n _set_exception(waiter, exc)\n return\n if cb is not None:\n try:\n obj = cb(obj)\n except Exception as exc:\n _set_exception(waiter, exc)\n return\n _set_result(waiter, obj)\n if self._in_transaction is not None:\n 
self._in_transaction.append((encoding, cb))\n\n def _process_pubsub(self, obj, *, process_waiters=True):\n \"\"\"Processes pubsub messages.\"\"\"\n kind, *args, data = obj\n if kind in (b'subscribe', b'unsubscribe'):\n chan, = args\n if process_waiters and self._in_pubsub and self._waiters:\n self._process_data(obj)\n if kind == b'unsubscribe':\n ch = self._pubsub_channels.pop(chan, None)\n if ch:\n ch.close()\n self._in_pubsub = data\n elif kind in (b'psubscribe', b'punsubscribe'):\n chan, = args\n if process_waiters and self._in_pubsub and self._waiters:\n self._process_data(obj)\n if kind == b'punsubscribe':\n ch = self._pubsub_patterns.pop(chan, None)\n if ch:\n ch.close()\n self._in_pubsub = data\n elif kind == b'message':\n chan, = args\n self._pubsub_channels[chan].put_nowait(data)\n elif kind == b'pmessage':\n pattern, chan = args\n self._pubsub_patterns[pattern].put_nowait((chan, data))\n elif kind == b'pong':\n if process_waiters and self._in_pubsub and self._waiters:\n self._process_data(data or b'PONG')\n else:\n logger.warning(\"Unknown pubsub message received %r\", obj)\n\n @contextmanager\n def _buffered(self):\n # XXX: we must ensure that no await happens\n # as long as we buffer commands.\n # Probably we can set some error-raising callback on enter\n # and remove it on exit\n # if some await happens in between -> throw an error.\n # This is creepy solution, 'cause some one might want to await\n # on some other source except redis.\n # So we must only raise error we someone tries to await\n # pending aioredis future\n # One of solutions is to return coroutine instead of a future\n # in `execute` method.\n # In a coroutine we can check if buffering is enabled and raise error.\n\n # TODO: describe in docs difference in pipeline mode for\n # conn.execute vs pipeline.execute()\n if self._pipeline_buffer is None:\n self._pipeline_buffer = bytearray()\n try:\n yield self\n buf = self._pipeline_buffer\n self._writer.write(buf)\n finally:\n self._pipeline_buffer = None\n else:\n yield self\n\n def execute(self, command, *args, encoding=_NOTSET):\n \"\"\"Executes redis command and returns Future waiting for the answer.\n\n Raises:\n * TypeError if any of args can not be encoded as bytes.\n * ReplyError on redis '-ERR' responses.\n * ProtocolError when response can not be decoded meaning connection\n is broken.\n * ConnectionClosedError when either client or server has closed the\n connection.\n \"\"\"\n if self._reader is None or self._reader.at_eof():\n msg = self._close_msg or \"Connection closed or corrupted\"\n raise ConnectionClosedError(msg)\n if command is None:\n raise TypeError(\"command must not be None\")\n if None in args:\n raise TypeError(\"args must not contain None\")\n command = command.upper().strip()\n is_pubsub = command in _PUBSUB_COMMANDS\n is_ping = command in ('PING', b'PING')\n if self._in_pubsub and not (is_pubsub or is_ping):\n raise RedisError(\"Connection in SUBSCRIBE mode\")\n elif is_pubsub:\n logger.warning(\"Deprecated. 
Use `execute_pubsub` method directly\")\n return self.execute_pubsub(command, *args)\n\n if command in ('SELECT', b'SELECT'):\n cb = partial(self._set_db, args=args)\n elif command in ('MULTI', b'MULTI'):\n cb = self._start_transaction\n elif command in ('EXEC', b'EXEC'):\n cb = partial(self._end_transaction, discard=False)\n elif command in ('DISCARD', b'DISCARD'):\n cb = partial(self._end_transaction, discard=True)\n else:\n cb = None\n if encoding is _NOTSET:\n encoding = self._encoding\n fut = self._loop.create_future()\n if self._pipeline_buffer is None:\n self._writer.write(encode_command(command, *args))\n else:\n encode_command(command, *args, buf=self._pipeline_buffer)\n self._waiters.append((fut, encoding, cb))\n return fut\n\n def execute_pubsub(self, command, *channels):\n \"\"\"Executes redis (p)subscribe/(p)unsubscribe commands.\n\n Returns asyncio.gather coroutine waiting for all channels/patterns\n to receive answers.\n \"\"\"\n command = command.upper().strip()\n assert command in _PUBSUB_COMMANDS, (\n \"Pub/Sub command expected\", command)\n if self._reader is None or self._reader.at_eof():\n raise ConnectionClosedError(\"Connection closed or corrupted\")\n if None in set(channels):\n raise TypeError(\"args must not contain None\")\n if not len(channels):\n raise TypeError(\"No channels/patterns supplied\")\n is_pattern = len(command) in (10, 12)\n mkchannel = partial(Channel, is_pattern=is_pattern, loop=self._loop)\n channels = [ch if isinstance(ch, AbcChannel) else mkchannel(ch)\n for ch in channels]\n if not all(ch.is_pattern == is_pattern for ch in channels):\n raise ValueError(\"Not all channels {} match command {}\"\n .format(channels, command))\n cmd = encode_command(command, *(ch.name for ch in channels))\n res = []\n for ch in channels:\n fut = self._loop.create_future()\n res.append(fut)\n cb = partial(self._update_pubsub, ch=ch)\n self._waiters.append((fut, None, cb))\n if self._pipeline_buffer is None:\n self._writer.write(cmd)\n else:\n self._pipeline_buffer.extend(cmd)\n return asyncio.gather(*res, loop=self._loop)\n\n def close(self):\n \"\"\"Close connection.\"\"\"\n self._do_close(ConnectionForcedCloseError())\n\n def _do_close(self, exc):\n if self._closed:\n return\n self._closed = True\n self._closing = False\n self._writer.transport.close()\n self._reader_task.cancel()\n self._reader_task = None\n self._writer = None\n self._reader = None\n self._pipeline_buffer = None\n\n if exc is not None:\n self._close_msg = str(exc)\n\n while self._waiters:\n waiter, *spam = self._waiters.popleft()\n logger.debug(\"Cancelling waiter %r\", (waiter, spam))\n if exc is None:\n _set_exception(waiter, ConnectionForcedCloseError())\n else:\n _set_exception(waiter, exc)\n while self._pubsub_channels:\n _, ch = self._pubsub_channels.popitem()\n logger.debug(\"Closing pubsub channel %r\", ch)\n ch.close(exc)\n while self._pubsub_patterns:\n _, ch = self._pubsub_patterns.popitem()\n logger.debug(\"Closing pubsub pattern %r\", ch)\n ch.close(exc)\n\n @property\n def closed(self):\n \"\"\"True if connection is closed.\"\"\"\n closed = self._closing or self._closed\n if not closed and self._reader and self._reader.at_eof():\n self._closing = closed = True\n self._loop.call_soon(self._do_close, None)\n return closed\n\n async def wait_closed(self):\n \"\"\"Coroutine waiting until connection is closed.\"\"\"\n await self._close_state.wait()\n\n @property\n def db(self):\n \"\"\"Currently selected db index.\"\"\"\n return self._db\n\n @property\n def encoding(self):\n 
\"\"\"Current set codec or None.\"\"\"\n return self._encoding\n\n @property\n def address(self):\n \"\"\"Redis server address, either host-port tuple or str.\"\"\"\n return self._address\n\n def select(self, db):\n \"\"\"Change the selected database for the current connection.\"\"\"\n if not isinstance(db, int):\n raise TypeError(\"DB must be of int type, not {!r}\".format(db))\n if db < 0:\n raise ValueError(\"DB must be greater or equal 0, got {!r}\"\n .format(db))\n fut = self.execute('SELECT', db)\n return wait_ok(fut)\n\n def _set_db(self, ok, args):\n assert ok in {b'OK', 'OK'}, (\"Unexpected result of SELECT\", ok)\n self._db = args[0]\n return ok\n\n def _start_transaction(self, ok):\n assert self._in_transaction is None, (\n \"Connection is already in transaction\", self._in_transaction)\n self._in_transaction = deque()\n self._transaction_error = None\n return ok\n\n def _end_transaction(self, obj, discard):\n assert self._in_transaction is not None, (\n \"Connection is not in transaction\", obj)\n self._transaction_error = None\n recall, self._in_transaction = self._in_transaction, None\n recall.popleft() # ignore first (its _start_transaction)\n if discard:\n return obj\n assert isinstance(obj, list) or (obj is None and not discard), (\n \"Unexpected MULTI/EXEC result\", obj, recall)\n # TODO: need to be able to re-try transaction\n if obj is None:\n err = WatchVariableError(\"WATCH variable has changed\")\n obj = [err] * len(recall)\n assert len(obj) == len(recall), (\n \"Wrong number of result items in mutli-exec\", obj, recall)\n res = []\n for o, (encoding, cb) in zip(obj, recall):\n if not isinstance(o, RedisError):\n try:\n if encoding:\n o = decode(o, encoding)\n if cb:\n o = cb(o)\n except Exception as err:\n res.append(err)\n continue\n res.append(o)\n return res\n\n def _update_pubsub(self, obj, *, ch):\n kind, *pattern, channel, subscriptions = obj\n self._in_pubsub, was_in_pubsub = subscriptions, self._in_pubsub\n # XXX: the channels/patterns storage should be refactored.\n # if code which supposed to read from channel/pattern\n # failed (exception in reader or else) than\n # the channel object will still reside in memory\n # and leak memory (messages will be put in queue).\n if kind == b'subscribe' and channel not in self._pubsub_channels:\n self._pubsub_channels[channel] = ch\n elif kind == b'psubscribe' and channel not in self._pubsub_patterns:\n self._pubsub_patterns[channel] = ch\n if not was_in_pubsub:\n self._process_pubsub(obj, process_waiters=False)\n return obj\n\n @property\n def in_transaction(self):\n \"\"\"Set to True when MULTI command was issued.\"\"\"\n return self._in_transaction is not None\n\n @property\n def in_pubsub(self):\n \"\"\"Indicates that connection is in PUB/SUB mode.\n\n Provides the number of subscribed channels.\n \"\"\"\n return self._in_pubsub\n\n @property\n def pubsub_channels(self):\n \"\"\"Returns read-only channels dict.\"\"\"\n return types.MappingProxyType(self._pubsub_channels)\n\n @property\n def pubsub_patterns(self):\n \"\"\"Returns read-only patterns dict.\"\"\"\n return types.MappingProxyType(self._pubsub_patterns)\n\n def auth(self, password):\n \"\"\"Authenticate to server.\"\"\"\n fut = self.execute('AUTH', password)\n return wait_ok(fut)\n", "path": "aioredis/connection.py" } ]
[ { "content": "import types\nimport asyncio\nimport socket\nfrom functools import partial\nfrom collections import deque\nfrom contextlib import contextmanager\n\nfrom .util import (\n encode_command,\n wait_ok,\n _NOTSET,\n _set_result,\n _set_exception,\n coerced_keys_dict,\n decode,\n parse_url,\n )\nfrom .parser import Reader\nfrom .stream import open_connection, open_unix_connection\nfrom .errors import (\n ConnectionClosedError,\n ConnectionForcedCloseError,\n RedisError,\n ProtocolError,\n ReplyError,\n WatchVariableError,\n ReadOnlyError,\n MaxClientsError\n )\nfrom .pubsub import Channel\nfrom .abc import AbcChannel\nfrom .abc import AbcConnection\nfrom .log import logger\n\n\n__all__ = ['create_connection', 'RedisConnection']\n\nMAX_CHUNK_SIZE = 65536\n\n_PUBSUB_COMMANDS = (\n 'SUBSCRIBE', b'SUBSCRIBE',\n 'PSUBSCRIBE', b'PSUBSCRIBE',\n 'UNSUBSCRIBE', b'UNSUBSCRIBE',\n 'PUNSUBSCRIBE', b'PUNSUBSCRIBE',\n )\n\n\nasync def create_connection(address, *, db=None, password=None, ssl=None,\n encoding=None, parser=None, loop=None,\n timeout=None, connection_cls=None):\n \"\"\"Creates redis connection.\n\n Opens connection to Redis server specified by address argument.\n Address argument can be one of the following:\n * A tuple representing (host, port) pair for TCP connections;\n * A string representing either Redis URI or unix domain socket path.\n\n SSL argument is passed through to asyncio.create_connection.\n By default SSL/TLS is not used.\n\n By default any timeout is applied at the connection stage, however\n you can set a limitted time used trying to open a connection via\n the `timeout` Kw.\n\n Encoding argument can be used to decode byte-replies to strings.\n By default no decoding is done.\n\n Parser parameter can be used to pass custom Redis protocol parser class.\n By default hiredis.Reader is used (unless it is missing or platform\n is not CPython).\n\n Return value is RedisConnection instance or a connection_cls if it is\n given.\n\n This function is a coroutine.\n \"\"\"\n assert isinstance(address, (tuple, list, str)), \"tuple or str expected\"\n if isinstance(address, str):\n address, options = parse_url(address)\n logger.debug(\"Parsed Redis URI %r\", address)\n db = options.setdefault('db', db)\n password = options.setdefault('password', password)\n encoding = options.setdefault('encoding', encoding)\n timeout = options.setdefault('timeout', timeout)\n if 'ssl' in options:\n assert options['ssl'] or (not options['ssl'] and not ssl), (\n \"Conflicting ssl options are set\", options['ssl'], ssl)\n ssl = ssl or options['ssl']\n\n if timeout is not None and timeout <= 0:\n raise ValueError(\"Timeout has to be None or a number greater than 0\")\n\n if connection_cls:\n assert issubclass(connection_cls, AbcConnection),\\\n \"connection_class does not meet the AbcConnection contract\"\n cls = connection_cls\n else:\n cls = RedisConnection\n\n if loop is None:\n loop = asyncio.get_event_loop()\n\n if isinstance(address, (list, tuple)):\n host, port = address\n logger.debug(\"Creating tcp connection to %r\", address)\n reader, writer = await asyncio.wait_for(open_connection(\n host, port, limit=MAX_CHUNK_SIZE, ssl=ssl, loop=loop),\n timeout, loop=loop)\n sock = writer.transport.get_extra_info('socket')\n if sock is not None:\n sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n address = sock.getpeername()\n address = tuple(address[:2])\n else:\n logger.debug(\"Creating unix connection to %r\", address)\n reader, writer = await asyncio.wait_for(open_unix_connection(\n 
address, ssl=ssl, limit=MAX_CHUNK_SIZE, loop=loop),\n timeout, loop=loop)\n sock = writer.transport.get_extra_info('socket')\n if sock is not None:\n address = sock.getpeername()\n\n conn = cls(reader, writer, encoding=encoding,\n address=address, parser=parser,\n loop=loop)\n\n try:\n if password is not None:\n await conn.auth(password)\n if db is not None:\n await conn.select(db)\n except Exception:\n conn.close()\n await conn.wait_closed()\n raise\n return conn\n\n\nclass RedisConnection(AbcConnection):\n \"\"\"Redis connection.\"\"\"\n\n def __init__(self, reader, writer, *, address, encoding=None,\n parser=None, loop=None):\n if loop is None:\n loop = asyncio.get_event_loop()\n if parser is None:\n parser = Reader\n assert callable(parser), (\n \"Parser argument is not callable\", parser)\n self._reader = reader\n self._writer = writer\n self._address = address\n self._loop = loop\n self._waiters = deque()\n self._reader.set_parser(\n parser(protocolError=ProtocolError, replyError=ReplyError)\n )\n self._reader_task = asyncio.ensure_future(self._read_data(),\n loop=self._loop)\n self._close_msg = None\n self._db = 0\n self._closing = False\n self._closed = False\n self._close_state = asyncio.Event()\n self._reader_task.add_done_callback(lambda x: self._close_state.set())\n self._in_transaction = None\n self._transaction_error = None # XXX: never used?\n self._in_pubsub = 0\n self._pubsub_channels = coerced_keys_dict()\n self._pubsub_patterns = coerced_keys_dict()\n self._encoding = encoding\n self._pipeline_buffer = None\n\n def __repr__(self):\n return '<RedisConnection [db:{}]>'.format(self._db)\n\n async def _read_data(self):\n \"\"\"Response reader task.\"\"\"\n last_error = ConnectionClosedError(\n \"Connection has been closed by server\")\n while not self._reader.at_eof():\n try:\n obj = await self._reader.readobj()\n except asyncio.CancelledError:\n # NOTE: reader can get cancelled from `close()` method only.\n last_error = RuntimeError('this is unexpected')\n break\n except ProtocolError as exc:\n # ProtocolError is fatal\n # so connection must be closed\n if self._in_transaction is not None:\n self._transaction_error = exc\n last_error = exc\n break\n except Exception as exc:\n # NOTE: for QUIT command connection error can be received\n # before response\n last_error = exc\n break\n else:\n if (obj == b'' or obj is None) and self._reader.at_eof():\n logger.debug(\"Connection has been closed by server,\"\n \" response: %r\", obj)\n last_error = ConnectionClosedError(\"Reader at end of file\")\n break\n\n if isinstance(obj, MaxClientsError):\n last_error = obj\n break\n if self._in_pubsub:\n self._process_pubsub(obj)\n else:\n self._process_data(obj)\n self._closing = True\n self._loop.call_soon(self._do_close, last_error)\n\n def _process_data(self, obj):\n \"\"\"Processes command results.\"\"\"\n assert len(self._waiters) > 0, (type(obj), obj)\n waiter, encoding, cb = self._waiters.popleft()\n if isinstance(obj, RedisError):\n if isinstance(obj, ReplyError):\n if obj.args[0].startswith('READONLY'):\n obj = ReadOnlyError(obj.args[0])\n _set_exception(waiter, obj)\n if self._in_transaction is not None:\n self._transaction_error = obj\n else:\n if encoding is not None:\n try:\n obj = decode(obj, encoding)\n except Exception as exc:\n _set_exception(waiter, exc)\n return\n if cb is not None:\n try:\n obj = cb(obj)\n except Exception as exc:\n _set_exception(waiter, exc)\n return\n _set_result(waiter, obj)\n if self._in_transaction is not None:\n 
self._in_transaction.append((encoding, cb))\n\n def _process_pubsub(self, obj, *, process_waiters=True):\n \"\"\"Processes pubsub messages.\"\"\"\n kind, *args, data = obj\n if kind in (b'subscribe', b'unsubscribe'):\n chan, = args\n if process_waiters and self._in_pubsub and self._waiters:\n self._process_data(obj)\n if kind == b'unsubscribe':\n ch = self._pubsub_channels.pop(chan, None)\n if ch:\n ch.close()\n self._in_pubsub = data\n elif kind in (b'psubscribe', b'punsubscribe'):\n chan, = args\n if process_waiters and self._in_pubsub and self._waiters:\n self._process_data(obj)\n if kind == b'punsubscribe':\n ch = self._pubsub_patterns.pop(chan, None)\n if ch:\n ch.close()\n self._in_pubsub = data\n elif kind == b'message':\n chan, = args\n self._pubsub_channels[chan].put_nowait(data)\n elif kind == b'pmessage':\n pattern, chan = args\n self._pubsub_patterns[pattern].put_nowait((chan, data))\n elif kind == b'pong':\n if process_waiters and self._in_pubsub and self._waiters:\n self._process_data(data or b'PONG')\n else:\n logger.warning(\"Unknown pubsub message received %r\", obj)\n\n @contextmanager\n def _buffered(self):\n # XXX: we must ensure that no await happens\n # as long as we buffer commands.\n # Probably we can set some error-raising callback on enter\n # and remove it on exit\n # if some await happens in between -> throw an error.\n # This is creepy solution, 'cause some one might want to await\n # on some other source except redis.\n # So we must only raise error we someone tries to await\n # pending aioredis future\n # One of solutions is to return coroutine instead of a future\n # in `execute` method.\n # In a coroutine we can check if buffering is enabled and raise error.\n\n # TODO: describe in docs difference in pipeline mode for\n # conn.execute vs pipeline.execute()\n if self._pipeline_buffer is None:\n self._pipeline_buffer = bytearray()\n try:\n yield self\n buf = self._pipeline_buffer\n self._writer.write(buf)\n finally:\n self._pipeline_buffer = None\n else:\n yield self\n\n def execute(self, command, *args, encoding=_NOTSET):\n \"\"\"Executes redis command and returns Future waiting for the answer.\n\n Raises:\n * TypeError if any of args can not be encoded as bytes.\n * ReplyError on redis '-ERR' responses.\n * ProtocolError when response can not be decoded meaning connection\n is broken.\n * ConnectionClosedError when either client or server has closed the\n connection.\n \"\"\"\n if self._reader is None or self._reader.at_eof():\n msg = self._close_msg or \"Connection closed or corrupted\"\n raise ConnectionClosedError(msg)\n if command is None:\n raise TypeError(\"command must not be None\")\n if None in args:\n raise TypeError(\"args must not contain None\")\n command = command.upper().strip()\n is_pubsub = command in _PUBSUB_COMMANDS\n is_ping = command in ('PING', b'PING')\n if self._in_pubsub and not (is_pubsub or is_ping):\n raise RedisError(\"Connection in SUBSCRIBE mode\")\n elif is_pubsub:\n logger.warning(\"Deprecated. 
Use `execute_pubsub` method directly\")\n return self.execute_pubsub(command, *args)\n\n if command in ('SELECT', b'SELECT'):\n cb = partial(self._set_db, args=args)\n elif command in ('MULTI', b'MULTI'):\n cb = self._start_transaction\n elif command in ('EXEC', b'EXEC'):\n cb = partial(self._end_transaction, discard=False)\n encoding = None\n elif command in ('DISCARD', b'DISCARD'):\n cb = partial(self._end_transaction, discard=True)\n else:\n cb = None\n if encoding is _NOTSET:\n encoding = self._encoding\n fut = self._loop.create_future()\n if self._pipeline_buffer is None:\n self._writer.write(encode_command(command, *args))\n else:\n encode_command(command, *args, buf=self._pipeline_buffer)\n self._waiters.append((fut, encoding, cb))\n return fut\n\n def execute_pubsub(self, command, *channels):\n \"\"\"Executes redis (p)subscribe/(p)unsubscribe commands.\n\n Returns asyncio.gather coroutine waiting for all channels/patterns\n to receive answers.\n \"\"\"\n command = command.upper().strip()\n assert command in _PUBSUB_COMMANDS, (\n \"Pub/Sub command expected\", command)\n if self._reader is None or self._reader.at_eof():\n raise ConnectionClosedError(\"Connection closed or corrupted\")\n if None in set(channels):\n raise TypeError(\"args must not contain None\")\n if not len(channels):\n raise TypeError(\"No channels/patterns supplied\")\n is_pattern = len(command) in (10, 12)\n mkchannel = partial(Channel, is_pattern=is_pattern, loop=self._loop)\n channels = [ch if isinstance(ch, AbcChannel) else mkchannel(ch)\n for ch in channels]\n if not all(ch.is_pattern == is_pattern for ch in channels):\n raise ValueError(\"Not all channels {} match command {}\"\n .format(channels, command))\n cmd = encode_command(command, *(ch.name for ch in channels))\n res = []\n for ch in channels:\n fut = self._loop.create_future()\n res.append(fut)\n cb = partial(self._update_pubsub, ch=ch)\n self._waiters.append((fut, None, cb))\n if self._pipeline_buffer is None:\n self._writer.write(cmd)\n else:\n self._pipeline_buffer.extend(cmd)\n return asyncio.gather(*res, loop=self._loop)\n\n def close(self):\n \"\"\"Close connection.\"\"\"\n self._do_close(ConnectionForcedCloseError())\n\n def _do_close(self, exc):\n if self._closed:\n return\n self._closed = True\n self._closing = False\n self._writer.transport.close()\n self._reader_task.cancel()\n self._reader_task = None\n self._writer = None\n self._reader = None\n self._pipeline_buffer = None\n\n if exc is not None:\n self._close_msg = str(exc)\n\n while self._waiters:\n waiter, *spam = self._waiters.popleft()\n logger.debug(\"Cancelling waiter %r\", (waiter, spam))\n if exc is None:\n _set_exception(waiter, ConnectionForcedCloseError())\n else:\n _set_exception(waiter, exc)\n while self._pubsub_channels:\n _, ch = self._pubsub_channels.popitem()\n logger.debug(\"Closing pubsub channel %r\", ch)\n ch.close(exc)\n while self._pubsub_patterns:\n _, ch = self._pubsub_patterns.popitem()\n logger.debug(\"Closing pubsub pattern %r\", ch)\n ch.close(exc)\n\n @property\n def closed(self):\n \"\"\"True if connection is closed.\"\"\"\n closed = self._closing or self._closed\n if not closed and self._reader and self._reader.at_eof():\n self._closing = closed = True\n self._loop.call_soon(self._do_close, None)\n return closed\n\n async def wait_closed(self):\n \"\"\"Coroutine waiting until connection is closed.\"\"\"\n await self._close_state.wait()\n\n @property\n def db(self):\n \"\"\"Currently selected db index.\"\"\"\n return self._db\n\n @property\n def 
encoding(self):\n \"\"\"Current set codec or None.\"\"\"\n return self._encoding\n\n @property\n def address(self):\n \"\"\"Redis server address, either host-port tuple or str.\"\"\"\n return self._address\n\n def select(self, db):\n \"\"\"Change the selected database for the current connection.\"\"\"\n if not isinstance(db, int):\n raise TypeError(\"DB must be of int type, not {!r}\".format(db))\n if db < 0:\n raise ValueError(\"DB must be greater or equal 0, got {!r}\"\n .format(db))\n fut = self.execute('SELECT', db)\n return wait_ok(fut)\n\n def _set_db(self, ok, args):\n assert ok in {b'OK', 'OK'}, (\"Unexpected result of SELECT\", ok)\n self._db = args[0]\n return ok\n\n def _start_transaction(self, ok):\n assert self._in_transaction is None, (\n \"Connection is already in transaction\", self._in_transaction)\n self._in_transaction = deque()\n self._transaction_error = None\n return ok\n\n def _end_transaction(self, obj, discard):\n assert self._in_transaction is not None, (\n \"Connection is not in transaction\", obj)\n self._transaction_error = None\n recall, self._in_transaction = self._in_transaction, None\n recall.popleft() # ignore first (its _start_transaction)\n if discard:\n return obj\n assert isinstance(obj, list) or (obj is None and not discard), (\n \"Unexpected MULTI/EXEC result\", obj, recall)\n # TODO: need to be able to re-try transaction\n if obj is None:\n err = WatchVariableError(\"WATCH variable has changed\")\n obj = [err] * len(recall)\n assert len(obj) == len(recall), (\n \"Wrong number of result items in mutli-exec\", obj, recall)\n res = []\n for o, (encoding, cb) in zip(obj, recall):\n if not isinstance(o, RedisError):\n try:\n if encoding:\n o = decode(o, encoding)\n if cb:\n o = cb(o)\n except Exception as err:\n res.append(err)\n continue\n res.append(o)\n return res\n\n def _update_pubsub(self, obj, *, ch):\n kind, *pattern, channel, subscriptions = obj\n self._in_pubsub, was_in_pubsub = subscriptions, self._in_pubsub\n # XXX: the channels/patterns storage should be refactored.\n # if code which supposed to read from channel/pattern\n # failed (exception in reader or else) than\n # the channel object will still reside in memory\n # and leak memory (messages will be put in queue).\n if kind == b'subscribe' and channel not in self._pubsub_channels:\n self._pubsub_channels[channel] = ch\n elif kind == b'psubscribe' and channel not in self._pubsub_patterns:\n self._pubsub_patterns[channel] = ch\n if not was_in_pubsub:\n self._process_pubsub(obj, process_waiters=False)\n return obj\n\n @property\n def in_transaction(self):\n \"\"\"Set to True when MULTI command was issued.\"\"\"\n return self._in_transaction is not None\n\n @property\n def in_pubsub(self):\n \"\"\"Indicates that connection is in PUB/SUB mode.\n\n Provides the number of subscribed channels.\n \"\"\"\n return self._in_pubsub\n\n @property\n def pubsub_channels(self):\n \"\"\"Returns read-only channels dict.\"\"\"\n return types.MappingProxyType(self._pubsub_channels)\n\n @property\n def pubsub_patterns(self):\n \"\"\"Returns read-only patterns dict.\"\"\"\n return types.MappingProxyType(self._pubsub_patterns)\n\n def auth(self, password):\n \"\"\"Authenticate to server.\"\"\"\n fut = self.execute('AUTH', password)\n return wait_ok(fut)\n", "path": "aioredis/connection.py" } ]
diff --git a/CHANGES/657.bugfix b/CHANGES/657.bugfix new file mode 100644 index 000000000..a8e35977d --- /dev/null +++ b/CHANGES/657.bugfix @@ -0,0 +1 @@ +Fix transaction data decoding diff --git a/CONTRIBUTORS.txt b/CONTRIBUTORS.txt index dc5784709..ea3e2e37f 100644 --- a/CONTRIBUTORS.txt +++ b/CONTRIBUTORS.txt @@ -47,3 +47,4 @@ Volodymyr Hotsyk Youngmin Koo <youngminz> Dima Kit <curiouscod3> +Dmitry Vasilishin <dmvass> diff --git a/aioredis/connection.py b/aioredis/connection.py index 7c00b26c9..2fce4d905 100644 --- a/aioredis/connection.py +++ b/aioredis/connection.py @@ -336,6 +336,7 @@ def execute(self, command, *args, encoding=_NOTSET): cb = self._start_transaction elif command in ('EXEC', b'EXEC'): cb = partial(self._end_transaction, discard=False) + encoding = None elif command in ('DISCARD', b'DISCARD'): cb = partial(self._end_transaction, discard=True) else: diff --git a/tests/transaction_commands_test.py b/tests/transaction_commands_test.py index 4582d636b..598da2f34 100644 --- a/tests/transaction_commands_test.py +++ b/tests/transaction_commands_test.py @@ -215,13 +215,16 @@ async def test_global_encoding(redis, create_redis, server, loop): tr = redis.multi_exec() fut1 = tr.get('key') fut2 = tr.get('key', encoding='utf-8') - fut3 = tr.hgetall('hash-key', encoding='utf-8') + fut3 = tr.get('key', encoding=None) + fut4 = tr.hgetall('hash-key', encoding='utf-8') await tr.execute() res = await fut1 assert res == 'value' res = await fut2 assert res == 'value' res = await fut3 + assert res == b'value' + res = await fut4 assert res == {'foo': 'val1', 'bar': 'val2'}
Transaction data decoding is incorrect
Transaction data is always decoded twice if a connection encoding was defined.

Test case:
```python
@pytest.mark.run_loop
async def test_global_encoding(redis, create_redis, server, loop):
    redis = await create_redis(
        server.tcp_address, loop=loop, encoding='utf-8')
    res = await redis.set('key', 'value')
    assert res is True
    res = await redis.hmset(
        'hash-key', 'foo', 'val1', 'bar', 'val2')
    assert res is True
    tr = redis.multi_exec()
    fut1 = tr.get('key')
    fut2 = tr.get('key', encoding='utf-8')
    fut3 = tr.get('key', encoding=None)
    fut4 = tr.hgetall('hash-key', encoding='utf-8')
    await tr.execute()
    res = await fut1
    assert res == 'value'
    res = await fut2
    assert res == 'value'
    res = await fut3
    assert res == b'value'
    res = await fut4
    assert res == {'foo': 'val1', 'bar': 'val2'}
```

Trace of `util.decode` calls:
```python
decode(b'PONG', utf-8)
decode(b'OK', utf-8)
decode(b'OK', utf-8)
decode(b'OK', utf-8)
decode(b'QUEUED', utf-8)
decode(b'QUEUED', utf-8)
decode(b'QUEUED', utf-8)
decode([b'value', b'value', b'value', [b'foo', b'val1', b'bar', b'val2']], utf-8)
decode(b'value', utf-8)
decode(b'value', utf-8)
decode(b'value', utf-8)
decode([b'foo', b'val1', b'bar', b'val2'], utf-8)
decode(b'foo', utf-8)
decode(b'val1', utf-8)
decode(b'bar', utf-8)
decode(b'val2', utf-8)
decode(value, utf-8)
decode(value, utf-8)
decode(['foo', 'val1', 'bar', 'val2'], utf-8)
decode(foo, utf-8)
decode(val1, utf-8)
decode(bar, utf-8)
decode(val2, utf-8)
```
You can see that the `multi-exec` response `[b'value', b'value', b'value', [b'foo', b'val1', b'bar', b'val2']]` was decoded twice. The decoding in `RedisConnection._end_transaction` makes no sense here because the data has already been decoded in `RedisConnection._process_data`.
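To make the double-decode path easier to see, here is a minimal, self-contained sketch. It is plain Python, not the aioredis code itself: the `decode` helper below only imitates the recursive behaviour of `aioredis.util.decode`, and `end_transaction` only stands in for the per-command decode step of `_end_transaction`. It shows how an EXEC reply that `_process_data` has already decoded with the connection-wide encoding gets decoded again, and why forcing `encoding = None` for the EXEC command lets a queued command issued with `encoding=None` keep its raw bytes:

```python
# Sketch only: mimics the decode flow described above, not aioredis internals.

def decode(obj, encoding):
    """Recursively decode bytes, imitating aioredis.util.decode."""
    if isinstance(obj, bytes):
        return obj.decode(encoding)
    if isinstance(obj, list):
        return [decode(o, encoding) for o in obj]
    return obj

# EXEC reply as it arrives from the server: one raw item per queued command.
exec_reply = [b'value', b'value']
# Per-command (encoding, callback) pairs recorded while queueing, like the
# `recall` deque in _end_transaction; the second command asked for raw bytes.
recall = [('utf-8', None), (None, None)]

def end_transaction(obj, recall):
    """Apply per-command encodings, like the decode step in _end_transaction."""
    return [decode(o, enc) if enc else o for o, (enc, _cb) in zip(obj, recall)]

# Old behaviour: _process_data first decodes the whole EXEC reply with the
# connection-wide encoding, so the encoding=None future loses its bytes.
old = end_transaction(decode(exec_reply, 'utf-8'), recall)
assert old == ['value', 'value']   # second item should have stayed as bytes

# Fixed behaviour (encoding = None for EXEC): the aggregated reply reaches
# _end_transaction untouched and each item is decoded exactly once.
new = end_transaction(exec_reply, recall)
assert new == ['value', b'value']
```

This mirrors why the one-line fix in the diff sets `encoding = None` only for EXEC: the connection-wide decode is skipped for the aggregated reply, while each queued command still honours the encoding it was issued with.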
google__personfinder-328
[ { "content": "#!/usr/bin/python2.7\n# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The main request handler. All dynamic requests except for remote_api are\nhandled by this handler, which dispatches to all other dynamic handlers.\"\"\"\n\nimport django_setup # always keep this first\n\nimport mimetypes\nimport re\nimport os\nimport urlparse\n\nfrom google.appengine.api import memcache\nfrom google.appengine.api import users\nfrom google.appengine.ext import webapp\n\nimport config\nimport const\nimport django.utils.html\nimport logging\nimport model\nimport pfif\nimport resources\nimport utils\nimport user_agents\nimport setup_pf\n\n\nclass AdminEnv(object):\n \"\"\"Template variables for admin pages.\"\"\"\n\n def __init__(self, request):\n self.request = request\n self.user = users.get_current_user()\n self.logout_url = users.create_logout_url(self.request.url)\n\n @property\n def repo_options(self):\n \"\"\"This is different from env.repo_options because this contains all\n repositories including deactivated ones.\n\n This is defined as a property so that it is evaluated lazily only\n when necessary.\n \"\"\"\n try:\n return [\n utils.Struct(\n repo=repo,\n url=utils.get_repo_url(self.request, repo) + '/admin')\n for repo in sorted(model.Repo.list())]\n except:\n # Logs the exception here because exceptions thrown during template\n # variable evaluation is silently ignored. 
Note that\n # logging.exception() logs the current exception by default.\n logging.exception('Exception thrown')\n return None\n\n\n# When no action or repo is specified, redirect to this action.\nHOME_ACTION = 'home.html'\n\n# Map of URL actions to Python module and class names.\n# TODO(kpy): Remove the need for this configuration information, either by\n# regularizing the module and class names or adding a URL attribute to handlers.\nHANDLER_CLASSES = dict((x, x.replace('/', '_') + '.Handler') for x in [\n 'start',\n 'query',\n 'results',\n 'create',\n 'view',\n 'multiview',\n 'reveal',\n 'photo',\n 'embed',\n 'extend',\n 'gadget',\n 'delete',\n 'flag_note',\n 'restore',\n 'subscribe',\n 'unsubscribe',\n 'disable_notes',\n 'confirm_disable_notes',\n 'enable_notes',\n 'confirm_enable_notes',\n 'post_flagged_note',\n 'confirm_post_flagged_note',\n 'third_party_search',\n 'admin',\n 'admin/create_repo',\n 'admin/dashboard',\n 'admin/delete_record',\n 'admin/resources',\n 'admin/review',\n 'admin/statistics',\n 'css',\n 'add_note',\n 'tos',\n])\n\n# Exceptional cases where the module name doesn't match the URL.\nHANDLER_CLASSES[''] = 'start.Handler'\nHANDLER_CLASSES['admin/api_keys'] = 'admin_api_keys.CreateOrUpdateApiKey'\nHANDLER_CLASSES['admin/api_keys/list'] = 'admin_api_keys.ListApiKeys'\nHANDLER_CLASSES['api/import'] = 'api.Import'\nHANDLER_CLASSES['api/import/notes'] = 'api.Import'\nHANDLER_CLASSES['api/import/persons'] = 'api.Import'\nHANDLER_CLASSES['api/read'] = 'api.Read'\nHANDLER_CLASSES['api/write'] = 'api.Write'\nHANDLER_CLASSES['api/search'] = 'api.Search'\nHANDLER_CLASSES['api/subscribe'] = 'api.Subscribe'\nHANDLER_CLASSES['api/unsubscribe'] = 'api.Unsubscribe'\nHANDLER_CLASSES['api/stats'] = 'api.Stats'\nHANDLER_CLASSES['api/handle_sms'] = 'api.HandleSMS'\nHANDLER_CLASSES['api/photo_upload'] = 'api.PhotoUpload'\nHANDLER_CLASSES['feeds/repo'] = 'feeds.Repo'\nHANDLER_CLASSES['feeds/note'] = 'feeds.Note'\nHANDLER_CLASSES['feeds/person'] = 'feeds.Person'\nHANDLER_CLASSES['sitemap'] = 'sitemap.SiteMap'\nHANDLER_CLASSES['sitemap/ping'] = 'sitemap.SiteMapPing'\nHANDLER_CLASSES['tasks/count/note'] = 'tasks.CountNote'\nHANDLER_CLASSES['tasks/count/person'] = 'tasks.CountPerson'\nHANDLER_CLASSES['tasks/count/reindex'] = 'tasks.Reindex'\nHANDLER_CLASSES['tasks/count/update_dead_status'] = 'tasks.UpdateDeadStatus'\nHANDLER_CLASSES['tasks/count/update_status'] = 'tasks.UpdateStatus'\nHANDLER_CLASSES['tasks/delete_expired'] = 'tasks.DeleteExpired'\nHANDLER_CLASSES['tasks/delete_old'] = 'tasks.DeleteOld'\nHANDLER_CLASSES['tasks/clean_up_in_test_mode'] = 'tasks.CleanUpInTestMode'\nHANDLER_CLASSES['tasks/notify_many_unreviewed_notes'] = 'tasks.NotifyManyUnreviewedNotes'\n\ndef is_development_server():\n \"\"\"Returns True if the app is running in development.\"\"\"\n server = os.environ.get('SERVER_SOFTWARE', '')\n return 'Development' in server\n\ndef is_cron_task(request):\n \"\"\"Returns True if the request is from appengine cron.\"\"\"\n return 'X-AppEngine-Cron' in request.headers\n\ndef is_task_queue_task(request):\n \"\"\"Returns True if the request is from the appengine task queue.\"\"\"\n return 'X-AppEngine-TaskName' in request.headers\n\ndef get_repo_and_action(request):\n \"\"\"Determines the repo and action for a request. 
The action is the part\n of the URL path after the repo, with no leading or trailing slashes.\"\"\"\n scheme, netloc, path, _, _ = urlparse.urlsplit(request.url)\n parts = path.lstrip('/').split('/')\n\n # Depending on whether we're serving from appspot directly or\n # google.org/personfinder we could have /global or /personfinder/global\n # as the 'global' prefix.\n if parts[0] == 'personfinder':\n parts.pop(0)\n repo = parts and parts.pop(0) or None\n action = '/'.join(parts)\n if repo == 'global':\n repo = None\n return repo, action\n\ndef select_charset(request):\n \"\"\"Given a request, chooses a charset for encoding the response.\n\n If the selected charset is UTF-8, it always returns\n 'utf-8' (const.CHARSET_UTF8), not 'utf8', 'UTF-8', etc.\n \"\"\"\n # We assume that any client that doesn't support UTF-8 will specify a\n # preferred encoding in the Accept-Charset header, and will use this\n # encoding for content, query parameters, and form data. We make this\n # assumption across all repositories.\n\n # Get a list of the charsets that the client supports.\n if request.get('charsets'):\n charsets = request.get('charsets').split(',')\n elif user_agents.prefer_sjis_charset(request):\n # Some Japanese feature phones don't (fully) support UTF-8.\n # They only support Shift_JIS. But they may not send Accept-Charset\n # header. Also, we haven't confirmed, but there may be phones whose\n # Accept-Charset header includes UTF-8 but its UTF-8 support is buggy.\n # So we always use Shift_JIS regardless of Accept-Charset header.\n charsets = ['Shift_JIS']\n else:\n charsets = request.accept_charset.best_matches()\n\n # Always prefer UTF-8 if the client supports it.\n for charset in charsets:\n if charset.lower().replace('_', '-') in ['utf8', 'utf-8']:\n return const.CHARSET_UTF8\n\n # Otherwise, look for a requested charset that Python supports.\n for charset in charsets:\n try:\n 'xyz'.encode(charset, 'replace') # test if charset is known\n return charset\n except:\n continue\n\n # If Python doesn't know any of the requested charsets, use UTF-8.\n return const.CHARSET_UTF8\n\ndef select_lang(request, config=None):\n \"\"\"Selects the best language to use for a given request. 
The 'lang' query\n parameter has priority, then the django_language cookie, then the first\n language in the language menu, then the default setting.\"\"\"\n default_lang = (config and\n config.language_menu_options and\n config.language_menu_options[0])\n lang = (request.get('lang') or\n request.cookies.get('django_language', None) or\n default_lang or\n django_setup.LANGUAGE_CODE)\n lang = re.sub('[^A-Za-z0-9-]', '', lang)\n return const.LANGUAGE_SYNONYMS.get(lang, lang)\n\ndef get_repo_options(request, lang):\n \"\"\"Returns a list of the names and titles of the launched repositories.\"\"\"\n options = []\n for repo in model.Repo.list_launched():\n titles = config.get_for_repo(repo, 'repo_titles', {})\n default_title = (titles.values() or ['?'])[0]\n title = titles.get(lang, titles.get('en', default_title))\n url = utils.get_repo_url(request, repo)\n test_mode = config.get_for_repo(repo, 'test_mode')\n options.append(utils.Struct(repo=repo, title=title, url=url,\n test_mode=test_mode))\n return options\n\ndef get_language_options(request, config, current_lang):\n \"\"\"Returns a list of information needed to generate the language menu.\"\"\"\n primary_langs = (config and config.language_menu_options) or ['en']\n all_langs = sorted(\n const.LANGUAGE_ENDONYMS.keys(),\n key=lambda s: const.LANGUAGE_ENDONYMS[s])\n return {\n 'primary':\n [get_language_option(request, lang, lang == current_lang)\n for lang in primary_langs],\n 'all':\n # We put both 'primary' and 'all' languages into a single <select>\n # box (See app/resources/language-menu.html.template).\n # If current_lang is in the primary languages, we mark the\n # language as is_selected in 'primary', not in 'all', to make sure\n # a single option is selected in the <select> box.\n [get_language_option(\n request, lang,\n lang == current_lang and lang not in primary_langs)\n for lang in all_langs],\n }\n\ndef get_language_option(request, lang, is_selected):\n return {\n 'lang': lang,\n 'endonym': const.LANGUAGE_ENDONYMS.get(lang, '?'),\n 'url': utils.set_url_param(request.url, 'lang', lang),\n 'is_selected': is_selected,\n }\n\ndef get_localized_message(localized_messages, lang, default):\n \"\"\"Gets the localized message for lang from a dictionary that maps language\n codes to localized messages. Falls back to English if language 'lang' is\n not available, or to a default message if English is not available.\"\"\"\n if not isinstance(localized_messages, dict):\n return default\n return localized_messages.get(lang, localized_messages.get('en', default))\n\ndef get_hidden_input_tags_for_preserved_query_params(request):\n \"\"\"Gets HTML with <input type=\"hidden\"> tags to preserve query parameters\n listed in utils.PRESERVED_QUERY_PARAM_NAMES e.g. 
\"ui\".\"\"\"\n tags_str = ''\n for name in utils.PRESERVED_QUERY_PARAM_NAMES:\n value = request.get(name)\n if value:\n tags_str += '<input type=\"hidden\" name=\"%s\" value=\"%s\">\\n' % (\n django.utils.html.escape(name),\n django.utils.html.escape(value))\n return tags_str\n\ndef setup_env(request):\n \"\"\"Constructs the 'env' object, which contains various template variables\n that are commonly used by most handlers.\"\"\"\n env = utils.Struct()\n env.repo, env.action = get_repo_and_action(request)\n env.config = config.Configuration(env.repo or '*')\n # TODO(ryok): Rename to local_test_mode or something alike to disambiguate\n # better from repository's test_mode.\n env.test_mode = (request.remote_addr == '127.0.0.1' and\n request.get('test_mode'))\n\n env.analytics_id = config.get('analytics_id')\n env.maps_api_key = config.get('maps_api_key')\n\n # Internationalization-related stuff.\n env.charset = select_charset(request)\n env.lang = select_lang(request, env.config)\n env.rtl = env.lang in const.LANGUAGES_BIDI\n env.virtual_keyboard_layout = const.VIRTUAL_KEYBOARD_LAYOUTS.get(env.lang)\n\n # Used for parsing query params. This must be done before accessing any\n # query params which may have multi-byte value, such as \"given_name\" below\n # in this function.\n request.charset = env.charset\n\n # Determine the resource bundle to use.\n env.default_resource_bundle = config.get('default_resource_bundle', '1')\n env.resource_bundle = (request.cookies.get('resource_bundle', '') or\n env.default_resource_bundle)\n\n # Information about the request.\n env.url = utils.set_url_param(request.url, 'lang', env.lang)\n env.scheme, env.netloc, env.path, _, _ = urlparse.urlsplit(request.url)\n env.force_https = False\n env.domain = env.netloc.split(':')[0]\n env.global_url = utils.get_repo_url(request, 'global')\n\n # Commonly used information that's rendered or localized for templates.\n env.language_options = get_language_options(request, env.config, env.lang)\n env.repo_options = get_repo_options(request, env.lang)\n env.expiry_options = [\n utils.Struct(value=value, text=const.PERSON_EXPIRY_TEXT[value])\n for value in sorted(const.PERSON_EXPIRY_TEXT.keys(), key=int)\n ]\n env.status_options = [\n utils.Struct(value=value, text=const.NOTE_STATUS_TEXT[value])\n for value in pfif.NOTE_STATUS_VALUES\n if (value != 'believed_dead' or\n not env.config or env.config.allow_believed_dead_via_ui)\n ]\n env.hidden_input_tags_for_preserved_query_params = (\n get_hidden_input_tags_for_preserved_query_params(request))\n\n ui_param = request.get('ui', '').strip().lower()\n\n # Interprets \"small\" and \"style\" parameters for backward compatibility.\n # TODO(ichikawa): Delete these in near future when we decide to drop\n # support of these parameters.\n small_param = request.get('small', '').strip().lower()\n style_param = request.get('style', '').strip().lower()\n if not ui_param and small_param == 'yes':\n ui_param = 'small'\n elif not ui_param and style_param:\n ui_param = style_param\n\n if ui_param:\n env.ui = ui_param\n elif user_agents.is_jp_tier2_mobile_phone(request):\n env.ui = 'light'\n else:\n env.ui = 'default'\n\n # UI configurations.\n #\n # Enables features which require JavaScript.\n env.enable_javascript = True\n # Enables operations which requires Captcha.\n env.enable_captcha = True\n # Enables photo upload.\n env.enable_photo_upload = True\n # Enables to flag/unflag notes as spam, and to reveal spam notes.\n env.enable_spam_ops = True\n # Enables duplicate marking mode.\n 
env.enable_dup_mode = True\n # Shows a logo on top of the page.\n env.show_logo = True\n # Shows language menu.\n env.show_language_menu = True\n # Uses short labels for buttons.\n env.use_short_buttons = False\n # Optional \"target\" attribute for links to non-small pages.\n env.target_attr = ''\n # Shows record IDs in the results page.\n env.show_record_ids_in_results = True\n\n if env.ui == 'small':\n env.show_logo = False\n env.target_attr = ' target=\"_blank\" '\n\n elif env.ui == 'light':\n # Disables features which requires JavaScript. Some feature phones\n # doesn't support JavaScript.\n env.enable_javascript = False\n # Disables operations which requires Captcha because Captcha requires\n # JavaScript.\n env.enable_captcha = False\n # Uploading is often not supported in feature phones.\n env.enable_photo_upload = False\n # Disables spam operations because it requires JavaScript and\n # supporting more pages on ui=light.\n env.enable_spam_ops = False\n # Disables duplicate marking mode because it doesn't support\n # small screens and it requires JavaScript.\n env.enable_dup_mode = False\n # Hides the logo on the top to save the space. Also, the logo links\n # to the global page which doesn't support small screens.\n env.show_logo = False\n # Hides language menu because the menu in the current position is\n # annoying in feature phones.\n # TODO(ichikawa): Consider layout of the language menu.\n env.show_language_menu = False\n # Too long buttons are not fully shown in some feature phones.\n env.use_short_buttons = True\n # To make it simple.\n env.show_record_ids_in_results = False\n\n env.back_chevron = u'\\xab'\n back_chevron_in_charset = True\n try:\n env.back_chevron.encode(env.charset)\n except UnicodeEncodeError:\n # u'\\xab' is not in the charset (e.g. Shift_JIS).\n back_chevron_in_charset = False\n if not back_chevron_in_charset or env.ui == 'light':\n # Use ASCII characters on ui=light too because some feature phones\n # support UTF-8 but don't render UTF-8 symbols such as u'\\xab'.\n env.back_chevron = u'<<'\n\n env.enable_maps = (\n env.enable_javascript\n and not env.config.zero_rating_mode\n and env.maps_api_key)\n env.enable_analytics = (\n env.enable_javascript\n and not env.config.zero_rating_mode\n and env.analytics_id)\n env.enable_translate = (\n env.enable_javascript\n and not env.config.zero_rating_mode\n and env.config.translate_api_key)\n\n env.admin = AdminEnv(request)\n\n # Repo-specific information.\n if env.repo:\n # repo_url is the root URL for the repository.\n env.repo_url = utils.get_repo_url(request, env.repo)\n # start_url is like repo_url but preserves parameters such as 'ui'.\n env.start_url = utils.get_url(request, env.repo, '')\n # URL of the link in the heading. The link on ui=small links to the\n # normal UI.\n env.repo_title_url = (\n env.repo_url if env.ui == 'small' else env.start_url)\n # URL to force default UI. 
Note that we show ui=light version in some\n # user agents when ui parameter is not specified.\n env.default_ui_url = utils.get_url(request, env.repo, '', ui='default')\n env.repo_path = urlparse.urlsplit(env.repo_url)[2]\n env.repo_title = get_localized_message(\n env.config.repo_titles, env.lang, '?')\n env.start_page_custom_html = get_localized_message(\n env.config.start_page_custom_htmls, env.lang, '')\n env.results_page_custom_html = get_localized_message(\n env.config.results_page_custom_htmls, env.lang, '')\n env.view_page_custom_html = get_localized_message(\n env.config.view_page_custom_htmls, env.lang, '')\n env.seek_query_form_custom_html = get_localized_message(\n env.config.seek_query_form_custom_htmls, env.lang, '')\n env.footer_custom_html = get_localized_message(\n env.config.footer_custom_htmls, env.lang, '')\n # If the repository is deactivated, we should not show test mode\n # notification.\n env.repo_test_mode = (\n env.config.test_mode and not env.config.deactivated)\n env.force_https = env.config.force_https\n\n env.params_full_name = request.get('full_name', '').strip()\n if not env.params_full_name:\n # Preformat the name from 'given_name' and 'family_name' parameters.\n given_name = request.get('given_name', '').strip()\n family_name = request.get('family_name', '').strip()\n env.params_full_name = utils.get_full_name(\n given_name, family_name, env.config)\n\n return env\n\ndef flush_caches(*keywords):\n \"\"\"Flushes the specified set of caches. Pass '*' to flush everything.\"\"\"\n if '*' in keywords or 'resource' in keywords:\n resources.clear_caches()\n if '*' in keywords or 'memcache' in keywords:\n memcache.flush_all()\n if '*' in keywords or 'config' in keywords:\n config.cache.flush()\n for keyword in keywords:\n if keyword.startswith('config/'):\n config.cache.delete(keyword[7:])\n\n\nclass Main(webapp.RequestHandler):\n \"\"\"The main request handler. All dynamic requests except for remote_api are\n handled by this handler, which dispatches to all other dynamic handlers.\"\"\"\n\n def initialize(self, request, response):\n webapp.RequestHandler.initialize(self, request, response)\n\n # If requested, set the clock before doing anything clock-related.\n # Only works on localhost for testing. 
Specify ?utcnow=1293840000 to\n # set the clock to 2011-01-01, or ?utcnow=real to revert to real time.\n utcnow = request.get('utcnow')\n if request.remote_addr == '127.0.0.1' and utcnow:\n if utcnow == 'real':\n utils.set_utcnow_for_test(None)\n else:\n utils.set_utcnow_for_test(float(utcnow))\n\n # If requested, flush caches before we touch anything that uses them.\n flush_caches(*request.get('flush', '').split(','))\n\n # Gather commonly used information into self.env.\n self.env = setup_env(request)\n\n # Force a redirect if requested, except where https is not supported:\n # - for cron jobs\n # - for task queue jobs\n # - in development\n if (self.env.force_https and self.env.scheme == 'http'\n and not is_cron_task(self.request)\n and not is_task_queue_task(self.request)\n and not is_development_server()):\n self.redirect(self.env.url.replace('http:', 'https:'))\n\n # Activate the selected language.\n response.headers['Content-Language'] = self.env.lang\n response.headers['Set-Cookie'] = \\\n 'django_language=%s; path=/' % self.env.lang\n django_setup.activate(self.env.lang)\n\n # Activate the appropriate resource bundle.\n resources.set_active_bundle_name(self.env.resource_bundle)\n\n def serve(self):\n request, response, env = self.request, self.response, self.env\n\n # If the Person Finder instance has not been initialized yet,\n # prepend to any served page a warning and a link to the admin\n # page where the datastore can be initialized.\n if not config.get('initialized'):\n if request.get('operation') == 'setup_datastore':\n setup_pf.setup_datastore()\n self.redirect(env.global_url + '/')\n return\n else:\n get_vars = lambda: {'env': env}\n content = resources.get_rendered('setup_datastore.html', env.lang,\n (env.repo, env.charset), get_vars)\n response.out.write(content)\n\n if not env.action and not env.repo:\n # Redirect to the default home page.\n self.redirect(env.global_url + '/' + HOME_ACTION)\n elif env.action in HANDLER_CLASSES:\n # Dispatch to the handler for the specified action.\n module_name, class_name = HANDLER_CLASSES[env.action].split('.')\n handler = getattr(__import__(module_name), class_name)(\n request, response, env)\n getattr(handler, request.method.lower())() # get() or post()\n elif env.action.endswith('.template'):\n # Don't serve template source code.\n response.set_status(404)\n response.out.write('Not found')\n else:\n # Serve a static page or file.\n env.robots_ok = True\n get_vars = lambda: {'env': env, 'config': env.config}\n content = resources.get_rendered(\n env.action, env.lang, (env.repo, env.charset), get_vars)\n if content is None:\n response.set_status(404)\n response.out.write('Not found')\n else:\n content_type, encoding = mimetypes.guess_type(env.action)\n response.headers['Content-Type'] = (\n (content_type or 'text/plain') +\n ('; charset=%s' % encoding if encoding else ''))\n response.out.write(content)\n\n def get(self):\n self.serve()\n\n def post(self):\n self.serve()\n\n def head(self):\n self.request.method = 'GET'\n self.serve()\n self.response.clear()\n\nif __name__ == '__main__':\n webapp.util.run_wsgi_app(webapp.WSGIApplication([('.*', Main)]))\n", "path": "app/main.py" } ]
[ { "content": "#!/usr/bin/python2.7\n# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The main request handler. All dynamic requests except for remote_api are\nhandled by this handler, which dispatches to all other dynamic handlers.\"\"\"\n\nimport django_setup # always keep this first\n\nimport mimetypes\nimport re\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), 'vendors'))\n\nimport urlparse\n\nfrom google.appengine.api import memcache\nfrom google.appengine.api import users\nfrom google.appengine.ext import webapp\n\nimport config\nimport const\nimport django.utils.html\nimport logging\nimport model\nimport pfif\nimport resources\nimport utils\nimport user_agents\nimport setup_pf\n\n\nclass AdminEnv(object):\n \"\"\"Template variables for admin pages.\"\"\"\n\n def __init__(self, request):\n self.request = request\n self.user = users.get_current_user()\n self.logout_url = users.create_logout_url(self.request.url)\n\n @property\n def repo_options(self):\n \"\"\"This is different from env.repo_options because this contains all\n repositories including deactivated ones.\n\n This is defined as a property so that it is evaluated lazily only\n when necessary.\n \"\"\"\n try:\n return [\n utils.Struct(\n repo=repo,\n url=utils.get_repo_url(self.request, repo) + '/admin')\n for repo in sorted(model.Repo.list())]\n except:\n # Logs the exception here because exceptions thrown during template\n # variable evaluation is silently ignored. 
Note that\n # logging.exception() logs the current exception by default.\n logging.exception('Exception thrown')\n return None\n\n\n# When no action or repo is specified, redirect to this action.\nHOME_ACTION = 'home.html'\n\n# Map of URL actions to Python module and class names.\n# TODO(kpy): Remove the need for this configuration information, either by\n# regularizing the module and class names or adding a URL attribute to handlers.\nHANDLER_CLASSES = dict((x, x.replace('/', '_') + '.Handler') for x in [\n 'start',\n 'query',\n 'results',\n 'create',\n 'view',\n 'multiview',\n 'reveal',\n 'photo',\n 'embed',\n 'extend',\n 'gadget',\n 'delete',\n 'flag_note',\n 'restore',\n 'subscribe',\n 'unsubscribe',\n 'disable_notes',\n 'confirm_disable_notes',\n 'enable_notes',\n 'confirm_enable_notes',\n 'post_flagged_note',\n 'confirm_post_flagged_note',\n 'third_party_search',\n 'admin',\n 'admin/create_repo',\n 'admin/dashboard',\n 'admin/delete_record',\n 'admin/resources',\n 'admin/review',\n 'admin/statistics',\n 'css',\n 'add_note',\n 'tos',\n])\n\n# Exceptional cases where the module name doesn't match the URL.\nHANDLER_CLASSES[''] = 'start.Handler'\nHANDLER_CLASSES['admin/api_keys'] = 'admin_api_keys.CreateOrUpdateApiKey'\nHANDLER_CLASSES['admin/api_keys/list'] = 'admin_api_keys.ListApiKeys'\nHANDLER_CLASSES['api/import'] = 'api.Import'\nHANDLER_CLASSES['api/import/notes'] = 'api.Import'\nHANDLER_CLASSES['api/import/persons'] = 'api.Import'\nHANDLER_CLASSES['api/read'] = 'api.Read'\nHANDLER_CLASSES['api/write'] = 'api.Write'\nHANDLER_CLASSES['api/search'] = 'api.Search'\nHANDLER_CLASSES['api/subscribe'] = 'api.Subscribe'\nHANDLER_CLASSES['api/unsubscribe'] = 'api.Unsubscribe'\nHANDLER_CLASSES['api/stats'] = 'api.Stats'\nHANDLER_CLASSES['api/handle_sms'] = 'api.HandleSMS'\nHANDLER_CLASSES['api/photo_upload'] = 'api.PhotoUpload'\nHANDLER_CLASSES['feeds/repo'] = 'feeds.Repo'\nHANDLER_CLASSES['feeds/note'] = 'feeds.Note'\nHANDLER_CLASSES['feeds/person'] = 'feeds.Person'\nHANDLER_CLASSES['sitemap'] = 'sitemap.SiteMap'\nHANDLER_CLASSES['sitemap/ping'] = 'sitemap.SiteMapPing'\nHANDLER_CLASSES['tasks/count/note'] = 'tasks.CountNote'\nHANDLER_CLASSES['tasks/count/person'] = 'tasks.CountPerson'\nHANDLER_CLASSES['tasks/count/reindex'] = 'tasks.Reindex'\nHANDLER_CLASSES['tasks/count/update_dead_status'] = 'tasks.UpdateDeadStatus'\nHANDLER_CLASSES['tasks/count/update_status'] = 'tasks.UpdateStatus'\nHANDLER_CLASSES['tasks/delete_expired'] = 'tasks.DeleteExpired'\nHANDLER_CLASSES['tasks/delete_old'] = 'tasks.DeleteOld'\nHANDLER_CLASSES['tasks/clean_up_in_test_mode'] = 'tasks.CleanUpInTestMode'\nHANDLER_CLASSES['tasks/notify_many_unreviewed_notes'] = 'tasks.NotifyManyUnreviewedNotes'\n\ndef is_development_server():\n \"\"\"Returns True if the app is running in development.\"\"\"\n server = os.environ.get('SERVER_SOFTWARE', '')\n return 'Development' in server\n\ndef is_cron_task(request):\n \"\"\"Returns True if the request is from appengine cron.\"\"\"\n return 'X-AppEngine-Cron' in request.headers\n\ndef is_task_queue_task(request):\n \"\"\"Returns True if the request is from the appengine task queue.\"\"\"\n return 'X-AppEngine-TaskName' in request.headers\n\ndef get_repo_and_action(request):\n \"\"\"Determines the repo and action for a request. 
The action is the part\n of the URL path after the repo, with no leading or trailing slashes.\"\"\"\n scheme, netloc, path, _, _ = urlparse.urlsplit(request.url)\n parts = path.lstrip('/').split('/')\n\n # Depending on whether we're serving from appspot directly or\n # google.org/personfinder we could have /global or /personfinder/global\n # as the 'global' prefix.\n if parts[0] == 'personfinder':\n parts.pop(0)\n repo = parts and parts.pop(0) or None\n action = '/'.join(parts)\n if repo == 'global':\n repo = None\n return repo, action\n\ndef select_charset(request):\n \"\"\"Given a request, chooses a charset for encoding the response.\n\n If the selected charset is UTF-8, it always returns\n 'utf-8' (const.CHARSET_UTF8), not 'utf8', 'UTF-8', etc.\n \"\"\"\n # We assume that any client that doesn't support UTF-8 will specify a\n # preferred encoding in the Accept-Charset header, and will use this\n # encoding for content, query parameters, and form data. We make this\n # assumption across all repositories.\n\n # Get a list of the charsets that the client supports.\n if request.get('charsets'):\n charsets = request.get('charsets').split(',')\n elif user_agents.prefer_sjis_charset(request):\n # Some Japanese feature phones don't (fully) support UTF-8.\n # They only support Shift_JIS. But they may not send Accept-Charset\n # header. Also, we haven't confirmed, but there may be phones whose\n # Accept-Charset header includes UTF-8 but its UTF-8 support is buggy.\n # So we always use Shift_JIS regardless of Accept-Charset header.\n charsets = ['Shift_JIS']\n else:\n charsets = request.accept_charset.best_matches()\n\n # Always prefer UTF-8 if the client supports it.\n for charset in charsets:\n if charset.lower().replace('_', '-') in ['utf8', 'utf-8']:\n return const.CHARSET_UTF8\n\n # Otherwise, look for a requested charset that Python supports.\n for charset in charsets:\n try:\n 'xyz'.encode(charset, 'replace') # test if charset is known\n return charset\n except:\n continue\n\n # If Python doesn't know any of the requested charsets, use UTF-8.\n return const.CHARSET_UTF8\n\ndef select_lang(request, config=None):\n \"\"\"Selects the best language to use for a given request. 
The 'lang' query\n parameter has priority, then the django_language cookie, then the first\n language in the language menu, then the default setting.\"\"\"\n default_lang = (config and\n config.language_menu_options and\n config.language_menu_options[0])\n lang = (request.get('lang') or\n request.cookies.get('django_language', None) or\n default_lang or\n django_setup.LANGUAGE_CODE)\n lang = re.sub('[^A-Za-z0-9-]', '', lang)\n return const.LANGUAGE_SYNONYMS.get(lang, lang)\n\ndef get_repo_options(request, lang):\n \"\"\"Returns a list of the names and titles of the launched repositories.\"\"\"\n options = []\n for repo in model.Repo.list_launched():\n titles = config.get_for_repo(repo, 'repo_titles', {})\n default_title = (titles.values() or ['?'])[0]\n title = titles.get(lang, titles.get('en', default_title))\n url = utils.get_repo_url(request, repo)\n test_mode = config.get_for_repo(repo, 'test_mode')\n options.append(utils.Struct(repo=repo, title=title, url=url,\n test_mode=test_mode))\n return options\n\ndef get_language_options(request, config, current_lang):\n \"\"\"Returns a list of information needed to generate the language menu.\"\"\"\n primary_langs = (config and config.language_menu_options) or ['en']\n all_langs = sorted(\n const.LANGUAGE_ENDONYMS.keys(),\n key=lambda s: const.LANGUAGE_ENDONYMS[s])\n return {\n 'primary':\n [get_language_option(request, lang, lang == current_lang)\n for lang in primary_langs],\n 'all':\n # We put both 'primary' and 'all' languages into a single <select>\n # box (See app/resources/language-menu.html.template).\n # If current_lang is in the primary languages, we mark the\n # language as is_selected in 'primary', not in 'all', to make sure\n # a single option is selected in the <select> box.\n [get_language_option(\n request, lang,\n lang == current_lang and lang not in primary_langs)\n for lang in all_langs],\n }\n\ndef get_language_option(request, lang, is_selected):\n return {\n 'lang': lang,\n 'endonym': const.LANGUAGE_ENDONYMS.get(lang, '?'),\n 'url': utils.set_url_param(request.url, 'lang', lang),\n 'is_selected': is_selected,\n }\n\ndef get_localized_message(localized_messages, lang, default):\n \"\"\"Gets the localized message for lang from a dictionary that maps language\n codes to localized messages. Falls back to English if language 'lang' is\n not available, or to a default message if English is not available.\"\"\"\n if not isinstance(localized_messages, dict):\n return default\n return localized_messages.get(lang, localized_messages.get('en', default))\n\ndef get_hidden_input_tags_for_preserved_query_params(request):\n \"\"\"Gets HTML with <input type=\"hidden\"> tags to preserve query parameters\n listed in utils.PRESERVED_QUERY_PARAM_NAMES e.g. 
\"ui\".\"\"\"\n tags_str = ''\n for name in utils.PRESERVED_QUERY_PARAM_NAMES:\n value = request.get(name)\n if value:\n tags_str += '<input type=\"hidden\" name=\"%s\" value=\"%s\">\\n' % (\n django.utils.html.escape(name),\n django.utils.html.escape(value))\n return tags_str\n\ndef setup_env(request):\n \"\"\"Constructs the 'env' object, which contains various template variables\n that are commonly used by most handlers.\"\"\"\n env = utils.Struct()\n env.repo, env.action = get_repo_and_action(request)\n env.config = config.Configuration(env.repo or '*')\n # TODO(ryok): Rename to local_test_mode or something alike to disambiguate\n # better from repository's test_mode.\n env.test_mode = (request.remote_addr == '127.0.0.1' and\n request.get('test_mode'))\n\n env.analytics_id = config.get('analytics_id')\n env.maps_api_key = config.get('maps_api_key')\n\n # Internationalization-related stuff.\n env.charset = select_charset(request)\n env.lang = select_lang(request, env.config)\n env.rtl = env.lang in const.LANGUAGES_BIDI\n env.virtual_keyboard_layout = const.VIRTUAL_KEYBOARD_LAYOUTS.get(env.lang)\n\n # Used for parsing query params. This must be done before accessing any\n # query params which may have multi-byte value, such as \"given_name\" below\n # in this function.\n request.charset = env.charset\n\n # Determine the resource bundle to use.\n env.default_resource_bundle = config.get('default_resource_bundle', '1')\n env.resource_bundle = (request.cookies.get('resource_bundle', '') or\n env.default_resource_bundle)\n\n # Information about the request.\n env.url = utils.set_url_param(request.url, 'lang', env.lang)\n env.scheme, env.netloc, env.path, _, _ = urlparse.urlsplit(request.url)\n env.force_https = False\n env.domain = env.netloc.split(':')[0]\n env.global_url = utils.get_repo_url(request, 'global')\n\n # Commonly used information that's rendered or localized for templates.\n env.language_options = get_language_options(request, env.config, env.lang)\n env.repo_options = get_repo_options(request, env.lang)\n env.expiry_options = [\n utils.Struct(value=value, text=const.PERSON_EXPIRY_TEXT[value])\n for value in sorted(const.PERSON_EXPIRY_TEXT.keys(), key=int)\n ]\n env.status_options = [\n utils.Struct(value=value, text=const.NOTE_STATUS_TEXT[value])\n for value in pfif.NOTE_STATUS_VALUES\n if (value != 'believed_dead' or\n not env.config or env.config.allow_believed_dead_via_ui)\n ]\n env.hidden_input_tags_for_preserved_query_params = (\n get_hidden_input_tags_for_preserved_query_params(request))\n\n ui_param = request.get('ui', '').strip().lower()\n\n # Interprets \"small\" and \"style\" parameters for backward compatibility.\n # TODO(ichikawa): Delete these in near future when we decide to drop\n # support of these parameters.\n small_param = request.get('small', '').strip().lower()\n style_param = request.get('style', '').strip().lower()\n if not ui_param and small_param == 'yes':\n ui_param = 'small'\n elif not ui_param and style_param:\n ui_param = style_param\n\n if ui_param:\n env.ui = ui_param\n elif user_agents.is_jp_tier2_mobile_phone(request):\n env.ui = 'light'\n else:\n env.ui = 'default'\n\n # UI configurations.\n #\n # Enables features which require JavaScript.\n env.enable_javascript = True\n # Enables operations which requires Captcha.\n env.enable_captcha = True\n # Enables photo upload.\n env.enable_photo_upload = True\n # Enables to flag/unflag notes as spam, and to reveal spam notes.\n env.enable_spam_ops = True\n # Enables duplicate marking mode.\n 
env.enable_dup_mode = True\n # Shows a logo on top of the page.\n env.show_logo = True\n # Shows language menu.\n env.show_language_menu = True\n # Uses short labels for buttons.\n env.use_short_buttons = False\n # Optional \"target\" attribute for links to non-small pages.\n env.target_attr = ''\n # Shows record IDs in the results page.\n env.show_record_ids_in_results = True\n\n if env.ui == 'small':\n env.show_logo = False\n env.target_attr = ' target=\"_blank\" '\n\n elif env.ui == 'light':\n # Disables features which requires JavaScript. Some feature phones\n # doesn't support JavaScript.\n env.enable_javascript = False\n # Disables operations which requires Captcha because Captcha requires\n # JavaScript.\n env.enable_captcha = False\n # Uploading is often not supported in feature phones.\n env.enable_photo_upload = False\n # Disables spam operations because it requires JavaScript and\n # supporting more pages on ui=light.\n env.enable_spam_ops = False\n # Disables duplicate marking mode because it doesn't support\n # small screens and it requires JavaScript.\n env.enable_dup_mode = False\n # Hides the logo on the top to save the space. Also, the logo links\n # to the global page which doesn't support small screens.\n env.show_logo = False\n # Hides language menu because the menu in the current position is\n # annoying in feature phones.\n # TODO(ichikawa): Consider layout of the language menu.\n env.show_language_menu = False\n # Too long buttons are not fully shown in some feature phones.\n env.use_short_buttons = True\n # To make it simple.\n env.show_record_ids_in_results = False\n\n env.back_chevron = u'\\xab'\n back_chevron_in_charset = True\n try:\n env.back_chevron.encode(env.charset)\n except UnicodeEncodeError:\n # u'\\xab' is not in the charset (e.g. Shift_JIS).\n back_chevron_in_charset = False\n if not back_chevron_in_charset or env.ui == 'light':\n # Use ASCII characters on ui=light too because some feature phones\n # support UTF-8 but don't render UTF-8 symbols such as u'\\xab'.\n env.back_chevron = u'<<'\n\n env.enable_maps = (\n env.enable_javascript\n and not env.config.zero_rating_mode\n and env.maps_api_key)\n env.enable_analytics = (\n env.enable_javascript\n and not env.config.zero_rating_mode\n and env.analytics_id)\n env.enable_translate = (\n env.enable_javascript\n and not env.config.zero_rating_mode\n and env.config.translate_api_key)\n\n env.admin = AdminEnv(request)\n\n # Repo-specific information.\n if env.repo:\n # repo_url is the root URL for the repository.\n env.repo_url = utils.get_repo_url(request, env.repo)\n # start_url is like repo_url but preserves parameters such as 'ui'.\n env.start_url = utils.get_url(request, env.repo, '')\n # URL of the link in the heading. The link on ui=small links to the\n # normal UI.\n env.repo_title_url = (\n env.repo_url if env.ui == 'small' else env.start_url)\n # URL to force default UI. 
Note that we show ui=light version in some\n # user agents when ui parameter is not specified.\n env.default_ui_url = utils.get_url(request, env.repo, '', ui='default')\n env.repo_path = urlparse.urlsplit(env.repo_url)[2]\n env.repo_title = get_localized_message(\n env.config.repo_titles, env.lang, '?')\n env.start_page_custom_html = get_localized_message(\n env.config.start_page_custom_htmls, env.lang, '')\n env.results_page_custom_html = get_localized_message(\n env.config.results_page_custom_htmls, env.lang, '')\n env.view_page_custom_html = get_localized_message(\n env.config.view_page_custom_htmls, env.lang, '')\n env.seek_query_form_custom_html = get_localized_message(\n env.config.seek_query_form_custom_htmls, env.lang, '')\n env.footer_custom_html = get_localized_message(\n env.config.footer_custom_htmls, env.lang, '')\n # If the repository is deactivated, we should not show test mode\n # notification.\n env.repo_test_mode = (\n env.config.test_mode and not env.config.deactivated)\n env.force_https = env.config.force_https\n\n env.params_full_name = request.get('full_name', '').strip()\n if not env.params_full_name:\n # Preformat the name from 'given_name' and 'family_name' parameters.\n given_name = request.get('given_name', '').strip()\n family_name = request.get('family_name', '').strip()\n env.params_full_name = utils.get_full_name(\n given_name, family_name, env.config)\n\n return env\n\ndef flush_caches(*keywords):\n \"\"\"Flushes the specified set of caches. Pass '*' to flush everything.\"\"\"\n if '*' in keywords or 'resource' in keywords:\n resources.clear_caches()\n if '*' in keywords or 'memcache' in keywords:\n memcache.flush_all()\n if '*' in keywords or 'config' in keywords:\n config.cache.flush()\n for keyword in keywords:\n if keyword.startswith('config/'):\n config.cache.delete(keyword[7:])\n\n\nclass Main(webapp.RequestHandler):\n \"\"\"The main request handler. All dynamic requests except for remote_api are\n handled by this handler, which dispatches to all other dynamic handlers.\"\"\"\n\n def initialize(self, request, response):\n webapp.RequestHandler.initialize(self, request, response)\n\n # If requested, set the clock before doing anything clock-related.\n # Only works on localhost for testing. 
Specify ?utcnow=1293840000 to\n # set the clock to 2011-01-01, or ?utcnow=real to revert to real time.\n utcnow = request.get('utcnow')\n if request.remote_addr == '127.0.0.1' and utcnow:\n if utcnow == 'real':\n utils.set_utcnow_for_test(None)\n else:\n utils.set_utcnow_for_test(float(utcnow))\n\n # If requested, flush caches before we touch anything that uses them.\n flush_caches(*request.get('flush', '').split(','))\n\n # Gather commonly used information into self.env.\n self.env = setup_env(request)\n\n # Force a redirect if requested, except where https is not supported:\n # - for cron jobs\n # - for task queue jobs\n # - in development\n if (self.env.force_https and self.env.scheme == 'http'\n and not is_cron_task(self.request)\n and not is_task_queue_task(self.request)\n and not is_development_server()):\n self.redirect(self.env.url.replace('http:', 'https:'))\n\n # Activate the selected language.\n response.headers['Content-Language'] = self.env.lang\n response.headers['Set-Cookie'] = \\\n 'django_language=%s; path=/' % self.env.lang\n django_setup.activate(self.env.lang)\n\n # Activate the appropriate resource bundle.\n resources.set_active_bundle_name(self.env.resource_bundle)\n\n def serve(self):\n request, response, env = self.request, self.response, self.env\n\n # If the Person Finder instance has not been initialized yet,\n # prepend to any served page a warning and a link to the admin\n # page where the datastore can be initialized.\n if not config.get('initialized'):\n if request.get('operation') == 'setup_datastore':\n setup_pf.setup_datastore()\n self.redirect(env.global_url + '/')\n return\n else:\n get_vars = lambda: {'env': env}\n content = resources.get_rendered('setup_datastore.html', env.lang,\n (env.repo, env.charset), get_vars)\n response.out.write(content)\n\n if not env.action and not env.repo:\n # Redirect to the default home page.\n self.redirect(env.global_url + '/' + HOME_ACTION)\n elif env.action in HANDLER_CLASSES:\n # Dispatch to the handler for the specified action.\n module_name, class_name = HANDLER_CLASSES[env.action].split('.')\n handler = getattr(__import__(module_name), class_name)(\n request, response, env)\n getattr(handler, request.method.lower())() # get() or post()\n elif env.action.endswith('.template'):\n # Don't serve template source code.\n response.set_status(404)\n response.out.write('Not found')\n else:\n # Serve a static page or file.\n env.robots_ok = True\n get_vars = lambda: {'env': env, 'config': env.config}\n content = resources.get_rendered(\n env.action, env.lang, (env.repo, env.charset), get_vars)\n if content is None:\n response.set_status(404)\n response.out.write('Not found')\n else:\n content_type, encoding = mimetypes.guess_type(env.action)\n response.headers['Content-Type'] = (\n (content_type or 'text/plain') +\n ('; charset=%s' % encoding if encoding else ''))\n response.out.write(content)\n\n def get(self):\n self.serve()\n\n def post(self):\n self.serve()\n\n def head(self):\n self.request.method = 'GET'\n self.serve()\n self.response.clear()\n\nif __name__ == '__main__':\n webapp.util.run_wsgi_app(webapp.WSGIApplication([('.*', Main)]))\n \n", "path": "app/main.py" } ]
diff --git a/app/main.py b/app/main.py index 99879fd4ba..91a9766418 100644 --- a/app/main.py +++ b/app/main.py @@ -21,6 +21,9 @@ import mimetypes import re import os +import sys +sys.path.append(os.path.join(os.path.dirname(__file__), 'vendors')) + import urlparse from google.appengine.api import memcache @@ -597,4 +600,4 @@ def head(self): if __name__ == '__main__': webapp.util.run_wsgi_app(webapp.WSGIApplication([('.*', Main)])) - \ No newline at end of file + diff --git a/docker/Dockerfile b/docker/Dockerfile index 6bfcfbe79e..f2df5e5b98 100755 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,5 +1,5 @@ FROM phusion/baseimage -MAINTAINER Carlo Lobrano <[email protected]> +LABEL authors="Carlo Lobrano <[email protected]>, Mathieu Tortuyaux <[email protected]>" CMD ["/sbin/my_init"] @@ -7,33 +7,33 @@ CMD ["/sbin/my_init"] ENV DEBIAN_FRONTEND noninteractive ENV PATH $PATH:/opt/google_appengine ENV APPENGINE_DIR /opt/google_appengine/ -ENV PERSONFINDER_DIR /opt/personfinder +ENV PERSONFINDER_DIR /opt/personfinder/ ENV INIT_DATASTORE 0 -RUN apt-get update -RUN apt-get install -y build-essential unzip -RUN apt-get install -y python2.7 libpython2.7-dev -RUN apt-get install -y python-setuptools -RUN apt-get install -y git -RUN apt-get install -y wget -RUN apt-get install -y time - -WORKDIR /opt/ -RUN wget http://effbot.org/downloads/Imaging-1.1.7.tar.gz -RUN tar xvfz Imaging-1.1.7.tar.gz && cd Imaging-1.1.7 && python2.7 setup.py install - +RUN apt-get update && apt-get install -y \ + build-essential \ + unzip \ + python2.7 \ + libpython2.7-dev \ + python-pip \ + git \ + time \ + gettext \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +RUN pip install --upgrade pip && pip install pytest lxml + +# Install app engine WORKDIR /opt/ -RUN wget https://storage.googleapis.com/appengine-sdks/featured/google_appengine_1.9.25.zip -RUN unzip google_appengine_1.9.25.zip - -RUN easy_install pytest -RUN apt-get install -y gettext +ADD https://storage.googleapis.com/appengine-sdks/featured/google_appengine_1.9.25.zip /opt/ +RUN unzip -qq google_appengine_1.9.25.zip && rm google_appengine_1.9.25.zip ADD gae-run-app.sh /usr/bin/ ADD setup_datastore.sh /usr/bin/ -RUN mkdir -p /opt/personfinder -WORKDIR /opt/personfinder +WORKDIR /opt/personfinder/ + +# Clean up +RUN rm -rf /tmp/* /var/tmp/* -# Clean up APT when done. -RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000..22a306ed79 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,4 @@ +Babel==2.3.4 +pytz==2016.10 +simplejson==3.10.0 +xlrd==1.0.0 diff --git a/tools/common.sh b/tools/common.sh index f28894d1b9..46e8527a96 100644 --- a/tools/common.sh +++ b/tools/common.sh @@ -51,6 +51,7 @@ fi export PYTHONPATH=\ "$APP_DIR":\ +"$APP_DIR/vendors":\ "$TESTS_DIR":\ "$TOOLS_DIR":\ "$APPENGINE_DIR":\
Load third party libraries with PIP We should probably follow "Using pip requirements files" section here to install third-party libraries: https://cloud.google.com/appengine/docs/python/tools/using-libraries-python-27 instead of directly putting third party library *.py files under "app" directory.
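For reference, the workflow the linked App Engine documentation describes is: list third-party packages in a requirements.txt, install them into a vendored directory with `pip install -t`, and make that directory importable before anything imports from it. The sketch below is illustrative only; the `vendors` directory name mirrors the patch above, and the listed packages are the ones that patch adds.

```python
# requirements.txt (as added in the patch above)
#   pytz==2016.10
#   simplejson==3.10.0
#
# Install into the vendored directory shipped with the app:
#   pip install -t app/vendors -r requirements.txt

# Early in app/main.py (or an appengine_config.py), before importing any
# vendored package, extend sys.path to cover the vendored directory:
import os
import sys

sys.path.append(os.path.join(os.path.dirname(__file__), 'vendors'))
```

The Python 2.7 runtime also ships a helper (`google.appengine.ext.vendor.add(...)`) that performs the same path manipulation; either way the point is that third-party code is installed by pip rather than copied as loose *.py files under app/.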
joke2k__faker-640
[ { "content": "import os\nfrom importlib import import_module\nimport pkgutil\n\n\ndef list_module(module):\n path = os.path.dirname(module.__file__)\n modules = [name for finder, name, is_pkg in pkgutil.iter_modules([path]) if is_pkg]\n if len(modules) > 0:\n return modules\n return [i for i in os.listdir(path) if os.path.isdir(os.path.join(path, i)) and not i.startswith('_')]\n\n\ndef find_available_locales(providers):\n available_locales = set()\n\n for provider_path in providers:\n\n provider_module = import_module(provider_path)\n if getattr(provider_module, 'localized', False):\n langs = list_module(provider_module)\n available_locales.update(langs)\n return available_locales\n\n\ndef find_available_providers(modules):\n available_providers = set()\n for providers_mod in modules:\n providers = ['.'.join([providers_mod.__package__, mod]) for mod in list_module(providers_mod)]\n available_providers.update(providers)\n return sorted(available_providers)\n", "path": "faker/utils/loading.py" } ]
[ { "content": "import os\nfrom importlib import import_module\nimport pkgutil\n\n\ndef list_module(module):\n path = os.path.dirname(module.__file__)\n modules = [name for finder, name, is_pkg in pkgutil.iter_modules([path]) if is_pkg]\n return modules\n\n\ndef find_available_locales(providers):\n available_locales = set()\n\n for provider_path in providers:\n\n provider_module = import_module(provider_path)\n if getattr(provider_module, 'localized', False):\n langs = list_module(provider_module)\n available_locales.update(langs)\n return available_locales\n\n\ndef find_available_providers(modules):\n available_providers = set()\n for providers_mod in modules:\n providers = ['.'.join([providers_mod.__package__, mod]) for mod in list_module(providers_mod)]\n available_providers.update(providers)\n return sorted(available_providers)\n", "path": "faker/utils/loading.py" } ]
diff --git a/README.rst b/README.rst index 700e437d45..b782515e3f 100644 --- a/README.rst +++ b/README.rst @@ -171,7 +171,7 @@ When installed, you can invoke faker from the command-line: faker [-h] [--version] [-o output] [-l {bg_BG,cs_CZ,...,zh_CN,zh_TW}] [-r REPEAT] [-s SEP] - [-i {module.containing.custom_provider othermodule.containing.custom_provider}] + [-i {package.containing.custom_provider otherpkg.containing.custom_provider}] [fake] [fake argument [fake argument ...]] Where: @@ -194,7 +194,7 @@ Where: generated output - ``-i {my.custom_provider other.custom_provider}`` list of additional custom providers to use. - Note that is the import path of the module containing your Provider class, not the custom Provider class itself. + Note that is the import path of the package containing your Provider class, not the custom Provider class itself. - ``fake``: is the name of the fake to generate an output for, such as ``name``, ``address``, or ``text`` diff --git a/docs/index.rst b/docs/index.rst index e461d1d737..892e882b87 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -176,7 +176,7 @@ When installed, you can invoke faker from the command-line: faker [-h] [--version] [-o output] [-l {bg_BG,cs_CZ,...,zh_CN,zh_TW}] [-r REPEAT] [-s SEP] - [-i {module.containing.custom_provider othermodule.containing.custom_provider}] + [-i {package.containing.custom_provider otherpkg.containing.custom_provider}] [fake] [fake argument [fake argument ...]] Where: @@ -199,7 +199,7 @@ Where: generated output - ``-i {my.custom_provider other.custom_provider}`` list of additional custom providers to use. - Note that is the import path of the module containing your Provider class, not the custom Provider class itself. + Note that is the import path of the package containing your Provider class, not the custom Provider class itself. - ``fake``: is the name of the fake to generate an output for, such as ``name``, ``address``, or ``text`` diff --git a/faker/utils/loading.py b/faker/utils/loading.py index 40e4b5dae3..d9b81a7c56 100644 --- a/faker/utils/loading.py +++ b/faker/utils/loading.py @@ -6,9 +6,7 @@ def list_module(module): path = os.path.dirname(module.__file__) modules = [name for finder, name, is_pkg in pkgutil.iter_modules([path]) if is_pkg] - if len(modules) > 0: - return modules - return [i for i in os.listdir(path) if os.path.isdir(os.path.join(path, i)) and not i.startswith('_')] + return modules def find_available_locales(providers):
MacOS 10.13 OSError: [Errno 24] Too many open files ```shell return [i for i in os.listdir(path) if os.path.isdir(os.path.join(path, i)) and not i.startswith('_')] OSError: [Errno 24] Too many open files: '/Users/abcdefg/.pyenv/versions/3.6.3/envs/weixin3/lib/python3.6/site-packages/faker/providers/address' ```
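`Errno 24` means the process ran out of file descriptors; macOS in particular tends to default to a low soft limit (often 256), so scans that touch many files or directories can hit it. Independent of the library fix above, a small diagnostic sketch for checking and raising the per-process limit from Python:

```python
import resource

# Current per-process limits on open file descriptors.
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
print("open-file limit: soft=%d hard=%d" % (soft, hard))

# Raise the soft limit; the hard limit is the ceiling for an unprivileged
# process, so clamp to it (RLIM_INFINITY means "no ceiling").
target = 4096 if hard == resource.RLIM_INFINITY else min(4096, hard)
resource.setrlimit(resource.RLIMIT_NOFILE, (max(soft, target), hard))
```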
microsoft__AzureTRE-524
[ { "content": "import os\nimport sys\nimport json\nimport socket\nimport asyncio\nimport logging\nfrom shared.logging import disable_unwanted_loggers, initialize_logging # pylint: disable=import-error # noqa\nfrom resources import strings # pylint: disable=import-error # noqa\nfrom contextlib import asynccontextmanager\nfrom azure.servicebus import ServiceBusMessage\nfrom azure.servicebus.aio import ServiceBusClient, AutoLockRenewer\nfrom azure.identity.aio import DefaultAzureCredential\n\nlogger_adapter = initialize_logging(logging.INFO, socket.gethostname())\ndisable_unwanted_loggers()\n\n\n@asynccontextmanager\nasync def default_credentials(msi_id):\n \"\"\"\n Context manager which yields the default credentials.\n \"\"\"\n credential = DefaultAzureCredential(managed_identity_client_id=msi_id) if msi_id else DefaultAzureCredential()\n yield credential\n await credential.close()\n\n\nasync def receive_message(env_vars, service_bus_client):\n \"\"\"\n This method is an async generator which receives messages from service bus\n and yields those messages. If the yielded function return True the message is\n marked complete.\n \"\"\"\n async with service_bus_client:\n q_name = env_vars[\"resource_request_queue\"]\n renewer = AutoLockRenewer()\n receiver = service_bus_client.get_queue_receiver(queue_name=q_name, auto_lock_renewer=renewer)\n\n async with receiver:\n received_msgs = await receiver.receive_messages(max_message_count=10, max_wait_time=5)\n\n for msg in received_msgs:\n result = True\n message = \"\"\n\n try:\n message = json.loads(str(msg))\n result = (yield message)\n except (json.JSONDecodeError) as e:\n logging.error(f\"Received bad service bus resource request message: {e}\")\n if result:\n logging.info(f\"Resource request for {message} is complete\")\n else:\n logging.error('Message processing failed!')\n logger_adapter.info(f\"Message with id = {message['id']} processed as {result} and marked complete.\")\n await receiver.complete_message(msg)\n\n\ndef azure_login_command(env_vars):\n local_login = f\"az login --service-principal --username {env_vars['arm_client_id']} --password {env_vars['arm_client_secret']} --tenant {env_vars['arm_tenant_id']}\"\n vmss_login = f\"az login --identity -u {env_vars['vmss_msi_id']}\"\n command = vmss_login if env_vars['vmss_msi_id'] else local_login\n return command\n\n\ndef build_porter_command(msg_body, env_vars):\n porter_parameters = \"\"\n for parameter in msg_body['parameters']:\n porter_parameters = porter_parameters + f\" --param {parameter}={msg_body['parameters'][parameter]}\"\n\n installation_id = msg_body['parameters']['tre_id'] + \"-\" + msg_body['parameters']['workspace_id']\n\n porter_parameters = porter_parameters + f\" --param tfstate_container_name={env_vars['tfstate_container_name']}\"\n porter_parameters = porter_parameters + f\" --param tfstate_resource_group_name={env_vars['tfstate_resource_group_name']}\"\n porter_parameters = porter_parameters + f\" --param tfstate_storage_account_name={env_vars['tfstate_storage_account_name']}\"\n porter_parameters = porter_parameters + f\" --param arm_use_msi={env_vars['arm_use_msi']}\"\n\n command_line = [f\"{azure_login_command(env_vars)} && az acr login --name {env_vars['registry_server'].replace('.azurecr.io','')} && porter \"\n f\"{msg_body['action']} {installation_id} \"\n f\" --reference {env_vars['registry_server']}/{msg_body['name']}:v{msg_body['version']}\"\n f\" {porter_parameters} --cred ./vm_porter/azure.json --allow-docker-host-access\"\n f\" && porter show 
{installation_id}\"]\n return command_line\n\n\ndef porter_envs(env_var):\n porter_env_vars = {}\n porter_env_vars[\"HOME\"] = os.environ['HOME']\n porter_env_vars[\"PATH\"] = os.environ['PATH']\n porter_env_vars[\"ARM_CLIENT_ID\"] = env_var[\"arm_client_id\"]\n porter_env_vars[\"ARM_CLIENT_SECRET\"] = env_var[\"arm_client_secret\"]\n porter_env_vars[\"ARM_SUBSCRIPTION_ID\"] = env_var[\"arm_subscription_id\"]\n porter_env_vars[\"ARM_TENANT_ID\"] = env_var[\"arm_tenant_id\"]\n\n return porter_env_vars\n\n\nasync def run_porter(command, env_vars):\n proc = await asyncio.create_subprocess_shell(\n ''.join(command),\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n env=porter_envs(env_vars))\n\n stdout, stderr = await proc.communicate()\n logging.info(f'[{command!r} exited with {proc.returncode}]')\n result_stdout = None\n result_stderr = None\n if stdout:\n result_stdout = stdout.decode()\n logger_adapter.info('[stdout]')\n for string in result_stdout.split('\\n'):\n if len(string) != 0:\n logger_adapter.info(str(string))\n if stderr:\n result_stderr = stderr.decode()\n logger_adapter.info('[stderr]')\n for string in result_stderr.split('\\n'):\n if len(string) != 0:\n logger_adapter.info(str(string))\n\n return (proc.returncode, result_stdout, result_stderr)\n\n\ndef service_bus_message_generator(sb_message, status, deployment_message):\n installation_id = sb_message['parameters']['tre_id'] + \"-\" + sb_message['parameters']['workspace_id']\n resource_request_message = json.dumps({\n \"id\": sb_message[\"id\"],\n \"status\": status,\n \"message\": f\"{installation_id}: {deployment_message}\"\n })\n return resource_request_message\n\n\nasync def deploy_porter_bundle(msg_body, sb_client, env_vars, message_logger_adapter):\n installation_id = msg_body['parameters']['tre_id'] + \"-\" + msg_body['parameters']['workspace_id']\n message_logger_adapter.info(f\"{installation_id}: Deployment job configuration starting\")\n sb_sender = sb_client.get_queue_sender(queue_name=env_vars[\"deployment_status_queue\"])\n resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_DEPLOYING, \"Deployment job starting\")\n await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body[\"id\"]))\n\n returncode, _, err = await run_porter(build_porter_command(msg_body, env_vars), env_vars)\n if returncode != 0:\n error_message = \"Error context message = \" + \" \".join(err.split('\\n'))\n resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_FAILED, error_message)\n await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body[\"id\"]))\n message_logger_adapter.info(f\"{installation_id}: Deployment job configuration failed error = {error_message}\")\n return False\n else:\n success_message = \"Workspace was deployed successfully...\"\n resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_DEPLOYED, success_message)\n await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body[\"id\"]))\n message_logger_adapter.info(f\"{installation_id}: {success_message}\")\n return True\n\n\nasync def runner(env_vars):\n msi_id = env_vars[\"vmss_msi_id\"]\n service_bus_namespace = env_vars[\"service_bus_namespace\"]\n async with default_credentials(msi_id) as credential:\n service_bus_client = ServiceBusClient(service_bus_namespace, credential)\n logger_adapter.info(\"Starting message 
receiving loop...\")\n while True:\n logger_adapter.info(\"Checking for new messages...\")\n receive_message_gen = receive_message(env_vars, service_bus_client)\n try:\n async for message in receive_message_gen:\n logger_adapter.info(f\"Message received for id={message['id']}\")\n message_logger_adapter = initialize_logging(logging.INFO, message['id'])\n result = await deploy_porter_bundle(message, service_bus_client, env_vars, message_logger_adapter)\n await receive_message_gen.asend(result)\n except StopAsyncIteration: # the async generator when finished signals end with this exception.\n pass\n logger_adapter.info(\"All messages done sleeping...\")\n await asyncio.sleep(60)\n\n\ndef read_env_vars():\n env_vars = {\n # Needed for local dev\n \"app_id\": os.environ.get(\"AZURE_CLIENT_ID\", None),\n \"app_password\": os.environ.get(\"AZURE_CLIENT_SECRET\", None),\n\n \"registry_server\": os.environ[\"REGISTRY_SERVER\"],\n \"tfstate_container_name\": os.environ['TERRAFORM_STATE_CONTAINER_NAME'],\n \"tfstate_resource_group_name\": os.environ['MGMT_RESOURCE_GROUP_NAME'],\n \"tfstate_storage_account_name\": os.environ['MGMT_STORAGE_ACCOUNT_NAME'],\n \"deployment_status_queue\": os.environ['SERVICE_BUS_DEPLOYMENT_STATUS_UPDATE_QUEUE'],\n \"resource_request_queue\": os.environ['SERVICE_BUS_RESOURCE_REQUEST_QUEUE'],\n \"service_bus_namespace\": os.environ['SERVICE_BUS_FULLY_QUALIFIED_NAMESPACE'],\n \"vmss_msi_id\": os.environ.get('VMSS_MSI_ID', None),\n\n # Needed for running porter\n \"arm_use_msi\": os.environ[\"ARM_USE_MSI\"],\n \"arm_subscription_id\": os.environ['ARM_SUBSCRIPTION_ID'],\n \"arm_client_id\": os.environ[\"ARM_CLIENT_ID\"],\n \"arm_tenant_id\": os.environ[\"ARM_TENANT_ID\"]\n }\n\n env_vars[\"arm_client_secret\"] = os.environ[\"ARM_CLIENT_SECRET\"] if env_vars[\"arm_use_msi\"] == \"false\" else \"\"\n\n return env_vars\n\n\nif __name__ == \"__main__\":\n try:\n env_vars = read_env_vars()\n except KeyError as e:\n logger_adapter.error(f\"Environment variable {e} is not set correctly...Exiting\")\n sys.exit(1)\n logger_adapter.info(\"Started processor\")\n asyncio.run(runner(env_vars))\n", "path": "processor_function/vm_porter/runner.py" } ]
[ { "content": "import os\nimport sys\nimport json\nimport socket\nimport asyncio\nimport logging\nfrom shared.logging import disable_unwanted_loggers, initialize_logging # pylint: disable=import-error # noqa\nfrom resources import strings # pylint: disable=import-error # noqa\nfrom contextlib import asynccontextmanager\nfrom azure.servicebus import ServiceBusMessage\nfrom azure.servicebus.aio import ServiceBusClient, AutoLockRenewer\nfrom azure.identity.aio import DefaultAzureCredential\n\nlogger_adapter = initialize_logging(logging.INFO, socket.gethostname())\ndisable_unwanted_loggers()\n\n\n@asynccontextmanager\nasync def default_credentials(msi_id):\n \"\"\"\n Context manager which yields the default credentials.\n \"\"\"\n credential = DefaultAzureCredential(managed_identity_client_id=msi_id) if msi_id else DefaultAzureCredential()\n yield credential\n await credential.close()\n\n\nasync def receive_message(env_vars, service_bus_client):\n \"\"\"\n This method is an async generator which receives messages from service bus\n and yields those messages. If the yielded function return True the message is\n marked complete.\n \"\"\"\n async with service_bus_client:\n q_name = env_vars[\"resource_request_queue\"]\n renewer = AutoLockRenewer(max_lock_renewal_duration=1800)\n receiver = service_bus_client.get_queue_receiver(queue_name=q_name, auto_lock_renewer=renewer)\n\n async with receiver:\n received_msgs = await receiver.receive_messages(max_message_count=10, max_wait_time=5)\n\n for msg in received_msgs:\n result = True\n message = \"\"\n\n try:\n message = json.loads(str(msg))\n result = (yield message)\n except (json.JSONDecodeError) as e:\n logging.error(f\"Received bad service bus resource request message: {e}\")\n if result:\n logging.info(f\"Resource request for {message} is complete\")\n else:\n logging.error('Message processing failed!')\n logger_adapter.info(f\"Message with id = {message['id']} processed as {result} and marked complete.\")\n await receiver.complete_message(msg)\n\n\ndef azure_login_command(env_vars):\n local_login = f\"az login --service-principal --username {env_vars['arm_client_id']} --password {env_vars['arm_client_secret']} --tenant {env_vars['arm_tenant_id']}\"\n vmss_login = f\"az login --identity -u {env_vars['vmss_msi_id']}\"\n command = vmss_login if env_vars['vmss_msi_id'] else local_login\n return command\n\n\ndef build_porter_command(msg_body, env_vars):\n porter_parameters = \"\"\n for parameter in msg_body['parameters']:\n porter_parameters = porter_parameters + f\" --param {parameter}={msg_body['parameters'][parameter]}\"\n\n installation_id = msg_body['parameters']['tre_id'] + \"-\" + msg_body['parameters']['workspace_id']\n\n porter_parameters = porter_parameters + f\" --param tfstate_container_name={env_vars['tfstate_container_name']}\"\n porter_parameters = porter_parameters + f\" --param tfstate_resource_group_name={env_vars['tfstate_resource_group_name']}\"\n porter_parameters = porter_parameters + f\" --param tfstate_storage_account_name={env_vars['tfstate_storage_account_name']}\"\n porter_parameters = porter_parameters + f\" --param arm_use_msi={env_vars['arm_use_msi']}\"\n\n command_line = [f\"{azure_login_command(env_vars)} && az acr login --name {env_vars['registry_server'].replace('.azurecr.io','')} && porter \"\n f\"{msg_body['action']} {installation_id} \"\n f\" --reference {env_vars['registry_server']}/{msg_body['name']}:v{msg_body['version']}\"\n f\" {porter_parameters} --cred ./vm_porter/azure.json --allow-docker-host-access\"\n 
f\" && porter show {installation_id}\"]\n return command_line\n\n\ndef porter_envs(env_var):\n porter_env_vars = {}\n porter_env_vars[\"HOME\"] = os.environ['HOME']\n porter_env_vars[\"PATH\"] = os.environ['PATH']\n porter_env_vars[\"ARM_CLIENT_ID\"] = env_var[\"arm_client_id\"]\n porter_env_vars[\"ARM_CLIENT_SECRET\"] = env_var[\"arm_client_secret\"]\n porter_env_vars[\"ARM_SUBSCRIPTION_ID\"] = env_var[\"arm_subscription_id\"]\n porter_env_vars[\"ARM_TENANT_ID\"] = env_var[\"arm_tenant_id\"]\n\n return porter_env_vars\n\n\nasync def run_porter(command, env_vars):\n proc = await asyncio.create_subprocess_shell(\n ''.join(command),\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n env=porter_envs(env_vars))\n\n stdout, stderr = await proc.communicate()\n logging.info(f'[{command!r} exited with {proc.returncode}]')\n result_stdout = None\n result_stderr = None\n if stdout:\n result_stdout = stdout.decode()\n logger_adapter.info('[stdout]')\n for string in result_stdout.split('\\n'):\n if len(string) != 0:\n logger_adapter.info(str(string))\n if stderr:\n result_stderr = stderr.decode()\n logger_adapter.info('[stderr]')\n for string in result_stderr.split('\\n'):\n if len(string) != 0:\n logger_adapter.info(str(string))\n\n return (proc.returncode, result_stdout, result_stderr)\n\n\ndef service_bus_message_generator(sb_message, status, deployment_message):\n installation_id = sb_message['parameters']['tre_id'] + \"-\" + sb_message['parameters']['workspace_id']\n resource_request_message = json.dumps({\n \"id\": sb_message[\"id\"],\n \"status\": status,\n \"message\": f\"{installation_id}: {deployment_message}\"\n })\n return resource_request_message\n\n\nasync def deploy_porter_bundle(msg_body, sb_client, env_vars, message_logger_adapter):\n installation_id = msg_body['parameters']['tre_id'] + \"-\" + msg_body['parameters']['workspace_id']\n message_logger_adapter.info(f\"{installation_id}: Deployment job configuration starting\")\n sb_sender = sb_client.get_queue_sender(queue_name=env_vars[\"deployment_status_queue\"])\n resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_DEPLOYING, \"Deployment job starting\")\n await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body[\"id\"]))\n\n returncode, _, err = await run_porter(build_porter_command(msg_body, env_vars), env_vars)\n if returncode != 0:\n error_message = \"Error context message = \" + \" \".join(err.split('\\n'))\n resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_FAILED, error_message)\n await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body[\"id\"]))\n message_logger_adapter.info(f\"{installation_id}: Deployment job configuration failed error = {error_message}\")\n return False\n else:\n success_message = \"Workspace was deployed successfully...\"\n resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_DEPLOYED, success_message)\n await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body[\"id\"]))\n message_logger_adapter.info(f\"{installation_id}: {success_message}\")\n return True\n\n\nasync def runner(env_vars):\n msi_id = env_vars[\"vmss_msi_id\"]\n service_bus_namespace = env_vars[\"service_bus_namespace\"]\n async with default_credentials(msi_id) as credential:\n service_bus_client = ServiceBusClient(service_bus_namespace, credential)\n 
logger_adapter.info(\"Starting message receiving loop...\")\n while True:\n logger_adapter.info(\"Checking for new messages...\")\n receive_message_gen = receive_message(env_vars, service_bus_client)\n try:\n async for message in receive_message_gen:\n logger_adapter.info(f\"Message received for id={message['id']}\")\n message_logger_adapter = initialize_logging(logging.INFO, message['id'])\n result = await deploy_porter_bundle(message, service_bus_client, env_vars, message_logger_adapter)\n await receive_message_gen.asend(result)\n except StopAsyncIteration: # the async generator when finished signals end with this exception.\n pass\n logger_adapter.info(\"All messages done sleeping...\")\n await asyncio.sleep(60)\n\n\ndef read_env_vars():\n env_vars = {\n # Needed for local dev\n \"app_id\": os.environ.get(\"AZURE_CLIENT_ID\", None),\n \"app_password\": os.environ.get(\"AZURE_CLIENT_SECRET\", None),\n\n \"registry_server\": os.environ[\"REGISTRY_SERVER\"],\n \"tfstate_container_name\": os.environ['TERRAFORM_STATE_CONTAINER_NAME'],\n \"tfstate_resource_group_name\": os.environ['MGMT_RESOURCE_GROUP_NAME'],\n \"tfstate_storage_account_name\": os.environ['MGMT_STORAGE_ACCOUNT_NAME'],\n \"deployment_status_queue\": os.environ['SERVICE_BUS_DEPLOYMENT_STATUS_UPDATE_QUEUE'],\n \"resource_request_queue\": os.environ['SERVICE_BUS_RESOURCE_REQUEST_QUEUE'],\n \"service_bus_namespace\": os.environ['SERVICE_BUS_FULLY_QUALIFIED_NAMESPACE'],\n \"vmss_msi_id\": os.environ.get('VMSS_MSI_ID', None),\n\n # Needed for running porter\n \"arm_use_msi\": os.environ[\"ARM_USE_MSI\"],\n \"arm_subscription_id\": os.environ['ARM_SUBSCRIPTION_ID'],\n \"arm_client_id\": os.environ[\"ARM_CLIENT_ID\"],\n \"arm_tenant_id\": os.environ[\"ARM_TENANT_ID\"]\n }\n\n env_vars[\"arm_client_secret\"] = os.environ[\"ARM_CLIENT_SECRET\"] if env_vars[\"arm_use_msi\"] == \"false\" else \"\"\n\n return env_vars\n\n\nif __name__ == \"__main__\":\n try:\n env_vars = read_env_vars()\n except KeyError as e:\n logger_adapter.error(f\"Environment variable {e} is not set correctly...Exiting\")\n sys.exit(1)\n logger_adapter.info(\"Started processor\")\n asyncio.run(runner(env_vars))\n", "path": "processor_function/vm_porter/runner.py" } ]
diff --git a/processor_function/vm_porter/runner.py b/processor_function/vm_porter/runner.py index 0f2b9cfb26..d4a42a9d35 100644 --- a/processor_function/vm_porter/runner.py +++ b/processor_function/vm_porter/runner.py @@ -33,7 +33,7 @@ async def receive_message(env_vars, service_bus_client): """ async with service_bus_client: q_name = env_vars["resource_request_queue"] - renewer = AutoLockRenewer() + renewer = AutoLockRenewer(max_lock_renewal_duration=1800) receiver = service_bus_client.get_queue_receiver(queue_name=q_name, auto_lock_renewer=renewer) async with receiver: diff --git a/workspaces/services/devtestlabs/create_and_expose_vm.sh b/workspaces/services/devtestlabs/create_and_expose_vm.sh index 9ee9fd843a..5d5d8012c7 100755 --- a/workspaces/services/devtestlabs/create_and_expose_vm.sh +++ b/workspaces/services/devtestlabs/create_and_expose_vm.sh @@ -112,7 +112,7 @@ fi if NSG_RULE=$(az network nsg rule show -g $vm_resource_group_name --nsg-name nsg-ws --name $lab_name -o json); then echo "NSG rule from firewall already exists for VMs in this worksapce" else - az network nsg rule create -g $vm_resource_group_name --nsg-name nsg-ws --name $lab_name --priority 3000 --destination-port-range 3389 --source-address-prefixes $FIREWALL_SUBNET_ADDRESS_PREFIX --destination-address-prefixes $VM_SUBNET_ADDRESS_PREFIX --protocol Tcp --access Allow + az network nsg rule create -g $vm_resource_group_name --nsg-name nsg-ws --name $lab_name --priority 800 --destination-port-range 3389 --source-address-prefixes $FIREWALL_SUBNET_ADDRESS_PREFIX --destination-address-prefixes $VM_SUBNET_ADDRESS_PREFIX --protocol Tcp --access Allow fi
[BUG] Service bus message times out on deployment of workspace template **Describe the bug** When deploying a template that takes > 10 minutes, although deployment is successful the status is not updated. **Steps to reproduce** 1. Register and deploy the `azureml_devtestlabs` workspace 2. Log on to the VMSS resource processor using bastion 3. View the docker logs, wait until deployment is complete, and see similar to: `LinkDetach("ErrorCodes.LinkDetachForced: The link 'G3:5725658:sender-link-bd7b69d4-9ad4-4b9b-b9f6-2e311be400a3' is force detached. Code: publisher(link3135). Details: AmqpMessagePublisher.IdleTimerExpired: Idle timeout: 00:10:00.")`
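The patch above addresses this by widening the auto-lock-renewal window so a received message's lock survives a deployment that runs well past ten minutes. Distilled to its core, the pattern with `azure-servicebus` v7 looks roughly like the sketch below; the connection string, queue name and handler are placeholders, and the synchronous client is used purely for brevity.

```python
from azure.servicebus import AutoLockRenewer, ServiceBusClient

CONN_STR = "<service-bus-connection-string>"  # placeholder
QUEUE_NAME = "resource-request-queue"         # placeholder


def handle_long_running_deployment(message):
    # Placeholder for work (e.g. a porter bundle install) that can take
    # far longer than the queue's default lock duration.
    ...


# Keep renewing locks for up to 30 minutes per message.
renewer = AutoLockRenewer(max_lock_renewal_duration=1800)

with ServiceBusClient.from_connection_string(CONN_STR) as client:
    receiver = client.get_queue_receiver(
        queue_name=QUEUE_NAME, auto_lock_renewer=renewer)
    with receiver:
        for msg in receiver.receive_messages(max_message_count=1,
                                             max_wait_time=5):
            handle_long_running_deployment(msg)
            # Lock is still valid here thanks to the renewer.
            receiver.complete_message(msg)

renewer.close()
```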
pypa__pip-8124
[ { "content": "from __future__ import absolute_import\n\nimport logging\nimport os\nimport textwrap\n\nimport pip._internal.utils.filesystem as filesystem\nfrom pip._internal.cli.base_command import Command\nfrom pip._internal.cli.status_codes import ERROR, SUCCESS\nfrom pip._internal.exceptions import CommandError, PipError\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nif MYPY_CHECK_RUNNING:\n from optparse import Values\n from typing import Any, List\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CacheCommand(Command):\n \"\"\"\n Inspect and manage pip's wheel cache.\n\n Subcommands:\n\n info: Show information about the cache.\n list: List filenames of packages stored in the cache.\n remove: Remove one or more package from the cache.\n purge: Remove all items from the cache.\n\n <pattern> can be a glob expression or a package name.\n \"\"\"\n\n usage = \"\"\"\n %prog info\n %prog list [<pattern>]\n %prog remove <pattern>\n %prog purge\n \"\"\"\n\n def run(self, options, args):\n # type: (Values, List[Any]) -> int\n handlers = {\n \"info\": self.get_cache_info,\n \"list\": self.list_cache_items,\n \"remove\": self.remove_cache_items,\n \"purge\": self.purge_cache,\n }\n\n # Determine action\n if not args or args[0] not in handlers:\n logger.error(\"Need an action ({}) to perform.\".format(\n \", \".join(sorted(handlers)))\n )\n return ERROR\n\n action = args[0]\n\n # Error handling happens here, not in the action-handlers.\n try:\n handlers[action](options, args[1:])\n except PipError as e:\n logger.error(e.args[0])\n return ERROR\n\n return SUCCESS\n\n def get_cache_info(self, options, args):\n # type: (Values, List[Any]) -> None\n if args:\n raise CommandError('Too many arguments')\n\n num_packages = len(self._find_wheels(options, '*'))\n\n cache_location = self._wheels_cache_dir(options)\n cache_size = filesystem.format_directory_size(cache_location)\n\n message = textwrap.dedent(\"\"\"\n Location: {location}\n Size: {size}\n Number of wheels: {package_count}\n \"\"\").format(\n location=cache_location,\n package_count=num_packages,\n size=cache_size,\n ).strip()\n\n logger.info(message)\n\n def list_cache_items(self, options, args):\n # type: (Values, List[Any]) -> None\n if len(args) > 1:\n raise CommandError('Too many arguments')\n\n if args:\n pattern = args[0]\n else:\n pattern = '*'\n\n files = self._find_wheels(options, pattern)\n\n if not files:\n logger.info('Nothing cached.')\n return\n\n results = []\n for filename in files:\n wheel = os.path.basename(filename)\n size = filesystem.format_file_size(filename)\n results.append(' - {} ({})'.format(wheel, size))\n logger.info('Cache contents:\\n')\n logger.info('\\n'.join(sorted(results)))\n\n def remove_cache_items(self, options, args):\n # type: (Values, List[Any]) -> None\n if len(args) > 1:\n raise CommandError('Too many arguments')\n\n if not args:\n raise CommandError('Please provide a pattern')\n\n files = self._find_wheels(options, args[0])\n if not files:\n raise CommandError('No matching packages')\n\n for filename in files:\n os.unlink(filename)\n logger.debug('Removed %s', filename)\n logger.info('Files removed: %s', len(files))\n\n def purge_cache(self, options, args):\n # type: (Values, List[Any]) -> None\n if args:\n raise CommandError('Too many arguments')\n\n return self.remove_cache_items(options, ['*'])\n\n def _wheels_cache_dir(self, options):\n # type: (Values) -> str\n return os.path.join(options.cache_dir, 'wheels')\n\n def _find_wheels(self, options, pattern):\n # type: (Values, str) 
-> List[str]\n wheel_dir = self._wheels_cache_dir(options)\n\n # The wheel filename format, as specified in PEP 427, is:\n # {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl\n #\n # Additionally, non-alphanumeric values in the distribution are\n # normalized to underscores (_), meaning hyphens can never occur\n # before `-{version}`.\n #\n # Given that information:\n # - If the pattern we're given contains a hyphen (-), the user is\n # providing at least the version. Thus, we can just append `*.whl`\n # to match the rest of it.\n # - If the pattern we're given doesn't contain a hyphen (-), the\n # user is only providing the name. Thus, we append `-*.whl` to\n # match the hyphen before the version, followed by anything else.\n #\n # PEP 427: https://www.python.org/dev/peps/pep-0427/\n pattern = pattern + (\"*.whl\" if \"-\" in pattern else \"-*.whl\")\n\n return filesystem.find_files(wheel_dir, pattern)\n", "path": "src/pip/_internal/commands/cache.py" } ]
[ { "content": "from __future__ import absolute_import\n\nimport logging\nimport os\nimport textwrap\n\nimport pip._internal.utils.filesystem as filesystem\nfrom pip._internal.cli.base_command import Command\nfrom pip._internal.cli.status_codes import ERROR, SUCCESS\nfrom pip._internal.exceptions import CommandError, PipError\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nif MYPY_CHECK_RUNNING:\n from optparse import Values\n from typing import Any, List\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CacheCommand(Command):\n \"\"\"\n Inspect and manage pip's wheel cache.\n\n Subcommands:\n\n info: Show information about the cache.\n list: List filenames of packages stored in the cache.\n remove: Remove one or more package from the cache.\n purge: Remove all items from the cache.\n\n <pattern> can be a glob expression or a package name.\n \"\"\"\n\n usage = \"\"\"\n %prog info\n %prog list [<pattern>]\n %prog remove <pattern>\n %prog purge\n \"\"\"\n\n def run(self, options, args):\n # type: (Values, List[Any]) -> int\n handlers = {\n \"info\": self.get_cache_info,\n \"list\": self.list_cache_items,\n \"remove\": self.remove_cache_items,\n \"purge\": self.purge_cache,\n }\n\n if not options.cache_dir:\n logger.error(\"pip cache commands can not \"\n \"function since cache is disabled.\")\n return ERROR\n\n # Determine action\n if not args or args[0] not in handlers:\n logger.error(\"Need an action ({}) to perform.\".format(\n \", \".join(sorted(handlers)))\n )\n return ERROR\n\n action = args[0]\n\n # Error handling happens here, not in the action-handlers.\n try:\n handlers[action](options, args[1:])\n except PipError as e:\n logger.error(e.args[0])\n return ERROR\n\n return SUCCESS\n\n def get_cache_info(self, options, args):\n # type: (Values, List[Any]) -> None\n if args:\n raise CommandError('Too many arguments')\n\n num_packages = len(self._find_wheels(options, '*'))\n\n cache_location = self._wheels_cache_dir(options)\n cache_size = filesystem.format_directory_size(cache_location)\n\n message = textwrap.dedent(\"\"\"\n Location: {location}\n Size: {size}\n Number of wheels: {package_count}\n \"\"\").format(\n location=cache_location,\n package_count=num_packages,\n size=cache_size,\n ).strip()\n\n logger.info(message)\n\n def list_cache_items(self, options, args):\n # type: (Values, List[Any]) -> None\n if len(args) > 1:\n raise CommandError('Too many arguments')\n\n if args:\n pattern = args[0]\n else:\n pattern = '*'\n\n files = self._find_wheels(options, pattern)\n\n if not files:\n logger.info('Nothing cached.')\n return\n\n results = []\n for filename in files:\n wheel = os.path.basename(filename)\n size = filesystem.format_file_size(filename)\n results.append(' - {} ({})'.format(wheel, size))\n logger.info('Cache contents:\\n')\n logger.info('\\n'.join(sorted(results)))\n\n def remove_cache_items(self, options, args):\n # type: (Values, List[Any]) -> None\n if len(args) > 1:\n raise CommandError('Too many arguments')\n\n if not args:\n raise CommandError('Please provide a pattern')\n\n files = self._find_wheels(options, args[0])\n if not files:\n raise CommandError('No matching packages')\n\n for filename in files:\n os.unlink(filename)\n logger.debug('Removed %s', filename)\n logger.info('Files removed: %s', len(files))\n\n def purge_cache(self, options, args):\n # type: (Values, List[Any]) -> None\n if args:\n raise CommandError('Too many arguments')\n\n return self.remove_cache_items(options, ['*'])\n\n def _wheels_cache_dir(self, options):\n # type: 
(Values) -> str\n return os.path.join(options.cache_dir, 'wheels')\n\n def _find_wheels(self, options, pattern):\n # type: (Values, str) -> List[str]\n wheel_dir = self._wheels_cache_dir(options)\n\n # The wheel filename format, as specified in PEP 427, is:\n # {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl\n #\n # Additionally, non-alphanumeric values in the distribution are\n # normalized to underscores (_), meaning hyphens can never occur\n # before `-{version}`.\n #\n # Given that information:\n # - If the pattern we're given contains a hyphen (-), the user is\n # providing at least the version. Thus, we can just append `*.whl`\n # to match the rest of it.\n # - If the pattern we're given doesn't contain a hyphen (-), the\n # user is only providing the name. Thus, we append `-*.whl` to\n # match the hyphen before the version, followed by anything else.\n #\n # PEP 427: https://www.python.org/dev/peps/pep-0427/\n pattern = pattern + (\"*.whl\" if \"-\" in pattern else \"-*.whl\")\n\n return filesystem.find_files(wheel_dir, pattern)\n", "path": "src/pip/_internal/commands/cache.py" } ]
diff --git a/news/8124.bugfix b/news/8124.bugfix new file mode 100644 index 00000000000..a859381e0d8 --- /dev/null +++ b/news/8124.bugfix @@ -0,0 +1 @@ +Abort pip cache commands early when cache is disabled. diff --git a/src/pip/_internal/commands/cache.py b/src/pip/_internal/commands/cache.py index 7e3f72e080b..3c345dfa0d1 100644 --- a/src/pip/_internal/commands/cache.py +++ b/src/pip/_internal/commands/cache.py @@ -48,6 +48,11 @@ def run(self, options, args): "purge": self.purge_cache, } + if not options.cache_dir: + logger.error("pip cache commands can not " + "function since cache is disabled.") + return ERROR + # Determine action if not args or args[0] not in handlers: logger.error("Need an action ({}) to perform.".format( diff --git a/tests/functional/test_cache.py b/tests/functional/test_cache.py index a464ece7945..7fec4dd56c4 100644 --- a/tests/functional/test_cache.py +++ b/tests/functional/test_cache.py @@ -216,3 +216,15 @@ def test_cache_purge_too_many_args(script, wheel_cache_files): # Make sure nothing was deleted. for filename in wheel_cache_files: assert os.path.exists(filename) + + [email protected]("command", ["info", "list", "remove", "purge"]) +def test_cache_abort_when_no_cache_dir(script, command): + """Running any pip cache command when cache is disabled should + abort and log an informative error""" + result = script.pip('cache', command, '--no-cache-dir', + expect_error=True) + assert result.stdout == '' + + assert ('ERROR: pip cache commands can not function' + ' since cache is disabled.' in result.stderr.splitlines())
'pip cache info' fails when no-cache-dir set pip version: pip 20.1b1 Python version: CPython 3.8.1 OS: Win 10 64 Testing 20.1 beta, execute 'pip cache info' and crashes. I'm guessing it's due to pip.ini turning off caching. pip.ini: ``` [global] no-cache-dir = false ``` Command execution: ``` > pip cache info ERROR: Exception: Traceback (most recent call last): File "c:\program files\python38\lib\site-packages\pip\_internal\cli\base_command.py", line 188, in _main status = self.run(options, args) File "c:\program files\python38\lib\site-packages\pip\_internal\commands\cache.py", line 62, in run handlers[action](options, args[1:]) File "c:\program files\python38\lib\site-packages\pip\_internal\commands\cache.py", line 74, in get_cache_info num_packages = len(self._find_wheels(options, '*')) File "c:\program files\python38\lib\site-packages\pip\_internal\commands\cache.py", line 145, in _find_wheels wheel_dir = self._wheels_cache_dir(options) File "c:\program files\python38\lib\site-packages\pip\_internal\commands\cache.py", line 141, in _wheels_cache_dir return os.path.join(options.cache_dir, 'wheels') File "c:\program files\python38\lib\ntpath.py", line 78, in join path = os.fspath(path) TypeError: expected str, bytes or os.PathLike object, not bool ```
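The traceback bottoms out in `os.path.join` because, with caching disabled, `options.cache_dir` holds a boolean rather than a path; the fix above simply bails out with an error before any path is built. The failure is easy to reproduce in isolation:

```python
import os

cache_dir = False  # what options.cache_dir holds once --no-cache-dir applies

try:
    os.path.join(cache_dir, "wheels")
except TypeError as exc:
    # TypeError: expected str, bytes or os.PathLike object, not bool
    print("pip cache would crash here:", exc)
```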
edgedb__edgedb-3087
[ { "content": "# Copyright (C) 2016-present MagicStack Inc. and the EdgeDB authors.\n# Copyright (C) 2016-present the asyncpg authors and contributors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"PostgreSQL cluster management.\"\"\"\n\nfrom __future__ import annotations\nfrom typing import *\n\nimport asyncio\nimport enum\nimport functools\nimport locale\nimport logging\nimport os\nimport os.path\nimport pathlib\nimport re\nimport shlex\nimport shutil\nimport textwrap\nimport time\nimport urllib.parse\n\nimport asyncpg\n\nfrom edb import buildmeta\nfrom edb.common import supervisor\nfrom edb.common import uuidgen\n\nfrom edb.server import defines\nfrom edb.server.ha import base as ha_base\nfrom edb.pgsql import common as pgcommon\n\nfrom . import pgconnparams\n\n\nlogger = logging.getLogger('edb.pgcluster')\npg_dump_logger = logging.getLogger('pg_dump')\npg_ctl_logger = logging.getLogger('pg_ctl')\npg_config_logger = logging.getLogger('pg_config')\ninitdb_logger = logging.getLogger('initdb')\npostgres_logger = logging.getLogger('postgres')\n\nget_database_backend_name = pgcommon.get_database_backend_name\nget_role_backend_name = pgcommon.get_role_backend_name\n\n\ndef _is_c_utf8_locale_present() -> bool:\n try:\n locale.setlocale(locale.LC_CTYPE, 'C.UTF-8')\n except Exception:\n return False\n else:\n # We specifically don't use locale.getlocale(), because\n # it can lie and return a non-existent locale due to PEP 538.\n locale.setlocale(locale.LC_CTYPE, '')\n return True\n\n\nclass ClusterError(Exception):\n pass\n\n\nclass PostgresPidFileNotReadyError(Exception):\n \"\"\"Raised on an attempt to read non-existent or bad Postgres PID file\"\"\"\n\n\nclass BackendCapabilities(enum.IntFlag):\n\n NONE = 0\n #: Whether CREATE ROLE .. 
SUPERUSER is allowed\n SUPERUSER_ACCESS = 1 << 0\n #: Whether reading PostgreSQL configuration files\n #: via pg_file_settings is allowed\n CONFIGFILE_ACCESS = 1 << 1\n #: Whether the PostgreSQL server supports the C.UTF-8 locale\n C_UTF8_LOCALE = 1 << 2\n\n\nALL_BACKEND_CAPABILITIES = (\n BackendCapabilities.SUPERUSER_ACCESS\n | BackendCapabilities.CONFIGFILE_ACCESS\n | BackendCapabilities.C_UTF8_LOCALE\n)\n\n\nclass BackendInstanceParams(NamedTuple):\n\n capabilities: BackendCapabilities\n tenant_id: str\n base_superuser: Optional[str] = None\n max_connections: int = 500\n reserved_connections: int = 0\n\n\nclass BackendRuntimeParams(NamedTuple):\n\n instance_params: BackendInstanceParams\n session_authorization_role: Optional[str] = None\n\n\[email protected]_cache\ndef get_default_runtime_params(**instance_params: Any) -> BackendRuntimeParams:\n capabilities = ALL_BACKEND_CAPABILITIES\n if not _is_c_utf8_locale_present():\n capabilities &= ~BackendCapabilities.C_UTF8_LOCALE\n instance_params.setdefault('capabilities', capabilities)\n if 'tenant_id' not in instance_params:\n instance_params = dict(\n tenant_id=buildmeta.get_default_tenant_id(),\n **instance_params,\n )\n\n return BackendRuntimeParams(\n instance_params=BackendInstanceParams(**instance_params),\n )\n\n\nclass BaseCluster:\n\n def __init__(\n self,\n *,\n instance_params: Optional[BackendInstanceParams] = None,\n ) -> None:\n self._connection_addr: Optional[Tuple[str, int]] = None\n self._connection_params: Optional[\n pgconnparams.ConnectionParameters\n ] = None\n self._default_session_auth: Optional[str] = None\n self._pg_config_data: Dict[str, str] = {}\n self._pg_bin_dir: Optional[pathlib.Path] = None\n if instance_params is None:\n self._instance_params = (\n get_default_runtime_params().instance_params)\n else:\n self._instance_params = instance_params\n\n def get_db_name(self, db_name: str) -> str:\n return get_database_backend_name(\n db_name,\n tenant_id=self._instance_params.tenant_id,\n )\n\n def get_role_name(self, role_name: str) -> str:\n return get_database_backend_name(\n role_name,\n tenant_id=self._instance_params.tenant_id,\n )\n\n async def start(\n self,\n wait: int = 60,\n *,\n server_settings: Optional[Mapping[str, str]] = None,\n **opts: Any,\n ) -> None:\n raise NotImplementedError\n\n async def stop(self, wait: int = 60) -> None:\n raise NotImplementedError\n\n def destroy(self) -> None:\n raise NotImplementedError\n\n async def connect(self, **kwargs: Any) -> asyncpg.Connection:\n conn_info = self.get_connection_spec()\n conn_info.update(kwargs)\n if 'sslmode' in conn_info:\n conn_info['ssl'] = conn_info.pop('sslmode').name\n conn = await asyncpg.connect(**conn_info)\n\n if (not kwargs.get('user')\n and self._default_session_auth\n and conn_info.get('user') != self._default_session_auth):\n # No explicit user given, and the default\n # SESSION AUTHORIZATION is different from the user\n # used to connect.\n await conn.execute(\n f'SET ROLE {pgcommon.quote_ident(self._default_session_auth)}'\n )\n\n return conn\n\n async def start_watching(\n self, cluster_protocol: Optional[ha_base.ClusterProtocol] = None\n ) -> None:\n pass\n\n def stop_watching(self) -> None:\n pass\n\n def get_runtime_params(self) -> BackendRuntimeParams:\n params = self.get_connection_params()\n login_role: Optional[str] = params.user\n sup_role = self.get_role_name(defines.EDGEDB_SUPERUSER)\n return BackendRuntimeParams(\n instance_params=self._instance_params,\n session_authorization_role=(\n None if login_role == sup_role 
else login_role\n ),\n )\n\n def get_connection_addr(self) -> Optional[Tuple[str, int]]:\n return self._get_connection_addr()\n\n def set_default_session_authorization(self, rolename: str) -> None:\n self._default_session_auth = rolename\n\n def set_connection_params(\n self,\n params: pgconnparams.ConnectionParameters,\n ) -> None:\n self._connection_params = params\n\n def get_connection_params(\n self,\n ) -> pgconnparams.ConnectionParameters:\n assert self._connection_params is not None\n return self._connection_params\n\n def get_connection_spec(self) -> Dict[str, Any]:\n conn_dict: Dict[str, Any] = {}\n addr = self.get_connection_addr()\n assert addr is not None\n conn_dict['host'] = addr[0]\n conn_dict['port'] = addr[1]\n params = self.get_connection_params()\n for k in (\n 'user',\n 'password',\n 'database',\n 'ssl',\n 'sslmode',\n 'server_settings',\n ):\n v = getattr(params, k)\n if v is not None:\n conn_dict[k] = v\n\n cluster_settings = conn_dict.get('server_settings', {})\n\n edgedb_settings = {\n 'client_encoding': 'utf-8',\n 'search_path': 'edgedb',\n 'timezone': 'UTC',\n 'intervalstyle': 'iso_8601',\n 'jit': 'off',\n }\n\n conn_dict['server_settings'] = {**cluster_settings, **edgedb_settings}\n\n return conn_dict\n\n def _get_connection_addr(self) -> Optional[Tuple[str, int]]:\n return self._connection_addr\n\n def is_managed(self) -> bool:\n raise NotImplementedError\n\n async def get_status(self) -> str:\n raise NotImplementedError\n\n async def dump_database(\n self,\n dbname: str,\n *,\n exclude_schemas: Iterable[str] = (),\n dump_object_owners: bool = True,\n ) -> bytes:\n status = await self.get_status()\n if status != 'running':\n raise ClusterError('cannot dump: cluster is not running')\n\n if self._pg_bin_dir is None:\n await self.lookup_postgres()\n pg_dump = self._find_pg_binary('pg_dump')\n conn_spec = self.get_connection_spec()\n\n args = [\n pg_dump,\n '--inserts',\n f'--dbname={dbname}',\n f'--host={conn_spec[\"host\"]}',\n f'--port={conn_spec[\"port\"]}',\n f'--username={conn_spec[\"user\"]}',\n ]\n\n if not dump_object_owners:\n args.append('--no-owner')\n\n env = os.environ.copy()\n if conn_spec.get(\"password\"):\n env['PGPASSWORD'] = conn_spec[\"password\"]\n\n if exclude_schemas:\n for exclude_schema in exclude_schemas:\n args.append(f'--exclude-schema={exclude_schema}')\n\n stdout_lines, _, _ = await _run_logged_subprocess(\n args,\n logger=pg_dump_logger,\n log_stdout=False,\n env=env,\n )\n return b'\\n'.join(stdout_lines)\n\n def _find_pg_binary(self, binary: str) -> str:\n assert self._pg_bin_dir is not None\n bpath = self._pg_bin_dir / binary\n if not bpath.is_file():\n raise ClusterError(\n 'could not find {} executable: '.format(binary) +\n '{!r} does not exist or is not a file'.format(bpath))\n\n return str(bpath)\n\n def _subprocess_error(\n self,\n name: str,\n exitcode: int,\n stderr: Optional[bytes],\n ) -> ClusterError:\n if stderr:\n return ClusterError(\n f'{name} exited with status {exitcode}:\\n'\n + textwrap.indent(stderr.decode(), ' ' * 4),\n )\n else:\n return ClusterError(\n f'{name} exited with status {exitcode}',\n )\n\n async def lookup_postgres(self) -> None:\n self._pg_bin_dir = await get_pg_bin_dir()\n\n\nclass Cluster(BaseCluster):\n def __init__(\n self,\n data_dir: pathlib.Path,\n *,\n runstate_dir: Optional[pathlib.Path] = None,\n instance_params: Optional[BackendInstanceParams] = None,\n log_level: str = 'i',\n ):\n super().__init__(instance_params=instance_params)\n self._data_dir = data_dir\n self._runstate_dir = (\n 
runstate_dir if runstate_dir is not None else data_dir)\n self._daemon_pid: Optional[int] = None\n self._daemon_process: Optional[asyncio.subprocess.Process] = None\n self._daemon_supervisor: Optional[supervisor.Supervisor] = None\n self._log_level = log_level\n\n def is_managed(self) -> bool:\n return True\n\n def get_data_dir(self) -> pathlib.Path:\n return self._data_dir\n\n async def get_status(self) -> str:\n stdout_lines, stderr_lines, exit_code = (\n await _run_logged_text_subprocess(\n [self._pg_ctl, 'status', '-D', str(self._data_dir)],\n logger=pg_ctl_logger,\n check=False,\n )\n )\n\n if (\n exit_code == 4\n or not os.path.exists(self._data_dir)\n or not os.listdir(self._data_dir)\n ):\n return 'not-initialized'\n elif exit_code == 3:\n return 'stopped'\n elif exit_code == 0:\n output = '\\n'.join(stdout_lines)\n r = re.match(r'.*PID\\s?:\\s+(\\d+).*', output)\n if not r:\n raise ClusterError(\n f'could not parse pg_ctl status output: {output}')\n self._daemon_pid = int(r.group(1))\n if self._connection_addr is None:\n self._connection_addr = self._connection_addr_from_pidfile()\n return 'running'\n else:\n stderr_text = '\\n'.join(stderr_lines)\n raise ClusterError(\n f'`pg_ctl status` exited with status {exit_code}:\\n'\n + textwrap.indent(stderr_text, ' ' * 4),\n )\n\n async def ensure_initialized(self, **settings: Any) -> bool:\n cluster_status = await self.get_status()\n\n if cluster_status == 'not-initialized':\n logger.info(\n 'Initializing database cluster in %s', self._data_dir)\n\n instance_params = self.get_runtime_params().instance_params\n capabilities = instance_params.capabilities\n have_c_utf8 = (\n capabilities & BackendCapabilities.C_UTF8_LOCALE)\n await self.init(\n username='postgres',\n locale='C.UTF-8' if have_c_utf8 else 'en_US.UTF-8',\n lc_collate='C',\n encoding='UTF8',\n )\n self.reset_hba()\n self.add_hba_entry(\n type='local',\n database='all',\n user='postgres',\n auth_method='trust'\n )\n return True\n else:\n return False\n\n async def init(self, **settings: str) -> None:\n \"\"\"Initialize cluster.\"\"\"\n if await self.get_status() != 'not-initialized':\n raise ClusterError(\n 'cluster in {!r} has already been initialized'.format(\n self._data_dir))\n\n if settings:\n settings_args = ['--{}={}'.format(k.replace('_', '-'), v)\n for k, v in settings.items()]\n extra_args = ['-o'] + [' '.join(settings_args)]\n else:\n extra_args = []\n\n await _run_logged_subprocess(\n [self._pg_ctl, 'init', '-D', str(self._data_dir)] + extra_args,\n logger=initdb_logger,\n )\n\n async def start(\n self,\n wait: int = 60,\n *,\n server_settings: Optional[Mapping[str, str]] = None,\n **opts: str,\n ) -> None:\n \"\"\"Start the cluster.\"\"\"\n status = await self.get_status()\n if status == 'running':\n return\n elif status == 'not-initialized':\n raise ClusterError(\n 'cluster in {!r} has not been initialized'.format(\n self._data_dir))\n\n extra_args = ['--{}={}'.format(k, v) for k, v in opts.items()]\n\n start_settings = {\n 'listen_addresses': '', # we use Unix sockets\n 'unix_socket_permissions': '0700',\n 'unix_socket_directories': str(self._runstate_dir),\n # here we are not setting superuser_reserved_connections because\n # we're using superuser only now (so all connections available),\n # and we don't support reserving connections for now\n 'max_connections': str(self._instance_params.max_connections),\n # From Postgres docs:\n #\n # You might need to raise this value if you have queries that\n # touch many different tables in a single transaction, e.g.,\n # 
query of a parent table with many children.\n #\n # EdgeDB queries might touch _lots_ of tables, especially in deep\n # inheritance hierarchies. This is especially important in low\n # `max_connections` scenarios.\n 'max_locks_per_transaction': 256,\n }\n\n if os.getenv('EDGEDB_DEBUG_PGSERVER'):\n start_settings['log_min_messages'] = 'info'\n start_settings['log_statement'] = 'all'\n else:\n log_level_map = {\n 'd': 'INFO',\n 'i': 'NOTICE',\n 'w': 'WARNING',\n 'e': 'ERROR',\n 's': 'PANIC',\n }\n start_settings['log_min_messages'] = log_level_map[self._log_level]\n start_settings['log_statement'] = 'none'\n start_settings['log_line_prefix'] = ''\n\n if server_settings:\n start_settings.update(server_settings)\n\n ssl_key = start_settings.get('ssl_key_file')\n if ssl_key:\n # Make sure server certificate key file has correct permissions.\n keyfile = os.path.join(self._data_dir, 'srvkey.pem')\n assert isinstance(ssl_key, str)\n shutil.copy(ssl_key, keyfile)\n os.chmod(keyfile, 0o600)\n start_settings['ssl_key_file'] = keyfile\n\n for k, v in start_settings.items():\n extra_args.extend(['-c', '{}={}'.format(k, v)])\n\n self._daemon_process, *loggers = await _start_logged_subprocess(\n [self._postgres, '-D', str(self._data_dir), *extra_args],\n capture_stdout=False,\n capture_stderr=False,\n logger=postgres_logger,\n log_processor=postgres_log_processor,\n )\n self._daemon_pid = self._daemon_process.pid\n\n sup = await supervisor.Supervisor.create(name=\"postgres loggers\")\n for logger_coro in loggers:\n sup.create_task(logger_coro)\n self._daemon_supervisor = sup\n\n await self._test_connection(timeout=wait)\n\n async def reload(self) -> None:\n \"\"\"Reload server configuration.\"\"\"\n status = await self.get_status()\n if status != 'running':\n raise ClusterError('cannot reload: cluster is not running')\n\n await _run_logged_subprocess(\n [self._pg_ctl, 'reload', '-D', str(self._data_dir)],\n logger=pg_ctl_logger,\n )\n\n async def stop(self, wait: int = 60) -> None:\n await _run_logged_subprocess(\n [\n self._pg_ctl,\n 'stop', '-D', str(self._data_dir),\n '-t', str(wait), '-m', 'fast'\n ],\n logger=pg_ctl_logger,\n )\n\n if (\n self._daemon_process is not None and\n self._daemon_process.returncode is None\n ):\n self._daemon_process.terminate()\n await asyncio.wait_for(self._daemon_process.wait(), timeout=wait)\n\n if self._daemon_supervisor is not None:\n await self._daemon_supervisor.cancel()\n self._daemon_supervisor = None\n\n def destroy(self) -> None:\n shutil.rmtree(self._data_dir)\n\n def reset_hba(self) -> None:\n \"\"\"Remove all records from pg_hba.conf.\"\"\"\n pg_hba = os.path.join(self._data_dir, 'pg_hba.conf')\n\n try:\n with open(pg_hba, 'w'):\n pass\n except IOError as e:\n raise ClusterError(\n 'cannot modify HBA records: {}'.format(e)) from e\n\n def add_hba_entry(\n self,\n *,\n type: str = 'host',\n database: str,\n user: str,\n address: Optional[str] = None,\n auth_method: str,\n auth_options: Optional[Mapping[str, Any]] = None,\n ) -> None:\n \"\"\"Add a record to pg_hba.conf.\"\"\"\n if type not in {'local', 'host', 'hostssl', 'hostnossl'}:\n raise ValueError('invalid HBA record type: {!r}'.format(type))\n\n pg_hba = os.path.join(self._data_dir, 'pg_hba.conf')\n\n record = '{} {} {}'.format(type, database, user)\n\n if type != 'local':\n if address is None:\n raise ValueError(\n '{!r} entry requires a valid address'.format(type))\n else:\n record += ' {}'.format(address)\n\n record += ' {}'.format(auth_method)\n\n if auth_options is not None:\n record += ' ' + ' 
'.join(\n '{}={}'.format(k, v) for k, v in auth_options.items())\n\n try:\n with open(pg_hba, 'a') as f:\n print(record, file=f)\n except IOError as e:\n raise ClusterError(\n 'cannot modify HBA records: {}'.format(e)) from e\n\n async def trust_local_connections(self) -> None:\n self.reset_hba()\n\n self.add_hba_entry(type='local', database='all',\n user='all', auth_method='trust')\n self.add_hba_entry(type='host', address='127.0.0.1/32',\n database='all', user='all',\n auth_method='trust')\n self.add_hba_entry(type='host', address='::1/128',\n database='all', user='all',\n auth_method='trust')\n status = await self.get_status()\n if status == 'running':\n await self.reload()\n\n async def lookup_postgres(self) -> None:\n await super().lookup_postgres()\n self._pg_ctl = self._find_pg_binary('pg_ctl')\n self._postgres = self._find_pg_binary('postgres')\n\n def _get_connection_addr(self) -> Tuple[str, int]:\n if self._connection_addr is None:\n self._connection_addr = self._connection_addr_from_pidfile()\n\n return self._connection_addr\n\n def _connection_addr_from_pidfile(self) -> Tuple[str, int]:\n pidfile = os.path.join(self._data_dir, 'postmaster.pid')\n\n try:\n with open(pidfile, 'rt') as f:\n piddata = f.read()\n except FileNotFoundError:\n raise PostgresPidFileNotReadyError\n\n lines = piddata.splitlines()\n\n if len(lines) < 6:\n # A complete postgres pidfile is at least 6 lines\n raise PostgresPidFileNotReadyError\n\n pmpid = int(lines[0])\n if self._daemon_pid and pmpid != self._daemon_pid:\n # This might be an old pidfile left from previous postgres\n # daemon run.\n raise PostgresPidFileNotReadyError\n\n portnum = int(lines[3])\n sockdir = lines[4]\n hostaddr = lines[5]\n\n if sockdir:\n if sockdir[0] != '/':\n # Relative sockdir\n sockdir = os.path.normpath(\n os.path.join(self._data_dir, sockdir))\n host_str = sockdir\n else:\n host_str = hostaddr\n\n if host_str == '*':\n host_str = 'localhost'\n elif host_str == '0.0.0.0':\n host_str = '127.0.0.1'\n elif host_str == '::':\n host_str = '::1'\n\n return (host_str, portnum)\n\n async def _test_connection(self, timeout: int = 60) -> str:\n self._connection_addr = None\n connected = False\n\n for n in range(timeout + 1):\n # pg usually comes up pretty quickly, but not so\n # quickly that we don't hit the wait case. 
Make our\n # first sleep pretty short, to shave almost a second\n # off the happy case.\n sleep_time = 1 if n else 0.10\n\n try:\n conn_addr = self._get_connection_addr()\n except PostgresPidFileNotReadyError:\n time.sleep(sleep_time)\n continue\n\n try:\n con = await asyncpg.connect(\n database='postgres',\n user='postgres',\n timeout=5,\n host=conn_addr[0],\n port=conn_addr[1],\n )\n except (\n OSError,\n asyncio.TimeoutError,\n asyncpg.CannotConnectNowError,\n asyncpg.PostgresConnectionError,\n ):\n time.sleep(sleep_time)\n continue\n except asyncpg.PostgresError:\n # Any other error other than ServerNotReadyError or\n # ConnectionError is interpreted to indicate the server is\n # up.\n break\n else:\n connected = True\n await con.close()\n break\n\n if connected:\n return 'running'\n else:\n return 'not-initialized'\n\n\nclass RemoteCluster(BaseCluster):\n def __init__(\n self,\n addr: Tuple[str, int],\n params: pgconnparams.ConnectionParameters,\n *,\n instance_params: Optional[BackendInstanceParams] = None,\n ha_backend: Optional[ha_base.HABackend] = None,\n ):\n super().__init__(instance_params=instance_params)\n self._connection_addr = addr\n self._connection_params = params\n self._ha_backend = ha_backend\n\n def _get_connection_addr(self) -> Optional[Tuple[str, int]]:\n if self._ha_backend is not None:\n return self._ha_backend.get_master_addr()\n return self._connection_addr\n\n async def ensure_initialized(self, **settings: Any) -> bool:\n return False\n\n def is_managed(self) -> bool:\n return False\n\n async def get_status(self) -> str:\n return 'running'\n\n def init(self, **settings: str) -> str:\n pass\n\n async def start(\n self,\n wait: int = 60,\n *,\n server_settings: Optional[Mapping[str, str]] = None,\n **opts: Any,\n ) -> None:\n pass\n\n async def stop(self, wait: int = 60) -> None:\n pass\n\n def destroy(self) -> None:\n pass\n\n def reset_hba(self) -> None:\n raise ClusterError('cannot modify HBA records of unmanaged cluster')\n\n def add_hba_entry(\n self,\n *,\n type: str = 'host',\n database: str,\n user: str,\n address: Optional[str] = None,\n auth_method: str,\n auth_options: Optional[Mapping[str, Any]] = None,\n ) -> None:\n raise ClusterError('cannot modify HBA records of unmanaged cluster')\n\n async def start_watching(\n self, cluster_protocol: Optional[ha_base.ClusterProtocol] = None\n ) -> None:\n if self._ha_backend is not None:\n await self._ha_backend.start_watching(cluster_protocol)\n\n def stop_watching(self) -> None:\n if self._ha_backend is not None:\n self._ha_backend.stop_watching()\n\n\nasync def get_pg_bin_dir() -> pathlib.Path:\n pg_config_data = await get_pg_config()\n pg_bin_dir = pg_config_data.get('bindir')\n if not pg_bin_dir:\n raise ClusterError(\n 'pg_config output did not provide the BINDIR value')\n return pathlib.Path(pg_bin_dir)\n\n\nasync def get_pg_config() -> Dict[str, str]:\n stdout_lines, _, _ = await _run_logged_text_subprocess(\n [str(buildmeta.get_pg_config_path())],\n logger=pg_config_logger,\n )\n\n config = {}\n for line in stdout_lines:\n k, eq, v = line.partition('=')\n if eq:\n config[k.strip().lower()] = v.strip()\n\n return config\n\n\nasync def get_local_pg_cluster(\n data_dir: pathlib.Path,\n *,\n runstate_dir: Optional[pathlib.Path] = None,\n max_connections: Optional[int] = None,\n tenant_id: Optional[str] = None,\n log_level: Optional[str] = None,\n) -> Cluster:\n if log_level is None:\n log_level = 'i'\n if tenant_id is None:\n tenant_id = buildmeta.get_default_tenant_id()\n instance_params = None\n if 
max_connections is not None:\n instance_params = get_default_runtime_params(\n max_connections=max_connections,\n tenant_id=tenant_id,\n ).instance_params\n cluster = Cluster(\n data_dir=data_dir,\n runstate_dir=runstate_dir,\n instance_params=instance_params,\n log_level=log_level,\n )\n await cluster.lookup_postgres()\n return cluster\n\n\nasync def get_remote_pg_cluster(\n dsn: str,\n *,\n tenant_id: Optional[str] = None,\n) -> RemoteCluster:\n parsed = urllib.parse.urlparse(dsn)\n ha_backend = None\n\n if parsed.scheme not in {'postgresql', 'postgres'}:\n ha_backend = ha_base.get_backend(parsed)\n if ha_backend is None:\n raise ValueError(\n 'invalid DSN: scheme is expected to be \"postgresql\", '\n '\"postgres\" or one of the supported HA backend, '\n 'got {!r}'.format(parsed.scheme))\n\n addr = await ha_backend.get_cluster_consensus()\n dsn = 'postgresql://{}:{}'.format(*addr)\n\n addrs, params = pgconnparams.parse_dsn(dsn)\n if len(addrs) > 1:\n raise ValueError('multiple hosts in Postgres DSN are not supported')\n if tenant_id is None:\n t_id = buildmeta.get_default_tenant_id()\n else:\n t_id = tenant_id\n rcluster = RemoteCluster(addrs[0], params)\n\n async def _get_cluster_type(\n conn: asyncpg.Connection,\n ) -> Tuple[Type[RemoteCluster], Optional[str]]:\n managed_clouds = {\n 'rds_superuser': RemoteCluster, # Amazon RDS\n 'cloudsqlsuperuser': RemoteCluster, # GCP Cloud SQL\n }\n\n managed_cloud_super = await conn.fetchval(\n \"\"\"\n SELECT\n rolname\n FROM\n pg_roles\n WHERE\n rolname = any($1::text[])\n LIMIT\n 1\n \"\"\",\n list(managed_clouds),\n )\n\n if managed_cloud_super is not None:\n return managed_clouds[managed_cloud_super], managed_cloud_super\n else:\n return RemoteCluster, None\n\n async def _detect_capabilities(\n conn: asyncpg.Connection,\n ) -> BackendCapabilities:\n caps = BackendCapabilities.NONE\n\n try:\n await conn.execute(f'ALTER SYSTEM SET foo = 10')\n except asyncpg.InsufficientPrivilegeError:\n configfile_access = False\n except asyncpg.UndefinedObjectError:\n configfile_access = True\n else:\n configfile_access = True\n\n if configfile_access:\n caps |= BackendCapabilities.CONFIGFILE_ACCESS\n\n tx = conn.transaction()\n await tx.start()\n rname = str(uuidgen.uuid1mc())\n\n try:\n await conn.execute(f'CREATE ROLE \"{rname}\" WITH SUPERUSER')\n except asyncpg.InsufficientPrivilegeError:\n can_make_superusers = False\n else:\n can_make_superusers = True\n finally:\n await tx.rollback()\n\n if can_make_superusers:\n caps |= BackendCapabilities.SUPERUSER_ACCESS\n\n coll = await conn.fetchval('''\n SELECT collname FROM pg_collation\n WHERE lower(replace(collname, '-', '')) = 'c.utf8' LIMIT 1;\n ''')\n\n if coll is not None:\n caps |= BackendCapabilities.C_UTF8_LOCALE\n\n return caps\n\n async def _get_pg_settings(\n conn: asyncpg.Connection,\n name: str,\n ) -> str:\n return await conn.fetchval( # type: ignore\n 'SELECT setting FROM pg_settings WHERE name = $1', name\n )\n\n async def _get_reserved_connections(\n conn: asyncpg.Connection,\n ) -> int:\n rv = int(\n await _get_pg_settings(conn, 'superuser_reserved_connections')\n )\n for name in [\n 'rds.rds_superuser_reserved_connections',\n ]:\n value = await _get_pg_settings(conn, name)\n if value:\n rv += int(value)\n return rv\n\n conn = await rcluster.connect()\n try:\n cluster_type, superuser_name = await _get_cluster_type(conn)\n max_connections = await _get_pg_settings(conn, 'max_connections')\n instance_params = BackendInstanceParams(\n capabilities=await _detect_capabilities(conn),\n 
base_superuser=superuser_name,\n max_connections=int(max_connections),\n reserved_connections=await _get_reserved_connections(conn),\n tenant_id=t_id,\n )\n finally:\n await conn.close()\n\n return cluster_type(\n addrs[0],\n params,\n instance_params=instance_params,\n ha_backend=ha_backend,\n )\n\n\nasync def _run_logged_text_subprocess(\n args: Sequence[str],\n logger: logging.Logger,\n level: int = logging.DEBUG,\n check: bool = True,\n log_stdout: bool = True,\n timeout: Optional[float] = None,\n **kwargs: Any,\n) -> Tuple[List[str], List[str], int]:\n stdout_lines, stderr_lines, exit_code = await _run_logged_subprocess(\n args,\n logger=logger,\n level=level,\n check=check,\n log_stdout=log_stdout,\n timeout=timeout,\n **kwargs,\n )\n\n return (\n [line.decode() for line in stdout_lines],\n [line.decode() for line in stderr_lines],\n exit_code,\n )\n\n\nasync def _run_logged_subprocess(\n args: Sequence[str],\n logger: logging.Logger,\n level: int = logging.DEBUG,\n check: bool = True,\n log_stdout: bool = True,\n log_stderr: bool = True,\n capture_stdout: bool = True,\n capture_stderr: bool = True,\n timeout: Optional[float] = None,\n **kwargs: Any,\n) -> Tuple[List[bytes], List[bytes], int]:\n process, stdout_reader, stderr_reader = await _start_logged_subprocess(\n args,\n logger=logger,\n level=level,\n log_stdout=log_stdout,\n log_stderr=log_stderr,\n capture_stdout=capture_stdout,\n capture_stderr=capture_stderr,\n **kwargs,\n )\n\n exit_code, stdout_lines, stderr_lines = await asyncio.wait_for(\n asyncio.gather(process.wait(), stdout_reader, stderr_reader),\n timeout=timeout,\n )\n\n if exit_code != 0 and check:\n stderr_text = b'\\n'.join(stderr_lines).decode()\n raise ClusterError(\n f'{args[0]} exited with status {exit_code}:\\n'\n + textwrap.indent(stderr_text, ' ' * 4),\n )\n else:\n return stdout_lines, stderr_lines, exit_code\n\n\nasync def _start_logged_subprocess(\n args: Sequence[str],\n *,\n logger: logging.Logger,\n level: int = logging.DEBUG,\n log_stdout: bool = True,\n log_stderr: bool = True,\n capture_stdout: bool = True,\n capture_stderr: bool = True,\n log_processor: Optional[Callable[[str], Tuple[str, int]]] = None,\n **kwargs: Any,\n) -> Tuple[\n asyncio.subprocess.Process,\n Coroutine[Any, Any, List[bytes]],\n Coroutine[Any, Any, List[bytes]],\n]:\n logger.log(\n level,\n f'running `{\" \".join(shlex.quote(arg) for arg in args)}`'\n )\n\n process = await asyncio.create_subprocess_exec(\n *args,\n stdout=(\n asyncio.subprocess.PIPE if log_stdout or capture_stdout\n else asyncio.subprocess.DEVNULL\n ),\n stderr=(\n asyncio.subprocess.PIPE if log_stderr or capture_stderr\n else asyncio.subprocess.DEVNULL\n ),\n **kwargs,\n )\n\n assert process.stderr is not None\n assert process.stdout is not None\n\n if log_stderr and capture_stderr:\n stderr_reader = _capture_and_log_subprocess_output(\n process.pid,\n process.stderr,\n logger,\n level,\n log_processor,\n )\n elif capture_stderr:\n stderr_reader = _capture_subprocess_output(process.stderr)\n elif log_stderr:\n stderr_reader = _log_subprocess_output(\n process.pid, process.stderr, logger, level, log_processor)\n else:\n stderr_reader = _dummy()\n\n if log_stdout and capture_stdout:\n stdout_reader = _capture_and_log_subprocess_output(\n process.pid,\n process.stdout,\n logger,\n level,\n log_processor,\n )\n elif capture_stdout:\n stdout_reader = _capture_subprocess_output(process.stdout)\n elif log_stdout:\n stdout_reader = _log_subprocess_output(\n process.pid, process.stdout, logger, level, 
log_processor)\n else:\n stdout_reader = _dummy()\n\n return process, stdout_reader, stderr_reader\n\n\nasync def _capture_subprocess_output(\n stream: asyncio.StreamReader,\n) -> List[bytes]:\n lines = []\n while not stream.at_eof():\n line = await stream.readline()\n if line or not stream.at_eof():\n lines.append(line.rstrip(b'\\n'))\n return lines\n\n\nasync def _capture_and_log_subprocess_output(\n pid: int,\n stream: asyncio.StreamReader,\n logger: logging.Logger,\n level: int,\n log_processor: Optional[Callable[[str], Tuple[str, int]]] = None,\n) -> List[bytes]:\n lines = []\n while not stream.at_eof():\n line = await stream.readline()\n if line or not stream.at_eof():\n line = line.rstrip(b'\\n')\n lines.append(line)\n log_line = line.decode()\n if log_processor is not None:\n log_line, level = log_processor(log_line)\n logger.log(level, log_line, extra={\"process\": pid})\n return lines\n\n\nasync def _log_subprocess_output(\n pid: int,\n stream: asyncio.StreamReader,\n logger: logging.Logger,\n level: int,\n log_processor: Optional[Callable[[str], Tuple[str, int]]] = None,\n) -> List[bytes]:\n while not stream.at_eof():\n line = await stream.readline()\n if line or not stream.at_eof():\n log_line = line.rstrip(b'\\n').decode()\n if log_processor is not None:\n log_line, level = log_processor(log_line)\n logger.log(level, log_line, extra={\"process\": pid})\n return []\n\n\nasync def _dummy() -> List[bytes]:\n return []\n\n\npostgres_to_python_level_map = {\n \"DEBUG5\": logging.DEBUG,\n \"DEBUG4\": logging.DEBUG,\n \"DEBUG3\": logging.DEBUG,\n \"DEBUG2\": logging.DEBUG,\n \"DEBUG1\": logging.DEBUG,\n \"INFO\": logging.INFO,\n \"NOTICE\": logging.INFO,\n \"LOG\": logging.INFO,\n \"WARNING\": logging.WARNING,\n \"ERROR\": logging.ERROR,\n \"FATAL\": logging.CRITICAL,\n \"PANIC\": logging.CRITICAL,\n}\n\npostgres_log_re = re.compile(r'^(\\w+):\\s*(.*)$')\n\npostgres_specific_msg_level_map = {\n \"terminating connection due to administrator command\": logging.INFO,\n \"the database system is shutting down\": logging.INFO,\n}\n\n\ndef postgres_log_processor(msg: str) -> Tuple[str, int]:\n if m := postgres_log_re.match(msg):\n postgres_level = m.group(1)\n msg = m.group(2)\n level = postgres_specific_msg_level_map.get(\n msg,\n postgres_to_python_level_map.get(postgres_level, logging.INFO),\n )\n else:\n level = logging.INFO\n\n return msg, level\n", "path": "edb/server/pgcluster.py" } ]
[ { "content": "# Copyright (C) 2016-present MagicStack Inc. and the EdgeDB authors.\n# Copyright (C) 2016-present the asyncpg authors and contributors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"PostgreSQL cluster management.\"\"\"\n\nfrom __future__ import annotations\nfrom typing import *\n\nimport asyncio\nimport enum\nimport functools\nimport locale\nimport logging\nimport os\nimport os.path\nimport pathlib\nimport re\nimport shlex\nimport shutil\nimport textwrap\nimport time\nimport urllib.parse\n\nimport asyncpg\n\nfrom edb import buildmeta\nfrom edb.common import supervisor\nfrom edb.common import uuidgen\n\nfrom edb.server import defines\nfrom edb.server.ha import base as ha_base\nfrom edb.pgsql import common as pgcommon\n\nfrom . import pgconnparams\n\n\nlogger = logging.getLogger('edb.pgcluster')\npg_dump_logger = logging.getLogger('pg_dump')\npg_ctl_logger = logging.getLogger('pg_ctl')\npg_config_logger = logging.getLogger('pg_config')\ninitdb_logger = logging.getLogger('initdb')\npostgres_logger = logging.getLogger('postgres')\n\nget_database_backend_name = pgcommon.get_database_backend_name\nget_role_backend_name = pgcommon.get_role_backend_name\n\n\ndef _is_c_utf8_locale_present() -> bool:\n try:\n locale.setlocale(locale.LC_CTYPE, 'C.UTF-8')\n except Exception:\n return False\n else:\n # We specifically don't use locale.getlocale(), because\n # it can lie and return a non-existent locale due to PEP 538.\n locale.setlocale(locale.LC_CTYPE, '')\n return True\n\n\nclass ClusterError(Exception):\n pass\n\n\nclass PostgresPidFileNotReadyError(Exception):\n \"\"\"Raised on an attempt to read non-existent or bad Postgres PID file\"\"\"\n\n\nclass BackendCapabilities(enum.IntFlag):\n\n NONE = 0\n #: Whether CREATE ROLE .. 
SUPERUSER is allowed\n SUPERUSER_ACCESS = 1 << 0\n #: Whether reading PostgreSQL configuration files\n #: via pg_file_settings is allowed\n CONFIGFILE_ACCESS = 1 << 1\n #: Whether the PostgreSQL server supports the C.UTF-8 locale\n C_UTF8_LOCALE = 1 << 2\n\n\nALL_BACKEND_CAPABILITIES = (\n BackendCapabilities.SUPERUSER_ACCESS\n | BackendCapabilities.CONFIGFILE_ACCESS\n | BackendCapabilities.C_UTF8_LOCALE\n)\n\n\nclass BackendInstanceParams(NamedTuple):\n\n capabilities: BackendCapabilities\n tenant_id: str\n base_superuser: Optional[str] = None\n max_connections: int = 500\n reserved_connections: int = 0\n\n\nclass BackendRuntimeParams(NamedTuple):\n\n instance_params: BackendInstanceParams\n session_authorization_role: Optional[str] = None\n\n\[email protected]_cache\ndef get_default_runtime_params(**instance_params: Any) -> BackendRuntimeParams:\n capabilities = ALL_BACKEND_CAPABILITIES\n if not _is_c_utf8_locale_present():\n capabilities &= ~BackendCapabilities.C_UTF8_LOCALE\n instance_params.setdefault('capabilities', capabilities)\n if 'tenant_id' not in instance_params:\n instance_params = dict(\n tenant_id=buildmeta.get_default_tenant_id(),\n **instance_params,\n )\n\n return BackendRuntimeParams(\n instance_params=BackendInstanceParams(**instance_params),\n )\n\n\nclass BaseCluster:\n\n def __init__(\n self,\n *,\n instance_params: Optional[BackendInstanceParams] = None,\n ) -> None:\n self._connection_addr: Optional[Tuple[str, int]] = None\n self._connection_params: Optional[\n pgconnparams.ConnectionParameters\n ] = None\n self._default_session_auth: Optional[str] = None\n self._pg_config_data: Dict[str, str] = {}\n self._pg_bin_dir: Optional[pathlib.Path] = None\n if instance_params is None:\n self._instance_params = (\n get_default_runtime_params().instance_params)\n else:\n self._instance_params = instance_params\n\n def get_db_name(self, db_name: str) -> str:\n return get_database_backend_name(\n db_name,\n tenant_id=self._instance_params.tenant_id,\n )\n\n def get_role_name(self, role_name: str) -> str:\n return get_database_backend_name(\n role_name,\n tenant_id=self._instance_params.tenant_id,\n )\n\n async def start(\n self,\n wait: int = 60,\n *,\n server_settings: Optional[Mapping[str, str]] = None,\n **opts: Any,\n ) -> None:\n raise NotImplementedError\n\n async def stop(self, wait: int = 60) -> None:\n raise NotImplementedError\n\n def destroy(self) -> None:\n raise NotImplementedError\n\n async def connect(self, **kwargs: Any) -> asyncpg.Connection:\n conn_info = self.get_connection_spec()\n conn_info.update(kwargs)\n if 'sslmode' in conn_info:\n conn_info['ssl'] = conn_info.pop('sslmode').name\n conn = await asyncpg.connect(**conn_info)\n\n if (not kwargs.get('user')\n and self._default_session_auth\n and conn_info.get('user') != self._default_session_auth):\n # No explicit user given, and the default\n # SESSION AUTHORIZATION is different from the user\n # used to connect.\n await conn.execute(\n f'SET ROLE {pgcommon.quote_ident(self._default_session_auth)}'\n )\n\n return conn\n\n async def start_watching(\n self, cluster_protocol: Optional[ha_base.ClusterProtocol] = None\n ) -> None:\n pass\n\n def stop_watching(self) -> None:\n pass\n\n def get_runtime_params(self) -> BackendRuntimeParams:\n params = self.get_connection_params()\n login_role: Optional[str] = params.user\n sup_role = self.get_role_name(defines.EDGEDB_SUPERUSER)\n return BackendRuntimeParams(\n instance_params=self._instance_params,\n session_authorization_role=(\n None if login_role == sup_role 
else login_role\n ),\n )\n\n def get_connection_addr(self) -> Optional[Tuple[str, int]]:\n return self._get_connection_addr()\n\n def set_default_session_authorization(self, rolename: str) -> None:\n self._default_session_auth = rolename\n\n def set_connection_params(\n self,\n params: pgconnparams.ConnectionParameters,\n ) -> None:\n self._connection_params = params\n\n def get_connection_params(\n self,\n ) -> pgconnparams.ConnectionParameters:\n assert self._connection_params is not None\n return self._connection_params\n\n def get_connection_spec(self) -> Dict[str, Any]:\n conn_dict: Dict[str, Any] = {}\n addr = self.get_connection_addr()\n assert addr is not None\n conn_dict['host'] = addr[0]\n conn_dict['port'] = addr[1]\n params = self.get_connection_params()\n for k in (\n 'user',\n 'password',\n 'database',\n 'ssl',\n 'sslmode',\n 'server_settings',\n ):\n v = getattr(params, k)\n if v is not None:\n conn_dict[k] = v\n\n cluster_settings = conn_dict.get('server_settings', {})\n\n edgedb_settings = {\n 'client_encoding': 'utf-8',\n 'search_path': 'edgedb',\n 'timezone': 'UTC',\n 'intervalstyle': 'iso_8601',\n 'jit': 'off',\n }\n\n conn_dict['server_settings'] = {**cluster_settings, **edgedb_settings}\n\n return conn_dict\n\n def _get_connection_addr(self) -> Optional[Tuple[str, int]]:\n return self._connection_addr\n\n def is_managed(self) -> bool:\n raise NotImplementedError\n\n async def get_status(self) -> str:\n raise NotImplementedError\n\n async def dump_database(\n self,\n dbname: str,\n *,\n exclude_schemas: Iterable[str] = (),\n dump_object_owners: bool = True,\n ) -> bytes:\n status = await self.get_status()\n if status != 'running':\n raise ClusterError('cannot dump: cluster is not running')\n\n if self._pg_bin_dir is None:\n await self.lookup_postgres()\n pg_dump = self._find_pg_binary('pg_dump')\n conn_spec = self.get_connection_spec()\n\n args = [\n pg_dump,\n '--inserts',\n f'--dbname={dbname}',\n f'--host={conn_spec[\"host\"]}',\n f'--port={conn_spec[\"port\"]}',\n f'--username={conn_spec[\"user\"]}',\n ]\n\n if not dump_object_owners:\n args.append('--no-owner')\n\n env = os.environ.copy()\n if conn_spec.get(\"password\"):\n env['PGPASSWORD'] = conn_spec[\"password\"]\n\n if exclude_schemas:\n for exclude_schema in exclude_schemas:\n args.append(f'--exclude-schema={exclude_schema}')\n\n stdout_lines, _, _ = await _run_logged_subprocess(\n args,\n logger=pg_dump_logger,\n log_stdout=False,\n env=env,\n )\n return b'\\n'.join(stdout_lines)\n\n def _find_pg_binary(self, binary: str) -> str:\n assert self._pg_bin_dir is not None\n bpath = self._pg_bin_dir / binary\n if not bpath.is_file():\n raise ClusterError(\n 'could not find {} executable: '.format(binary) +\n '{!r} does not exist or is not a file'.format(bpath))\n\n return str(bpath)\n\n def _subprocess_error(\n self,\n name: str,\n exitcode: int,\n stderr: Optional[bytes],\n ) -> ClusterError:\n if stderr:\n return ClusterError(\n f'{name} exited with status {exitcode}:\\n'\n + textwrap.indent(stderr.decode(), ' ' * 4),\n )\n else:\n return ClusterError(\n f'{name} exited with status {exitcode}',\n )\n\n async def lookup_postgres(self) -> None:\n self._pg_bin_dir = await get_pg_bin_dir()\n\n\nclass Cluster(BaseCluster):\n def __init__(\n self,\n data_dir: pathlib.Path,\n *,\n runstate_dir: Optional[pathlib.Path] = None,\n instance_params: Optional[BackendInstanceParams] = None,\n log_level: str = 'i',\n ):\n super().__init__(instance_params=instance_params)\n self._data_dir = data_dir\n self._runstate_dir = (\n 
runstate_dir if runstate_dir is not None else data_dir)\n self._daemon_pid: Optional[int] = None\n self._daemon_process: Optional[asyncio.subprocess.Process] = None\n self._daemon_supervisor: Optional[supervisor.Supervisor] = None\n self._log_level = log_level\n\n def is_managed(self) -> bool:\n return True\n\n def get_data_dir(self) -> pathlib.Path:\n return self._data_dir\n\n async def get_status(self) -> str:\n stdout_lines, stderr_lines, exit_code = (\n await _run_logged_text_subprocess(\n [self._pg_ctl, 'status', '-D', str(self._data_dir)],\n logger=pg_ctl_logger,\n check=False,\n )\n )\n\n if (\n exit_code == 4\n or not os.path.exists(self._data_dir)\n or not os.listdir(self._data_dir)\n ):\n return 'not-initialized'\n elif exit_code == 3:\n return 'stopped'\n elif exit_code == 0:\n output = '\\n'.join(stdout_lines)\n r = re.match(r'.*PID\\s?:\\s+(\\d+).*', output)\n if not r:\n raise ClusterError(\n f'could not parse pg_ctl status output: {output}')\n self._daemon_pid = int(r.group(1))\n if self._connection_addr is None:\n self._connection_addr = self._connection_addr_from_pidfile()\n return 'running'\n else:\n stderr_text = '\\n'.join(stderr_lines)\n raise ClusterError(\n f'`pg_ctl status` exited with status {exit_code}:\\n'\n + textwrap.indent(stderr_text, ' ' * 4),\n )\n\n async def ensure_initialized(self, **settings: Any) -> bool:\n cluster_status = await self.get_status()\n\n if cluster_status == 'not-initialized':\n logger.info(\n 'Initializing database cluster in %s', self._data_dir)\n\n instance_params = self.get_runtime_params().instance_params\n capabilities = instance_params.capabilities\n have_c_utf8 = (\n capabilities & BackendCapabilities.C_UTF8_LOCALE)\n await self.init(\n username='postgres',\n locale='C.UTF-8' if have_c_utf8 else 'en_US.UTF-8',\n lc_collate='C',\n encoding='UTF8',\n )\n self.reset_hba()\n self.add_hba_entry(\n type='local',\n database='all',\n user='postgres',\n auth_method='trust'\n )\n return True\n else:\n return False\n\n async def init(self, **settings: str) -> None:\n \"\"\"Initialize cluster.\"\"\"\n if await self.get_status() != 'not-initialized':\n raise ClusterError(\n 'cluster in {!r} has already been initialized'.format(\n self._data_dir))\n\n if settings:\n settings_args = ['--{}={}'.format(k.replace('_', '-'), v)\n for k, v in settings.items()]\n extra_args = ['-o'] + [' '.join(settings_args)]\n else:\n extra_args = []\n\n await _run_logged_subprocess(\n [self._pg_ctl, 'init', '-D', str(self._data_dir)] + extra_args,\n logger=initdb_logger,\n )\n\n async def start(\n self,\n wait: int = 60,\n *,\n server_settings: Optional[Mapping[str, str]] = None,\n **opts: str,\n ) -> None:\n \"\"\"Start the cluster.\"\"\"\n status = await self.get_status()\n if status == 'running':\n return\n elif status == 'not-initialized':\n raise ClusterError(\n 'cluster in {!r} has not been initialized'.format(\n self._data_dir))\n\n extra_args = ['--{}={}'.format(k, v) for k, v in opts.items()]\n\n start_settings = {\n 'listen_addresses': '', # we use Unix sockets\n 'unix_socket_permissions': '0700',\n 'unix_socket_directories': str(self._runstate_dir),\n # here we are not setting superuser_reserved_connections because\n # we're using superuser only now (so all connections available),\n # and we don't support reserving connections for now\n 'max_connections': str(self._instance_params.max_connections),\n # From Postgres docs:\n #\n # You might need to raise this value if you have queries that\n # touch many different tables in a single transaction, e.g.,\n # 
query of a parent table with many children.\n #\n # EdgeDB queries might touch _lots_ of tables, especially in deep\n # inheritance hierarchies. This is especially important in low\n # `max_connections` scenarios.\n 'max_locks_per_transaction': 256,\n }\n\n if os.getenv('EDGEDB_DEBUG_PGSERVER'):\n start_settings['log_min_messages'] = 'info'\n start_settings['log_statement'] = 'all'\n else:\n log_level_map = {\n 'd': 'INFO',\n 'i': 'NOTICE',\n 'w': 'WARNING',\n 'e': 'ERROR',\n 's': 'PANIC',\n }\n start_settings['log_min_messages'] = log_level_map[self._log_level]\n start_settings['log_statement'] = 'none'\n start_settings['log_line_prefix'] = ''\n\n if server_settings:\n start_settings.update(server_settings)\n\n ssl_key = start_settings.get('ssl_key_file')\n if ssl_key:\n # Make sure server certificate key file has correct permissions.\n keyfile = os.path.join(self._data_dir, 'srvkey.pem')\n assert isinstance(ssl_key, str)\n shutil.copy(ssl_key, keyfile)\n os.chmod(keyfile, 0o600)\n start_settings['ssl_key_file'] = keyfile\n\n for k, v in start_settings.items():\n extra_args.extend(['-c', '{}={}'.format(k, v)])\n\n self._daemon_process, *loggers = await _start_logged_subprocess(\n [self._postgres, '-D', str(self._data_dir), *extra_args],\n capture_stdout=False,\n capture_stderr=False,\n logger=postgres_logger,\n log_processor=postgres_log_processor,\n )\n self._daemon_pid = self._daemon_process.pid\n\n sup = await supervisor.Supervisor.create(name=\"postgres loggers\")\n for logger_coro in loggers:\n sup.create_task(logger_coro)\n self._daemon_supervisor = sup\n\n await self._test_connection(timeout=wait)\n\n async def reload(self) -> None:\n \"\"\"Reload server configuration.\"\"\"\n status = await self.get_status()\n if status != 'running':\n raise ClusterError('cannot reload: cluster is not running')\n\n await _run_logged_subprocess(\n [self._pg_ctl, 'reload', '-D', str(self._data_dir)],\n logger=pg_ctl_logger,\n )\n\n async def stop(self, wait: int = 60) -> None:\n await _run_logged_subprocess(\n [\n self._pg_ctl,\n 'stop', '-D', str(self._data_dir),\n '-t', str(wait), '-m', 'fast'\n ],\n logger=pg_ctl_logger,\n )\n\n if (\n self._daemon_process is not None and\n self._daemon_process.returncode is None\n ):\n self._daemon_process.terminate()\n await asyncio.wait_for(self._daemon_process.wait(), timeout=wait)\n\n if self._daemon_supervisor is not None:\n await self._daemon_supervisor.cancel()\n self._daemon_supervisor = None\n\n def destroy(self) -> None:\n shutil.rmtree(self._data_dir)\n\n def reset_hba(self) -> None:\n \"\"\"Remove all records from pg_hba.conf.\"\"\"\n pg_hba = os.path.join(self._data_dir, 'pg_hba.conf')\n\n try:\n with open(pg_hba, 'w'):\n pass\n except IOError as e:\n raise ClusterError(\n 'cannot modify HBA records: {}'.format(e)) from e\n\n def add_hba_entry(\n self,\n *,\n type: str = 'host',\n database: str,\n user: str,\n address: Optional[str] = None,\n auth_method: str,\n auth_options: Optional[Mapping[str, Any]] = None,\n ) -> None:\n \"\"\"Add a record to pg_hba.conf.\"\"\"\n if type not in {'local', 'host', 'hostssl', 'hostnossl'}:\n raise ValueError('invalid HBA record type: {!r}'.format(type))\n\n pg_hba = os.path.join(self._data_dir, 'pg_hba.conf')\n\n record = '{} {} {}'.format(type, database, user)\n\n if type != 'local':\n if address is None:\n raise ValueError(\n '{!r} entry requires a valid address'.format(type))\n else:\n record += ' {}'.format(address)\n\n record += ' {}'.format(auth_method)\n\n if auth_options is not None:\n record += ' ' + ' 
'.join(\n '{}={}'.format(k, v) for k, v in auth_options.items())\n\n try:\n with open(pg_hba, 'a') as f:\n print(record, file=f)\n except IOError as e:\n raise ClusterError(\n 'cannot modify HBA records: {}'.format(e)) from e\n\n async def trust_local_connections(self) -> None:\n self.reset_hba()\n\n self.add_hba_entry(type='local', database='all',\n user='all', auth_method='trust')\n self.add_hba_entry(type='host', address='127.0.0.1/32',\n database='all', user='all',\n auth_method='trust')\n self.add_hba_entry(type='host', address='::1/128',\n database='all', user='all',\n auth_method='trust')\n status = await self.get_status()\n if status == 'running':\n await self.reload()\n\n async def lookup_postgres(self) -> None:\n await super().lookup_postgres()\n self._pg_ctl = self._find_pg_binary('pg_ctl')\n self._postgres = self._find_pg_binary('postgres')\n\n def _get_connection_addr(self) -> Tuple[str, int]:\n if self._connection_addr is None:\n self._connection_addr = self._connection_addr_from_pidfile()\n\n return self._connection_addr\n\n def _connection_addr_from_pidfile(self) -> Tuple[str, int]:\n pidfile = os.path.join(self._data_dir, 'postmaster.pid')\n\n try:\n with open(pidfile, 'rt') as f:\n piddata = f.read()\n except FileNotFoundError:\n raise PostgresPidFileNotReadyError\n\n lines = piddata.splitlines()\n\n if len(lines) < 6:\n # A complete postgres pidfile is at least 6 lines\n raise PostgresPidFileNotReadyError\n\n pmpid = int(lines[0])\n if self._daemon_pid and pmpid != self._daemon_pid:\n # This might be an old pidfile left from previous postgres\n # daemon run.\n raise PostgresPidFileNotReadyError\n\n portnum = int(lines[3])\n sockdir = lines[4]\n hostaddr = lines[5]\n\n if sockdir:\n if sockdir[0] != '/':\n # Relative sockdir\n sockdir = os.path.normpath(\n os.path.join(self._data_dir, sockdir))\n host_str = sockdir\n elif hostaddr:\n host_str = hostaddr\n else:\n raise PostgresPidFileNotReadyError\n\n if host_str == '*':\n host_str = 'localhost'\n elif host_str == '0.0.0.0':\n host_str = '127.0.0.1'\n elif host_str == '::':\n host_str = '::1'\n\n return (host_str, portnum)\n\n async def _test_connection(self, timeout: int = 60) -> str:\n self._connection_addr = None\n connected = False\n\n for n in range(timeout + 1):\n # pg usually comes up pretty quickly, but not so\n # quickly that we don't hit the wait case. 
Make our\n # first sleep pretty short, to shave almost a second\n # off the happy case.\n sleep_time = 1 if n else 0.10\n\n try:\n conn_addr = self._get_connection_addr()\n except PostgresPidFileNotReadyError:\n time.sleep(sleep_time)\n continue\n\n try:\n con = await asyncpg.connect(\n database='postgres',\n user='postgres',\n timeout=5,\n host=conn_addr[0],\n port=conn_addr[1],\n )\n except (\n OSError,\n asyncio.TimeoutError,\n asyncpg.CannotConnectNowError,\n asyncpg.PostgresConnectionError,\n ):\n time.sleep(sleep_time)\n continue\n except asyncpg.PostgresError:\n # Any other error other than ServerNotReadyError or\n # ConnectionError is interpreted to indicate the server is\n # up.\n break\n else:\n connected = True\n await con.close()\n break\n\n if connected:\n return 'running'\n else:\n return 'not-initialized'\n\n\nclass RemoteCluster(BaseCluster):\n def __init__(\n self,\n addr: Tuple[str, int],\n params: pgconnparams.ConnectionParameters,\n *,\n instance_params: Optional[BackendInstanceParams] = None,\n ha_backend: Optional[ha_base.HABackend] = None,\n ):\n super().__init__(instance_params=instance_params)\n self._connection_addr = addr\n self._connection_params = params\n self._ha_backend = ha_backend\n\n def _get_connection_addr(self) -> Optional[Tuple[str, int]]:\n if self._ha_backend is not None:\n return self._ha_backend.get_master_addr()\n return self._connection_addr\n\n async def ensure_initialized(self, **settings: Any) -> bool:\n return False\n\n def is_managed(self) -> bool:\n return False\n\n async def get_status(self) -> str:\n return 'running'\n\n def init(self, **settings: str) -> str:\n pass\n\n async def start(\n self,\n wait: int = 60,\n *,\n server_settings: Optional[Mapping[str, str]] = None,\n **opts: Any,\n ) -> None:\n pass\n\n async def stop(self, wait: int = 60) -> None:\n pass\n\n def destroy(self) -> None:\n pass\n\n def reset_hba(self) -> None:\n raise ClusterError('cannot modify HBA records of unmanaged cluster')\n\n def add_hba_entry(\n self,\n *,\n type: str = 'host',\n database: str,\n user: str,\n address: Optional[str] = None,\n auth_method: str,\n auth_options: Optional[Mapping[str, Any]] = None,\n ) -> None:\n raise ClusterError('cannot modify HBA records of unmanaged cluster')\n\n async def start_watching(\n self, cluster_protocol: Optional[ha_base.ClusterProtocol] = None\n ) -> None:\n if self._ha_backend is not None:\n await self._ha_backend.start_watching(cluster_protocol)\n\n def stop_watching(self) -> None:\n if self._ha_backend is not None:\n self._ha_backend.stop_watching()\n\n\nasync def get_pg_bin_dir() -> pathlib.Path:\n pg_config_data = await get_pg_config()\n pg_bin_dir = pg_config_data.get('bindir')\n if not pg_bin_dir:\n raise ClusterError(\n 'pg_config output did not provide the BINDIR value')\n return pathlib.Path(pg_bin_dir)\n\n\nasync def get_pg_config() -> Dict[str, str]:\n stdout_lines, _, _ = await _run_logged_text_subprocess(\n [str(buildmeta.get_pg_config_path())],\n logger=pg_config_logger,\n )\n\n config = {}\n for line in stdout_lines:\n k, eq, v = line.partition('=')\n if eq:\n config[k.strip().lower()] = v.strip()\n\n return config\n\n\nasync def get_local_pg_cluster(\n data_dir: pathlib.Path,\n *,\n runstate_dir: Optional[pathlib.Path] = None,\n max_connections: Optional[int] = None,\n tenant_id: Optional[str] = None,\n log_level: Optional[str] = None,\n) -> Cluster:\n if log_level is None:\n log_level = 'i'\n if tenant_id is None:\n tenant_id = buildmeta.get_default_tenant_id()\n instance_params = None\n if 
max_connections is not None:\n instance_params = get_default_runtime_params(\n max_connections=max_connections,\n tenant_id=tenant_id,\n ).instance_params\n cluster = Cluster(\n data_dir=data_dir,\n runstate_dir=runstate_dir,\n instance_params=instance_params,\n log_level=log_level,\n )\n await cluster.lookup_postgres()\n return cluster\n\n\nasync def get_remote_pg_cluster(\n dsn: str,\n *,\n tenant_id: Optional[str] = None,\n) -> RemoteCluster:\n parsed = urllib.parse.urlparse(dsn)\n ha_backend = None\n\n if parsed.scheme not in {'postgresql', 'postgres'}:\n ha_backend = ha_base.get_backend(parsed)\n if ha_backend is None:\n raise ValueError(\n 'invalid DSN: scheme is expected to be \"postgresql\", '\n '\"postgres\" or one of the supported HA backend, '\n 'got {!r}'.format(parsed.scheme))\n\n addr = await ha_backend.get_cluster_consensus()\n dsn = 'postgresql://{}:{}'.format(*addr)\n\n addrs, params = pgconnparams.parse_dsn(dsn)\n if len(addrs) > 1:\n raise ValueError('multiple hosts in Postgres DSN are not supported')\n if tenant_id is None:\n t_id = buildmeta.get_default_tenant_id()\n else:\n t_id = tenant_id\n rcluster = RemoteCluster(addrs[0], params)\n\n async def _get_cluster_type(\n conn: asyncpg.Connection,\n ) -> Tuple[Type[RemoteCluster], Optional[str]]:\n managed_clouds = {\n 'rds_superuser': RemoteCluster, # Amazon RDS\n 'cloudsqlsuperuser': RemoteCluster, # GCP Cloud SQL\n }\n\n managed_cloud_super = await conn.fetchval(\n \"\"\"\n SELECT\n rolname\n FROM\n pg_roles\n WHERE\n rolname = any($1::text[])\n LIMIT\n 1\n \"\"\",\n list(managed_clouds),\n )\n\n if managed_cloud_super is not None:\n return managed_clouds[managed_cloud_super], managed_cloud_super\n else:\n return RemoteCluster, None\n\n async def _detect_capabilities(\n conn: asyncpg.Connection,\n ) -> BackendCapabilities:\n caps = BackendCapabilities.NONE\n\n try:\n await conn.execute(f'ALTER SYSTEM SET foo = 10')\n except asyncpg.InsufficientPrivilegeError:\n configfile_access = False\n except asyncpg.UndefinedObjectError:\n configfile_access = True\n else:\n configfile_access = True\n\n if configfile_access:\n caps |= BackendCapabilities.CONFIGFILE_ACCESS\n\n tx = conn.transaction()\n await tx.start()\n rname = str(uuidgen.uuid1mc())\n\n try:\n await conn.execute(f'CREATE ROLE \"{rname}\" WITH SUPERUSER')\n except asyncpg.InsufficientPrivilegeError:\n can_make_superusers = False\n else:\n can_make_superusers = True\n finally:\n await tx.rollback()\n\n if can_make_superusers:\n caps |= BackendCapabilities.SUPERUSER_ACCESS\n\n coll = await conn.fetchval('''\n SELECT collname FROM pg_collation\n WHERE lower(replace(collname, '-', '')) = 'c.utf8' LIMIT 1;\n ''')\n\n if coll is not None:\n caps |= BackendCapabilities.C_UTF8_LOCALE\n\n return caps\n\n async def _get_pg_settings(\n conn: asyncpg.Connection,\n name: str,\n ) -> str:\n return await conn.fetchval( # type: ignore\n 'SELECT setting FROM pg_settings WHERE name = $1', name\n )\n\n async def _get_reserved_connections(\n conn: asyncpg.Connection,\n ) -> int:\n rv = int(\n await _get_pg_settings(conn, 'superuser_reserved_connections')\n )\n for name in [\n 'rds.rds_superuser_reserved_connections',\n ]:\n value = await _get_pg_settings(conn, name)\n if value:\n rv += int(value)\n return rv\n\n conn = await rcluster.connect()\n try:\n cluster_type, superuser_name = await _get_cluster_type(conn)\n max_connections = await _get_pg_settings(conn, 'max_connections')\n instance_params = BackendInstanceParams(\n capabilities=await _detect_capabilities(conn),\n 
base_superuser=superuser_name,\n max_connections=int(max_connections),\n reserved_connections=await _get_reserved_connections(conn),\n tenant_id=t_id,\n )\n finally:\n await conn.close()\n\n return cluster_type(\n addrs[0],\n params,\n instance_params=instance_params,\n ha_backend=ha_backend,\n )\n\n\nasync def _run_logged_text_subprocess(\n args: Sequence[str],\n logger: logging.Logger,\n level: int = logging.DEBUG,\n check: bool = True,\n log_stdout: bool = True,\n timeout: Optional[float] = None,\n **kwargs: Any,\n) -> Tuple[List[str], List[str], int]:\n stdout_lines, stderr_lines, exit_code = await _run_logged_subprocess(\n args,\n logger=logger,\n level=level,\n check=check,\n log_stdout=log_stdout,\n timeout=timeout,\n **kwargs,\n )\n\n return (\n [line.decode() for line in stdout_lines],\n [line.decode() for line in stderr_lines],\n exit_code,\n )\n\n\nasync def _run_logged_subprocess(\n args: Sequence[str],\n logger: logging.Logger,\n level: int = logging.DEBUG,\n check: bool = True,\n log_stdout: bool = True,\n log_stderr: bool = True,\n capture_stdout: bool = True,\n capture_stderr: bool = True,\n timeout: Optional[float] = None,\n **kwargs: Any,\n) -> Tuple[List[bytes], List[bytes], int]:\n process, stdout_reader, stderr_reader = await _start_logged_subprocess(\n args,\n logger=logger,\n level=level,\n log_stdout=log_stdout,\n log_stderr=log_stderr,\n capture_stdout=capture_stdout,\n capture_stderr=capture_stderr,\n **kwargs,\n )\n\n exit_code, stdout_lines, stderr_lines = await asyncio.wait_for(\n asyncio.gather(process.wait(), stdout_reader, stderr_reader),\n timeout=timeout,\n )\n\n if exit_code != 0 and check:\n stderr_text = b'\\n'.join(stderr_lines).decode()\n raise ClusterError(\n f'{args[0]} exited with status {exit_code}:\\n'\n + textwrap.indent(stderr_text, ' ' * 4),\n )\n else:\n return stdout_lines, stderr_lines, exit_code\n\n\nasync def _start_logged_subprocess(\n args: Sequence[str],\n *,\n logger: logging.Logger,\n level: int = logging.DEBUG,\n log_stdout: bool = True,\n log_stderr: bool = True,\n capture_stdout: bool = True,\n capture_stderr: bool = True,\n log_processor: Optional[Callable[[str], Tuple[str, int]]] = None,\n **kwargs: Any,\n) -> Tuple[\n asyncio.subprocess.Process,\n Coroutine[Any, Any, List[bytes]],\n Coroutine[Any, Any, List[bytes]],\n]:\n logger.log(\n level,\n f'running `{\" \".join(shlex.quote(arg) for arg in args)}`'\n )\n\n process = await asyncio.create_subprocess_exec(\n *args,\n stdout=(\n asyncio.subprocess.PIPE if log_stdout or capture_stdout\n else asyncio.subprocess.DEVNULL\n ),\n stderr=(\n asyncio.subprocess.PIPE if log_stderr or capture_stderr\n else asyncio.subprocess.DEVNULL\n ),\n **kwargs,\n )\n\n assert process.stderr is not None\n assert process.stdout is not None\n\n if log_stderr and capture_stderr:\n stderr_reader = _capture_and_log_subprocess_output(\n process.pid,\n process.stderr,\n logger,\n level,\n log_processor,\n )\n elif capture_stderr:\n stderr_reader = _capture_subprocess_output(process.stderr)\n elif log_stderr:\n stderr_reader = _log_subprocess_output(\n process.pid, process.stderr, logger, level, log_processor)\n else:\n stderr_reader = _dummy()\n\n if log_stdout and capture_stdout:\n stdout_reader = _capture_and_log_subprocess_output(\n process.pid,\n process.stdout,\n logger,\n level,\n log_processor,\n )\n elif capture_stdout:\n stdout_reader = _capture_subprocess_output(process.stdout)\n elif log_stdout:\n stdout_reader = _log_subprocess_output(\n process.pid, process.stdout, logger, level, 
log_processor)\n else:\n stdout_reader = _dummy()\n\n return process, stdout_reader, stderr_reader\n\n\nasync def _capture_subprocess_output(\n stream: asyncio.StreamReader,\n) -> List[bytes]:\n lines = []\n while not stream.at_eof():\n line = await stream.readline()\n if line or not stream.at_eof():\n lines.append(line.rstrip(b'\\n'))\n return lines\n\n\nasync def _capture_and_log_subprocess_output(\n pid: int,\n stream: asyncio.StreamReader,\n logger: logging.Logger,\n level: int,\n log_processor: Optional[Callable[[str], Tuple[str, int]]] = None,\n) -> List[bytes]:\n lines = []\n while not stream.at_eof():\n line = await stream.readline()\n if line or not stream.at_eof():\n line = line.rstrip(b'\\n')\n lines.append(line)\n log_line = line.decode()\n if log_processor is not None:\n log_line, level = log_processor(log_line)\n logger.log(level, log_line, extra={\"process\": pid})\n return lines\n\n\nasync def _log_subprocess_output(\n pid: int,\n stream: asyncio.StreamReader,\n logger: logging.Logger,\n level: int,\n log_processor: Optional[Callable[[str], Tuple[str, int]]] = None,\n) -> List[bytes]:\n while not stream.at_eof():\n line = await stream.readline()\n if line or not stream.at_eof():\n log_line = line.rstrip(b'\\n').decode()\n if log_processor is not None:\n log_line, level = log_processor(log_line)\n logger.log(level, log_line, extra={\"process\": pid})\n return []\n\n\nasync def _dummy() -> List[bytes]:\n return []\n\n\npostgres_to_python_level_map = {\n \"DEBUG5\": logging.DEBUG,\n \"DEBUG4\": logging.DEBUG,\n \"DEBUG3\": logging.DEBUG,\n \"DEBUG2\": logging.DEBUG,\n \"DEBUG1\": logging.DEBUG,\n \"INFO\": logging.INFO,\n \"NOTICE\": logging.INFO,\n \"LOG\": logging.INFO,\n \"WARNING\": logging.WARNING,\n \"ERROR\": logging.ERROR,\n \"FATAL\": logging.CRITICAL,\n \"PANIC\": logging.CRITICAL,\n}\n\npostgres_log_re = re.compile(r'^(\\w+):\\s*(.*)$')\n\npostgres_specific_msg_level_map = {\n \"terminating connection due to administrator command\": logging.INFO,\n \"the database system is shutting down\": logging.INFO,\n}\n\n\ndef postgres_log_processor(msg: str) -> Tuple[str, int]:\n if m := postgres_log_re.match(msg):\n postgres_level = m.group(1)\n msg = m.group(2)\n level = postgres_specific_msg_level_map.get(\n msg,\n postgres_to_python_level_map.get(postgres_level, logging.INFO),\n )\n else:\n level = logging.INFO\n\n return msg, level\n", "path": "edb/server/pgcluster.py" } ]
diff --git a/edb/server/pgcluster.py b/edb/server/pgcluster.py index 642609d560d..1a1cc0760b8 100644 --- a/edb/server/pgcluster.py +++ b/edb/server/pgcluster.py @@ -678,8 +678,10 @@ def _connection_addr_from_pidfile(self) -> Tuple[str, int]: sockdir = os.path.normpath( os.path.join(self._data_dir, sockdir)) host_str = sockdir - else: + elif hostaddr: host_str = hostaddr + else: + raise PostgresPidFileNotReadyError if host_str == '*': host_str = 'localhost'
EdgeDB server doesn't always correctly recognize PostgreSQL server startup <!-- Please search existing issues to avoid creating duplicates. --> - EdgeDB Version: 1.0-rc.2+dev6093.d2021102414.gec7d60ef3.cv202110240000 - OS Version: Arch Linux under WSL2 When the EdgeDB server starts up during `edb server` or `edb test` commands and tries to read the `postmaster.pid` file, sometimes it doesn't do it quite correctly, as it seems to me. I printed the content of the file and parsed host and port in `_connection_addr_from_pidfile` method and got the following: ``` 157186 /home/nik/.local/share/edgedb/_localdev 1635094601 5432 27256 262160 ('', 5432) ``` This results in parsing the host to an empty string, trying to connect to it in `pgcon._connect` and getting a `ConnectionRefusedError` in the process. I suspect that the `postmaster.pid` file itself did not have time to initialize fully because when I ran `edgedb-server --bootstrap-only` my computer already had almost 100% CPU/memory load. Also, the problem disappeared when I tried to repeat the steps, but my PC had free resources. But perhaps the `edb.server.pgcluster` module itself should have a check that the `postmaster.pid` file hasn't been fully loaded or something like that anyway. Steps to Reproduce: 1. Load PC resources to the limit (?) 2. Run `edgedb-server --bootstrap-only` Full logs of server startup: ``` INFO 159251 2021-10-24T20:04:35.317 edb.server: EdgeDB server (version: 1.0-rc.2+dev6093.d2021102414.gec7d60ef3.cv202110240000) is starting in DEV mode. INFO 159251 2021-10-24T20:04:36.056 edb.server: Using 127 max backend connections based on total memory. 159273 /home/nik/.local/share/edgedb/_localdev 1635095076 5432 27256 262207 ('', 5432) INFO 159273 2021-10-24T20:04:36.229 postgres: starting PostgreSQL 13.4 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 11.1.0, 64-bit INFO 159273 2021-10-24T20:04:37.240 postgres: listening on Unix socket "/home/nik/.local/share/edgedb/_localdev/.s.PGSQL.5432" INFO 159273 2021-10-24T20:04:37.240 postgres: database system was shut down at 2021-10-24 20:04:33 MSK INFO 159273 2021-10-24T20:04:37.241 postgres: database system is ready to accept connections INFO 159273 2021-10-24T20:05:37.071 postgres: received fast shutdown request INFO 159273 2021-10-24T20:05:37.106 postgres: aborting any active transactions INFO 159273 2021-10-24T20:05:37.107 postgres: background worker "logical replication launcher" (PID 159284) exited with exit code 1 INFO 159273 2021-10-24T20:05:37.107 postgres: shutting down INFO 159273 2021-10-24T20:05:37.239 postgres: database system is shut down ======================================================== Exception occurred: [Errno 111] Connection refused ========================================================= 1.
ConnectionRefusedError: [Errno 111] Connection refused ----------------------------------------------------------------------------- Traceback ----------------------------------------------------------------------------- /home/nik/.virtualenvs/edgedb/bin/edb, line 33, in <module> > sys.exit(load_entry_point('edgedb-server', 'console_scripts', 'edb')()) /home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/click/core.py, line 829, in __call__ > return self.main(*args, **kwargs) /home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/click/core.py, line 782, in main > rv = self.invoke(ctx) /home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/click/core.py, line 1259, in invoke > return _process_result(sub_ctx.command.invoke(sub_ctx)) /home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/click/core.py, line 1066, in invoke > return ctx.invoke(self.callback, **ctx.params) /home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/click/core.py, line 610, in invoke > return callback(*args, **kwargs) /home/nik/projects/edgedb/edgedb/edb/tools/edb.py, line 55, in server > srv_main.server_main(**kwargs) /home/nik/projects/edgedb/edgedb/edb/server/main.py, line 556, in server_main > asyncio.run(run_server(server_args)) /home/nik/.asdf/installs/python/3.10.0/lib/python3.10/asyncio/runners.py, line 44, in run > return loop.run_until_complete(main) uvloop/loop.pyx, line 1501, in uvloop.loop.Loop.run_until_complete /home/nik/projects/edgedb/edgedb/edb/server/main.py, line 459, in run_server > need_cluster_restart = await _init_cluster(cluster, args) /home/nik/projects/edgedb/edgedb/edb/server/main.py, line 141, in _init_cluster > need_restart = await bootstrap.ensure_bootstrapped(cluster, args) /home/nik/projects/edgedb/edgedb/edb/server/bootstrap.py, line 1386, in ensure_bootstrapped > pgconn = await cluster.connect() /home/nik/projects/edgedb/edgedb/edb/server/pgcluster.py, line 184, in connect > conn = await asyncpg.connect(**conn_info) /home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/asyncpg/connection.py, line 2045, in connect > return await connect_utils._connect( /home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/asyncpg/connect_utils.py, line 790, in _connect > raise last_error /home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/asyncpg/connect_utils.py, line 776, in _connect > return await _connect_addr( /home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/asyncpg/connect_utils.py, line 676, in _connect_addr > return await __connect_addr(params, timeout, True, *args) /home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/asyncpg/connect_utils.py, line 720, in __connect_addr > tr, pr = await compat.wait_for(connector, timeout=timeout) /home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/asyncpg/compat.py, line 66, in wait_for > return await asyncio.wait_for(fut, timeout) /home/nik/.asdf/installs/python/3.10.0/lib/python3.10/asyncio/tasks.py, line 447, in wait_for > return fut.result() /home/nik/.virtualenvs/edgedb/lib/python3.10/site-packages/asyncpg/connect_utils.py, line 586, in _create_ssl_connection > tr, pr = await loop.create_connection( uvloop/loop.pyx, line 2024, in create_connection uvloop/loop.pyx, line 2001, in uvloop.loop.Loop.create_connection ConnectionRefusedError: [Errno 111] Connection refused ```
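The pr_diff above addresses this by treating a partially written `postmaster.pid` (no socket directory and no listen address yet) as "not ready" instead of silently returning an empty host string. A minimal standalone sketch of that idea follows; only the sockdir/hostaddr branching and the `PostgresPidFileNotReadyError` name come from the diff, while the parsing helper, the pidfile line layout comment, and the retry caller are illustrative assumptions rather than EdgeDB's actual code.

```python
# Minimal sketch, not EdgeDB's implementation: raise "not ready" instead of
# returning an empty host when postmaster.pid is only partially written.
import os
import time
from typing import Tuple


class PostgresPidFileNotReadyError(Exception):
    """postmaster.pid exists but has not been fully written yet."""


def connection_addr_from_pidfile(data_dir: str) -> Tuple[str, int]:
    # postmaster.pid is written one value per line, roughly:
    # pid, data dir, start time, port, socket dir, listen addr, ...
    with open(os.path.join(data_dir, 'postmaster.pid'), 'rt') as f:
        lines = f.read().splitlines()

    if len(lines) < 6:
        # The starting postmaster has not finished writing the file.
        raise PostgresPidFileNotReadyError()

    port = int(lines[3])
    sockdir, hostaddr = lines[4].strip(), lines[5].strip()

    if sockdir:
        host = sockdir      # prefer the Unix socket directory
    elif hostaddr:
        host = hostaddr     # fall back to the TCP listen address
    else:
        # Neither value is present yet: signal "try again" to the caller.
        raise PostgresPidFileNotReadyError()

    return ('localhost' if host == '*' else host), port


def wait_for_connection_addr(data_dir: str, timeout: float = 30.0) -> Tuple[str, int]:
    # Illustrative caller: retry until the pid file is complete or time runs out.
    deadline = time.monotonic() + timeout
    while True:
        try:
            return connection_addr_from_pidfile(data_dir)
        except (FileNotFoundError, PostgresPidFileNotReadyError):
            if time.monotonic() >= deadline:
                raise
            time.sleep(0.1)
```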
pydantic__pydantic-391
[ { "content": "import re\nfrom importlib.machinery import SourceFileLoader\nfrom pathlib import Path\nfrom setuptools import setup\n\n\nclass ReplaceLinks:\n def __init__(self):\n self.links = set()\n\n def replace_issues(self, m):\n id = m.group(1)\n self.links.add(f'.. _#{id}: https://github.com/samuelcolvin/pydantic/issues/{id}')\n return f'`#{id}`_'\n\n def replace_users(self, m):\n name = m.group(2)\n self.links.add(f'.. _@{name}: https://github.com/{name}')\n return f'{m.group(1)}`@{name}`_'\n\n def extra(self):\n return '\\n\\n' + '\\n'.join(self.links) + '\\n'\n\n\ndescription = 'Data validation and settings management using python 3.6 type hinting'\nTHIS_DIR = Path(__file__).resolve().parent\ntry:\n history = THIS_DIR.joinpath('HISTORY.rst').read_text()\n\n replacer = ReplaceLinks()\n history = re.sub(r'#(\\d+)', replacer.replace_issues, history)\n history = re.sub(r'( +)@(\\w+)', replacer.replace_users, history, flags=re.I)\n history = re.sub(r'@@', '@', history)\n history += replacer.extra()\n\n long_description = '\\n\\n'.join([THIS_DIR.joinpath('README.rst').read_text(), history])\nexcept FileNotFoundError:\n long_description = description + '.\\n\\nSee https://pydantic-docs.helpmanual.io/ for documentation.'\n\n# avoid loading the package before requirements are installed:\nversion = SourceFileLoader('version', 'pydantic/version.py').load_module()\n\nsetup(\n name='pydantic',\n version=str(version.VERSION),\n description=description,\n long_description=long_description,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Unix',\n 'Operating System :: POSIX :: Linux',\n 'Environment :: Console',\n 'Environment :: MacOS X',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Internet',\n ],\n author='Samuel Colvin',\n author_email='[email protected]',\n url='https://github.com/samuelcolvin/pydantic',\n license='MIT',\n packages=['pydantic'],\n python_requires='>=3.6',\n zip_safe=True,\n install_requires=[\n 'dataclasses>=0.6;python_version<\"3.7\"'\n ],\n extras_require={\n 'ujson': ['ujson>=1.35'],\n 'email': ['email-validator>=1.0.3'],\n }\n)\n", "path": "setup.py" } ]
[ { "content": "import re\nfrom importlib.machinery import SourceFileLoader\nfrom pathlib import Path\nfrom setuptools import setup\n\n\nclass ReplaceLinks:\n def __init__(self):\n self.links = set()\n\n def replace_issues(self, m):\n id = m.group(1)\n self.links.add(f'.. _#{id}: https://github.com/samuelcolvin/pydantic/issues/{id}')\n return f'`#{id}`_'\n\n def replace_users(self, m):\n name = m.group(2)\n self.links.add(f'.. _@{name}: https://github.com/{name}')\n return f'{m.group(1)}`@{name}`_'\n\n def extra(self):\n return '\\n\\n' + '\\n'.join(self.links) + '\\n'\n\n\ndescription = 'Data validation and settings management using python 3.6 type hinting'\nTHIS_DIR = Path(__file__).resolve().parent\ntry:\n history = THIS_DIR.joinpath('HISTORY.rst').read_text()\n\n replacer = ReplaceLinks()\n history = re.sub(r'#(\\d+)', replacer.replace_issues, history)\n history = re.sub(r'( +)@(\\w+)', replacer.replace_users, history, flags=re.I)\n history = re.sub(r'@@', '@', history)\n history += replacer.extra()\n\n long_description = '\\n\\n'.join([THIS_DIR.joinpath('README.rst').read_text(), history])\nexcept FileNotFoundError:\n long_description = description + '.\\n\\nSee https://pydantic-docs.helpmanual.io/ for documentation.'\n\n# avoid loading the package before requirements are installed:\nversion = SourceFileLoader('version', 'pydantic/version.py').load_module()\n\nsetup(\n name='pydantic',\n version=str(version.VERSION),\n description=description,\n long_description=long_description,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Unix',\n 'Operating System :: POSIX :: Linux',\n 'Environment :: Console',\n 'Environment :: MacOS X',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Internet',\n ],\n author='Samuel Colvin',\n author_email='[email protected]',\n url='https://github.com/samuelcolvin/pydantic',\n license='MIT',\n packages=['pydantic'],\n package_data={'pydantic': ['py.typed']},\n python_requires='>=3.6',\n zip_safe=False, # https://mypy.readthedocs.io/en/latest/installed_packages.html\n install_requires=[\n 'dataclasses>=0.6;python_version<\"3.7\"'\n ],\n extras_require={\n 'ujson': ['ujson>=1.35'],\n 'email': ['email-validator>=1.0.3'],\n }\n)\n", "path": "setup.py" } ]
diff --git a/pydantic/py.typed b/pydantic/py.typed new file mode 100644 index 00000000000..e69de29bb2d diff --git a/setup.py b/setup.py index 12247aa67ea..df3480316f8 100644 --- a/setup.py +++ b/setup.py @@ -68,8 +68,9 @@ def extra(self): url='https://github.com/samuelcolvin/pydantic', license='MIT', packages=['pydantic'], + package_data={'pydantic': ['py.typed']}, python_requires='>=3.6', - zip_safe=True, + zip_safe=False, # https://mypy.readthedocs.io/en/latest/installed_packages.html install_requires=[ 'dataclasses>=0.6;python_version<"3.7"' ],
Include a PEP 561 marker file # Feature Request Hi, The new version 0.19 has improved typing support which is great, but looks like it doesn't work out of the box. I had similar problems as described in #245 , but after adding the installation to MYPYPATH it works fine. I think a PEP 561 marker file `py.typed` should be added so that tools like mypy can utilize the inline type information without any configuration. Reading mypy docs looks like there is a downside that `zip_safe` must be disabled for this. https://mypy.readthedocs.io/en/latest/installed_packages.html https://www.python.org/dev/peps/pep-0561/
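The pr_diff above is the whole of the change: an empty `pydantic/py.typed` marker plus two `setup()` keys. For reference, a trimmed sketch of what that looks like in a setup.py, assuming the empty marker file has been committed next to the package's `__init__.py`; all other metadata is omitted here for brevity.

```python
# Minimal sketch of a PEP 561-compliant setup.py for an inline-typed package.
from setuptools import setup

setup(
    name='pydantic',
    packages=['pydantic'],
    # Ship the marker file so installed copies advertise their inline types.
    package_data={'pydantic': ['py.typed']},
    # Typed packages must not be installed as zipped eggs:
    # https://mypy.readthedocs.io/en/latest/installed_packages.html
    zip_safe=False,
)
```

With the marker installed alongside the package, mypy should pick up the annotations of the installed distribution without any MYPYPATH configuration, which is exactly the behaviour the issue asks for.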
e-valuation__EvaP-1241
[ { "content": "from datetime import datetime, date, timedelta\nimport logging\nimport random\nimport uuid\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.models import AbstractBaseUser, BaseUserManager, Group, PermissionsMixin\nfrom django.core.cache import caches\nfrom django.core.exceptions import ValidationError\nfrom django.core.mail import EmailMessage\nfrom django.db import models, transaction\nfrom django.db.models import Count, Q, Manager\nfrom django.dispatch import Signal, receiver\nfrom django.template import Context, Template\nfrom django.template.base import TemplateSyntaxError\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\nfrom django_fsm import FSMField, transition\nfrom django_fsm.signals import post_transition\n# see evaluation.meta for the use of Translate in this file\nfrom evap.evaluation.meta import LocalizeModelBase, Translate\nfrom evap.evaluation.tools import date_to_datetime, get_due_courses_for_user\n\nlogger = logging.getLogger(__name__)\n\n\nclass NotArchiveable(Exception):\n \"\"\"An attempt has been made to archive something that is not archiveable.\"\"\"\n pass\n\n\nclass Semester(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Represents a semester, e.g. the winter term of 2011/2012.\"\"\"\n\n name_de = models.CharField(max_length=1024, unique=True, verbose_name=_(\"name (german)\"))\n name_en = models.CharField(max_length=1024, unique=True, verbose_name=_(\"name (english)\"))\n name = Translate\n\n short_name_de = models.CharField(max_length=20, unique=True, verbose_name=_(\"short name (german)\"))\n short_name_en = models.CharField(max_length=20, unique=True, verbose_name=_(\"short name (english)\"))\n short_name = Translate\n\n participations_are_archived = models.BooleanField(default=False, verbose_name=_(\"participations are archived\"))\n grade_documents_are_deleted = models.BooleanField(default=False, verbose_name=_(\"grade documents are deleted\"))\n results_are_archived = models.BooleanField(default=False, verbose_name=_(\"results are archived\"))\n\n created_at = models.DateField(verbose_name=_(\"created at\"), auto_now_add=True)\n\n class Meta:\n ordering = ('-created_at', 'name_de')\n verbose_name = _(\"semester\")\n verbose_name_plural = _(\"semesters\")\n\n def __str__(self):\n return self.name\n\n @property\n def can_staff_delete(self):\n return all(course.can_staff_delete for course in self.course_set.all())\n\n @property\n def participations_can_be_archived(self):\n return not self.participations_are_archived and all(course.participations_can_be_archived for course in self.course_set.all())\n\n @property\n def grade_documents_can_be_deleted(self):\n return not self.grade_documents_are_deleted\n\n @property\n def results_can_be_archived(self):\n return not self.results_are_archived\n\n @transaction.atomic\n def archive_participations(self):\n if not self.participations_can_be_archived:\n raise NotArchiveable()\n for course in self.course_set.all():\n course._archive_participations()\n self.participations_are_archived = True\n self.save()\n\n @transaction.atomic\n def delete_grade_documents(self):\n from evap.grades.models import GradeDocument\n\n if not self.grade_documents_can_be_deleted:\n raise NotArchiveable()\n GradeDocument.objects.filter(course__semester=self).delete()\n self.grade_documents_are_deleted = True\n self.save()\n\n def archive_results(self):\n if not self.results_can_be_archived:\n raise 
NotArchiveable()\n self.results_are_archived = True\n self.save()\n\n @classmethod\n def get_all_with_unarchived_results(cls):\n return cls.objects.filter(results_are_archived=False).distinct()\n\n @classmethod\n def get_all_with_published_unarchived_results(cls):\n return cls.objects.filter(course__state=\"published\", results_are_archived=False).distinct()\n\n @classmethod\n def active_semester(cls):\n return cls.objects.order_by(\"created_at\").last()\n\n @property\n def is_active_semester(self):\n return self == Semester.active_semester()\n\n\nclass QuestionnaireManager(Manager):\n def course_questionnaires(self):\n return super().get_queryset().exclude(type=Questionnaire.CONTRIBUTOR)\n\n def contributor_questionnaires(self):\n return super().get_queryset().filter(type=Questionnaire.CONTRIBUTOR)\n\n\nclass Questionnaire(models.Model, metaclass=LocalizeModelBase):\n \"\"\"A named collection of questions.\"\"\"\n\n TOP = 10\n CONTRIBUTOR = 20\n BOTTOM = 30\n TYPE_CHOICES = (\n (TOP, _('Top questionnaire')),\n (CONTRIBUTOR, _('Contributor questionnaire')),\n (BOTTOM, _('Bottom questionnaire')),\n )\n type = models.IntegerField(choices=TYPE_CHOICES, verbose_name=_('type'), default=TOP)\n\n name_de = models.CharField(max_length=1024, unique=True, verbose_name=_(\"name (german)\"))\n name_en = models.CharField(max_length=1024, unique=True, verbose_name=_(\"name (english)\"))\n name = Translate\n\n description_de = models.TextField(verbose_name=_(\"description (german)\"), blank=True, null=True)\n description_en = models.TextField(verbose_name=_(\"description (english)\"), blank=True, null=True)\n description = Translate\n\n public_name_de = models.CharField(max_length=1024, verbose_name=_(\"display name (german)\"))\n public_name_en = models.CharField(max_length=1024, verbose_name=_(\"display name (english)\"))\n public_name = Translate\n\n teaser_de = models.TextField(verbose_name=_(\"teaser (german)\"), blank=True, null=True)\n teaser_en = models.TextField(verbose_name=_(\"teaser (english)\"), blank=True, null=True)\n teaser = Translate\n\n order = models.IntegerField(verbose_name=_(\"ordering index\"), default=0)\n\n staff_only = models.BooleanField(verbose_name=_(\"display for staff only\"), default=False)\n obsolete = models.BooleanField(verbose_name=_(\"obsolete\"), default=False)\n\n objects = QuestionnaireManager()\n\n class Meta:\n ordering = ('type', 'order', 'name_de')\n verbose_name = _(\"questionnaire\")\n verbose_name_plural = _(\"questionnaires\")\n\n def __str__(self):\n return self.name\n\n def __lt__(self, other):\n return (self.type, self.order, self.name_de) < (other.type, other.order, self.name_de)\n\n def __gt__(self, other):\n return (self.type, self.order, self.name_de) > (other.type, other.order, self.name_de)\n\n @property\n def is_above_contributors(self):\n return self.type == self.TOP\n\n @property\n def is_below_contributors(self):\n return self.type == self.BOTTOM\n\n @property\n def can_staff_edit(self):\n return not self.contributions.exclude(course__state='new').exists()\n\n @property\n def can_staff_delete(self):\n return not self.contributions.exists()\n\n @property\n def text_questions(self):\n return [question for question in self.question_set.all() if question.is_text_question]\n\n @property\n def rating_questions(self):\n return [question for question in self.question_set.all() if question.is_rating_question]\n\n SINGLE_RESULT_QUESTIONNAIRE_NAME = \"Single result\"\n\n @classmethod\n def single_result_questionnaire(cls):\n return 
cls.objects.get(name_en=cls.SINGLE_RESULT_QUESTIONNAIRE_NAME)\n\n\nclass Degree(models.Model, metaclass=LocalizeModelBase):\n name_de = models.CharField(max_length=1024, verbose_name=_(\"name (german)\"), unique=True)\n name_en = models.CharField(max_length=1024, verbose_name=_(\"name (english)\"), unique=True)\n name = Translate\n\n order = models.IntegerField(verbose_name=_(\"degree order\"), default=-1)\n\n class Meta:\n ordering = ['order', ]\n\n def __str__(self):\n return self.name\n\n def can_staff_delete(self):\n if self.pk is None:\n return True\n return not self.courses.all().exists()\n\n\nclass CourseType(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Model for the type of a course, e.g. a lecture\"\"\"\n\n name_de = models.CharField(max_length=1024, verbose_name=_(\"name (german)\"), unique=True)\n name_en = models.CharField(max_length=1024, verbose_name=_(\"name (english)\"), unique=True)\n name = Translate\n\n class Meta:\n ordering = ['name_de', ]\n\n def __str__(self):\n return self.name\n\n def __lt__(self, other):\n return self.name_de < other.name_de\n\n def can_staff_delete(self):\n if not self.pk:\n return True\n return not self.courses.all().exists()\n\n\nclass Course(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Models a single course, e.g. the Math 101 course of 2002.\"\"\"\n\n state = FSMField(default='new', protected=True)\n\n semester = models.ForeignKey(Semester, models.PROTECT, verbose_name=_(\"semester\"))\n\n name_de = models.CharField(max_length=1024, verbose_name=_(\"name (german)\"))\n name_en = models.CharField(max_length=1024, verbose_name=_(\"name (english)\"))\n name = Translate\n\n # type of course: lecture, seminar, project\n type = models.ForeignKey(CourseType, models.PROTECT, verbose_name=_(\"course type\"), related_name=\"courses\")\n\n is_single_result = models.BooleanField(verbose_name=_(\"is single result\"), default=False)\n\n # e.g. 
Bachelor, Master\n degrees = models.ManyToManyField(Degree, verbose_name=_(\"degrees\"), related_name=\"courses\")\n\n # default is True as that's the more restrictive option\n is_graded = models.BooleanField(verbose_name=_(\"is graded\"), default=True)\n\n # defines whether results can only be seen by contributors and participants\n is_private = models.BooleanField(verbose_name=_(\"is private\"), default=False)\n\n # grade publishers can set this to True, then the course will be handled as if final grades have already been uploaded\n gets_no_grade_documents = models.BooleanField(verbose_name=_(\"gets no grade documents\"), default=False)\n\n # whether participants must vote to qualify for reward points\n is_rewarded = models.BooleanField(verbose_name=_(\"is rewarded\"), default=True)\n\n # whether the evaluation does take place during the semester, stating that evaluation results will be published while the course is still running\n is_midterm_evaluation = models.BooleanField(verbose_name=_(\"is midterm evaluation\"), default=False)\n\n # True, if the course has at least two voters or if the first voter explicitly confirmed that given text answers\n # can be published even if no other person evaluates the course\n can_publish_text_results = models.BooleanField(verbose_name=_(\"can publish text results\"), default=False)\n\n # students that are allowed to vote\n participants = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name=_(\"participants\"), blank=True, related_name='courses_participating_in')\n _participant_count = models.IntegerField(verbose_name=_(\"participant count\"), blank=True, null=True, default=None)\n\n # students that already voted\n voters = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name=_(\"voters\"), blank=True, related_name='courses_voted_for')\n _voter_count = models.IntegerField(verbose_name=_(\"voter count\"), blank=True, null=True, default=None)\n\n # when the evaluation takes place\n vote_start_datetime = models.DateTimeField(verbose_name=_(\"start of evaluation\"))\n vote_end_date = models.DateField(verbose_name=_(\"last day of evaluation\"))\n\n # who last modified this course\n last_modified_time = models.DateTimeField(auto_now=True)\n last_modified_user = models.ForeignKey(settings.AUTH_USER_MODEL, models.SET_NULL, null=True, blank=True, related_name=\"course_last_modified_user+\")\n\n course_evaluated = Signal(providing_args=['request', 'semester'])\n\n class Meta:\n ordering = ('name_de',)\n unique_together = (\n ('semester', 'name_de'),\n ('semester', 'name_en'),\n )\n verbose_name = _(\"course\")\n verbose_name_plural = _(\"courses\")\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kw):\n first_save = self.pk is None\n super().save(*args, **kw)\n\n # make sure there is a general contribution\n if not self.general_contribution:\n self.contributions.create(contributor=None)\n del self.general_contribution # invalidate cached property\n\n if self.is_single_result:\n # adding m2ms such as contributions/questionnaires requires saving the course first,\n # therefore we must allow the single result questionnaire to not exist on first save\n assert first_save or Questionnaire.objects.get(contributions__course=self).name_en == Questionnaire.SINGLE_RESULT_QUESTIONNAIRE_NAME\n assert self.vote_end_date == self.vote_start_datetime.date()\n else:\n assert self.vote_end_date >= self.vote_start_datetime.date()\n\n @property\n def is_fully_reviewed(self):\n if not self.can_publish_text_results:\n return True\n return not 
self.unreviewed_textanswer_set.exists()\n\n @property\n def vote_end_datetime(self):\n # The evaluation ends at EVALUATION_END_OFFSET_HOURS:00 of the day AFTER self.vote_end_date.\n return date_to_datetime(self.vote_end_date) + timedelta(hours=24 + settings.EVALUATION_END_OFFSET_HOURS)\n\n @property\n def is_in_evaluation_period(self):\n return self.vote_start_datetime <= datetime.now() <= self.vote_end_datetime\n\n @property\n def general_contribution_has_questionnaires(self):\n return self.general_contribution and (self.is_single_result or self.general_contribution.questionnaires.count() > 0)\n\n @property\n def all_contributions_have_questionnaires(self):\n return self.general_contribution and (self.is_single_result or all(self.contributions.annotate(Count('questionnaires')).values_list(\"questionnaires__count\", flat=True)))\n\n def can_user_vote(self, user):\n \"\"\"Returns whether the user is allowed to vote on this course.\"\"\"\n return (self.state == \"in_evaluation\"\n and self.is_in_evaluation_period\n and user in self.participants.all()\n and user not in self.voters.all())\n\n def can_user_see_course(self, user):\n if user.is_staff:\n return True\n if user.is_reviewer and not self.semester.results_are_archived:\n return True\n if self.is_private or user.is_external:\n return self.is_user_contributor_or_delegate(user) or self.participants.filter(pk=user.pk).exists()\n return True\n\n def can_user_see_results_page(self, user):\n if self.is_single_result:\n return False\n if user.is_staff:\n return True\n if user.is_reviewer and not self.semester.results_are_archived:\n return True\n if self.state != 'published':\n return False\n if not self.can_publish_rating_results or self.semester.results_are_archived or not self.can_user_see_course(user):\n return self.is_user_contributor_or_delegate(user)\n return True\n\n @property\n def can_staff_edit(self):\n return not self.participations_are_archived and self.state in ['new', 'prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']\n\n @property\n def can_staff_delete(self):\n return self.can_staff_edit and (self.num_voters == 0 or self.is_single_result)\n\n @property\n def can_publish_average_grade(self):\n if self.is_single_result:\n return True\n\n # the average grade is only published if at least the configured percentage of participants voted during the evaluation for significance reasons\n return self.can_publish_rating_results and self.num_voters / self.num_participants >= settings.VOTER_PERCENTAGE_NEEDED_FOR_PUBLISHING_AVERAGE_GRADE\n\n @property\n def can_publish_rating_results(self):\n if self.is_single_result:\n return True\n\n # the rating results are only published if at least the configured number of participants voted during the evaluation for anonymity reasons\n return self.num_voters >= settings.VOTER_COUNT_NEEDED_FOR_PUBLISHING_RATING_RESULTS\n\n @transition(field=state, source=['new', 'editor_approved'], target='prepared')\n def ready_for_editors(self):\n pass\n\n @transition(field=state, source='prepared', target='editor_approved')\n def editor_approve(self):\n pass\n\n @transition(field=state, source=['new', 'prepared', 'editor_approved'], target='approved', conditions=[lambda self: self.general_contribution_has_questionnaires])\n def staff_approve(self):\n pass\n\n @transition(field=state, source=['prepared', 'editor_approved', 'approved'], target='new')\n def revert_to_new(self):\n pass\n\n @transition(field=state, source='approved', target='in_evaluation', conditions=[lambda self: 
self.is_in_evaluation_period])\n def evaluation_begin(self):\n pass\n\n @transition(field=state, source=['evaluated', 'reviewed'], target='in_evaluation', conditions=[lambda self: self.is_in_evaluation_period])\n def reopen_evaluation(self):\n pass\n\n @transition(field=state, source='in_evaluation', target='evaluated')\n def evaluation_end(self):\n pass\n\n @transition(field=state, source='evaluated', target='reviewed', conditions=[lambda self: self.is_fully_reviewed])\n def review_finished(self):\n pass\n\n @transition(field=state, source=['new', 'reviewed'], target='reviewed', conditions=[lambda self: self.is_single_result])\n def single_result_created(self):\n pass\n\n @transition(field=state, source='reviewed', target='evaluated', conditions=[lambda self: not self.is_fully_reviewed])\n def reopen_review(self):\n pass\n\n @transition(field=state, source='reviewed', target='published')\n def publish(self):\n assert self._voter_count is None and self._participant_count is None\n self._voter_count = self.num_voters\n self._participant_count = self.num_participants\n\n if not self.can_publish_text_results:\n self.textanswer_set.delete()\n else:\n self.textanswer_set.filter(state=TextAnswer.HIDDEN).delete()\n self.textanswer_set.update(original_answer=None)\n\n @transition(field=state, source='published', target='reviewed')\n def unpublish(self):\n assert self.is_single_result or self._voter_count == self.voters.count() and self._participant_count == self.participants.count()\n self._voter_count = None\n self._participant_count = None\n\n @cached_property\n def general_contribution(self):\n try:\n return self.contributions.get(contributor=None)\n except Contribution.DoesNotExist:\n return None\n\n @cached_property\n def num_participants(self):\n if self._participant_count is not None:\n return self._participant_count\n return self.participants.count()\n\n @cached_property\n def num_voters(self):\n if self._voter_count is not None:\n return self._voter_count\n return self.voters.count()\n\n @property\n def due_participants(self):\n return self.participants.exclude(pk__in=self.voters.all())\n\n @cached_property\n def responsible_contributors(self):\n return UserProfile.objects.filter(contributions__course=self, contributions__responsible=True).order_by('contributions__order')\n\n @cached_property\n def num_contributors(self):\n return UserProfile.objects.filter(contributions__course=self).count()\n\n @property\n def days_left_for_evaluation(self):\n return (self.vote_end_date - date.today()).days\n\n @property\n def time_left_for_evaluation(self):\n return self.vote_end_datetime - datetime.now()\n\n def evaluation_ends_soon(self):\n return 0 < self.time_left_for_evaluation.total_seconds() < settings.EVALUATION_END_WARNING_PERIOD * 3600\n\n @property\n def days_until_evaluation(self):\n days_left = (self.vote_start_datetime.date() - date.today()).days\n if self.vote_start_datetime < datetime.now():\n days_left -= 1\n return days_left\n\n def is_user_editor_or_delegate(self, user):\n return self.contributions.filter(Q(contributor=user) | Q(contributor__in=user.represented_users.all()), can_edit=True).exists()\n\n def is_user_contributor_or_delegate(self, user):\n # early out that saves database hits since is_contributor_or_delegate is a cached_property\n if not user.is_contributor_or_delegate:\n return False\n return self.contributions.filter(Q(contributor=user) | Q(contributor__in=user.represented_users.all())).exists()\n\n @property\n def textanswer_set(self):\n return 
TextAnswer.objects.filter(contribution__course=self)\n\n @cached_property\n def num_textanswers(self):\n if not self.can_publish_text_results:\n return 0\n return self.textanswer_set.count()\n\n @property\n def unreviewed_textanswer_set(self):\n return self.textanswer_set.filter(state=TextAnswer.NOT_REVIEWED)\n\n @property\n def reviewed_textanswer_set(self):\n return self.textanswer_set.exclude(state=TextAnswer.NOT_REVIEWED)\n\n @cached_property\n def num_reviewed_textanswers(self):\n return self.reviewed_textanswer_set.count()\n\n @property\n def ratinganswer_counters(self):\n return RatingAnswerCounter.objects.filter(contribution__course=self)\n\n def _archive_participations(self):\n \"\"\"Should be called only via Semester.archive_participations\"\"\"\n if not self.participations_can_be_archived:\n raise NotArchiveable()\n if self._participant_count is not None:\n assert self._voter_count is not None\n assert self.is_single_result or self._voter_count == self.voters.count() and self._participant_count == self.participants.count()\n return\n assert self._participant_count is None and self._voter_count is None\n self._participant_count = self.num_participants\n self._voter_count = self.num_voters\n self.save()\n\n @property\n def participations_are_archived(self):\n semester_participations_are_archived = self.semester.participations_are_archived\n if semester_participations_are_archived:\n assert self._participant_count is not None and self._voter_count is not None\n return semester_participations_are_archived\n\n @property\n def participations_can_be_archived(self):\n return not self.semester.participations_are_archived and self.state in [\"new\", \"published\"]\n\n @property\n def final_grade_documents(self):\n from evap.grades.models import GradeDocument\n return self.grade_documents.filter(type=GradeDocument.FINAL_GRADES)\n\n @property\n def midterm_grade_documents(self):\n from evap.grades.models import GradeDocument\n return self.grade_documents.filter(type=GradeDocument.MIDTERM_GRADES)\n\n @classmethod\n def update_courses(cls):\n logger.info(\"update_courses called. 
Processing courses now.\")\n from evap.evaluation.tools import send_publish_notifications\n\n courses_new_in_evaluation = []\n evaluation_results_courses = []\n\n for course in cls.objects.all():\n try:\n if course.state == \"approved\" and course.vote_start_datetime <= datetime.now():\n course.evaluation_begin()\n course.last_modified_user = UserProfile.objects.cronjob_user()\n course.save()\n courses_new_in_evaluation.append(course)\n elif course.state == \"in_evaluation\" and datetime.now() >= course.vote_end_datetime:\n course.evaluation_end()\n if course.is_fully_reviewed:\n course.review_finished()\n if not course.is_graded or course.final_grade_documents.exists() or course.gets_no_grade_documents:\n course.publish()\n evaluation_results_courses.append(course)\n course.last_modified_user = UserProfile.objects.cronjob_user()\n course.save()\n except Exception:\n logger.exception('An error occured when updating the state of course \"{}\" (id {}).'.format(course, course.id))\n\n template = EmailTemplate.objects.get(name=EmailTemplate.EVALUATION_STARTED)\n EmailTemplate.send_to_users_in_courses(template, courses_new_in_evaluation, [EmailTemplate.ALL_PARTICIPANTS], use_cc=False, request=None)\n send_publish_notifications(evaluation_results_courses)\n logger.info(\"update_courses finished.\")\n\n\n@receiver(post_transition, sender=Course)\ndef warmup_cache_on_publish(instance, target, **_kwargs):\n if target == 'published':\n from evap.results.tools import collect_results\n from evap.results.views import warm_up_template_cache\n collect_results(instance)\n warm_up_template_cache([instance])\n\n\n@receiver(post_transition, sender=Course)\ndef delete_cache_on_unpublish(instance, source, **_kwargs):\n if source == 'published':\n from evap.results.tools import get_collect_results_cache_key\n from evap.results.views import delete_template_cache\n caches['results'].delete(get_collect_results_cache_key(instance))\n delete_template_cache(instance)\n\n\n@receiver(post_transition, sender=Course)\ndef log_state_transition(instance, name, source, target, **_kwargs):\n logger.info('Course \"{}\" (id {}) moved from state \"{}\" to state \"{}\", caused by transition \"{}\".'.format(instance, instance.pk, source, target, name))\n\n\nclass Contribution(models.Model):\n \"\"\"A contributor who is assigned to a course and his questionnaires.\"\"\"\n\n OWN_COMMENTS = 'OWN'\n COURSE_COMMENTS = 'COURSE'\n ALL_COMMENTS = 'ALL'\n COMMENT_VISIBILITY_CHOICES = (\n (OWN_COMMENTS, _('Own')),\n (COURSE_COMMENTS, _('Course')),\n (ALL_COMMENTS, _('All')),\n )\n IS_CONTRIBUTOR = 'CONTRIBUTOR'\n IS_EDITOR = 'EDITOR'\n IS_RESPONSIBLE = 'RESPONSIBLE'\n RESPONSIBILITY_CHOICES = (\n (IS_CONTRIBUTOR, _('Contributor')),\n (IS_EDITOR, _('Editor')),\n (IS_RESPONSIBLE, _('Responsible')),\n )\n\n course = models.ForeignKey(Course, models.CASCADE, verbose_name=_(\"course\"), related_name='contributions')\n contributor = models.ForeignKey(settings.AUTH_USER_MODEL, models.PROTECT, verbose_name=_(\"contributor\"), blank=True, null=True, related_name='contributions')\n questionnaires = models.ManyToManyField(Questionnaire, verbose_name=_(\"questionnaires\"), blank=True, related_name=\"contributions\")\n responsible = models.BooleanField(verbose_name=_(\"responsible\"), default=False)\n can_edit = models.BooleanField(verbose_name=_(\"can edit\"), default=False)\n comment_visibility = models.CharField(max_length=10, choices=COMMENT_VISIBILITY_CHOICES, verbose_name=_('comment visibility'), default=OWN_COMMENTS)\n label = 
models.CharField(max_length=255, blank=True, null=True, verbose_name=_(\"label\"))\n\n order = models.IntegerField(verbose_name=_(\"contribution order\"), default=-1)\n\n class Meta:\n unique_together = (\n ('course', 'contributor'),\n )\n ordering = ['order', ]\n\n def save(self, *args, **kw):\n super().save(*args, **kw)\n if self.responsible and not self.course.is_single_result:\n assert self.can_edit and self.comment_visibility == self.ALL_COMMENTS\n\n @property\n def is_general(self):\n return self.contributor_id is None\n\n\nclass Question(models.Model, metaclass=LocalizeModelBase):\n \"\"\"A question including a type.\"\"\"\n\n QUESTION_TYPES = (\n (\"T\", _(\"Text Question\")),\n (\"L\", _(\"Likert Question\")),\n (\"G\", _(\"Grade Question\")),\n (\"P\", _(\"Positive Yes-No Question\")),\n (\"N\", _(\"Negative Yes-No Question\")),\n (\"H\", _(\"Heading\")),\n )\n\n order = models.IntegerField(verbose_name=_(\"question order\"), default=-1)\n questionnaire = models.ForeignKey(Questionnaire, models.CASCADE)\n text_de = models.CharField(max_length=1024, verbose_name=_(\"question text (german)\"))\n text_en = models.CharField(max_length=1024, verbose_name=_(\"question text (english)\"))\n type = models.CharField(max_length=1, choices=QUESTION_TYPES, verbose_name=_(\"question type\"))\n\n text = Translate\n\n class Meta:\n ordering = ['order', ]\n verbose_name = _(\"question\")\n verbose_name_plural = _(\"questions\")\n\n @property\n def answer_class(self):\n if self.is_text_question:\n return TextAnswer\n elif self.is_rating_question:\n return RatingAnswerCounter\n else:\n raise Exception(\"Unknown answer type: %r\" % self.type)\n\n @property\n def is_likert_question(self):\n return self.type == \"L\"\n\n @property\n def is_text_question(self):\n return self.type == \"T\"\n\n @property\n def is_grade_question(self):\n return self.type == \"G\"\n\n @property\n def is_positive_yes_no_question(self):\n return self.type == \"P\"\n\n @property\n def is_negative_yes_no_question(self):\n return self.type == \"N\"\n\n @property\n def is_yes_no_question(self):\n return self.is_positive_yes_no_question or self.is_negative_yes_no_question\n\n @property\n def is_rating_question(self):\n return self.is_grade_question or self.is_likert_question or self.is_yes_no_question\n\n @property\n def is_non_grade_rating_question(self):\n return self.is_rating_question and not self.is_grade_question\n\n @property\n def is_heading_question(self):\n return self.type == \"H\"\n\n\nclass Answer(models.Model):\n \"\"\"An abstract answer to a question. For anonymity purposes, the answering\n user ist not stored in the object. Concrete subclasses are `RatingAnswerCounter`,\n and `TextAnswer`.\"\"\"\n\n question = models.ForeignKey(Question, models.PROTECT)\n contribution = models.ForeignKey(Contribution, models.PROTECT, related_name=\"%(class)s_set\")\n\n class Meta:\n abstract = True\n verbose_name = _(\"answer\")\n verbose_name_plural = _(\"answers\")\n\n\nclass RatingAnswerCounter(Answer):\n \"\"\"A rating answer counter to a question. 
A lower answer is better or indicates more agreement.\"\"\"\n\n answer = models.IntegerField(verbose_name=_(\"answer\"))\n count = models.IntegerField(verbose_name=_(\"count\"), default=0)\n\n class Meta:\n unique_together = (\n ('question', 'contribution', 'answer'),\n )\n verbose_name = _(\"rating answer\")\n verbose_name_plural = _(\"rating answers\")\n\n\nclass TextAnswer(Answer):\n \"\"\"A free-form text answer to a question (usually a comment about a course\n or a contributor).\"\"\"\n\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n\n answer = models.TextField(verbose_name=_(\"answer\"))\n original_answer = models.TextField(verbose_name=_(\"original answer\"), blank=True, null=True)\n\n HIDDEN = 'HI'\n PUBLISHED = 'PU'\n PRIVATE = 'PR'\n NOT_REVIEWED = 'NR'\n TEXT_ANSWER_STATES = (\n (HIDDEN, _('hidden')),\n (PUBLISHED, _('published')),\n (PRIVATE, _('private')),\n (NOT_REVIEWED, _('not reviewed')),\n )\n state = models.CharField(max_length=2, choices=TEXT_ANSWER_STATES, verbose_name=_('state of answer'), default=NOT_REVIEWED)\n\n class Meta:\n # Prevent ordering by date for privacy reasons\n ordering = ['id', ]\n verbose_name = _(\"text answer\")\n verbose_name_plural = _(\"text answers\")\n\n @property\n def is_hidden(self):\n return self.state == self.HIDDEN\n\n @property\n def is_private(self):\n return self.state == self.PRIVATE\n\n @property\n def is_published(self):\n return self.state == self.PUBLISHED\n\n def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n assert self.answer != self.original_answer\n\n def publish(self):\n self.state = self.PUBLISHED\n\n def hide(self):\n self.state = self.HIDDEN\n\n def make_private(self):\n self.state = self.PRIVATE\n\n def unreview(self):\n self.state = self.NOT_REVIEWED\n\n\nclass FaqSection(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Section in the frequently asked questions\"\"\"\n\n order = models.IntegerField(verbose_name=_(\"section order\"), default=-1)\n\n title_de = models.CharField(max_length=255, verbose_name=_(\"section title (german)\"))\n title_en = models.CharField(max_length=255, verbose_name=_(\"section title (english)\"))\n title = Translate\n\n class Meta:\n ordering = ['order', ]\n verbose_name = _(\"section\")\n verbose_name_plural = _(\"sections\")\n\n\nclass FaqQuestion(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Question and answer in the frequently asked questions\"\"\"\n\n section = models.ForeignKey(FaqSection, models.CASCADE, related_name=\"questions\")\n\n order = models.IntegerField(verbose_name=_(\"question order\"), default=-1)\n\n question_de = models.CharField(max_length=1024, verbose_name=_(\"question (german)\"))\n question_en = models.CharField(max_length=1024, verbose_name=_(\"question (english)\"))\n question = Translate\n\n answer_de = models.TextField(verbose_name=_(\"answer (german)\"))\n answer_en = models.TextField(verbose_name=_(\"answer (german)\"))\n answer = Translate\n\n class Meta:\n ordering = ['order', ]\n verbose_name = _(\"question\")\n verbose_name_plural = _(\"questions\")\n\n\nclass UserProfileManager(BaseUserManager):\n def get_queryset(self):\n return super().get_queryset().exclude(username=UserProfile.CRONJOB_USER_USERNAME)\n\n def cronjob_user(self):\n return super().get_queryset().get(username=UserProfile.CRONJOB_USER_USERNAME)\n\n def exclude_inactive_users(self):\n return self.get_queryset().exclude(is_active=False)\n\n def create_user(self, username, password=None, email=None, first_name=None, last_name=None):\n 
if not username:\n raise ValueError(_('Users must have a username'))\n\n user = self.model(\n username=username,\n email=self.normalize_email(email),\n first_name=first_name,\n last_name=last_name\n )\n user.set_password(password)\n user.save()\n return user\n\n def create_superuser(self, username, password, email=None, first_name=None, last_name=None):\n user = self.create_user(\n username=username,\n password=password,\n email=email,\n first_name=first_name,\n last_name=last_name\n )\n user.is_superuser = True\n user.save()\n user.groups.add(Group.objects.get(name=\"Staff\"))\n return user\n\n\nclass UserProfile(AbstractBaseUser, PermissionsMixin):\n username = models.CharField(max_length=255, unique=True, verbose_name=_('username'))\n\n # null=True because users created through kerberos logins and certain external users don't have an address.\n email = models.EmailField(max_length=255, unique=True, blank=True, null=True, verbose_name=_('email address'))\n\n title = models.CharField(max_length=255, blank=True, null=True, verbose_name=_(\"Title\"))\n first_name = models.CharField(max_length=255, blank=True, null=True, verbose_name=_(\"first name\"))\n last_name = models.CharField(max_length=255, blank=True, null=True, verbose_name=_(\"last name\"))\n\n language = models.CharField(max_length=8, blank=True, null=True, verbose_name=_(\"language\"))\n\n # delegates of the user, which can also manage their courses\n delegates = models.ManyToManyField(\"UserProfile\", verbose_name=_(\"Delegates\"), related_name=\"represented_users\", blank=True)\n\n # users to which all emails should be sent in cc without giving them delegate rights\n cc_users = models.ManyToManyField(\"UserProfile\", verbose_name=_(\"CC Users\"), related_name=\"ccing_users\", blank=True)\n\n # key for url based login of this user\n MAX_LOGIN_KEY = 2**31 - 1\n\n login_key = models.IntegerField(verbose_name=_(\"Login Key\"), unique=True, blank=True, null=True)\n login_key_valid_until = models.DateField(verbose_name=_(\"Login Key Validity\"), blank=True, null=True)\n\n is_active = models.BooleanField(default=True, verbose_name=_(\"active\"))\n\n class Meta:\n ordering = ('last_name', 'first_name', 'username')\n verbose_name = _('user')\n verbose_name_plural = _('users')\n\n USERNAME_FIELD = 'username'\n REQUIRED_FIELDS = []\n\n objects = UserProfileManager()\n\n @property\n def full_name(self):\n if self.last_name:\n name = self.last_name\n if self.first_name:\n name = self.first_name + \" \" + name\n if self.title:\n name = self.title + \" \" + name\n return name\n else:\n return self.username\n\n @property\n def full_name_with_username(self):\n name = self.full_name\n if self.username not in name:\n name += \" (\" + self.username + \")\"\n return name\n\n def __str__(self):\n return self.full_name\n\n @cached_property\n def is_staff(self):\n return self.groups.filter(name='Staff').exists()\n\n @cached_property\n def is_reviewer(self):\n return self.is_staff or self.groups.filter(name='Reviewer').exists()\n\n @cached_property\n def is_grade_publisher(self):\n return self.groups.filter(name='Grade publisher').exists()\n\n CRONJOB_USER_USERNAME = \"cronjob\"\n\n @property\n def can_staff_mark_inactive(self):\n if self.is_reviewer or self.is_grade_publisher or self.is_superuser:\n return False\n if any(not course.participations_are_archived for course in self.courses_participating_in.all()):\n return False\n if any(not contribution.course.participations_are_archived for contribution in self.contributions.all()):\n return False\n 
return True\n\n @property\n def can_staff_delete(self):\n if self.is_contributor or self.is_reviewer or self.is_grade_publisher or self.is_superuser:\n return False\n if any(not course.participations_are_archived for course in self.courses_participating_in.all()):\n return False\n if any(not user.can_staff_delete for user in self.represented_users.all()):\n return False\n if any(not user.can_staff_delete for user in self.ccing_users.all()):\n return False\n return True\n\n @property\n def is_participant(self):\n return self.courses_participating_in.exists()\n\n @property\n def is_student(self):\n \"\"\"\n A UserProfile is not considered to be a student anymore if the\n newest contribution is newer than the newest participation.\n \"\"\"\n if not self.is_participant:\n return False\n\n if not self.is_contributor:\n return True\n\n last_semester_participated = Semester.objects.filter(course__participants=self).order_by(\"-created_at\").first()\n last_semester_contributed = Semester.objects.filter(course__contributions__contributor=self).order_by(\"-created_at\").first()\n\n return last_semester_participated.created_at >= last_semester_contributed.created_at\n\n @property\n def is_contributor(self):\n return self.contributions.exists()\n\n @property\n def is_editor(self):\n return self.contributions.filter(can_edit=True).exists()\n\n @property\n def is_responsible(self):\n # in the user list, self.user.contributions is prefetched, therefore use it directly and don't filter it\n return any(contribution.responsible for contribution in self.contributions.all())\n\n @property\n def is_delegate(self):\n return self.represented_users.exists()\n\n @property\n def is_editor_or_delegate(self):\n return self.is_editor or self.is_delegate\n\n @cached_property\n def is_contributor_or_delegate(self):\n return self.is_contributor or self.is_delegate\n\n @property\n def is_external(self):\n # do the import here to prevent a circular import\n from evap.evaluation.tools import is_external_email\n if not self.email:\n return True\n return is_external_email(self.email)\n\n @property\n def can_download_grades(self):\n return not self.is_external\n\n @classmethod\n def email_needs_login_key(cls, email):\n # do the import here to prevent a circular import\n from evap.evaluation.tools import is_external_email\n return is_external_email(email)\n\n @property\n def needs_login_key(self):\n return UserProfile.email_needs_login_key(self.email)\n\n def ensure_valid_login_key(self):\n if self.login_key and self.login_key_valid_until > date.today():\n return\n\n while True:\n key = random.randrange(0, UserProfile.MAX_LOGIN_KEY)\n if not UserProfile.objects.filter(login_key=key).exists():\n # key not yet used\n self.login_key = key\n break\n self.refresh_login_key()\n\n def refresh_login_key(self):\n self.login_key_valid_until = date.today() + timedelta(settings.LOGIN_KEY_VALIDITY)\n self.save()\n\n @property\n def login_url(self):\n if not self.needs_login_key:\n return \"\"\n return settings.PAGE_URL + \"?loginkey=\" + str(self.login_key)\n\n def get_sorted_contributions(self):\n return self.contributions.order_by('course__semester__created_at', 'course__name_de')\n\n def get_sorted_courses_participating_in(self):\n return self.courses_participating_in.order_by('semester__created_at', 'name_de')\n\n def get_sorted_courses_voted_for(self):\n return self.courses_voted_for.order_by('semester__created_at', 'name_de')\n\n\ndef validate_template(value):\n \"\"\"Field validator which ensures that the value can be compiled into 
a\n Django Template.\"\"\"\n try:\n Template(value)\n except TemplateSyntaxError as e:\n raise ValidationError(str(e))\n\n\nclass EmailTemplate(models.Model):\n name = models.CharField(max_length=1024, unique=True, verbose_name=_(\"Name\"))\n\n subject = models.CharField(max_length=1024, verbose_name=_(\"Subject\"), validators=[validate_template])\n body = models.TextField(verbose_name=_(\"Body\"), validators=[validate_template])\n\n EDITOR_REVIEW_NOTICE = \"Editor Review Notice\"\n EDITOR_REVIEW_REMINDER = \"Editor Review Reminder\"\n STUDENT_REMINDER = \"Student Reminder\"\n PUBLISHING_NOTICE = \"Publishing Notice\"\n LOGIN_KEY_CREATED = \"Login Key Created\"\n EVALUATION_STARTED = \"Evaluation Started\"\n\n ALL_PARTICIPANTS = 'all_participants'\n DUE_PARTICIPANTS = 'due_participants'\n RESPONSIBLE = 'responsible'\n EDITORS = 'editors'\n CONTRIBUTORS = 'contributors'\n\n EMAIL_RECIPIENTS = (\n (ALL_PARTICIPANTS, _('all participants')),\n (DUE_PARTICIPANTS, _('due participants')),\n (RESPONSIBLE, _('responsible person')),\n (EDITORS, _('all editors')),\n (CONTRIBUTORS, _('all contributors'))\n )\n\n @classmethod\n def recipient_list_for_course(cls, course, recipient_groups, filter_users_in_cc):\n recipients = []\n\n if cls.CONTRIBUTORS in recipient_groups:\n recipients += UserProfile.objects.filter(contributions__course=course)\n elif cls.EDITORS in recipient_groups:\n recipients += UserProfile.objects.filter(contributions__course=course, contributions__can_edit=True)\n elif cls.RESPONSIBLE in recipient_groups:\n recipients += course.responsible_contributors\n\n if cls.ALL_PARTICIPANTS in recipient_groups:\n recipients += course.participants.all()\n elif cls.DUE_PARTICIPANTS in recipient_groups:\n recipients += course.due_participants\n\n if filter_users_in_cc:\n # remove delegates and CC users of recipients from the recipient list\n # so they won't get the exact same email twice\n users_excluded = UserProfile.objects.filter(Q(represented_users__in=recipients) | Q(ccing_users__in=recipients))\n # but do so only if they have no delegates/cc_users, because otherwise\n # those won't get the email at all. consequently, some \"edge case users\"\n # will get the email twice, but there is no satisfying way around that.\n users_excluded = users_excluded.filter(delegates=None, cc_users=None)\n\n recipients = list(set(recipients) - set(users_excluded))\n\n return recipients\n\n @classmethod\n def render_string(cls, text, dictionary):\n return Template(text).render(Context(dictionary, autoescape=False))\n\n @classmethod\n def send_to_users_in_courses(cls, template, courses, recipient_groups, use_cc, request):\n user_course_map = {}\n for course in courses:\n recipients = cls.recipient_list_for_course(course, recipient_groups, filter_users_in_cc=use_cc)\n for user in recipients:\n user_course_map.setdefault(user, []).append(course)\n\n for user, courses in user_course_map.items():\n subject_params = {}\n body_params = {'user': user, 'courses': courses, 'due_courses': get_due_courses_for_user(user)}\n cls.send_to_user(user, template, subject_params, body_params, use_cc=use_cc, request=request)\n\n @classmethod\n def send_to_user(cls, user, template, subject_params, body_params, use_cc, request=None):\n if not user.email:\n warning_message = \"{} has no email address defined. 
Could not send email.\".format(user.username)\n # If this method is triggered by a cronjob changing course states, the request is None.\n # In this case warnings should be sent to the admins via email (configured in the settings for logger.error).\n # If a request exists, the page is displayed in the browser and the message can be shown on the page (messages.warning).\n if request is not None:\n logger.warning(warning_message)\n messages.warning(request, _(warning_message))\n else:\n logger.error(warning_message)\n return\n\n if use_cc:\n cc_users = set(user.delegates.all() | user.cc_users.all())\n cc_addresses = [p.email for p in cc_users if p.email]\n else:\n cc_addresses = []\n\n send_separate_login_url = False\n body_params['login_url'] = \"\"\n if user.needs_login_key:\n user.ensure_valid_login_key()\n if not cc_addresses:\n body_params['login_url'] = user.login_url\n else:\n send_separate_login_url = True\n\n subject = cls.render_string(template.subject, subject_params)\n body = cls.render_string(template.body, body_params)\n\n mail = EmailMessage(\n subject=subject,\n body=body,\n to=[user.email],\n cc=cc_addresses,\n bcc=[a[1] for a in settings.MANAGERS],\n headers={'Reply-To': settings.REPLY_TO_EMAIL})\n\n try:\n mail.send(False)\n logger.info(('Sent email \"{}\" to {}.').format(subject, user.username))\n if send_separate_login_url:\n cls.send_login_url_to_user(user)\n except Exception:\n logger.exception('An exception occurred when sending the following email to user \"{}\":\\n{}\\n'.format(user.username, mail.message()))\n\n @classmethod\n def send_reminder_to_user(cls, user, first_due_in_days, due_courses):\n template = cls.objects.get(name=cls.STUDENT_REMINDER)\n subject_params = {'user': user, 'first_due_in_days': first_due_in_days}\n body_params = {'user': user, 'first_due_in_days': first_due_in_days, 'due_courses': due_courses}\n\n cls.send_to_user(user, template, subject_params, body_params, use_cc=False)\n\n @classmethod\n def send_login_url_to_user(cls, user):\n template = cls.objects.get(name=cls.LOGIN_KEY_CREATED)\n subject_params = {}\n body_params = {'user': user, 'login_url': user.login_url}\n\n cls.send_to_user(user, template, subject_params, body_params, use_cc=False)\n logger.info(('Sent login url to {}.').format(user.username))\n", "path": "evap/evaluation/models.py" } ]
[ { "content": "from datetime import datetime, date, timedelta\nimport logging\nimport random\nimport uuid\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.models import AbstractBaseUser, BaseUserManager, Group, PermissionsMixin\nfrom django.core.cache import caches\nfrom django.core.exceptions import ValidationError\nfrom django.core.mail import EmailMessage\nfrom django.db import models, transaction\nfrom django.db.models import Count, Q, Manager\nfrom django.dispatch import Signal, receiver\nfrom django.template import Context, Template\nfrom django.template.base import TemplateSyntaxError\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\nfrom django_fsm import FSMField, transition\nfrom django_fsm.signals import post_transition\n# see evaluation.meta for the use of Translate in this file\nfrom evap.evaluation.meta import LocalizeModelBase, Translate\nfrom evap.evaluation.tools import date_to_datetime, get_due_courses_for_user\n\nlogger = logging.getLogger(__name__)\n\n\nclass NotArchiveable(Exception):\n \"\"\"An attempt has been made to archive something that is not archiveable.\"\"\"\n pass\n\n\nclass Semester(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Represents a semester, e.g. the winter term of 2011/2012.\"\"\"\n\n name_de = models.CharField(max_length=1024, unique=True, verbose_name=_(\"name (german)\"))\n name_en = models.CharField(max_length=1024, unique=True, verbose_name=_(\"name (english)\"))\n name = Translate\n\n short_name_de = models.CharField(max_length=20, unique=True, verbose_name=_(\"short name (german)\"))\n short_name_en = models.CharField(max_length=20, unique=True, verbose_name=_(\"short name (english)\"))\n short_name = Translate\n\n participations_are_archived = models.BooleanField(default=False, verbose_name=_(\"participations are archived\"))\n grade_documents_are_deleted = models.BooleanField(default=False, verbose_name=_(\"grade documents are deleted\"))\n results_are_archived = models.BooleanField(default=False, verbose_name=_(\"results are archived\"))\n\n created_at = models.DateField(verbose_name=_(\"created at\"), auto_now_add=True)\n\n class Meta:\n ordering = ('-created_at', 'name_de')\n verbose_name = _(\"semester\")\n verbose_name_plural = _(\"semesters\")\n\n def __str__(self):\n return self.name\n\n @property\n def can_staff_delete(self):\n return all(course.can_staff_delete for course in self.course_set.all())\n\n @property\n def participations_can_be_archived(self):\n return not self.participations_are_archived and all(course.participations_can_be_archived for course in self.course_set.all())\n\n @property\n def grade_documents_can_be_deleted(self):\n return not self.grade_documents_are_deleted\n\n @property\n def results_can_be_archived(self):\n return not self.results_are_archived\n\n @transaction.atomic\n def archive_participations(self):\n if not self.participations_can_be_archived:\n raise NotArchiveable()\n for course in self.course_set.all():\n course._archive_participations()\n self.participations_are_archived = True\n self.save()\n\n @transaction.atomic\n def delete_grade_documents(self):\n from evap.grades.models import GradeDocument\n\n if not self.grade_documents_can_be_deleted:\n raise NotArchiveable()\n GradeDocument.objects.filter(course__semester=self).delete()\n self.grade_documents_are_deleted = True\n self.save()\n\n def archive_results(self):\n if not self.results_can_be_archived:\n raise 
NotArchiveable()\n self.results_are_archived = True\n self.save()\n\n @classmethod\n def get_all_with_unarchived_results(cls):\n return cls.objects.filter(results_are_archived=False).distinct()\n\n @classmethod\n def get_all_with_published_unarchived_results(cls):\n return cls.objects.filter(course__state=\"published\", results_are_archived=False).distinct()\n\n @classmethod\n def active_semester(cls):\n return cls.objects.order_by(\"created_at\").last()\n\n @property\n def is_active_semester(self):\n return self == Semester.active_semester()\n\n\nclass QuestionnaireManager(Manager):\n def course_questionnaires(self):\n return super().get_queryset().exclude(type=Questionnaire.CONTRIBUTOR)\n\n def contributor_questionnaires(self):\n return super().get_queryset().filter(type=Questionnaire.CONTRIBUTOR)\n\n\nclass Questionnaire(models.Model, metaclass=LocalizeModelBase):\n \"\"\"A named collection of questions.\"\"\"\n\n TOP = 10\n CONTRIBUTOR = 20\n BOTTOM = 30\n TYPE_CHOICES = (\n (TOP, _('Top questionnaire')),\n (CONTRIBUTOR, _('Contributor questionnaire')),\n (BOTTOM, _('Bottom questionnaire')),\n )\n type = models.IntegerField(choices=TYPE_CHOICES, verbose_name=_('type'), default=TOP)\n\n name_de = models.CharField(max_length=1024, unique=True, verbose_name=_(\"name (german)\"))\n name_en = models.CharField(max_length=1024, unique=True, verbose_name=_(\"name (english)\"))\n name = Translate\n\n description_de = models.TextField(verbose_name=_(\"description (german)\"), blank=True, null=True)\n description_en = models.TextField(verbose_name=_(\"description (english)\"), blank=True, null=True)\n description = Translate\n\n public_name_de = models.CharField(max_length=1024, verbose_name=_(\"display name (german)\"))\n public_name_en = models.CharField(max_length=1024, verbose_name=_(\"display name (english)\"))\n public_name = Translate\n\n teaser_de = models.TextField(verbose_name=_(\"teaser (german)\"), blank=True, null=True)\n teaser_en = models.TextField(verbose_name=_(\"teaser (english)\"), blank=True, null=True)\n teaser = Translate\n\n order = models.IntegerField(verbose_name=_(\"ordering index\"), default=0)\n\n staff_only = models.BooleanField(verbose_name=_(\"display for staff only\"), default=False)\n obsolete = models.BooleanField(verbose_name=_(\"obsolete\"), default=False)\n\n objects = QuestionnaireManager()\n\n class Meta:\n ordering = ('type', 'order', 'name_de')\n verbose_name = _(\"questionnaire\")\n verbose_name_plural = _(\"questionnaires\")\n\n def __str__(self):\n return self.name\n\n def __lt__(self, other):\n return (self.type, self.order, self.name_de) < (other.type, other.order, self.name_de)\n\n def __gt__(self, other):\n return (self.type, self.order, self.name_de) > (other.type, other.order, self.name_de)\n\n @property\n def is_above_contributors(self):\n return self.type == self.TOP\n\n @property\n def is_below_contributors(self):\n return self.type == self.BOTTOM\n\n @property\n def can_staff_edit(self):\n return not self.contributions.exclude(course__state='new').exists()\n\n @property\n def can_staff_delete(self):\n return not self.contributions.exists()\n\n @property\n def text_questions(self):\n return [question for question in self.question_set.all() if question.is_text_question]\n\n @property\n def rating_questions(self):\n return [question for question in self.question_set.all() if question.is_rating_question]\n\n SINGLE_RESULT_QUESTIONNAIRE_NAME = \"Single result\"\n\n @classmethod\n def single_result_questionnaire(cls):\n return 
cls.objects.get(name_en=cls.SINGLE_RESULT_QUESTIONNAIRE_NAME)\n\n\nclass Degree(models.Model, metaclass=LocalizeModelBase):\n name_de = models.CharField(max_length=1024, verbose_name=_(\"name (german)\"), unique=True)\n name_en = models.CharField(max_length=1024, verbose_name=_(\"name (english)\"), unique=True)\n name = Translate\n\n order = models.IntegerField(verbose_name=_(\"degree order\"), default=-1)\n\n class Meta:\n ordering = ['order', ]\n\n def __str__(self):\n return self.name\n\n def can_staff_delete(self):\n if self.pk is None:\n return True\n return not self.courses.all().exists()\n\n\nclass CourseType(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Model for the type of a course, e.g. a lecture\"\"\"\n\n name_de = models.CharField(max_length=1024, verbose_name=_(\"name (german)\"), unique=True)\n name_en = models.CharField(max_length=1024, verbose_name=_(\"name (english)\"), unique=True)\n name = Translate\n\n class Meta:\n ordering = ['name_de', ]\n\n def __str__(self):\n return self.name\n\n def __lt__(self, other):\n return self.name_de < other.name_de\n\n def can_staff_delete(self):\n if not self.pk:\n return True\n return not self.courses.all().exists()\n\n\nclass Course(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Models a single course, e.g. the Math 101 course of 2002.\"\"\"\n\n state = FSMField(default='new', protected=True)\n\n semester = models.ForeignKey(Semester, models.PROTECT, verbose_name=_(\"semester\"))\n\n name_de = models.CharField(max_length=1024, verbose_name=_(\"name (german)\"))\n name_en = models.CharField(max_length=1024, verbose_name=_(\"name (english)\"))\n name = Translate\n\n # type of course: lecture, seminar, project\n type = models.ForeignKey(CourseType, models.PROTECT, verbose_name=_(\"course type\"), related_name=\"courses\")\n\n is_single_result = models.BooleanField(verbose_name=_(\"is single result\"), default=False)\n\n # e.g. 
Bachelor, Master\n degrees = models.ManyToManyField(Degree, verbose_name=_(\"degrees\"), related_name=\"courses\")\n\n # default is True as that's the more restrictive option\n is_graded = models.BooleanField(verbose_name=_(\"is graded\"), default=True)\n\n # defines whether results can only be seen by contributors and participants\n is_private = models.BooleanField(verbose_name=_(\"is private\"), default=False)\n\n # grade publishers can set this to True, then the course will be handled as if final grades have already been uploaded\n gets_no_grade_documents = models.BooleanField(verbose_name=_(\"gets no grade documents\"), default=False)\n\n # whether participants must vote to qualify for reward points\n is_rewarded = models.BooleanField(verbose_name=_(\"is rewarded\"), default=True)\n\n # whether the evaluation does take place during the semester, stating that evaluation results will be published while the course is still running\n is_midterm_evaluation = models.BooleanField(verbose_name=_(\"is midterm evaluation\"), default=False)\n\n # True, if the course has at least two voters or if the first voter explicitly confirmed that given text answers\n # can be published even if no other person evaluates the course\n can_publish_text_results = models.BooleanField(verbose_name=_(\"can publish text results\"), default=False)\n\n # students that are allowed to vote\n participants = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name=_(\"participants\"), blank=True, related_name='courses_participating_in')\n _participant_count = models.IntegerField(verbose_name=_(\"participant count\"), blank=True, null=True, default=None)\n\n # students that already voted\n voters = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name=_(\"voters\"), blank=True, related_name='courses_voted_for')\n _voter_count = models.IntegerField(verbose_name=_(\"voter count\"), blank=True, null=True, default=None)\n\n # when the evaluation takes place\n vote_start_datetime = models.DateTimeField(verbose_name=_(\"start of evaluation\"))\n vote_end_date = models.DateField(verbose_name=_(\"last day of evaluation\"))\n\n # who last modified this course\n last_modified_time = models.DateTimeField(auto_now=True)\n last_modified_user = models.ForeignKey(settings.AUTH_USER_MODEL, models.SET_NULL, null=True, blank=True, related_name=\"course_last_modified_user+\")\n\n course_evaluated = Signal(providing_args=['request', 'semester'])\n\n class Meta:\n ordering = ('name_de',)\n unique_together = (\n ('semester', 'name_de'),\n ('semester', 'name_en'),\n )\n verbose_name = _(\"course\")\n verbose_name_plural = _(\"courses\")\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kw):\n first_save = self.pk is None\n super().save(*args, **kw)\n\n # make sure there is a general contribution\n if not self.general_contribution:\n self.contributions.create(contributor=None)\n del self.general_contribution # invalidate cached property\n\n if self.is_single_result:\n # adding m2ms such as contributions/questionnaires requires saving the course first,\n # therefore we must allow the single result questionnaire to not exist on first save\n assert first_save or Questionnaire.objects.get(contributions__course=self).name_en == Questionnaire.SINGLE_RESULT_QUESTIONNAIRE_NAME\n assert self.vote_end_date == self.vote_start_datetime.date()\n else:\n assert self.vote_end_date >= self.vote_start_datetime.date()\n\n @property\n def is_fully_reviewed(self):\n if not self.can_publish_text_results:\n return True\n return not 
self.unreviewed_textanswer_set.exists()\n\n @property\n def vote_end_datetime(self):\n # The evaluation ends at EVALUATION_END_OFFSET_HOURS:00 of the day AFTER self.vote_end_date.\n return date_to_datetime(self.vote_end_date) + timedelta(hours=24 + settings.EVALUATION_END_OFFSET_HOURS)\n\n @property\n def is_in_evaluation_period(self):\n return self.vote_start_datetime <= datetime.now() <= self.vote_end_datetime\n\n @property\n def general_contribution_has_questionnaires(self):\n return self.general_contribution and (self.is_single_result or self.general_contribution.questionnaires.count() > 0)\n\n @property\n def all_contributions_have_questionnaires(self):\n return self.general_contribution and (self.is_single_result or all(self.contributions.annotate(Count('questionnaires')).values_list(\"questionnaires__count\", flat=True)))\n\n def can_user_vote(self, user):\n \"\"\"Returns whether the user is allowed to vote on this course.\"\"\"\n return (self.state == \"in_evaluation\"\n and self.is_in_evaluation_period\n and user in self.participants.all()\n and user not in self.voters.all())\n\n def can_user_see_course(self, user):\n if user.is_staff:\n return True\n if user.is_reviewer and not self.semester.results_are_archived:\n return True\n if self.is_private or user.is_external:\n return self.is_user_contributor_or_delegate(user) or self.participants.filter(pk=user.pk).exists()\n return True\n\n def can_user_see_results_page(self, user):\n if self.is_single_result:\n return False\n if user.is_staff:\n return True\n if user.is_reviewer and not self.semester.results_are_archived:\n return True\n if self.state != 'published':\n return False\n if not self.can_publish_rating_results or self.semester.results_are_archived or not self.can_user_see_course(user):\n return self.is_user_contributor_or_delegate(user)\n return True\n\n @property\n def can_staff_edit(self):\n return not self.participations_are_archived and self.state in ['new', 'prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']\n\n @property\n def can_staff_delete(self):\n return self.can_staff_edit and (self.num_voters == 0 or self.is_single_result)\n\n @property\n def can_publish_average_grade(self):\n if self.is_single_result:\n return True\n\n # the average grade is only published if at least the configured percentage of participants voted during the evaluation for significance reasons\n return self.can_publish_rating_results and self.num_voters / self.num_participants >= settings.VOTER_PERCENTAGE_NEEDED_FOR_PUBLISHING_AVERAGE_GRADE\n\n @property\n def can_publish_rating_results(self):\n if self.is_single_result:\n return True\n\n # the rating results are only published if at least the configured number of participants voted during the evaluation for anonymity reasons\n return self.num_voters >= settings.VOTER_COUNT_NEEDED_FOR_PUBLISHING_RATING_RESULTS\n\n @transition(field=state, source=['new', 'editor_approved'], target='prepared')\n def ready_for_editors(self):\n pass\n\n @transition(field=state, source='prepared', target='editor_approved')\n def editor_approve(self):\n pass\n\n @transition(field=state, source=['new', 'prepared', 'editor_approved'], target='approved', conditions=[lambda self: self.general_contribution_has_questionnaires])\n def staff_approve(self):\n pass\n\n @transition(field=state, source=['prepared', 'editor_approved', 'approved'], target='new')\n def revert_to_new(self):\n pass\n\n @transition(field=state, source='approved', target='in_evaluation', conditions=[lambda self: 
self.is_in_evaluation_period])\n def evaluation_begin(self):\n pass\n\n @transition(field=state, source=['evaluated', 'reviewed'], target='in_evaluation', conditions=[lambda self: self.is_in_evaluation_period])\n def reopen_evaluation(self):\n pass\n\n @transition(field=state, source='in_evaluation', target='evaluated')\n def evaluation_end(self):\n pass\n\n @transition(field=state, source='evaluated', target='reviewed', conditions=[lambda self: self.is_fully_reviewed])\n def review_finished(self):\n pass\n\n @transition(field=state, source=['new', 'reviewed'], target='reviewed', conditions=[lambda self: self.is_single_result])\n def single_result_created(self):\n pass\n\n @transition(field=state, source='reviewed', target='evaluated', conditions=[lambda self: not self.is_fully_reviewed])\n def reopen_review(self):\n pass\n\n @transition(field=state, source='reviewed', target='published')\n def publish(self):\n assert self.is_single_result or self._voter_count is None and self._participant_count is None\n self._voter_count = self.num_voters\n self._participant_count = self.num_participants\n\n if not self.can_publish_text_results:\n self.textanswer_set.delete()\n else:\n self.textanswer_set.filter(state=TextAnswer.HIDDEN).delete()\n self.textanswer_set.update(original_answer=None)\n\n @transition(field=state, source='published', target='reviewed')\n def unpublish(self):\n assert self.is_single_result or self._voter_count == self.voters.count() and self._participant_count == self.participants.count()\n self._voter_count = None\n self._participant_count = None\n\n @cached_property\n def general_contribution(self):\n try:\n return self.contributions.get(contributor=None)\n except Contribution.DoesNotExist:\n return None\n\n @cached_property\n def num_participants(self):\n if self._participant_count is not None:\n return self._participant_count\n return self.participants.count()\n\n @cached_property\n def num_voters(self):\n if self._voter_count is not None:\n return self._voter_count\n return self.voters.count()\n\n @property\n def due_participants(self):\n return self.participants.exclude(pk__in=self.voters.all())\n\n @cached_property\n def responsible_contributors(self):\n return UserProfile.objects.filter(contributions__course=self, contributions__responsible=True).order_by('contributions__order')\n\n @cached_property\n def num_contributors(self):\n return UserProfile.objects.filter(contributions__course=self).count()\n\n @property\n def days_left_for_evaluation(self):\n return (self.vote_end_date - date.today()).days\n\n @property\n def time_left_for_evaluation(self):\n return self.vote_end_datetime - datetime.now()\n\n def evaluation_ends_soon(self):\n return 0 < self.time_left_for_evaluation.total_seconds() < settings.EVALUATION_END_WARNING_PERIOD * 3600\n\n @property\n def days_until_evaluation(self):\n days_left = (self.vote_start_datetime.date() - date.today()).days\n if self.vote_start_datetime < datetime.now():\n days_left -= 1\n return days_left\n\n def is_user_editor_or_delegate(self, user):\n return self.contributions.filter(Q(contributor=user) | Q(contributor__in=user.represented_users.all()), can_edit=True).exists()\n\n def is_user_contributor_or_delegate(self, user):\n # early out that saves database hits since is_contributor_or_delegate is a cached_property\n if not user.is_contributor_or_delegate:\n return False\n return self.contributions.filter(Q(contributor=user) | Q(contributor__in=user.represented_users.all())).exists()\n\n @property\n def textanswer_set(self):\n 
return TextAnswer.objects.filter(contribution__course=self)\n\n @cached_property\n def num_textanswers(self):\n if not self.can_publish_text_results:\n return 0\n return self.textanswer_set.count()\n\n @property\n def unreviewed_textanswer_set(self):\n return self.textanswer_set.filter(state=TextAnswer.NOT_REVIEWED)\n\n @property\n def reviewed_textanswer_set(self):\n return self.textanswer_set.exclude(state=TextAnswer.NOT_REVIEWED)\n\n @cached_property\n def num_reviewed_textanswers(self):\n return self.reviewed_textanswer_set.count()\n\n @property\n def ratinganswer_counters(self):\n return RatingAnswerCounter.objects.filter(contribution__course=self)\n\n def _archive_participations(self):\n \"\"\"Should be called only via Semester.archive_participations\"\"\"\n if not self.participations_can_be_archived:\n raise NotArchiveable()\n if self._participant_count is not None:\n assert self._voter_count is not None\n assert self.is_single_result or self._voter_count == self.voters.count() and self._participant_count == self.participants.count()\n return\n assert self._participant_count is None and self._voter_count is None\n self._participant_count = self.num_participants\n self._voter_count = self.num_voters\n self.save()\n\n @property\n def participations_are_archived(self):\n semester_participations_are_archived = self.semester.participations_are_archived\n if semester_participations_are_archived:\n assert self._participant_count is not None and self._voter_count is not None\n return semester_participations_are_archived\n\n @property\n def participations_can_be_archived(self):\n return not self.semester.participations_are_archived and self.state in [\"new\", \"published\"]\n\n @property\n def final_grade_documents(self):\n from evap.grades.models import GradeDocument\n return self.grade_documents.filter(type=GradeDocument.FINAL_GRADES)\n\n @property\n def midterm_grade_documents(self):\n from evap.grades.models import GradeDocument\n return self.grade_documents.filter(type=GradeDocument.MIDTERM_GRADES)\n\n @classmethod\n def update_courses(cls):\n logger.info(\"update_courses called. 
Processing courses now.\")\n from evap.evaluation.tools import send_publish_notifications\n\n courses_new_in_evaluation = []\n evaluation_results_courses = []\n\n for course in cls.objects.all():\n try:\n if course.state == \"approved\" and course.vote_start_datetime <= datetime.now():\n course.evaluation_begin()\n course.last_modified_user = UserProfile.objects.cronjob_user()\n course.save()\n courses_new_in_evaluation.append(course)\n elif course.state == \"in_evaluation\" and datetime.now() >= course.vote_end_datetime:\n course.evaluation_end()\n if course.is_fully_reviewed:\n course.review_finished()\n if not course.is_graded or course.final_grade_documents.exists() or course.gets_no_grade_documents:\n course.publish()\n evaluation_results_courses.append(course)\n course.last_modified_user = UserProfile.objects.cronjob_user()\n course.save()\n except Exception:\n logger.exception('An error occured when updating the state of course \"{}\" (id {}).'.format(course, course.id))\n\n template = EmailTemplate.objects.get(name=EmailTemplate.EVALUATION_STARTED)\n EmailTemplate.send_to_users_in_courses(template, courses_new_in_evaluation, [EmailTemplate.ALL_PARTICIPANTS], use_cc=False, request=None)\n send_publish_notifications(evaluation_results_courses)\n logger.info(\"update_courses finished.\")\n\n\n@receiver(post_transition, sender=Course)\ndef warmup_cache_on_publish(instance, target, **_kwargs):\n if target == 'published':\n from evap.results.tools import collect_results\n from evap.results.views import warm_up_template_cache\n collect_results(instance)\n warm_up_template_cache([instance])\n\n\n@receiver(post_transition, sender=Course)\ndef delete_cache_on_unpublish(instance, source, **_kwargs):\n if source == 'published':\n from evap.results.tools import get_collect_results_cache_key\n from evap.results.views import delete_template_cache\n caches['results'].delete(get_collect_results_cache_key(instance))\n delete_template_cache(instance)\n\n\n@receiver(post_transition, sender=Course)\ndef log_state_transition(instance, name, source, target, **_kwargs):\n logger.info('Course \"{}\" (id {}) moved from state \"{}\" to state \"{}\", caused by transition \"{}\".'.format(instance, instance.pk, source, target, name))\n\n\nclass Contribution(models.Model):\n \"\"\"A contributor who is assigned to a course and his questionnaires.\"\"\"\n\n OWN_COMMENTS = 'OWN'\n COURSE_COMMENTS = 'COURSE'\n ALL_COMMENTS = 'ALL'\n COMMENT_VISIBILITY_CHOICES = (\n (OWN_COMMENTS, _('Own')),\n (COURSE_COMMENTS, _('Course')),\n (ALL_COMMENTS, _('All')),\n )\n IS_CONTRIBUTOR = 'CONTRIBUTOR'\n IS_EDITOR = 'EDITOR'\n IS_RESPONSIBLE = 'RESPONSIBLE'\n RESPONSIBILITY_CHOICES = (\n (IS_CONTRIBUTOR, _('Contributor')),\n (IS_EDITOR, _('Editor')),\n (IS_RESPONSIBLE, _('Responsible')),\n )\n\n course = models.ForeignKey(Course, models.CASCADE, verbose_name=_(\"course\"), related_name='contributions')\n contributor = models.ForeignKey(settings.AUTH_USER_MODEL, models.PROTECT, verbose_name=_(\"contributor\"), blank=True, null=True, related_name='contributions')\n questionnaires = models.ManyToManyField(Questionnaire, verbose_name=_(\"questionnaires\"), blank=True, related_name=\"contributions\")\n responsible = models.BooleanField(verbose_name=_(\"responsible\"), default=False)\n can_edit = models.BooleanField(verbose_name=_(\"can edit\"), default=False)\n comment_visibility = models.CharField(max_length=10, choices=COMMENT_VISIBILITY_CHOICES, verbose_name=_('comment visibility'), default=OWN_COMMENTS)\n label = 
models.CharField(max_length=255, blank=True, null=True, verbose_name=_(\"label\"))\n\n order = models.IntegerField(verbose_name=_(\"contribution order\"), default=-1)\n\n class Meta:\n unique_together = (\n ('course', 'contributor'),\n )\n ordering = ['order', ]\n\n def save(self, *args, **kw):\n super().save(*args, **kw)\n if self.responsible and not self.course.is_single_result:\n assert self.can_edit and self.comment_visibility == self.ALL_COMMENTS\n\n @property\n def is_general(self):\n return self.contributor_id is None\n\n\nclass Question(models.Model, metaclass=LocalizeModelBase):\n \"\"\"A question including a type.\"\"\"\n\n QUESTION_TYPES = (\n (\"T\", _(\"Text Question\")),\n (\"L\", _(\"Likert Question\")),\n (\"G\", _(\"Grade Question\")),\n (\"P\", _(\"Positive Yes-No Question\")),\n (\"N\", _(\"Negative Yes-No Question\")),\n (\"H\", _(\"Heading\")),\n )\n\n order = models.IntegerField(verbose_name=_(\"question order\"), default=-1)\n questionnaire = models.ForeignKey(Questionnaire, models.CASCADE)\n text_de = models.CharField(max_length=1024, verbose_name=_(\"question text (german)\"))\n text_en = models.CharField(max_length=1024, verbose_name=_(\"question text (english)\"))\n type = models.CharField(max_length=1, choices=QUESTION_TYPES, verbose_name=_(\"question type\"))\n\n text = Translate\n\n class Meta:\n ordering = ['order', ]\n verbose_name = _(\"question\")\n verbose_name_plural = _(\"questions\")\n\n @property\n def answer_class(self):\n if self.is_text_question:\n return TextAnswer\n elif self.is_rating_question:\n return RatingAnswerCounter\n else:\n raise Exception(\"Unknown answer type: %r\" % self.type)\n\n @property\n def is_likert_question(self):\n return self.type == \"L\"\n\n @property\n def is_text_question(self):\n return self.type == \"T\"\n\n @property\n def is_grade_question(self):\n return self.type == \"G\"\n\n @property\n def is_positive_yes_no_question(self):\n return self.type == \"P\"\n\n @property\n def is_negative_yes_no_question(self):\n return self.type == \"N\"\n\n @property\n def is_yes_no_question(self):\n return self.is_positive_yes_no_question or self.is_negative_yes_no_question\n\n @property\n def is_rating_question(self):\n return self.is_grade_question or self.is_likert_question or self.is_yes_no_question\n\n @property\n def is_non_grade_rating_question(self):\n return self.is_rating_question and not self.is_grade_question\n\n @property\n def is_heading_question(self):\n return self.type == \"H\"\n\n\nclass Answer(models.Model):\n \"\"\"An abstract answer to a question. For anonymity purposes, the answering\n user ist not stored in the object. Concrete subclasses are `RatingAnswerCounter`,\n and `TextAnswer`.\"\"\"\n\n question = models.ForeignKey(Question, models.PROTECT)\n contribution = models.ForeignKey(Contribution, models.PROTECT, related_name=\"%(class)s_set\")\n\n class Meta:\n abstract = True\n verbose_name = _(\"answer\")\n verbose_name_plural = _(\"answers\")\n\n\nclass RatingAnswerCounter(Answer):\n \"\"\"A rating answer counter to a question. 
A lower answer is better or indicates more agreement.\"\"\"\n\n answer = models.IntegerField(verbose_name=_(\"answer\"))\n count = models.IntegerField(verbose_name=_(\"count\"), default=0)\n\n class Meta:\n unique_together = (\n ('question', 'contribution', 'answer'),\n )\n verbose_name = _(\"rating answer\")\n verbose_name_plural = _(\"rating answers\")\n\n\nclass TextAnswer(Answer):\n \"\"\"A free-form text answer to a question (usually a comment about a course\n or a contributor).\"\"\"\n\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n\n answer = models.TextField(verbose_name=_(\"answer\"))\n original_answer = models.TextField(verbose_name=_(\"original answer\"), blank=True, null=True)\n\n HIDDEN = 'HI'\n PUBLISHED = 'PU'\n PRIVATE = 'PR'\n NOT_REVIEWED = 'NR'\n TEXT_ANSWER_STATES = (\n (HIDDEN, _('hidden')),\n (PUBLISHED, _('published')),\n (PRIVATE, _('private')),\n (NOT_REVIEWED, _('not reviewed')),\n )\n state = models.CharField(max_length=2, choices=TEXT_ANSWER_STATES, verbose_name=_('state of answer'), default=NOT_REVIEWED)\n\n class Meta:\n # Prevent ordering by date for privacy reasons\n ordering = ['id', ]\n verbose_name = _(\"text answer\")\n verbose_name_plural = _(\"text answers\")\n\n @property\n def is_hidden(self):\n return self.state == self.HIDDEN\n\n @property\n def is_private(self):\n return self.state == self.PRIVATE\n\n @property\n def is_published(self):\n return self.state == self.PUBLISHED\n\n def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n assert self.answer != self.original_answer\n\n def publish(self):\n self.state = self.PUBLISHED\n\n def hide(self):\n self.state = self.HIDDEN\n\n def make_private(self):\n self.state = self.PRIVATE\n\n def unreview(self):\n self.state = self.NOT_REVIEWED\n\n\nclass FaqSection(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Section in the frequently asked questions\"\"\"\n\n order = models.IntegerField(verbose_name=_(\"section order\"), default=-1)\n\n title_de = models.CharField(max_length=255, verbose_name=_(\"section title (german)\"))\n title_en = models.CharField(max_length=255, verbose_name=_(\"section title (english)\"))\n title = Translate\n\n class Meta:\n ordering = ['order', ]\n verbose_name = _(\"section\")\n verbose_name_plural = _(\"sections\")\n\n\nclass FaqQuestion(models.Model, metaclass=LocalizeModelBase):\n \"\"\"Question and answer in the frequently asked questions\"\"\"\n\n section = models.ForeignKey(FaqSection, models.CASCADE, related_name=\"questions\")\n\n order = models.IntegerField(verbose_name=_(\"question order\"), default=-1)\n\n question_de = models.CharField(max_length=1024, verbose_name=_(\"question (german)\"))\n question_en = models.CharField(max_length=1024, verbose_name=_(\"question (english)\"))\n question = Translate\n\n answer_de = models.TextField(verbose_name=_(\"answer (german)\"))\n answer_en = models.TextField(verbose_name=_(\"answer (german)\"))\n answer = Translate\n\n class Meta:\n ordering = ['order', ]\n verbose_name = _(\"question\")\n verbose_name_plural = _(\"questions\")\n\n\nclass UserProfileManager(BaseUserManager):\n def get_queryset(self):\n return super().get_queryset().exclude(username=UserProfile.CRONJOB_USER_USERNAME)\n\n def cronjob_user(self):\n return super().get_queryset().get(username=UserProfile.CRONJOB_USER_USERNAME)\n\n def exclude_inactive_users(self):\n return self.get_queryset().exclude(is_active=False)\n\n def create_user(self, username, password=None, email=None, first_name=None, last_name=None):\n 
if not username:\n raise ValueError(_('Users must have a username'))\n\n user = self.model(\n username=username,\n email=self.normalize_email(email),\n first_name=first_name,\n last_name=last_name\n )\n user.set_password(password)\n user.save()\n return user\n\n def create_superuser(self, username, password, email=None, first_name=None, last_name=None):\n user = self.create_user(\n username=username,\n password=password,\n email=email,\n first_name=first_name,\n last_name=last_name\n )\n user.is_superuser = True\n user.save()\n user.groups.add(Group.objects.get(name=\"Staff\"))\n return user\n\n\nclass UserProfile(AbstractBaseUser, PermissionsMixin):\n username = models.CharField(max_length=255, unique=True, verbose_name=_('username'))\n\n # null=True because users created through kerberos logins and certain external users don't have an address.\n email = models.EmailField(max_length=255, unique=True, blank=True, null=True, verbose_name=_('email address'))\n\n title = models.CharField(max_length=255, blank=True, null=True, verbose_name=_(\"Title\"))\n first_name = models.CharField(max_length=255, blank=True, null=True, verbose_name=_(\"first name\"))\n last_name = models.CharField(max_length=255, blank=True, null=True, verbose_name=_(\"last name\"))\n\n language = models.CharField(max_length=8, blank=True, null=True, verbose_name=_(\"language\"))\n\n # delegates of the user, which can also manage their courses\n delegates = models.ManyToManyField(\"UserProfile\", verbose_name=_(\"Delegates\"), related_name=\"represented_users\", blank=True)\n\n # users to which all emails should be sent in cc without giving them delegate rights\n cc_users = models.ManyToManyField(\"UserProfile\", verbose_name=_(\"CC Users\"), related_name=\"ccing_users\", blank=True)\n\n # key for url based login of this user\n MAX_LOGIN_KEY = 2**31 - 1\n\n login_key = models.IntegerField(verbose_name=_(\"Login Key\"), unique=True, blank=True, null=True)\n login_key_valid_until = models.DateField(verbose_name=_(\"Login Key Validity\"), blank=True, null=True)\n\n is_active = models.BooleanField(default=True, verbose_name=_(\"active\"))\n\n class Meta:\n ordering = ('last_name', 'first_name', 'username')\n verbose_name = _('user')\n verbose_name_plural = _('users')\n\n USERNAME_FIELD = 'username'\n REQUIRED_FIELDS = []\n\n objects = UserProfileManager()\n\n @property\n def full_name(self):\n if self.last_name:\n name = self.last_name\n if self.first_name:\n name = self.first_name + \" \" + name\n if self.title:\n name = self.title + \" \" + name\n return name\n else:\n return self.username\n\n @property\n def full_name_with_username(self):\n name = self.full_name\n if self.username not in name:\n name += \" (\" + self.username + \")\"\n return name\n\n def __str__(self):\n return self.full_name\n\n @cached_property\n def is_staff(self):\n return self.groups.filter(name='Staff').exists()\n\n @cached_property\n def is_reviewer(self):\n return self.is_staff or self.groups.filter(name='Reviewer').exists()\n\n @cached_property\n def is_grade_publisher(self):\n return self.groups.filter(name='Grade publisher').exists()\n\n CRONJOB_USER_USERNAME = \"cronjob\"\n\n @property\n def can_staff_mark_inactive(self):\n if self.is_reviewer or self.is_grade_publisher or self.is_superuser:\n return False\n if any(not course.participations_are_archived for course in self.courses_participating_in.all()):\n return False\n if any(not contribution.course.participations_are_archived for contribution in self.contributions.all()):\n return False\n 
return True\n\n @property\n def can_staff_delete(self):\n if self.is_contributor or self.is_reviewer or self.is_grade_publisher or self.is_superuser:\n return False\n if any(not course.participations_are_archived for course in self.courses_participating_in.all()):\n return False\n if any(not user.can_staff_delete for user in self.represented_users.all()):\n return False\n if any(not user.can_staff_delete for user in self.ccing_users.all()):\n return False\n return True\n\n @property\n def is_participant(self):\n return self.courses_participating_in.exists()\n\n @property\n def is_student(self):\n \"\"\"\n A UserProfile is not considered to be a student anymore if the\n newest contribution is newer than the newest participation.\n \"\"\"\n if not self.is_participant:\n return False\n\n if not self.is_contributor:\n return True\n\n last_semester_participated = Semester.objects.filter(course__participants=self).order_by(\"-created_at\").first()\n last_semester_contributed = Semester.objects.filter(course__contributions__contributor=self).order_by(\"-created_at\").first()\n\n return last_semester_participated.created_at >= last_semester_contributed.created_at\n\n @property\n def is_contributor(self):\n return self.contributions.exists()\n\n @property\n def is_editor(self):\n return self.contributions.filter(can_edit=True).exists()\n\n @property\n def is_responsible(self):\n # in the user list, self.user.contributions is prefetched, therefore use it directly and don't filter it\n return any(contribution.responsible for contribution in self.contributions.all())\n\n @property\n def is_delegate(self):\n return self.represented_users.exists()\n\n @property\n def is_editor_or_delegate(self):\n return self.is_editor or self.is_delegate\n\n @cached_property\n def is_contributor_or_delegate(self):\n return self.is_contributor or self.is_delegate\n\n @property\n def is_external(self):\n # do the import here to prevent a circular import\n from evap.evaluation.tools import is_external_email\n if not self.email:\n return True\n return is_external_email(self.email)\n\n @property\n def can_download_grades(self):\n return not self.is_external\n\n @classmethod\n def email_needs_login_key(cls, email):\n # do the import here to prevent a circular import\n from evap.evaluation.tools import is_external_email\n return is_external_email(email)\n\n @property\n def needs_login_key(self):\n return UserProfile.email_needs_login_key(self.email)\n\n def ensure_valid_login_key(self):\n if self.login_key and self.login_key_valid_until > date.today():\n return\n\n while True:\n key = random.randrange(0, UserProfile.MAX_LOGIN_KEY)\n if not UserProfile.objects.filter(login_key=key).exists():\n # key not yet used\n self.login_key = key\n break\n self.refresh_login_key()\n\n def refresh_login_key(self):\n self.login_key_valid_until = date.today() + timedelta(settings.LOGIN_KEY_VALIDITY)\n self.save()\n\n @property\n def login_url(self):\n if not self.needs_login_key:\n return \"\"\n return settings.PAGE_URL + \"?loginkey=\" + str(self.login_key)\n\n def get_sorted_contributions(self):\n return self.contributions.order_by('course__semester__created_at', 'course__name_de')\n\n def get_sorted_courses_participating_in(self):\n return self.courses_participating_in.order_by('semester__created_at', 'name_de')\n\n def get_sorted_courses_voted_for(self):\n return self.courses_voted_for.order_by('semester__created_at', 'name_de')\n\n\ndef validate_template(value):\n \"\"\"Field validator which ensures that the value can be compiled into 
a\n Django Template.\"\"\"\n try:\n Template(value)\n except TemplateSyntaxError as e:\n raise ValidationError(str(e))\n\n\nclass EmailTemplate(models.Model):\n name = models.CharField(max_length=1024, unique=True, verbose_name=_(\"Name\"))\n\n subject = models.CharField(max_length=1024, verbose_name=_(\"Subject\"), validators=[validate_template])\n body = models.TextField(verbose_name=_(\"Body\"), validators=[validate_template])\n\n EDITOR_REVIEW_NOTICE = \"Editor Review Notice\"\n EDITOR_REVIEW_REMINDER = \"Editor Review Reminder\"\n STUDENT_REMINDER = \"Student Reminder\"\n PUBLISHING_NOTICE = \"Publishing Notice\"\n LOGIN_KEY_CREATED = \"Login Key Created\"\n EVALUATION_STARTED = \"Evaluation Started\"\n\n ALL_PARTICIPANTS = 'all_participants'\n DUE_PARTICIPANTS = 'due_participants'\n RESPONSIBLE = 'responsible'\n EDITORS = 'editors'\n CONTRIBUTORS = 'contributors'\n\n EMAIL_RECIPIENTS = (\n (ALL_PARTICIPANTS, _('all participants')),\n (DUE_PARTICIPANTS, _('due participants')),\n (RESPONSIBLE, _('responsible person')),\n (EDITORS, _('all editors')),\n (CONTRIBUTORS, _('all contributors'))\n )\n\n @classmethod\n def recipient_list_for_course(cls, course, recipient_groups, filter_users_in_cc):\n recipients = []\n\n if cls.CONTRIBUTORS in recipient_groups:\n recipients += UserProfile.objects.filter(contributions__course=course)\n elif cls.EDITORS in recipient_groups:\n recipients += UserProfile.objects.filter(contributions__course=course, contributions__can_edit=True)\n elif cls.RESPONSIBLE in recipient_groups:\n recipients += course.responsible_contributors\n\n if cls.ALL_PARTICIPANTS in recipient_groups:\n recipients += course.participants.all()\n elif cls.DUE_PARTICIPANTS in recipient_groups:\n recipients += course.due_participants\n\n if filter_users_in_cc:\n # remove delegates and CC users of recipients from the recipient list\n # so they won't get the exact same email twice\n users_excluded = UserProfile.objects.filter(Q(represented_users__in=recipients) | Q(ccing_users__in=recipients))\n # but do so only if they have no delegates/cc_users, because otherwise\n # those won't get the email at all. consequently, some \"edge case users\"\n # will get the email twice, but there is no satisfying way around that.\n users_excluded = users_excluded.filter(delegates=None, cc_users=None)\n\n recipients = list(set(recipients) - set(users_excluded))\n\n return recipients\n\n @classmethod\n def render_string(cls, text, dictionary):\n return Template(text).render(Context(dictionary, autoescape=False))\n\n @classmethod\n def send_to_users_in_courses(cls, template, courses, recipient_groups, use_cc, request):\n user_course_map = {}\n for course in courses:\n recipients = cls.recipient_list_for_course(course, recipient_groups, filter_users_in_cc=use_cc)\n for user in recipients:\n user_course_map.setdefault(user, []).append(course)\n\n for user, courses in user_course_map.items():\n subject_params = {}\n body_params = {'user': user, 'courses': courses, 'due_courses': get_due_courses_for_user(user)}\n cls.send_to_user(user, template, subject_params, body_params, use_cc=use_cc, request=request)\n\n @classmethod\n def send_to_user(cls, user, template, subject_params, body_params, use_cc, request=None):\n if not user.email:\n warning_message = \"{} has no email address defined. 
Could not send email.\".format(user.username)\n # If this method is triggered by a cronjob changing course states, the request is None.\n # In this case warnings should be sent to the admins via email (configured in the settings for logger.error).\n # If a request exists, the page is displayed in the browser and the message can be shown on the page (messages.warning).\n if request is not None:\n logger.warning(warning_message)\n messages.warning(request, _(warning_message))\n else:\n logger.error(warning_message)\n return\n\n if use_cc:\n cc_users = set(user.delegates.all() | user.cc_users.all())\n cc_addresses = [p.email for p in cc_users if p.email]\n else:\n cc_addresses = []\n\n send_separate_login_url = False\n body_params['login_url'] = \"\"\n if user.needs_login_key:\n user.ensure_valid_login_key()\n if not cc_addresses:\n body_params['login_url'] = user.login_url\n else:\n send_separate_login_url = True\n\n subject = cls.render_string(template.subject, subject_params)\n body = cls.render_string(template.body, body_params)\n\n mail = EmailMessage(\n subject=subject,\n body=body,\n to=[user.email],\n cc=cc_addresses,\n bcc=[a[1] for a in settings.MANAGERS],\n headers={'Reply-To': settings.REPLY_TO_EMAIL})\n\n try:\n mail.send(False)\n logger.info(('Sent email \"{}\" to {}.').format(subject, user.username))\n if send_separate_login_url:\n cls.send_login_url_to_user(user)\n except Exception:\n logger.exception('An exception occurred when sending the following email to user \"{}\":\\n{}\\n'.format(user.username, mail.message()))\n\n @classmethod\n def send_reminder_to_user(cls, user, first_due_in_days, due_courses):\n template = cls.objects.get(name=cls.STUDENT_REMINDER)\n subject_params = {'user': user, 'first_due_in_days': first_due_in_days}\n body_params = {'user': user, 'first_due_in_days': first_due_in_days, 'due_courses': due_courses}\n\n cls.send_to_user(user, template, subject_params, body_params, use_cc=False)\n\n @classmethod\n def send_login_url_to_user(cls, user):\n template = cls.objects.get(name=cls.LOGIN_KEY_CREATED)\n subject_params = {}\n body_params = {'user': user, 'login_url': user.login_url}\n\n cls.send_to_user(user, template, subject_params, body_params, use_cc=False)\n logger.info(('Sent login url to {}.').format(user.username))\n", "path": "evap/evaluation/models.py" } ]
diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py index 91dcad791f..ebf6437694 100644 --- a/evap/evaluation/models.py +++ b/evap/evaluation/models.py @@ -446,7 +446,7 @@ def reopen_review(self): @transition(field=state, source='reviewed', target='published') def publish(self): - assert self._voter_count is None and self._participant_count is None + assert self.is_single_result or self._voter_count is None and self._participant_count is None self._voter_count = self.num_voters self._participant_count = self.num_participants diff --git a/evap/evaluation/tests/test_models.py b/evap/evaluation/tests/test_models.py index 8e8a75ea79..a6c1de0255 100644 --- a/evap/evaluation/tests/test_models.py +++ b/evap/evaluation/tests/test_models.py @@ -187,6 +187,21 @@ def test_single_result_can_be_deleted_only_in_reviewed(self): course.delete() self.assertFalse(Course.objects.filter(pk=course.pk).exists()) + def test_single_result_can_be_published(self): + """ Regression test for #1238 """ + responsible = mommy.make(UserProfile) + single_result = mommy.make(Course, + semester=mommy.make(Semester), is_single_result=True, _participant_count=5, _voter_count=5 + ) + contribution = mommy.make(Contribution, + course=single_result, contributor=responsible, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS, + questionnaires=[Questionnaire.single_result_questionnaire()] + ) + mommy.make(RatingAnswerCounter, answer=1, count=1, question=Questionnaire.single_result_questionnaire().question_set.first(), contribution=contribution) + + single_result.single_result_created() + single_result.publish() # used to crash + def test_adding_second_voter_sets_can_publish_text_results_to_true(self): student1 = mommy.make(UserProfile) student2 = mommy.make(UserProfile) @@ -420,15 +435,14 @@ def test_course_participations_are_not_archived_if_participant_count_is_set(self def test_archiving_participations_doesnt_change_single_results_participant_count(self): responsible = mommy.make(UserProfile) - course = mommy.make(Course, state="published", is_single_result=True) + course = mommy.make(Course, + state="published", is_single_result=True, _participant_count=5, _voter_count=5 + ) contribution = mommy.make(Contribution, course=course, contributor=responsible, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS) contribution.questionnaires.add(Questionnaire.single_result_questionnaire()) - course._participant_count = 5 - course._voter_count = 5 - course.save() - course.semester.archive_participations() + course = Course.objects.get(pk=course.pk) self.assertEqual(course._participant_count, 5) self.assertEqual(course._voter_count, 5)
Single result publishing fails: Single results can't be published because https://github.com/fsr-itse/EvaP/blob/master/evap/evaluation/models.py#L449 asserts `self._voter_count is None`, which it is not for single results.
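For readers skimming the row above: the fix in the accompanying diff only relaxes the `publish()` assertion so that single results (whose cached `_participant_count`/`_voter_count` are already set) no longer trip it. The snippet below is a stripped-down illustration of that one change; the `Course` class here is a toy stand-in for the real evap model, which does much more in `publish()`.

```python
class Course:
    """Toy stand-in for evap's Course model, showing only the relaxed assertion."""

    def __init__(self, is_single_result, voter_count=None, participant_count=None):
        self.is_single_result = is_single_result
        self._voter_count = voter_count
        self._participant_count = participant_count

    def publish(self):
        # `or` binds looser than `and`, so this reads as:
        #   is_single_result OR (both cached counts are still unset)
        # The pre-fix version lacked the `is_single_result` escape hatch and
        # therefore raised for single results, whose counts are set up front.
        assert self.is_single_result or self._voter_count is None and self._participant_count is None


# Regular courses must still have unset cached counts when publishing,
# while a single result with counts already filled in no longer asserts.
Course(is_single_result=False).publish()
Course(is_single_result=True, voter_count=5, participant_count=5).publish()
```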
analysiscenter__batchflow-593
[ { "content": "\"\"\" Pipeline decorators \"\"\"\nimport os\nimport traceback\nimport threading\nimport concurrent.futures as cf\nimport asyncio\nimport functools\nimport logging\nimport inspect\n\ntry:\n from numba import jit\nexcept ImportError:\n jit = None\n\nfrom .named_expr import P\nfrom .utils_random import make_seed_sequence, spawn_seed_sequence\n\n\ndef make_function(method, is_global=False):\n \"\"\" Makes a function from a method\n\n Parameters\n ----------\n method\n a callable\n\n is_global : bool\n whether to create a function in a global namespace\n\n Notes\n -----\n A method should not be decorated with any other decorator.\n \"\"\"\n source = inspect.getsource(method).split('\\n')\n indent = len(source[0]) - len(source[0].lstrip())\n\n # strip indent spaces\n source = [s[indent:] for s in source if len(s) > indent]\n # skip all decorator and comment lines before 'def' or 'async def'\n start = 0\n for i, s in enumerate(source):\n if s[:3] in ['def', 'asy']:\n start = i\n break\n source = '\\n'.join(source[start:])\n\n globs = globals() if is_global else method.__globals__.copy()\n exec(source, globs) # pylint:disable=exec-used\n\n # Method with the same name might exist in various classes or modules\n # so a global function should have a unique name\n function_name = method.__module__ + \"_\" + method.__qualname__\n function_name = function_name.replace('.', '_')\n globs[function_name] = globs[method.__name__]\n return globs[function_name]\n\n\ndef _workers_count():\n cpu_count = 0\n try:\n cpu_count = len(os.sched_getaffinity(0))\n except AttributeError:\n cpu_count = os.cpu_count()\n return cpu_count * 4\n\n\ndef _make_action_wrapper_with_args(use_lock=None, no_eval=None): # pylint: disable=redefined-outer-name\n return functools.partial(_make_action_wrapper, use_lock=use_lock, no_eval=no_eval)\n\ndef _make_action_wrapper(action_method, use_lock=None, no_eval=None):\n @functools.wraps(action_method)\n def _action_wrapper(action_self, *args, **kwargs):\n \"\"\" Call the action method \"\"\"\n if use_lock is not None:\n if action_self.pipeline is not None:\n if isinstance(use_lock, bool):\n _lock_name = '#_lock_' + action_method.__name__\n else:\n _lock_name = use_lock\n if not action_self.pipeline.has_variable(_lock_name):\n action_self.pipeline.init_variable(_lock_name, threading.Lock())\n action_self.pipeline.get_variable(_lock_name).acquire()\n\n _res = action_method(action_self, *args, **kwargs)\n\n if use_lock is not None:\n if action_self.pipeline is not None:\n action_self.pipeline.get_variable(_lock_name).release()\n\n return _res\n\n if isinstance(no_eval, str):\n no_eval = [no_eval]\n _action_wrapper.action = dict(method=action_method, use_lock=use_lock, no_eval=no_eval)\n return _action_wrapper\n\ndef action(*args, **kwargs):\n \"\"\" Decorator for action methods in :class:`~.Batch` classes\n\n Parameters\n ----------\n use_lock : bool or str\n whether to lock an action when a pipeline is executed. It can be bool or a lock name.\n A pipeline variable with a lock is created in the pipeline during the execution.\n\n no_eval : str or a sequence of str\n parameters to skip from named expression evaluation.\n A parameter should be passed as a named argument only.\n\n Examples\n --------\n\n .. 
code-block:: python\n\n @action\n def some_action(self, arg1, arg2):\n ...\n\n @action(no_eval='dst')\n def calc_offset(self, src, dst=None):\n ...\n\n @action(use_lock=True)\n def critical_section(self, some_arg, another_arg):\n ...\n\n @action(use_lock='lock_name')\n def another_critical_section(self, some_arg, another_arg):\n ...\n \"\"\"\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n # action without arguments\n return _make_action_wrapper(action_method=args[0])\n # action with arguments\n return _make_action_wrapper_with_args(*args, **kwargs)\n\n\ndef apply_parallel(*args, **kwargs):\n \"\"\" Mark class method for transform in its metaclass.\n\n Decorator writes `kwargs` to the method attribute `apply_kwargs`,\n so they can be extracted and used in metaclass.\n\n Parameters\n ----------\n args, kwargs\n other parameters passed to `apply_parallel` method of the class\n where this decorator is being used\n\n Notes\n -----\n Redefine the attribute `apply_defaults <.Batch.apply_defaults>` in\n the batch class. This is proposed solely for the purposes of brevity — in\n order to avoid repeated heavily loaded class methods decoration, e.g.\n `@apply_parallel(src='images', target='for')` which in most cases is\n actually equivalent to simple `@apply_parallel` assuming\n that the defaults are redefined for the class whose methods are being\n transformed.\n\n Note, that if no defaults redefined those from the nearest\n parent class will be used in :class:`~.batch.MethodsTransformingMeta`.\n \"\"\"\n def mark(method):\n method.apply_kwargs = kwargs\n return method\n\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n return mark(args[0])\n if len(args) != 0:\n raise ValueError(\"This decorator accepts only named arguments\")\n\n return mark\n\n\ndef any_action_failed(results):\n \"\"\" Return `True` if some parallelized invocations threw exceptions \"\"\"\n return any(isinstance(res, Exception) for res in results)\n\ndef call_method(method, use_self, args, kwargs, seed=None):\n \"\"\" Call a method with given args \"\"\"\n if use_self and hasattr(args[0], 'random_seed') and seed is not None:\n # set batch.random_seed to create RNG\n args[0].random_seed = seed\n return method(*args, **kwargs)\n\ndef inbatch_parallel(init, post=None, target='threads', _use_self=None, **dec_kwargs):\n \"\"\" Decorator for parallel methods in :class:`~.Batch` classes\n\n Parameters\n ----------\n init\n a method name or a callable that returns an iterable for parallelization\n (e.g. a list of indices or items to be passed to a parallelized method)\n post\n a method name or a callable to call after parallel invocations\n (e.g. to assemble the batch)\n target : 'threads', 'mpc', 'async', 'for'\n a parallelization engine\n _use_self : bool\n whether to pass `self` (i.e. whether a decorated callable is a method or a function)\n\n Notes\n -----\n `mpc` can be used with a method that is decorated only by `inbatch_parallel`.\n All other decorators will be ignored.\n \"\"\"\n if target not in ['nogil', 'threads', 'mpc', 'async', 'for', 't', 'm', 'a', 'f']:\n raise ValueError(\"target should be one of 'threads', 'mpc', 'async', 'for'\")\n\n def inbatch_parallel_decorator(method):\n \"\"\" Return a decorator which run a method in parallel \"\"\"\n use_self = '.' 
in method.__qualname__ if _use_self is None else _use_self\n mpc_method = method\n if use_self:\n try:\n mpc_method = make_function(method, is_global=True)\n except Exception: # pylint:disable=broad-except\n mpc_method = None\n\n def _check_functions(self):\n \"\"\" Check decorator's `init` and `post` parameters \"\"\"\n if init is None:\n raise ValueError(\"init cannot be None\")\n\n if isinstance(init, str):\n try:\n init_fn = getattr(self, init)\n except AttributeError as e:\n raise ValueError(\"init should refer to a method or property of the class\", type(self).__name__,\n \"returning the list of arguments\") from e\n elif callable(init):\n init_fn = init\n else:\n init_fn = init\n\n if isinstance(post, str):\n try:\n post_fn = getattr(self, post)\n except AttributeError as e:\n raise ValueError(\"post should refer to a method of the class\", type(self).__name__) from e\n elif callable(post):\n post_fn = post\n else:\n post_fn = post\n\n return init_fn, post_fn\n\n def _call_init_fn(init_fn, args, kwargs):\n if callable(init_fn):\n return init_fn(*args, **kwargs)\n return init_fn\n\n def _call_post_fn(self, post_fn, futures, args, kwargs):\n all_results = []\n for future in futures:\n try:\n if isinstance(future, (cf.Future, asyncio.Task)):\n result = future.result()\n else:\n result = future\n except Exception as exce: # pylint: disable=broad-except\n result = exce\n finally:\n all_results += [result]\n\n if post_fn is None:\n if any_action_failed(all_results):\n all_errors = [error for error in all_results if isinstance(error, Exception)]\n logging.error(\"Parallel action failed %s\", all_errors)\n traceback.print_tb(all_errors[0].__traceback__)\n raise RuntimeError(\"Parallel action failed\")\n return self\n return post_fn(all_results, *args, **kwargs)\n\n def _prepare_args(self, args, kwargs):\n params = list()\n\n def _get_value(value, pos=None, name=None):\n if isinstance(value, P):\n if pos is not None:\n params.append(pos)\n elif name is not None:\n params.append(name)\n v = value.get(batch=self, parallel=True)\n return v\n return value\n\n _args = []\n for i, v in enumerate(args):\n _args.append(_get_value(v, pos=i))\n _kwargs = {}\n for k, v in kwargs.items():\n _kwargs.update({k: _get_value(v, name=k)})\n\n return _args, _kwargs, params\n\n def _make_args(self, iteration, init_args, args, kwargs, params=None):\n \"\"\" Make args, kwargs tuple \"\"\"\n if isinstance(init_args, tuple) and len(init_args) == 2 and \\\n isinstance(init_args[0], tuple) and isinstance(init_args[1], dict):\n margs, mkwargs = init_args\n elif isinstance(init_args, dict):\n margs = list()\n mkwargs = init_args\n else:\n margs = init_args\n mkwargs = dict()\n\n margs = margs if isinstance(margs, (list, tuple)) else [margs]\n\n if params:\n _args = list(args)\n _kwargs = {**kwargs}\n for k in params:\n if isinstance(k, str):\n _kwargs[k] = _kwargs[k][iteration]\n else:\n _args[k] = _args[k][iteration]\n else:\n _args = args\n _kwargs = kwargs\n\n if len(args) > 0:\n margs = list(margs) + list(_args)\n if len(kwargs) > 0:\n mkwargs.update(_kwargs)\n\n if use_self:\n margs = [self] + list(margs)\n\n return margs, mkwargs\n\n def make_random_seed(self):\n if getattr(self, 'random_state', None) is None:\n return make_seed_sequence()\n return self.random_stat\n\n def wrap_with_threads(self, args, kwargs):\n \"\"\" Run a method in parallel threads \"\"\"\n init_fn, post_fn = _check_functions(self)\n\n n_workers = kwargs.pop('n_workers', _workers_count())\n with cf.ThreadPoolExecutor(max_workers=n_workers) as 
executor:\n futures = []\n args, kwargs, params = _prepare_args(self, args, kwargs)\n full_kwargs = {**dec_kwargs, **kwargs}\n for iteration, arg in enumerate(_call_init_fn(init_fn, args, full_kwargs)):\n margs, mkwargs = _make_args(self, iteration, arg, args, kwargs, params)\n seed = None if getattr(self, 'random_state', None) is None else spawn_seed_sequence(self)\n one_ft = executor.submit(call_method, method, use_self, margs, mkwargs, seed=seed)\n futures.append(one_ft)\n\n timeout = kwargs.get('timeout', None)\n cf.wait(futures, timeout=timeout, return_when=cf.ALL_COMPLETED)\n\n return _call_post_fn(self, post_fn, futures, args, full_kwargs)\n\n def wrap_with_mpc(self, args, kwargs):\n \"\"\" Run a method in parallel processes \"\"\"\n init_fn, post_fn = _check_functions(self)\n\n n_workers = kwargs.pop('n_workers', _workers_count())\n with cf.ProcessPoolExecutor(max_workers=n_workers) as executor:\n futures = []\n args, kwargs, params = _prepare_args(self, args, kwargs)\n full_kwargs = {**dec_kwargs, **kwargs}\n for iteration, arg in enumerate(_call_init_fn(init_fn, args, full_kwargs)):\n margs, mkwargs = _make_args(self, iteration, arg, args, kwargs, params)\n seed = None if getattr(self, 'random_state', None) is None else spawn_seed_sequence(self)\n one_ft = executor.submit(call_method, mpc_method, use_self, margs, mkwargs, seed=seed)\n futures.append(one_ft)\n\n timeout = kwargs.pop('timeout', None)\n cf.wait(futures, timeout=timeout, return_when=cf.ALL_COMPLETED)\n\n return _call_post_fn(self, post_fn, futures, args, full_kwargs)\n\n def wrap_with_async(self, args, kwargs):\n \"\"\" Run a method in parallel with async / await \"\"\"\n try:\n loop = asyncio.get_event_loop()\n except RuntimeError:\n # this is a new thread where there is no loop\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n else:\n # allow to specify a loop as an action parameter\n loop = kwargs.get('loop', loop)\n\n if loop.is_running():\n raise RuntimeError('Cannot parallel async methods with a running event loop (e.g. 
in IPython).')\n\n init_fn, post_fn = _check_functions(self)\n\n futures = []\n args, kwargs, params = _prepare_args(self, args, kwargs)\n full_kwargs = {**dec_kwargs, **kwargs}\n # save an initial seed to generate child seeds from\n random_seed = make_random_seed(self)\n for iteration, arg in enumerate(_call_init_fn(init_fn, args, full_kwargs)):\n margs, mkwargs = _make_args(self, iteration, arg, args, kwargs, params)\n seed = spawn_seed_sequence(random_seed)\n futures.append(loop.create_task(call_method(method, use_self, margs, mkwargs, seed=seed)))\n\n loop.run_until_complete(asyncio.gather(*futures, loop=loop, return_exceptions=True))\n\n return _call_post_fn(self, post_fn, futures, args, full_kwargs)\n\n def wrap_with_for(self, args, kwargs):\n \"\"\" Run a method sequentially (without parallelism) \"\"\"\n init_fn, post_fn = _check_functions(self)\n _ = kwargs.pop('n_workers', _workers_count())\n futures = []\n args, kwargs, params = _prepare_args(self, args, kwargs)\n full_kwargs = {**dec_kwargs, **kwargs}\n # save an initial seed to generate child seeds from\n random_seed = make_random_seed(self)\n for iteration, arg in enumerate(_call_init_fn(init_fn, args, full_kwargs)):\n margs, mkwargs = _make_args(self, iteration, arg, args, kwargs, params)\n\n seed = spawn_seed_sequence(random_seed)\n try:\n one_ft = call_method(method, use_self, margs, mkwargs, seed=seed)\n except Exception as e: # pylint: disable=broad-except\n one_ft = e\n futures.append(one_ft)\n\n return _call_post_fn(self, post_fn, futures, args, full_kwargs)\n\n @functools.wraps(method)\n def wrapped_method(*args, **kwargs):\n \"\"\" Wrap a method with a required parallel engine \"\"\"\n if use_self:\n # the first arg is self, not an ordinary arg\n self = args[0]\n args = args[1:]\n else:\n # still need self to preserve the signatures of other functions\n self = None\n\n _target = kwargs.pop('target', target)\n\n if asyncio.iscoroutinefunction(method) or _target in ['async', 'a']:\n x = wrap_with_async(self, args, kwargs)\n elif _target in ['threads', 't']:\n x = wrap_with_threads(self, args, kwargs)\n elif _target in ['mpc', 'm']:\n if mpc_method is not None:\n x = wrap_with_mpc(self, args, kwargs)\n else:\n raise ValueError('Cannot use MPC with this method', method)\n elif _target in ['for', 'f']:\n x = wrap_with_for(self, args, kwargs)\n else:\n raise ValueError('Wrong parallelization target:', _target)\n return x\n return wrapped_method\n\n return inbatch_parallel_decorator\n\n\n\ndef parallel(*args, use_self=None, **kwargs):\n \"\"\" Decorator for a parallel execution of a function \"\"\"\n return inbatch_parallel(*args, _use_self=use_self, **kwargs)\n\n\ndef njit(nogil=True, parallel=True): # pylint: disable=redefined-outer-name\n \"\"\" Fake njit decorator to use when numba is not installed \"\"\"\n _, _ = nogil, parallel\n def njit_fake_decorator(method):\n \"\"\" Return a decorator \"\"\"\n @functools.wraps(method)\n def wrapped_method(*args, **kwargs):\n \"\"\" Log warning that numba is not installed which causes preformance degradation \"\"\"\n logging.warning('numba is not installed. 
This causes a severe performance degradation for method %s',\n method.__name__)\n return method(*args, **kwargs)\n return wrapped_method\n return njit_fake_decorator\n\n\ndef mjit(*args, nopython=True, nogil=True, **kwargs):\n \"\"\" jit decorator for methods\n\n Notes\n -----\n This decorator should be applied directly to a method, not another decorator.\n \"\"\"\n def _jit(method):\n if jit is not None:\n func = make_function(method)\n func = jit(*args, nopython=nopython, nogil=nogil, **kwargs)(func)\n else:\n func = method\n logging.warning('numba is not installed. This causes a severe performance degradation for method %s',\n method.__name__)\n\n @functools.wraps(method)\n def _wrapped_method(self, *args, **kwargs):\n _ = self\n return func(None, *args, **kwargs)\n return _wrapped_method\n\n if len(args) == 1 and (callable(args[0])) and len(kwargs) == 0:\n method = args[0]\n args = tuple()\n return _jit(method)\n return _jit\n\n\ndef deprecated(msg):\n \"\"\" Decorator for deprecated functions and methods \"\"\"\n def decorator(func):\n @functools.wraps(func)\n def _call(*args, **kwargs):\n logging.warning(msg)\n return func(*args, **kwargs)\n return _call\n return decorator\n", "path": "batchflow/decorators.py" } ]
[ { "content": "\"\"\" Pipeline decorators \"\"\"\nimport os\nimport traceback\nimport threading\nimport concurrent.futures as cf\nimport asyncio\nimport functools\nimport logging\nimport inspect\n\ntry:\n from numba import jit\nexcept ImportError:\n jit = None\n\nfrom .named_expr import P\nfrom .utils_random import make_seed_sequence, spawn_seed_sequence\n\n\ndef make_function(method, is_global=False):\n \"\"\" Makes a function from a method\n\n Parameters\n ----------\n method\n a callable\n\n is_global : bool\n whether to create a function in a global namespace\n\n Notes\n -----\n A method should not be decorated with any other decorator.\n \"\"\"\n source = inspect.getsource(method).split('\\n')\n indent = len(source[0]) - len(source[0].lstrip())\n\n # strip indent spaces\n source = [s[indent:] for s in source if len(s) > indent]\n # skip all decorator and comment lines before 'def' or 'async def'\n start = 0\n for i, s in enumerate(source):\n if s[:3] in ['def', 'asy']:\n start = i\n break\n source = '\\n'.join(source[start:])\n\n globs = globals() if is_global else method.__globals__.copy()\n exec(source, globs) # pylint:disable=exec-used\n\n # Method with the same name might exist in various classes or modules\n # so a global function should have a unique name\n function_name = method.__module__ + \"_\" + method.__qualname__\n function_name = function_name.replace('.', '_')\n globs[function_name] = globs[method.__name__]\n return globs[function_name]\n\n\ndef _workers_count():\n cpu_count = 0\n try:\n cpu_count = len(os.sched_getaffinity(0))\n except AttributeError:\n cpu_count = os.cpu_count()\n return cpu_count * 4\n\n\ndef _make_action_wrapper_with_args(use_lock=None, no_eval=None): # pylint: disable=redefined-outer-name\n return functools.partial(_make_action_wrapper, use_lock=use_lock, no_eval=no_eval)\n\ndef _make_action_wrapper(action_method, use_lock=None, no_eval=None):\n @functools.wraps(action_method)\n def _action_wrapper(action_self, *args, **kwargs):\n \"\"\" Call the action method \"\"\"\n if use_lock is not None:\n if action_self.pipeline is not None:\n if isinstance(use_lock, bool):\n _lock_name = '#_lock_' + action_method.__name__\n else:\n _lock_name = use_lock\n if not action_self.pipeline.has_variable(_lock_name):\n action_self.pipeline.init_variable(_lock_name, threading.Lock())\n action_self.pipeline.get_variable(_lock_name).acquire()\n\n _res = action_method(action_self, *args, **kwargs)\n\n if use_lock is not None:\n if action_self.pipeline is not None:\n action_self.pipeline.get_variable(_lock_name).release()\n\n return _res\n\n if isinstance(no_eval, str):\n no_eval = [no_eval]\n _action_wrapper.action = dict(method=action_method, use_lock=use_lock, no_eval=no_eval)\n return _action_wrapper\n\ndef action(*args, **kwargs):\n \"\"\" Decorator for action methods in :class:`~.Batch` classes\n\n Parameters\n ----------\n use_lock : bool or str\n whether to lock an action when a pipeline is executed. It can be bool or a lock name.\n A pipeline variable with a lock is created in the pipeline during the execution.\n\n no_eval : str or a sequence of str\n parameters to skip from named expression evaluation.\n A parameter should be passed as a named argument only.\n\n Examples\n --------\n\n .. 
code-block:: python\n\n @action\n def some_action(self, arg1, arg2):\n ...\n\n @action(no_eval='dst')\n def calc_offset(self, src, dst=None):\n ...\n\n @action(use_lock=True)\n def critical_section(self, some_arg, another_arg):\n ...\n\n @action(use_lock='lock_name')\n def another_critical_section(self, some_arg, another_arg):\n ...\n \"\"\"\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n # action without arguments\n return _make_action_wrapper(action_method=args[0])\n # action with arguments\n return _make_action_wrapper_with_args(*args, **kwargs)\n\n\ndef apply_parallel(*args, **kwargs):\n \"\"\" Mark class method for transform in its metaclass.\n\n Decorator writes `kwargs` to the method attribute `apply_kwargs`,\n so they can be extracted and used in metaclass.\n\n Parameters\n ----------\n args, kwargs\n other parameters passed to `apply_parallel` method of the class\n where this decorator is being used\n\n Notes\n -----\n Redefine the attribute `apply_defaults <.Batch.apply_defaults>` in\n the batch class. This is proposed solely for the purposes of brevity — in\n order to avoid repeated heavily loaded class methods decoration, e.g.\n `@apply_parallel(src='images', target='for')` which in most cases is\n actually equivalent to simple `@apply_parallel` assuming\n that the defaults are redefined for the class whose methods are being\n transformed.\n\n Note, that if no defaults redefined those from the nearest\n parent class will be used in :class:`~.batch.MethodsTransformingMeta`.\n \"\"\"\n def mark(method):\n method.apply_kwargs = kwargs\n return method\n\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n return mark(args[0])\n if len(args) != 0:\n raise ValueError(\"This decorator accepts only named arguments\")\n\n return mark\n\n\ndef any_action_failed(results):\n \"\"\" Return `True` if some parallelized invocations threw exceptions \"\"\"\n return any(isinstance(res, Exception) for res in results)\n\ndef call_method(method, use_self, args, kwargs, seed=None):\n \"\"\" Call a method with given args \"\"\"\n if use_self and hasattr(args[0], 'random_seed') and seed is not None:\n # set batch.random_seed to create RNG\n args[0].random_seed = seed\n return method(*args, **kwargs)\n\ndef inbatch_parallel(init, post=None, target='threads', _use_self=None, **dec_kwargs):\n \"\"\" Decorator for parallel methods in :class:`~.Batch` classes\n\n Parameters\n ----------\n init\n a method name or a callable that returns an iterable for parallelization\n (e.g. a list of indices or items to be passed to a parallelized method)\n post\n a method name or a callable to call after parallel invocations\n (e.g. to assemble the batch)\n target : 'threads', 'mpc', 'async', 'for'\n a parallelization engine\n _use_self : bool\n whether to pass `self` (i.e. whether a decorated callable is a method or a function)\n\n Notes\n -----\n `mpc` can be used with a method that is decorated only by `inbatch_parallel`.\n All other decorators will be ignored.\n \"\"\"\n if target not in ['nogil', 'threads', 'mpc', 'async', 'for', 't', 'm', 'a', 'f']:\n raise ValueError(\"target should be one of 'threads', 'mpc', 'async', 'for'\")\n\n def inbatch_parallel_decorator(method):\n \"\"\" Return a decorator which run a method in parallel \"\"\"\n use_self = '.' 
in method.__qualname__ if _use_self is None else _use_self\n mpc_method = method\n if use_self:\n try:\n mpc_method = make_function(method, is_global=True)\n except Exception: # pylint:disable=broad-except\n mpc_method = None\n\n def _check_functions(self):\n \"\"\" Check decorator's `init` and `post` parameters \"\"\"\n if init is None:\n raise ValueError(\"init cannot be None\")\n\n if isinstance(init, str):\n try:\n init_fn = getattr(self, init)\n except AttributeError as e:\n raise ValueError(\"init should refer to a method or property of the class\", type(self).__name__,\n \"returning the list of arguments\") from e\n elif callable(init):\n init_fn = init\n else:\n init_fn = init\n\n if isinstance(post, str):\n try:\n post_fn = getattr(self, post)\n except AttributeError as e:\n raise ValueError(\"post should refer to a method of the class\", type(self).__name__) from e\n elif callable(post):\n post_fn = post\n else:\n post_fn = post\n\n return init_fn, post_fn\n\n def _call_init_fn(init_fn, args, kwargs):\n if callable(init_fn):\n return init_fn(*args, **kwargs)\n return init_fn\n\n def _call_post_fn(self, post_fn, futures, args, kwargs):\n all_results = []\n for future in futures:\n try:\n if isinstance(future, (cf.Future, asyncio.Task)):\n result = future.result()\n else:\n result = future\n except Exception as exce: # pylint: disable=broad-except\n result = exce\n finally:\n all_results += [result]\n\n if post_fn is None:\n if any_action_failed(all_results):\n all_errors = [error for error in all_results if isinstance(error, Exception)]\n logging.error(\"Parallel action failed %s\", all_errors)\n traceback.print_tb(all_errors[0].__traceback__)\n raise RuntimeError(\"Parallel action failed\")\n return self\n return post_fn(all_results, *args, **kwargs)\n\n def _prepare_args(self, args, kwargs):\n params = list()\n\n def _get_value(value, pos=None, name=None):\n if isinstance(value, P):\n if pos is not None:\n params.append(pos)\n elif name is not None:\n params.append(name)\n v = value.get(batch=self, parallel=True)\n return v\n return value\n\n _args = []\n for i, v in enumerate(args):\n _args.append(_get_value(v, pos=i))\n _kwargs = {}\n for k, v in kwargs.items():\n _kwargs.update({k: _get_value(v, name=k)})\n\n return _args, _kwargs, params\n\n def _make_args(self, iteration, init_args, args, kwargs, params=None):\n \"\"\" Make args, kwargs tuple \"\"\"\n if isinstance(init_args, tuple) and len(init_args) == 2 and \\\n isinstance(init_args[0], tuple) and isinstance(init_args[1], dict):\n margs, mkwargs = init_args\n elif isinstance(init_args, dict):\n margs = list()\n mkwargs = init_args\n else:\n margs = init_args\n mkwargs = dict()\n\n margs = margs if isinstance(margs, (list, tuple)) else [margs]\n\n if params:\n _args = list(args)\n _kwargs = {**kwargs}\n for k in params:\n if isinstance(k, str):\n _kwargs[k] = _kwargs[k][iteration]\n else:\n _args[k] = _args[k][iteration]\n else:\n _args = args\n _kwargs = kwargs\n\n if len(args) > 0:\n margs = list(margs) + list(_args)\n if len(kwargs) > 0:\n mkwargs.update(_kwargs)\n\n if use_self:\n margs = [self] + list(margs)\n\n return margs, mkwargs\n\n def make_random_seed(self):\n if getattr(self, 'random_state', None) is None:\n return make_seed_sequence()\n return self.random_state\n\n def wrap_with_threads(self, args, kwargs):\n \"\"\" Run a method in parallel threads \"\"\"\n init_fn, post_fn = _check_functions(self)\n\n n_workers = kwargs.pop('n_workers', _workers_count())\n with cf.ThreadPoolExecutor(max_workers=n_workers) 
as executor:\n futures = []\n args, kwargs, params = _prepare_args(self, args, kwargs)\n full_kwargs = {**dec_kwargs, **kwargs}\n for iteration, arg in enumerate(_call_init_fn(init_fn, args, full_kwargs)):\n margs, mkwargs = _make_args(self, iteration, arg, args, kwargs, params)\n seed = None if getattr(self, 'random_state', None) is None else spawn_seed_sequence(self)\n one_ft = executor.submit(call_method, method, use_self, margs, mkwargs, seed=seed)\n futures.append(one_ft)\n\n timeout = kwargs.get('timeout', None)\n cf.wait(futures, timeout=timeout, return_when=cf.ALL_COMPLETED)\n\n return _call_post_fn(self, post_fn, futures, args, full_kwargs)\n\n def wrap_with_mpc(self, args, kwargs):\n \"\"\" Run a method in parallel processes \"\"\"\n init_fn, post_fn = _check_functions(self)\n\n n_workers = kwargs.pop('n_workers', _workers_count())\n with cf.ProcessPoolExecutor(max_workers=n_workers) as executor:\n futures = []\n args, kwargs, params = _prepare_args(self, args, kwargs)\n full_kwargs = {**dec_kwargs, **kwargs}\n for iteration, arg in enumerate(_call_init_fn(init_fn, args, full_kwargs)):\n margs, mkwargs = _make_args(self, iteration, arg, args, kwargs, params)\n seed = None if getattr(self, 'random_state', None) is None else spawn_seed_sequence(self)\n one_ft = executor.submit(call_method, mpc_method, use_self, margs, mkwargs, seed=seed)\n futures.append(one_ft)\n\n timeout = kwargs.pop('timeout', None)\n cf.wait(futures, timeout=timeout, return_when=cf.ALL_COMPLETED)\n\n return _call_post_fn(self, post_fn, futures, args, full_kwargs)\n\n def wrap_with_async(self, args, kwargs):\n \"\"\" Run a method in parallel with async / await \"\"\"\n try:\n loop = asyncio.get_event_loop()\n except RuntimeError:\n # this is a new thread where there is no loop\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n else:\n # allow to specify a loop as an action parameter\n loop = kwargs.get('loop', loop)\n\n if loop.is_running():\n raise RuntimeError('Cannot parallel async methods with a running event loop (e.g. 
in IPython).')\n\n init_fn, post_fn = _check_functions(self)\n\n futures = []\n args, kwargs, params = _prepare_args(self, args, kwargs)\n full_kwargs = {**dec_kwargs, **kwargs}\n # save an initial seed to generate child seeds from\n random_seed = make_random_seed(self)\n for iteration, arg in enumerate(_call_init_fn(init_fn, args, full_kwargs)):\n margs, mkwargs = _make_args(self, iteration, arg, args, kwargs, params)\n seed = spawn_seed_sequence(random_seed)\n futures.append(loop.create_task(call_method(method, use_self, margs, mkwargs, seed=seed)))\n\n loop.run_until_complete(asyncio.gather(*futures, loop=loop, return_exceptions=True))\n\n return _call_post_fn(self, post_fn, futures, args, full_kwargs)\n\n def wrap_with_for(self, args, kwargs):\n \"\"\" Run a method sequentially (without parallelism) \"\"\"\n init_fn, post_fn = _check_functions(self)\n _ = kwargs.pop('n_workers', _workers_count())\n futures = []\n args, kwargs, params = _prepare_args(self, args, kwargs)\n full_kwargs = {**dec_kwargs, **kwargs}\n # save an initial seed to generate child seeds from\n random_seed = make_random_seed(self)\n for iteration, arg in enumerate(_call_init_fn(init_fn, args, full_kwargs)):\n margs, mkwargs = _make_args(self, iteration, arg, args, kwargs, params)\n\n seed = spawn_seed_sequence(random_seed)\n try:\n one_ft = call_method(method, use_self, margs, mkwargs, seed=seed)\n except Exception as e: # pylint: disable=broad-except\n one_ft = e\n futures.append(one_ft)\n\n return _call_post_fn(self, post_fn, futures, args, full_kwargs)\n\n @functools.wraps(method)\n def wrapped_method(*args, **kwargs):\n \"\"\" Wrap a method with a required parallel engine \"\"\"\n if use_self:\n # the first arg is self, not an ordinary arg\n self = args[0]\n args = args[1:]\n else:\n # still need self to preserve the signatures of other functions\n self = None\n\n _target = kwargs.pop('target', target)\n\n if asyncio.iscoroutinefunction(method) or _target in ['async', 'a']:\n x = wrap_with_async(self, args, kwargs)\n elif _target in ['threads', 't']:\n x = wrap_with_threads(self, args, kwargs)\n elif _target in ['mpc', 'm']:\n if mpc_method is not None:\n x = wrap_with_mpc(self, args, kwargs)\n else:\n raise ValueError('Cannot use MPC with this method', method)\n elif _target in ['for', 'f']:\n x = wrap_with_for(self, args, kwargs)\n else:\n raise ValueError('Wrong parallelization target:', _target)\n return x\n return wrapped_method\n\n return inbatch_parallel_decorator\n\n\n\ndef parallel(*args, use_self=None, **kwargs):\n \"\"\" Decorator for a parallel execution of a function \"\"\"\n return inbatch_parallel(*args, _use_self=use_self, **kwargs)\n\n\ndef njit(nogil=True, parallel=True): # pylint: disable=redefined-outer-name\n \"\"\" Fake njit decorator to use when numba is not installed \"\"\"\n _, _ = nogil, parallel\n def njit_fake_decorator(method):\n \"\"\" Return a decorator \"\"\"\n @functools.wraps(method)\n def wrapped_method(*args, **kwargs):\n \"\"\" Log warning that numba is not installed which causes preformance degradation \"\"\"\n logging.warning('numba is not installed. 
This causes a severe performance degradation for method %s',\n method.__name__)\n return method(*args, **kwargs)\n return wrapped_method\n return njit_fake_decorator\n\n\ndef mjit(*args, nopython=True, nogil=True, **kwargs):\n \"\"\" jit decorator for methods\n\n Notes\n -----\n This decorator should be applied directly to a method, not another decorator.\n \"\"\"\n def _jit(method):\n if jit is not None:\n func = make_function(method)\n func = jit(*args, nopython=nopython, nogil=nogil, **kwargs)(func)\n else:\n func = method\n logging.warning('numba is not installed. This causes a severe performance degradation for method %s',\n method.__name__)\n\n @functools.wraps(method)\n def _wrapped_method(self, *args, **kwargs):\n _ = self\n return func(None, *args, **kwargs)\n return _wrapped_method\n\n if len(args) == 1 and (callable(args[0])) and len(kwargs) == 0:\n method = args[0]\n args = tuple()\n return _jit(method)\n return _jit\n\n\ndef deprecated(msg):\n \"\"\" Decorator for deprecated functions and methods \"\"\"\n def decorator(func):\n @functools.wraps(func)\n def _call(*args, **kwargs):\n logging.warning(msg)\n return func(*args, **kwargs)\n return _call\n return decorator\n", "path": "batchflow/decorators.py" } ]
diff --git a/batchflow/decorators.py b/batchflow/decorators.py
index 64ffa46ee..b659baab2 100644
--- a/batchflow/decorators.py
+++ b/batchflow/decorators.py
@@ -334,7 +334,7 @@ def _make_args(self, iteration, init_args, args, kwargs, params=None):
         def make_random_seed(self):
             if getattr(self, 'random_state', None) is None:
                 return make_seed_sequence()
-            return self.random_stat
+            return self.random_state
 
         def wrap_with_threads(self, args, kwargs):
             """ Run a method in parallel threads """
Typo https://github.com/analysiscenter/batchflow/blob/cd9062150811665b5a4e51a1080da5855f1c4dcb/batchflow/decorators.py#L337 `random_stat` -> `random_state`
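As an illustration of why this one-character typo only surfaces at run time, a minimal, self-contained sketch follows; FakeBatch and the constant seed value are hypothetical stand-ins for illustration, not batchflow's actual Batch class or make_seed_sequence helper.

# Minimal sketch: attribute names are resolved dynamically in Python, so the
# misspelled 'random_stat' only fails when that branch is actually executed.
class FakeBatch:
    def __init__(self, random_state=None):
        self.random_state = random_state

    def make_random_seed(self):
        if getattr(self, 'random_state', None) is None:
            return 42                    # stand-in for make_seed_sequence()
        return self.random_stat          # typo: raises AttributeError here


print(FakeBatch().make_random_seed())    # 42 -- the buggy branch is never reached

try:
    FakeBatch(random_state=7).make_random_seed()
except AttributeError as exc:
    print(exc)                           # 'FakeBatch' object has no attribute 'random_stat'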
avocado-framework__avocado-2276
[ { "content": "# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n#\n# See LICENSE for more details.\n#\n# Copyright: Red Hat Inc. 2014-2017\n# Authors: Ruda Moura <[email protected]>\n# Cleber Rosa <[email protected]>\n\nimport getpass\nimport json\nimport logging\nimport os\nimport re\nimport sys\nimport time\n\nimport fabric.api\nimport fabric.network\nimport fabric.operations\nimport fabric.tasks\nfrom fabric.context_managers import shell_env\nfrom fabric.exceptions import CommandTimeout\n\nfrom avocado.core import exceptions\nfrom avocado.core import exit_codes\nfrom avocado.core import loader\nfrom avocado.core import output\nfrom avocado.core import status\nfrom avocado.core.output import LOG_JOB, LOG_UI\nfrom avocado.core.plugin_interfaces import CLI\nfrom avocado.core.runner import TestRunner\nfrom avocado.core.settings import settings\nfrom avocado.core.test import TestID\nfrom avocado.utils import archive\nfrom avocado.utils import astring\nfrom avocado.utils import process\nfrom avocado.utils import stacktrace\n\n\nclass RemoterError(Exception):\n pass\n\n\nclass ConnectionError(RemoterError):\n pass\n\n\ndef _get_env_vars(env_vars):\n \"\"\"\n Gets environment variables.\n\n :param variables: A list of variables to get.\n :return: A dictionary with variables names and values.\n \"\"\"\n env_vars_map = {}\n for var in env_vars:\n value = os.environ.get(var)\n if value is not None:\n env_vars_map[var] = value\n return env_vars_map\n\n\ndef run(command, ignore_status=False, quiet=True, timeout=60):\n \"\"\"\n Executes a command on the defined fabric hosts.\n\n This is basically a wrapper to fabric.operations.run, encapsulating\n the result on an avocado process.CmdResult object. This also assumes\n the fabric environment was previously (and properly) initialized.\n\n :param command: the command string to execute.\n :param ignore_status: Whether to not raise exceptions in case the\n command's return code is different than zero.\n :param timeout: Maximum time allowed for the command to return.\n :param quiet: Whether to not log command stdout/err. Default: True.\n\n :return: the result of the remote program's execution.\n :rtype: :class:`avocado.utils.process.CmdResult`.\n :raise fabric.exceptions.CommandTimeout: When timeout exhausted.\n \"\"\"\n\n result = process.CmdResult()\n start_time = time.time()\n end_time = time.time() + (timeout or 0) # Support timeout=None\n # Fabric sometimes returns NetworkError even when timeout not reached\n fabric_result = None\n fabric_exception = None\n while True:\n try:\n fabric_result = fabric.operations.run(command=command,\n quiet=quiet,\n warn_only=True,\n timeout=timeout,\n pty=False,\n combine_stderr=False)\n break\n except fabric.network.NetworkError as details:\n fabric_exception = details\n timeout = end_time - time.time()\n if time.time() < end_time:\n break\n if fabric_result is None:\n if fabric_exception is not None:\n raise fabric_exception # it's not None pylint: disable=E0702\n else:\n raise fabric.network.NetworkError(\"Remote execution of '%s'\"\n \"failed without any \"\n \"exception. 
This should not \"\n \"happen.\" % command)\n end_time = time.time()\n duration = end_time - start_time\n result.command = command\n result.stdout = str(fabric_result.stdout)\n result.stderr = str(fabric_result.stderr)\n result.duration = duration\n result.exit_status = fabric_result.return_code\n result.failed = fabric_result.failed\n result.succeeded = fabric_result.succeeded\n if not ignore_status:\n if result.failed:\n raise process.CmdError(command=command, result=result)\n return result\n\n\ndef send_files(local_path, remote_path):\n \"\"\"\n Send files to the defined fabric host.\n\n This assumes the fabric environment was previously (and properly)\n initialized.\n\n :param local_path: the local path.\n :param remote_path: the remote path.\n \"\"\"\n try:\n fabric.operations.put(local_path, remote_path,\n mirror_local_mode=True)\n except ValueError:\n return False\n return True\n\n\ndef receive_files(local_path, remote_path):\n \"\"\"\n Receive files from the defined fabric host.\n\n This assumes the fabric environment was previously (and properly)\n initialized.\n\n :param local_path: the local path.\n :param remote_path: the remote path.\n \"\"\"\n try:\n fabric.operations.get(remote_path,\n local_path)\n except ValueError:\n return False\n return True\n\n\ndef _update_fabric_env(method):\n \"\"\"\n Update fabric env with the appropriate parameters.\n\n :param method: Remote method to wrap.\n :return: Wrapped method.\n \"\"\"\n def wrapper(*args, **kwargs):\n fabric.api.env.update(host_string=args[0].hostname,\n user=args[0].username,\n key_filename=args[0].key_filename,\n password=args[0].password,\n port=args[0].port)\n return method(*args, **kwargs)\n return wrapper\n\n\nclass Remote(object):\n\n \"\"\"\n Performs remote operations.\n \"\"\"\n\n def __init__(self, hostname, username=None, password=None,\n key_filename=None, port=22, timeout=60, attempts=10,\n env_keep=None):\n \"\"\"\n Creates an instance of :class:`Remote`.\n\n :param hostname: the hostname.\n :param username: the username. Default: autodetect.\n :param password: the password. Default: try to use public key.\n :param key_filename: path to an identity file (Example: .pem files\n from Amazon EC2).\n :param timeout: remote command timeout, in seconds. Default: 60.\n :param attempts: number of attempts to connect. 
Default: 10.\n \"\"\"\n self.hostname = hostname\n if username is None:\n username = getpass.getuser()\n self.username = username\n self.key_filename = key_filename\n # None = use public key\n self.password = password\n self.port = port\n reject_unknown_hosts = settings.get_value('remoter.behavior',\n 'reject_unknown_hosts',\n key_type=bool,\n default=False)\n disable_known_hosts = settings.get_value('remoter.behavior',\n 'disable_known_hosts',\n key_type=bool,\n default=False)\n if env_keep is None:\n self.env_vars = {}\n else:\n self.env_vars = _get_env_vars(env_keep)\n fabric.api.env.update(host_string=hostname,\n user=username,\n password=password,\n key_filename=key_filename,\n port=port,\n timeout=timeout / attempts,\n connection_attempts=attempts,\n linewise=True,\n abort_on_prompts=True,\n abort_exception=ConnectionError,\n reject_unknown_hosts=reject_unknown_hosts,\n disable_known_hosts=disable_known_hosts)\n\n @_update_fabric_env\n def run(self, command, ignore_status=False, quiet=True, timeout=60):\n \"\"\"\n Run a command on the remote host.\n\n :param command: the command string to execute.\n :param ignore_status: Whether to not raise exceptions in case the\n command's return code is different than zero.\n :param timeout: Maximum time allowed for the command to return.\n :param quiet: Whether to not log command stdout/err. Default: True.\n\n :return: the result of the remote program's execution.\n :rtype: :class:`avocado.utils.process.CmdResult`.\n :raise fabric.exceptions.CommandTimeout: When timeout exhausted.\n \"\"\"\n\n with shell_env(**self.env_vars): # pylint: disable=E1129\n return_dict = fabric.tasks.execute(run, command, ignore_status,\n quiet, timeout,\n hosts=[self.hostname])\n return return_dict[self.hostname]\n\n def uptime(self):\n \"\"\"\n Performs uptime (good to check connection).\n\n :return: the uptime string or empty string if fails.\n \"\"\"\n res = self.run('uptime', ignore_status=True)\n if res.exit_status == 0:\n return res\n else:\n return ''\n\n def makedir(self, remote_path):\n \"\"\"\n Create a directory.\n\n :param remote_path: the remote path to create.\n \"\"\"\n self.run('mkdir -p %s' % remote_path)\n\n @_update_fabric_env\n def send_files(self, local_path, remote_path):\n \"\"\"\n Send files to remote host.\n\n :param local_path: the local path.\n :param remote_path: the remote path.\n \"\"\"\n result_dict = fabric.tasks.execute(send_files, local_path,\n remote_path, hosts=[self.hostname])\n return result_dict[self.hostname]\n\n @_update_fabric_env\n def receive_files(self, local_path, remote_path):\n \"\"\"\n Receive files from the remote host.\n\n :param local_path: the local path.\n :param remote_path: the remote path.\n \"\"\"\n result_dict = fabric.tasks.execute(receive_files, local_path,\n remote_path, hosts=[self.hostname])\n return result_dict[self.hostname]\n\n\nclass RemoteTestRunner(TestRunner):\n\n \"\"\" Tooled TestRunner to run on remote machine using ssh \"\"\"\n\n # Let's use re.MULTILINE because sometimes servers might have MOTD\n # that will introduce a line break on output.\n remote_version_re = re.compile(r'^Avocado (\\d+)\\.(\\d+)\\r?$',\n re.MULTILINE)\n\n def __init__(self, job, result):\n super(RemoteTestRunner, self).__init__(job, result)\n #: remoter connection to the remote machine\n self.remote = None\n\n def setup(self):\n \"\"\" Setup remote environment \"\"\"\n stdout_claimed_by = getattr(self.job.args, 'stdout_claimed_by', None)\n if not stdout_claimed_by:\n self.job.log.info(\"LOGIN : %s@%s:%d (TIMEOUT: %s 
seconds)\",\n self.job.args.remote_username,\n self.job.args.remote_hostname,\n self.job.args.remote_port,\n self.job.args.remote_timeout)\n self.remote = Remote(hostname=self.job.args.remote_hostname,\n username=self.job.args.remote_username,\n password=self.job.args.remote_password,\n key_filename=self.job.args.remote_key_file,\n port=self.job.args.remote_port,\n timeout=self.job.args.remote_timeout,\n env_keep=self.job.args.env_keep)\n\n def check_remote_avocado(self):\n \"\"\"\n Checks if the remote system appears to have avocado installed\n\n The \"appears to have\" description is justified by the fact that the\n check is rather simplistic, it attempts to run an `avocado -v` command\n and checks if the output looks like what avocado would print out.\n\n :rtype: tuple with (bool, tuple)\n :returns: (True, (x, y, z)) if avocado appears to be installed and\n (False, None) otherwise.\n \"\"\"\n # This will be useful as extra debugging info in case avocado\n # doesn't seem to be available in the remote system.\n self.remote.run('env', ignore_status=True, timeout=60)\n\n result = self.remote.run('avocado -v',\n ignore_status=True,\n timeout=60)\n if result.exit_status == 127:\n return (False, None)\n\n match = self.remote_version_re.findall(result.stderr)\n if match is None:\n return (False, None)\n\n try:\n return (True, tuple(int(_) for _ in match[0]))\n except IndexError:\n return (False, None)\n\n @staticmethod\n def _parse_json_response(json_output):\n \"\"\"\n Try to parse JSON response from the remote output.\n\n It tries to find start of the json dictionary and then grabs\n everything till the end of the dictionary. It supports single-\n line as well as multi-line pretty json output.\n \"\"\"\n _result = iter(json_output.splitlines())\n json_result = \"\"\n response = None\n for line in _result: # Find the beginning\n if line.startswith('{'):\n json_result += line\n break\n else:\n raise ValueError(\"Could not find the beginning of the remote JSON\"\n \" output:\\n%s\" % output)\n if json_result.endswith('}'): # probably single-line\n try:\n response = json.loads(json_result)\n except ValueError:\n pass\n if not response:\n # Json was incomplete, try to find another end\n for line in _result:\n json_result += line\n if line.startswith('}'):\n try:\n response = json.loads(json_result)\n break\n except ValueError:\n pass\n if not response:\n raise ValueError(\"Could not find the end of the remote JSON \"\n \"output:\\n%s\" % output)\n return response\n\n def run_test(self, references, timeout):\n \"\"\"\n Run tests.\n\n :param references: a string with test references.\n :return: a dictionary with test results.\n \"\"\"\n extra_params = []\n mux_files = getattr(self.job.args, 'mux_yaml') or []\n if mux_files:\n extra_params.append(\"-m %s\" % \" \".join(mux_files))\n\n if getattr(self.job.args, \"dry_run\", False):\n extra_params.append(\"--dry-run\")\n references_str = \" \".join(references)\n\n avocado_cmd = ('avocado run --force-job-id %s --json - '\n '--archive %s %s' % (self.job.unique_id,\n references_str, \" \".join(extra_params)))\n try:\n result = self.remote.run(avocado_cmd, ignore_status=True,\n timeout=timeout)\n if result.exit_status & exit_codes.AVOCADO_JOB_FAIL:\n raise exceptions.JobError(\"Remote execution failed with: %s\" % result.stderr)\n\n except CommandTimeout:\n raise exceptions.JobError(\"Remote execution took longer than \"\n \"specified timeout (%s). 
Interrupting.\"\n % (timeout))\n\n try:\n json_result = self._parse_json_response(result.stdout)\n except:\n stacktrace.log_exc_info(sys.exc_info(), logger='avocado.debug')\n raise exceptions.JobError(result.stdout)\n\n for t_dict in json_result['tests']:\n logdir = os.path.join(self.job.logdir, 'test-results')\n relative_path = astring.string_to_safe_path(str(t_dict['test']))\n logdir = os.path.join(logdir, relative_path)\n t_dict['logdir'] = logdir\n t_dict['logfile'] = os.path.join(logdir, 'debug.log')\n\n return json_result\n\n def run_suite(self, test_suite, variants, timeout=0, replay_map=None,\n suite_order=\"variants-per-test\"):\n \"\"\"\n Run one or more tests and report with test result.\n\n :param params_list: a list of param dicts.\n :param variants: A varianter iterator (unused here)\n\n :return: a set with types of test failures.\n \"\"\"\n del test_suite # using self.job.references instead\n del variants # we're not using multiplexation here\n if suite_order != \"variants-per-test\" and suite_order is not None:\n raise exceptions.JobError(\"execution-order %s is not supported \"\n \"for remote execution.\" % suite_order)\n del suite_order # suite_order is ignored for now\n if not timeout: # avoid timeout = 0\n timeout = None\n summary = set()\n\n stdout_backup = sys.stdout\n stderr_backup = sys.stderr\n fabric_debugfile = os.path.join(self.job.logdir, 'remote.log')\n paramiko_logger = logging.getLogger('paramiko')\n fabric_logger = logging.getLogger('avocado.fabric')\n remote_logger = logging.getLogger('avocado.remote')\n app_logger = logging.getLogger('avocado.debug')\n fmt = ('%(asctime)s %(module)-10.10s L%(lineno)-.4d %('\n 'levelname)-5.5s| %(message)s')\n formatter = logging.Formatter(fmt=fmt, datefmt='%H:%M:%S')\n file_handler = logging.FileHandler(filename=fabric_debugfile)\n file_handler.setFormatter(formatter)\n fabric_logger.addHandler(file_handler)\n paramiko_logger.addHandler(file_handler)\n remote_logger.addHandler(file_handler)\n if self.job.args.show_job_log:\n output.add_log_handler(paramiko_logger.name)\n logger_list = [output.LOG_JOB]\n sys.stdout = output.LoggingFile(loggers=logger_list)\n sys.stderr = output.LoggingFile(loggers=logger_list)\n try:\n try:\n self.setup()\n avocado_installed, _ = self.check_remote_avocado()\n if not avocado_installed:\n raise exceptions.JobError('Remote machine does not seem to'\n ' have avocado installed')\n except Exception as details:\n stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)\n raise exceptions.JobError(details)\n results = self.run_test(self.job.references, timeout)\n remote_log_dir = os.path.dirname(results['debuglog'])\n self.result.tests_total = results['total']\n local_log_dir = self.job.logdir\n for tst in results['tests']:\n name = tst['test'].split('-', 1)\n name = [name[0]] + name[1].split(';')\n if len(name) == 3:\n name[2] = {\"variant_id\": name[2]}\n name = TestID(*name, no_digits=-1)\n state = dict(name=name,\n time_elapsed=tst['time'],\n time_start=tst['start'],\n time_end=tst['end'],\n status=tst['status'],\n logdir=tst['logdir'],\n logfile=tst['logfile'],\n fail_reason=tst['fail_reason'],\n job_logdir=local_log_dir,\n job_unique_id='')\n self.result.start_test(state)\n self.job._result_events_dispatcher.map_method('start_test',\n self.result,\n state)\n self.result.check_test(state)\n self.job._result_events_dispatcher.map_method('end_test',\n self.result,\n state)\n if state['status'] == \"INTERRUPTED\":\n summary.add(\"INTERRUPTED\")\n elif not status.mapping[state['status']]:\n 
summary.add(\"FAIL\")\n zip_filename = remote_log_dir + '.zip'\n zip_path_filename = os.path.join(local_log_dir,\n os.path.basename(zip_filename))\n self.remote.receive_files(local_log_dir, zip_filename)\n archive.uncompress(zip_path_filename, local_log_dir)\n os.remove(zip_path_filename)\n self.result.end_tests()\n self.job._result_events_dispatcher.map_method('post_tests',\n self.job)\n finally:\n try:\n self.tear_down()\n except Exception as details:\n stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)\n raise exceptions.JobError(details)\n sys.stdout = stdout_backup\n sys.stderr = stderr_backup\n return summary\n\n def tear_down(self):\n \"\"\"\n This method is only called when `run_suite` gets to the point of to be\n executing `setup` method and is called at the end of the execution.\n\n :warning: It might be called on `setup` exceptions, so things\n initialized during `setup` might not yet be initialized.\n \"\"\"\n pass\n\n\nclass RemoteCLI(CLI):\n\n \"\"\"\n Run tests on a remote machine\n \"\"\"\n\n name = 'remote'\n description = \"Remote machine options for 'run' subcommand\"\n\n def configure(self, parser):\n run_subcommand_parser = parser.subcommands.choices.get('run', None)\n if run_subcommand_parser is None:\n return\n\n msg = 'test execution on a remote machine'\n remote_parser = run_subcommand_parser.add_argument_group(msg)\n remote_parser.add_argument('--remote-hostname',\n dest='remote_hostname', default=None,\n help=('Specify the hostname to login on'\n ' remote machine'))\n remote_parser.add_argument('--remote-port', dest='remote_port',\n default=22, type=int,\n help=('Specify the port number to login on '\n 'remote machine. Default: %(default)s'))\n remote_parser.add_argument('--remote-username',\n dest='remote_username',\n default=getpass.getuser(),\n help=('Specify the username to login on'\n ' remote machine. Default: '\n '%(default)s'))\n remote_parser.add_argument('--remote-password',\n dest='remote_password', default=None,\n help=('Specify the password to login on'\n ' remote machine'))\n remote_parser.add_argument('--remote-key-file',\n dest='remote_key_file', default=None,\n help=('Specify an identity file with a '\n 'private key instead of a password '\n '(Example: .pem files from Amazon EC2)'))\n remote_parser.add_argument('--remote-timeout', metavar='SECONDS',\n default=60, type=int,\n help=(\"Amount of time (in seconds) to \"\n \"wait for a successful connection\"\n \" to the remote machine. Defaults\"\n \" to %(default)s seconds.\"))\n\n @staticmethod\n def _check_required_args(args, enable_arg, required_args):\n \"\"\"\n :return: True when enable_arg enabled and all required args are set\n :raise sys.exit: When missing required argument.\n \"\"\"\n if (not hasattr(args, enable_arg) or\n not getattr(args, enable_arg)):\n return False\n missing = []\n for arg in required_args:\n if not getattr(args, arg):\n missing.append(arg)\n if missing:\n LOG_UI.error(\"Use of %s requires %s arguments to be set. Please \"\n \"set %s.\", enable_arg, ', '.join(required_args),\n ', '.join(missing))\n\n return sys.exit(exit_codes.AVOCADO_FAIL)\n return True\n\n def run(self, args):\n if self._check_required_args(args, 'remote_hostname',\n ('remote_hostname',)):\n loader.loader.clear_plugins()\n loader.loader.register_plugin(loader.DummyLoader)\n args.test_runner = RemoteTestRunner\n", "path": "optional_plugins/runner_remote/avocado_runner_remote/__init__.py" } ]
[ { "content": "# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n#\n# See LICENSE for more details.\n#\n# Copyright: Red Hat Inc. 2014-2017\n# Authors: Ruda Moura <[email protected]>\n# Cleber Rosa <[email protected]>\n\nimport getpass\nimport json\nimport logging\nimport os\nimport re\nimport sys\nimport time\n\nimport fabric.api\nimport fabric.network\nimport fabric.operations\nimport fabric.tasks\nfrom fabric.context_managers import shell_env\nfrom fabric.exceptions import CommandTimeout\n\nfrom avocado.core import exceptions\nfrom avocado.core import exit_codes\nfrom avocado.core import loader\nfrom avocado.core import output\nfrom avocado.core import status\nfrom avocado.core.output import LOG_JOB, LOG_UI\nfrom avocado.core.plugin_interfaces import CLI\nfrom avocado.core.runner import TestRunner\nfrom avocado.core.settings import settings\nfrom avocado.core.test import TestID\nfrom avocado.utils import archive\nfrom avocado.utils import astring\nfrom avocado.utils import process\nfrom avocado.utils import stacktrace\n\n\nclass RemoterError(Exception):\n pass\n\n\nclass ConnectionError(RemoterError):\n pass\n\n\ndef _get_env_vars(env_vars):\n \"\"\"\n Gets environment variables.\n\n :param variables: A list of variables to get.\n :return: A dictionary with variables names and values.\n \"\"\"\n env_vars_map = {}\n for var in env_vars:\n value = os.environ.get(var)\n if value is not None:\n env_vars_map[var] = value\n return env_vars_map\n\n\ndef run(command, ignore_status=False, quiet=True, timeout=60):\n \"\"\"\n Executes a command on the defined fabric hosts.\n\n This is basically a wrapper to fabric.operations.run, encapsulating\n the result on an avocado process.CmdResult object. This also assumes\n the fabric environment was previously (and properly) initialized.\n\n :param command: the command string to execute.\n :param ignore_status: Whether to not raise exceptions in case the\n command's return code is different than zero.\n :param timeout: Maximum time allowed for the command to return.\n :param quiet: Whether to not log command stdout/err. Default: True.\n\n :return: the result of the remote program's execution.\n :rtype: :class:`avocado.utils.process.CmdResult`.\n :raise fabric.exceptions.CommandTimeout: When timeout exhausted.\n \"\"\"\n\n result = process.CmdResult()\n start_time = time.time()\n end_time = time.time() + (timeout or 0) # Support timeout=None\n # Fabric sometimes returns NetworkError even when timeout not reached\n fabric_result = None\n fabric_exception = None\n while True:\n try:\n fabric_result = fabric.operations.run(command=command,\n quiet=quiet,\n warn_only=True,\n timeout=timeout,\n pty=False,\n combine_stderr=False)\n break\n except fabric.network.NetworkError as details:\n fabric_exception = details\n timeout = end_time - time.time()\n if time.time() > end_time:\n break\n if fabric_result is None:\n if fabric_exception is not None:\n raise fabric_exception # it's not None pylint: disable=E0702\n else:\n raise fabric.network.NetworkError(\"Remote execution of '%s'\"\n \"failed without any \"\n \"exception. 
This should not \"\n \"happen.\" % command)\n end_time = time.time()\n duration = end_time - start_time\n result.command = command\n result.stdout = str(fabric_result.stdout)\n result.stderr = str(fabric_result.stderr)\n result.duration = duration\n result.exit_status = fabric_result.return_code\n result.failed = fabric_result.failed\n result.succeeded = fabric_result.succeeded\n if not ignore_status:\n if result.failed:\n raise process.CmdError(command=command, result=result)\n return result\n\n\ndef send_files(local_path, remote_path):\n \"\"\"\n Send files to the defined fabric host.\n\n This assumes the fabric environment was previously (and properly)\n initialized.\n\n :param local_path: the local path.\n :param remote_path: the remote path.\n \"\"\"\n try:\n fabric.operations.put(local_path, remote_path,\n mirror_local_mode=True)\n except ValueError:\n return False\n return True\n\n\ndef receive_files(local_path, remote_path):\n \"\"\"\n Receive files from the defined fabric host.\n\n This assumes the fabric environment was previously (and properly)\n initialized.\n\n :param local_path: the local path.\n :param remote_path: the remote path.\n \"\"\"\n try:\n fabric.operations.get(remote_path,\n local_path)\n except ValueError:\n return False\n return True\n\n\ndef _update_fabric_env(method):\n \"\"\"\n Update fabric env with the appropriate parameters.\n\n :param method: Remote method to wrap.\n :return: Wrapped method.\n \"\"\"\n def wrapper(*args, **kwargs):\n fabric.api.env.update(host_string=args[0].hostname,\n user=args[0].username,\n key_filename=args[0].key_filename,\n password=args[0].password,\n port=args[0].port)\n return method(*args, **kwargs)\n return wrapper\n\n\nclass Remote(object):\n\n \"\"\"\n Performs remote operations.\n \"\"\"\n\n def __init__(self, hostname, username=None, password=None,\n key_filename=None, port=22, timeout=60, attempts=10,\n env_keep=None):\n \"\"\"\n Creates an instance of :class:`Remote`.\n\n :param hostname: the hostname.\n :param username: the username. Default: autodetect.\n :param password: the password. Default: try to use public key.\n :param key_filename: path to an identity file (Example: .pem files\n from Amazon EC2).\n :param timeout: remote command timeout, in seconds. Default: 60.\n :param attempts: number of attempts to connect. 
Default: 10.\n \"\"\"\n self.hostname = hostname\n if username is None:\n username = getpass.getuser()\n self.username = username\n self.key_filename = key_filename\n # None = use public key\n self.password = password\n self.port = port\n reject_unknown_hosts = settings.get_value('remoter.behavior',\n 'reject_unknown_hosts',\n key_type=bool,\n default=False)\n disable_known_hosts = settings.get_value('remoter.behavior',\n 'disable_known_hosts',\n key_type=bool,\n default=False)\n if env_keep is None:\n self.env_vars = {}\n else:\n self.env_vars = _get_env_vars(env_keep)\n fabric.api.env.update(host_string=hostname,\n user=username,\n password=password,\n key_filename=key_filename,\n port=port,\n timeout=timeout / attempts,\n connection_attempts=attempts,\n linewise=True,\n abort_on_prompts=True,\n abort_exception=ConnectionError,\n reject_unknown_hosts=reject_unknown_hosts,\n disable_known_hosts=disable_known_hosts)\n\n @_update_fabric_env\n def run(self, command, ignore_status=False, quiet=True, timeout=60):\n \"\"\"\n Run a command on the remote host.\n\n :param command: the command string to execute.\n :param ignore_status: Whether to not raise exceptions in case the\n command's return code is different than zero.\n :param timeout: Maximum time allowed for the command to return.\n :param quiet: Whether to not log command stdout/err. Default: True.\n\n :return: the result of the remote program's execution.\n :rtype: :class:`avocado.utils.process.CmdResult`.\n :raise fabric.exceptions.CommandTimeout: When timeout exhausted.\n \"\"\"\n\n with shell_env(**self.env_vars): # pylint: disable=E1129\n return_dict = fabric.tasks.execute(run, command, ignore_status,\n quiet, timeout,\n hosts=[self.hostname])\n return return_dict[self.hostname]\n\n def uptime(self):\n \"\"\"\n Performs uptime (good to check connection).\n\n :return: the uptime string or empty string if fails.\n \"\"\"\n res = self.run('uptime', ignore_status=True)\n if res.exit_status == 0:\n return res\n else:\n return ''\n\n def makedir(self, remote_path):\n \"\"\"\n Create a directory.\n\n :param remote_path: the remote path to create.\n \"\"\"\n self.run('mkdir -p %s' % remote_path)\n\n @_update_fabric_env\n def send_files(self, local_path, remote_path):\n \"\"\"\n Send files to remote host.\n\n :param local_path: the local path.\n :param remote_path: the remote path.\n \"\"\"\n result_dict = fabric.tasks.execute(send_files, local_path,\n remote_path, hosts=[self.hostname])\n return result_dict[self.hostname]\n\n @_update_fabric_env\n def receive_files(self, local_path, remote_path):\n \"\"\"\n Receive files from the remote host.\n\n :param local_path: the local path.\n :param remote_path: the remote path.\n \"\"\"\n result_dict = fabric.tasks.execute(receive_files, local_path,\n remote_path, hosts=[self.hostname])\n return result_dict[self.hostname]\n\n\nclass RemoteTestRunner(TestRunner):\n\n \"\"\" Tooled TestRunner to run on remote machine using ssh \"\"\"\n\n # Let's use re.MULTILINE because sometimes servers might have MOTD\n # that will introduce a line break on output.\n remote_version_re = re.compile(r'^Avocado (\\d+)\\.(\\d+)\\r?$',\n re.MULTILINE)\n\n def __init__(self, job, result):\n super(RemoteTestRunner, self).__init__(job, result)\n #: remoter connection to the remote machine\n self.remote = None\n\n def setup(self):\n \"\"\" Setup remote environment \"\"\"\n stdout_claimed_by = getattr(self.job.args, 'stdout_claimed_by', None)\n if not stdout_claimed_by:\n self.job.log.info(\"LOGIN : %s@%s:%d (TIMEOUT: %s 
seconds)\",\n self.job.args.remote_username,\n self.job.args.remote_hostname,\n self.job.args.remote_port,\n self.job.args.remote_timeout)\n self.remote = Remote(hostname=self.job.args.remote_hostname,\n username=self.job.args.remote_username,\n password=self.job.args.remote_password,\n key_filename=self.job.args.remote_key_file,\n port=self.job.args.remote_port,\n timeout=self.job.args.remote_timeout,\n env_keep=self.job.args.env_keep)\n\n def check_remote_avocado(self):\n \"\"\"\n Checks if the remote system appears to have avocado installed\n\n The \"appears to have\" description is justified by the fact that the\n check is rather simplistic, it attempts to run an `avocado -v` command\n and checks if the output looks like what avocado would print out.\n\n :rtype: tuple with (bool, tuple)\n :returns: (True, (x, y, z)) if avocado appears to be installed and\n (False, None) otherwise.\n \"\"\"\n # This will be useful as extra debugging info in case avocado\n # doesn't seem to be available in the remote system.\n self.remote.run('env', ignore_status=True, timeout=60)\n\n result = self.remote.run('avocado -v',\n ignore_status=True,\n timeout=60)\n if result.exit_status == 127:\n return (False, None)\n\n match = self.remote_version_re.findall(result.stderr)\n if match is None:\n return (False, None)\n\n try:\n return (True, tuple(int(_) for _ in match[0]))\n except IndexError:\n return (False, None)\n\n @staticmethod\n def _parse_json_response(json_output):\n \"\"\"\n Try to parse JSON response from the remote output.\n\n It tries to find start of the json dictionary and then grabs\n everything till the end of the dictionary. It supports single-\n line as well as multi-line pretty json output.\n \"\"\"\n _result = iter(json_output.splitlines())\n json_result = \"\"\n response = None\n for line in _result: # Find the beginning\n if line.startswith('{'):\n json_result += line\n break\n else:\n raise ValueError(\"Could not find the beginning of the remote JSON\"\n \" output:\\n%s\" % output)\n if json_result.endswith('}'): # probably single-line\n try:\n response = json.loads(json_result)\n except ValueError:\n pass\n if not response:\n # Json was incomplete, try to find another end\n for line in _result:\n json_result += line\n if line.startswith('}'):\n try:\n response = json.loads(json_result)\n break\n except ValueError:\n pass\n if not response:\n raise ValueError(\"Could not find the end of the remote JSON \"\n \"output:\\n%s\" % output)\n return response\n\n def run_test(self, references, timeout):\n \"\"\"\n Run tests.\n\n :param references: a string with test references.\n :return: a dictionary with test results.\n \"\"\"\n extra_params = []\n mux_files = getattr(self.job.args, 'mux_yaml') or []\n if mux_files:\n extra_params.append(\"-m %s\" % \" \".join(mux_files))\n\n if getattr(self.job.args, \"dry_run\", False):\n extra_params.append(\"--dry-run\")\n references_str = \" \".join(references)\n\n avocado_cmd = ('avocado run --force-job-id %s --json - '\n '--archive %s %s' % (self.job.unique_id,\n references_str, \" \".join(extra_params)))\n try:\n result = self.remote.run(avocado_cmd, ignore_status=True,\n timeout=timeout)\n if result.exit_status & exit_codes.AVOCADO_JOB_FAIL:\n raise exceptions.JobError(\"Remote execution failed with: %s\" % result.stderr)\n\n except CommandTimeout:\n raise exceptions.JobError(\"Remote execution took longer than \"\n \"specified timeout (%s). 
Interrupting.\"\n % (timeout))\n\n try:\n json_result = self._parse_json_response(result.stdout)\n except:\n stacktrace.log_exc_info(sys.exc_info(), logger='avocado.debug')\n raise exceptions.JobError(result.stdout)\n\n for t_dict in json_result['tests']:\n logdir = os.path.join(self.job.logdir, 'test-results')\n relative_path = astring.string_to_safe_path(str(t_dict['test']))\n logdir = os.path.join(logdir, relative_path)\n t_dict['logdir'] = logdir\n t_dict['logfile'] = os.path.join(logdir, 'debug.log')\n\n return json_result\n\n def run_suite(self, test_suite, variants, timeout=0, replay_map=None,\n suite_order=\"variants-per-test\"):\n \"\"\"\n Run one or more tests and report with test result.\n\n :param params_list: a list of param dicts.\n :param variants: A varianter iterator (unused here)\n\n :return: a set with types of test failures.\n \"\"\"\n del test_suite # using self.job.references instead\n del variants # we're not using multiplexation here\n if suite_order != \"variants-per-test\" and suite_order is not None:\n raise exceptions.JobError(\"execution-order %s is not supported \"\n \"for remote execution.\" % suite_order)\n del suite_order # suite_order is ignored for now\n if not timeout: # avoid timeout = 0\n timeout = None\n summary = set()\n\n stdout_backup = sys.stdout\n stderr_backup = sys.stderr\n fabric_debugfile = os.path.join(self.job.logdir, 'remote.log')\n paramiko_logger = logging.getLogger('paramiko')\n fabric_logger = logging.getLogger('avocado.fabric')\n remote_logger = logging.getLogger('avocado.remote')\n app_logger = logging.getLogger('avocado.debug')\n fmt = ('%(asctime)s %(module)-10.10s L%(lineno)-.4d %('\n 'levelname)-5.5s| %(message)s')\n formatter = logging.Formatter(fmt=fmt, datefmt='%H:%M:%S')\n file_handler = logging.FileHandler(filename=fabric_debugfile)\n file_handler.setFormatter(formatter)\n fabric_logger.addHandler(file_handler)\n paramiko_logger.addHandler(file_handler)\n remote_logger.addHandler(file_handler)\n if self.job.args.show_job_log:\n output.add_log_handler(paramiko_logger.name)\n logger_list = [output.LOG_JOB]\n sys.stdout = output.LoggingFile(loggers=logger_list)\n sys.stderr = output.LoggingFile(loggers=logger_list)\n try:\n try:\n self.setup()\n avocado_installed, _ = self.check_remote_avocado()\n if not avocado_installed:\n raise exceptions.JobError('Remote machine does not seem to'\n ' have avocado installed')\n except Exception as details:\n stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)\n raise exceptions.JobError(details)\n results = self.run_test(self.job.references, timeout)\n remote_log_dir = os.path.dirname(results['debuglog'])\n self.result.tests_total = results['total']\n local_log_dir = self.job.logdir\n for tst in results['tests']:\n name = tst['test'].split('-', 1)\n name = [name[0]] + name[1].split(';')\n if len(name) == 3:\n name[2] = {\"variant_id\": name[2]}\n name = TestID(*name, no_digits=-1)\n state = dict(name=name,\n time_elapsed=tst['time'],\n time_start=tst['start'],\n time_end=tst['end'],\n status=tst['status'],\n logdir=tst['logdir'],\n logfile=tst['logfile'],\n fail_reason=tst['fail_reason'],\n job_logdir=local_log_dir,\n job_unique_id='')\n self.result.start_test(state)\n self.job._result_events_dispatcher.map_method('start_test',\n self.result,\n state)\n self.result.check_test(state)\n self.job._result_events_dispatcher.map_method('end_test',\n self.result,\n state)\n if state['status'] == \"INTERRUPTED\":\n summary.add(\"INTERRUPTED\")\n elif not status.mapping[state['status']]:\n 
summary.add(\"FAIL\")\n zip_filename = remote_log_dir + '.zip'\n zip_path_filename = os.path.join(local_log_dir,\n os.path.basename(zip_filename))\n self.remote.receive_files(local_log_dir, zip_filename)\n archive.uncompress(zip_path_filename, local_log_dir)\n os.remove(zip_path_filename)\n self.result.end_tests()\n self.job._result_events_dispatcher.map_method('post_tests',\n self.job)\n finally:\n try:\n self.tear_down()\n except Exception as details:\n stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)\n raise exceptions.JobError(details)\n sys.stdout = stdout_backup\n sys.stderr = stderr_backup\n return summary\n\n def tear_down(self):\n \"\"\"\n This method is only called when `run_suite` gets to the point of to be\n executing `setup` method and is called at the end of the execution.\n\n :warning: It might be called on `setup` exceptions, so things\n initialized during `setup` might not yet be initialized.\n \"\"\"\n pass\n\n\nclass RemoteCLI(CLI):\n\n \"\"\"\n Run tests on a remote machine\n \"\"\"\n\n name = 'remote'\n description = \"Remote machine options for 'run' subcommand\"\n\n def configure(self, parser):\n run_subcommand_parser = parser.subcommands.choices.get('run', None)\n if run_subcommand_parser is None:\n return\n\n msg = 'test execution on a remote machine'\n remote_parser = run_subcommand_parser.add_argument_group(msg)\n remote_parser.add_argument('--remote-hostname',\n dest='remote_hostname', default=None,\n help=('Specify the hostname to login on'\n ' remote machine'))\n remote_parser.add_argument('--remote-port', dest='remote_port',\n default=22, type=int,\n help=('Specify the port number to login on '\n 'remote machine. Default: %(default)s'))\n remote_parser.add_argument('--remote-username',\n dest='remote_username',\n default=getpass.getuser(),\n help=('Specify the username to login on'\n ' remote machine. Default: '\n '%(default)s'))\n remote_parser.add_argument('--remote-password',\n dest='remote_password', default=None,\n help=('Specify the password to login on'\n ' remote machine'))\n remote_parser.add_argument('--remote-key-file',\n dest='remote_key_file', default=None,\n help=('Specify an identity file with a '\n 'private key instead of a password '\n '(Example: .pem files from Amazon EC2)'))\n remote_parser.add_argument('--remote-timeout', metavar='SECONDS',\n default=60, type=int,\n help=(\"Amount of time (in seconds) to \"\n \"wait for a successful connection\"\n \" to the remote machine. Defaults\"\n \" to %(default)s seconds.\"))\n\n @staticmethod\n def _check_required_args(args, enable_arg, required_args):\n \"\"\"\n :return: True when enable_arg enabled and all required args are set\n :raise sys.exit: When missing required argument.\n \"\"\"\n if (not hasattr(args, enable_arg) or\n not getattr(args, enable_arg)):\n return False\n missing = []\n for arg in required_args:\n if not getattr(args, arg):\n missing.append(arg)\n if missing:\n LOG_UI.error(\"Use of %s requires %s arguments to be set. Please \"\n \"set %s.\", enable_arg, ', '.join(required_args),\n ', '.join(missing))\n\n return sys.exit(exit_codes.AVOCADO_FAIL)\n return True\n\n def run(self, args):\n if self._check_required_args(args, 'remote_hostname',\n ('remote_hostname',)):\n loader.loader.clear_plugins()\n loader.loader.register_plugin(loader.DummyLoader)\n args.test_runner = RemoteTestRunner\n", "path": "optional_plugins/runner_remote/avocado_runner_remote/__init__.py" } ]
diff --git a/optional_plugins/runner_remote/avocado_runner_remote/__init__.py b/optional_plugins/runner_remote/avocado_runner_remote/__init__.py index 84bd1b7c4b..5ff38ca163 100644 --- a/optional_plugins/runner_remote/avocado_runner_remote/__init__.py +++ b/optional_plugins/runner_remote/avocado_runner_remote/__init__.py @@ -104,7 +104,7 @@ def run(command, ignore_status=False, quiet=True, timeout=60): except fabric.network.NetworkError as details: fabric_exception = details timeout = end_time - time.time() - if time.time() < end_time: + if time.time() > end_time: break if fabric_result is None: if fabric_exception is not None:
Why is this "<" and not ">"? https://github.com/avocado-framework/avocado/blob/d1503a1dcfe684a1b6ab03fd79546eb0f2bfb511/optional_plugins/runner_remote/avocado_runner_remote/__init__.py#L107
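The diff above flips the early-exit check inside the retry loop of avocado's remote `run(command, ...)` helper. With `<`, the condition `time.time() < end_time` is true whenever time still remains, so the loop broke after the very first attempt instead of only once the deadline had passed; that is exactly what the linked issue is asking about. Below is a minimal, hedged sketch of the deadline-based retry pattern the corrected comparison implements; the function name and the use of `ConnectionError` are illustrative stand-ins, not the avocado/fabric code itself.

```python
import time


def run_with_deadline(attempt, timeout=60, retry_delay=1.0):
    """Retry `attempt` until it succeeds or the deadline passes.

    Mirrors the corrected comparison in the diff above: the loop should
    only give up once time.time() has moved *past* end_time (`>`), not
    while time still remains (`<`).
    """
    end_time = time.time() + timeout
    last_error = None
    while True:
        try:
            return attempt()
        except ConnectionError as details:  # stand-in for fabric.network.NetworkError
            last_error = details
        if time.time() > end_time:  # deadline exceeded: stop retrying
            break
        time.sleep(retry_delay)     # otherwise wait briefly and try again
    raise last_error
```

With the original `<`, the `break` would fire on the first pass (the deadline is still in the future), defeating the retry entirely.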
chanzuckerberg__single-cell-curation-428
[ { "content": "import logging\nimport os\nimport sys\n\n\ndef set_log_level(log_level: str):\n \"\"\"\n :param log_level: the logging level (\"NOTSET\", \"DEBUG\", \"INFO\", \"WARN\", \"ERROR\", \"FATAL\")\n \"\"\"\n all_levels = logging._nameToLevel.keys()\n if log_level not in all_levels:\n raise Exception(f\"The log_level arg must be one of {list(all_levels)}\")\n os.environ[\"LOG_LEVEL\"] = log_level\n logger = logging.getLogger()\n logger.setLevel(log_level)\n for h in logger.handlers:\n h.setLevel(log_level)\n print(f\"Set logging level to {log_level}\")\n\n\ndef get_custom_logger() -> logging.Logger:\n \"\"\"\n Get a custom logger that will still print to stdout in notebooks.\n :return: the logger object\n \"\"\"\n log_level = os.getenv(\"LOG_LEVEL\", \"INFO\")\n logging.basicConfig(level=log_level)\n logger = logging.getLogger()\n logger.removeHandler(logger.handlers[0])\n ch = logging.StreamHandler(stream=sys.stdout)\n ch.setLevel(level=log_level)\n level_printout = f\"{'%(levelname)s:' if logger.level in ('WARN', 'ERROR') else ''}\"\n formatter = logging.Formatter(f\"{level_printout}%(message)s\")\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger\n\n\ndef failure(logger: logging.Logger, e, *messages):\n logger.error(\"\\n\\033[1m\\033[38;5;9mFAILED\\033[0m\\n\") # 'FAILED' in bold red\n logger.error(e.response.reason + \"\\n\")\n logger.error(e.response.text + \"\\n\")\n if messages:\n [logger.error(m) for m in messages]\n\n\ndef success(logger: logging.Logger, *messages):\n logger.info(\"\\n\\033[1m\\033[38;5;10mSUCCESS\\033[0m\\n\") # 'SUCCESS' in bold green\n if messages:\n [logger.info(m) for m in messages]\n", "path": "notebooks/curation_api/python/src/utils/logger.py" } ]
[ { "content": "import logging\nimport os\nimport sys\n\n\ndef set_log_level(log_level: str):\n \"\"\"\n :param log_level: the logging level (\"NOTSET\", \"DEBUG\", \"INFO\", \"WARN\", \"ERROR\", \"FATAL\")\n \"\"\"\n all_levels = logging._nameToLevel.keys()\n if log_level not in all_levels:\n raise Exception(f\"The log_level arg must be one of {list(all_levels)}\")\n os.environ[\"LOG_LEVEL\"] = log_level\n logger = logging.getLogger()\n logger.setLevel(log_level)\n for h in logger.handlers:\n h.setLevel(log_level)\n print(f\"Set logging level to {log_level}\")\n\n\ndef get_custom_logger() -> logging.Logger:\n \"\"\"\n Get a custom logger that will still print to stdout in notebooks.\n :return: the logger object\n \"\"\"\n log_level = os.getenv(\"LOG_LEVEL\", \"INFO\")\n logging.basicConfig(level=log_level)\n logger = logging.getLogger()\n logger.removeHandler(logger.handlers[0])\n ch = logging.StreamHandler(stream=sys.stdout)\n ch.setLevel(level=log_level)\n level_printout = f\"{'%(levelname)s:' if logger.level in ('WARN', 'ERROR') else ''}\"\n formatter = logging.Formatter(f\"{level_printout}%(message)s\")\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger\n\n\ndef failure(logger: logging.Logger, e, *messages):\n logger.error(\"\\n\\033[1m\\033[38;5;9mFAILED\\033[0m\\n\") # 'FAILED' in bold red\n logger.error(e.response.reason + \"\\n\")\n logger.error(e.response.text + \"\\n\")\n logger.error(f\"x-request-id: {e.response.headers.get('x-request-id')}\")\n\n if messages:\n [logger.error(m) for m in messages]\n\n\ndef success(logger: logging.Logger, *messages):\n logger.info(\"\\n\\033[1m\\033[38;5;10mSUCCESS\\033[0m\\n\") # 'SUCCESS' in bold green\n if messages:\n [logger.info(m) for m in messages]\n", "path": "notebooks/curation_api/python/src/utils/logger.py" } ]
diff --git a/notebooks/curation_api/python/src/utils/logger.py b/notebooks/curation_api/python/src/utils/logger.py index f053a08af..4c1dbd10d 100644 --- a/notebooks/curation_api/python/src/utils/logger.py +++ b/notebooks/curation_api/python/src/utils/logger.py @@ -40,6 +40,8 @@ def failure(logger: logging.Logger, e, *messages): logger.error("\n\033[1m\033[38;5;9mFAILED\033[0m\n") # 'FAILED' in bold red logger.error(e.response.reason + "\n") logger.error(e.response.text + "\n") + logger.error(f"x-request-id: {e.response.headers.get('x-request-id')}") + if messages: [logger.error(m) for m in messages]
Feat(curation notebooks): display the x-request-id in the error response

When an error occurs, especially a 500 error, the only piece of debug information the user has is the x-request-id. Making this value visible to users will help developers troubleshoot errors seen by users in the future.
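For context, here is a hedged usage sketch of the patched `failure()` helper. Only the `failure(logger, e, *messages)` signature and the `x-request-id` header name come from the record above; the import path, endpoint URL, and `fetch_collection` function are assumptions for illustration.

```python
import requests

# Import path is an assumption; the helpers live in
# notebooks/curation_api/python/src/utils/logger.py in the repo above.
from src.utils.logger import get_custom_logger, failure

logger = get_custom_logger()


def fetch_collection(collection_id: str) -> dict:
    # Hypothetical endpoint, used only to exercise the error path.
    url = f"https://api.example.org/curation/v1/collections/{collection_id}"
    res = requests.get(url)
    try:
        res.raise_for_status()
    except requests.HTTPError as e:
        # failure() now also logs e.response.headers.get("x-request-id"),
        # the value a user can quote when reporting a 500 to the developers.
        failure(logger, e, f"Could not fetch collection {collection_id}")
        raise
    return res.json()
```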
statsmodels__statsmodels-6518
[ { "content": "\"\"\"Analyze a set of multiple variables with a linear models\n\nmultiOLS:\n take a model and test it on a series of variables defined over a\n pandas dataset, returning a summary for each variable\n\nmultigroup:\n take a boolean vector and the definition of several groups of variables\n and test if the group has a fraction of true values higher than the\n rest. It allows to test if the variables in the group are significantly\n more significant than outside the group.\n\"\"\"\n\nfrom statsmodels.compat.python import iteritems\nfrom patsy import dmatrix\nimport pandas as pd\nfrom statsmodels.api import OLS\nfrom statsmodels.api import stats\nimport numpy as np\nimport logging\n\ndef _model2dataframe(model_endog, model_exog, model_type=OLS, **kwargs):\n \"\"\"return a series containing the summary of a linear model\n\n All the exceding parameters will be redirected to the linear model\n \"\"\"\n # create the linear model and perform the fit\n model_result = model_type(model_endog, model_exog, **kwargs).fit()\n # keeps track of some global statistics\n statistics = pd.Series({'r2': model_result.rsquared,\n 'adj_r2': model_result.rsquared_adj})\n # put them togher with the result for each term\n result_df = pd.DataFrame({'params': model_result.params,\n 'pvals': model_result.pvalues,\n 'std': model_result.bse,\n 'statistics': statistics})\n # add the complexive results for f-value and the total p-value\n fisher_df = pd.DataFrame({'params': {'_f_test': model_result.fvalue},\n 'pvals': {'_f_test': model_result.f_pvalue}})\n # merge them and unstack to obtain a hierarchically indexed series\n res_series = pd.concat([result_df, fisher_df]).unstack()\n return res_series.dropna()\n\n\ndef multiOLS(model, dataframe, column_list=None, method='fdr_bh',\n alpha=0.05, subset=None, model_type=OLS, **kwargs):\n \"\"\"apply a linear model to several endogenous variables on a dataframe\n\n Take a linear model definition via formula and a dataframe that will be\n the environment of the model, and apply the linear model to a subset\n (or all) of the columns of the dataframe. It will return a dataframe\n with part of the information from the linear model summary.\n\n Parameters\n ----------\n model : str\n formula description of the model\n dataframe : pandas.dataframe\n dataframe where the model will be evaluated\n column_list : list[str], optional\n Names of the columns to analyze with the model.\n If None (Default) it will perform the function on all the\n eligible columns (numerical type and not in the model definition)\n model_type : model class, optional\n The type of model to be used. The default is the linear model.\n Can be any linear model (OLS, WLS, GLS, etc..)\n method: str, optional\n the method used to perform the pvalue correction for multiple testing.\n default is the Benjamini/Hochberg, other available methods are:\n\n `bonferroni` : one-step correction\n `sidak` : on-step correction\n `holm-sidak` :\n `holm` :\n `simes-hochberg` :\n `hommel` :\n `fdr_bh` : Benjamini/Hochberg\n `fdr_by` : Benjamini/Yekutieli\n\n alpha: float, optional\n the significance level used for the pvalue correction (default 0.05)\n subset: bool array\n the selected rows to be used in the regression\n\n all the other parameters will be directed to the model creation.\n\n Returns\n -------\n summary : pandas.DataFrame\n a dataframe containing an extract from the summary of the model\n obtained for each columns. 
It will give the model complexive f test\n result and p-value, and the regression value and standard deviarion\n for each of the regressors. The DataFrame has a hierachical column\n structure, divided as:\n\n - params: contains the parameters resulting from the models. Has\n an additional column named _f_test containing the result of the\n F test.\n - pval: the pvalue results of the models. Has the _f_test column\n for the significativity of the whole test.\n - adj_pval: the corrected pvalues via the multitest function.\n - std: uncertainties of the model parameters\n - statistics: contains the r squared statistics and the adjusted\n r squared.\n\n Notes\n -----\n The main application of this function is on system biology to perform\n a linear model testing of a lot of different parameters, like the\n different genetic expression of several genes.\n\n See Also\n --------\n statsmodels.stats.multitest\n contains several functions to perform the multiple p-value correction\n\n Examples\n --------\n Using the longley data as dataframe example\n\n >>> import statsmodels.api as sm\n >>> data = sm.datasets.longley.load_pandas()\n >>> df = data.exog\n >>> df['TOTEMP'] = data.endog\n\n This will perform the specified linear model on all the\n other columns of the dataframe\n >>> multiOLS('GNP + 1', df)\n\n This select only a certain subset of the columns\n >>> multiOLS('GNP + 0', df, ['GNPDEFL', 'TOTEMP', 'POP'])\n\n It is possible to specify a trasformation also on the target column,\n conforming to the patsy formula specification\n >>> multiOLS('GNP + 0', df, ['I(GNPDEFL**2)', 'center(TOTEMP)'])\n\n It is possible to specify the subset of the dataframe\n on which perform the analysis\n >> multiOLS('GNP + 1', df, subset=df.GNPDEFL > 90)\n\n Even a single column name can be given without enclosing it in a list\n >>> multiOLS('GNP + 0', df, 'GNPDEFL')\n \"\"\"\n # data normalization\n # if None take all the numerical columns that are not present in the model\n # it's not waterproof but is a good enough criterion for everyday use\n if column_list is None:\n column_list = [name for name in dataframe.columns\n if dataframe[name].dtype != object and name not in model]\n # if it's a single string transform it in a single element list\n if isinstance(column_list, str):\n column_list = [column_list]\n if subset is not None:\n dataframe = dataframe.loc[subset]\n # perform each model and retrieve the statistics\n col_results = {}\n # as the model will use always the same endogenous variables\n # we can create them once and reuse\n model_exog = dmatrix(model, data=dataframe, return_type=\"dataframe\")\n for col_name in column_list:\n # it will try to interpret the column name as a valid dataframe\n # index as it can be several times faster. 
If it fails it\n # interpret it as a patsy formula (for example for centering)\n try:\n model_endog = dataframe[col_name]\n except KeyError:\n model_endog = dmatrix(col_name + ' + 0', data=dataframe)\n # retrieve the result and store them\n res = _model2dataframe(model_endog, model_exog, model_type, **kwargs)\n col_results[col_name] = res\n # mangle them togheter and sort by complexive p-value\n summary = pd.DataFrame(col_results)\n # order by the p-value: the most useful model first!\n summary = summary.T.sort_values([('pvals', '_f_test')])\n summary.index.name = 'endogenous vars'\n # implementing the pvalue correction method\n smt = stats.multipletests\n for (key1, key2) in summary:\n if key1 != 'pvals':\n continue\n p_values = summary[key1, key2]\n corrected = smt(p_values, method=method, alpha=alpha)[1]\n # extend the dataframe of results with the column\n # of the corrected p_values\n summary['adj_' + key1, key2] = corrected\n return summary\n\n\ndef _test_group(pvalues, group_name, group, exact=True):\n \"\"\"test if the objects in the group are different from the general set.\n\n The test is performed on the pvalues set (ad a pandas series) over\n the group specified via a fisher exact test.\n \"\"\"\n from scipy.stats import fisher_exact, chi2_contingency\n\n totals = 1.0 * len(pvalues)\n total_significant = 1.0 * np.sum(pvalues)\n cross_index = [c for c in group if c in pvalues.index]\n missing = [c for c in group if c not in pvalues.index]\n if missing:\n s = ('the test is not well defined if the group '\n 'has elements not presents in the significativity '\n 'array. group name: {}, missing elements: {}')\n logging.warning(s.format(group_name, missing))\n # how many are significant and not in the group\n group_total = 1.0 * len(cross_index)\n group_sign = 1.0 * len([c for c in cross_index if pvalues[c]])\n group_nonsign = 1.0 * (group_total - group_sign)\n # how many are significant and not outside the group\n extern_sign = 1.0 * (total_significant - group_sign)\n extern_nonsign = 1.0 * (totals - total_significant - group_nonsign)\n # make the fisher test or the chi squared\n test = fisher_exact if exact else chi2_contingency\n table = [[extern_nonsign, extern_sign], [group_nonsign, group_sign]]\n pvalue = test(np.array(table))[1]\n # is the group more represented or less?\n part = group_sign, group_nonsign, extern_sign, extern_nonsign\n #increase = (group_sign / group_total) > (total_significant / totals)\n increase = np.log((totals * group_sign)\n / (total_significant * group_total))\n return pvalue, increase, part\n\n\ndef multigroup(pvals, groups, exact=True, keep_all=True, alpha=0.05):\n \"\"\"Test if the given groups are different from the total partition.\n\n Given a boolean array test if each group has a proportion of positives\n different than the complexive proportion.\n The test can be done as an exact Fisher test or approximated as a\n Chi squared test for more speed.\n\n Parameters\n ----------\n pvals: pandas series of boolean\n the significativity of the variables under analysis\n groups: dict of list\n the name of each category of variables under exam.\n each one is a list of the variables included\n exact: bool, optional\n If True (default) use the fisher exact test, otherwise\n use the chi squared test for contingencies tables.\n For high number of elements in the array the fisher test can\n be significantly slower than the chi squared.\n keep_all: bool, optional\n if False it will drop those groups where the fraction\n of positive is below the expected result. 
If True (default)\n it will keep all the significant results.\n alpha: float, optional\n the significativity level for the pvalue correction\n on the whole set of groups (not inside the groups themselves).\n\n Returns\n -------\n result_df: pandas dataframe\n for each group returns:\n\n pvals - the fisher p value of the test\n adj_pvals - the adjusted pvals\n increase - the log of the odd ratio between the\n internal significant ratio versus the external one\n _in_sign - significative elements inside the group\n _in_non - non significative elements inside the group\n _out_sign - significative elements outside the group\n _out_non - non significative elements outside the group\n\n Notes\n -----\n This test allow to see if a category of variables is generally better\n suited to be described for the model. For example to see if a predictor\n gives more information on demographic or economical parameters,\n by creating two groups containing the endogenous variables of each\n category.\n\n This function is conceived for medical dataset with a lot of variables\n that can be easily grouped into functional groups. This is because\n The significativity of a group require a rather large number of\n composing elements.\n\n Examples\n --------\n A toy example on a real dataset, the Guerry dataset from R\n >>> url = \"https://raw.githubusercontent.com/vincentarelbundock/\"\n >>> url = url + \"Rdatasets/csv/HistData/Guerry.csv\"\n >>> df = pd.read_csv(url, index_col='dept')\n\n evaluate the relationship between the variuos paramenters whith the Wealth\n >>> pvals = multiOLS('Wealth', df)['adj_pvals', '_f_test']\n\n define the groups\n >>> groups = {}\n >>> groups['crime'] = ['Crime_prop', 'Infanticide',\n ... 'Crime_parents', 'Desertion', 'Crime_pers']\n >>> groups['religion'] = ['Donation_clergy', 'Clergy', 'Donations']\n >>> groups['wealth'] = ['Commerce', 'Lottery', 'Instruction', 'Literacy']\n\n do the analysis of the significativity\n >>> multigroup(pvals < 0.05, groups)\n \"\"\"\n pvals = pd.Series(pvals)\n if not (set(pvals.unique()) <= set([False, True])):\n raise ValueError(\"the series should be binary\")\n if hasattr(pvals.index, 'is_unique') and not pvals.index.is_unique:\n raise ValueError(\"series with duplicated index is not accepted\")\n results = {'pvals': {},\n 'increase': {},\n '_in_sign': {},\n '_in_non': {},\n '_out_sign': {},\n '_out_non': {}}\n for group_name, group_list in iteritems(groups):\n res = _test_group(pvals, group_name, group_list, exact)\n results['pvals'][group_name] = res[0]\n results['increase'][group_name] = res[1]\n results['_in_sign'][group_name] = res[2][0]\n results['_in_non'][group_name] = res[2][1]\n results['_out_sign'][group_name] = res[2][2]\n results['_out_non'][group_name] = res[2][3]\n result_df = pd.DataFrame(results).sort_values('pvals')\n if not keep_all:\n result_df = result_df[result_df.increase]\n smt = stats.multipletests\n corrected = smt(result_df['pvals'], method='fdr_bh', alpha=alpha)[1]\n result_df['adj_pvals'] = corrected\n return result_df\n", "path": "statsmodels/sandbox/multilinear.py" } ]
[ { "content": "\"\"\"Analyze a set of multiple variables with a linear models\n\nmultiOLS:\n take a model and test it on a series of variables defined over a\n pandas dataset, returning a summary for each variable\n\nmultigroup:\n take a boolean vector and the definition of several groups of variables\n and test if the group has a fraction of true values higher than the\n rest. It allows to test if the variables in the group are significantly\n more significant than outside the group.\n\"\"\"\n\nfrom statsmodels.compat.python import iteritems\nfrom patsy import dmatrix\nimport pandas as pd\nfrom statsmodels.api import OLS\nfrom statsmodels.api import stats\nimport numpy as np\nimport logging\n\ndef _model2dataframe(model_endog, model_exog, model_type=OLS, **kwargs):\n \"\"\"return a series containing the summary of a linear model\n\n All the exceding parameters will be redirected to the linear model\n \"\"\"\n # create the linear model and perform the fit\n model_result = model_type(model_endog, model_exog, **kwargs).fit()\n # keeps track of some global statistics\n statistics = pd.Series({'r2': model_result.rsquared,\n 'adj_r2': model_result.rsquared_adj})\n # put them togher with the result for each term\n result_df = pd.DataFrame({'params': model_result.params,\n 'pvals': model_result.pvalues,\n 'std': model_result.bse,\n 'statistics': statistics})\n # add the complexive results for f-value and the total p-value\n fisher_df = pd.DataFrame({'params': {'_f_test': model_result.fvalue},\n 'pvals': {'_f_test': model_result.f_pvalue}})\n # merge them and unstack to obtain a hierarchically indexed series\n res_series = pd.concat([result_df, fisher_df]).unstack()\n return res_series.dropna()\n\n\ndef multiOLS(model, dataframe, column_list=None, method='fdr_bh',\n alpha=0.05, subset=None, model_type=OLS, **kwargs):\n \"\"\"apply a linear model to several endogenous variables on a dataframe\n\n Take a linear model definition via formula and a dataframe that will be\n the environment of the model, and apply the linear model to a subset\n (or all) of the columns of the dataframe. It will return a dataframe\n with part of the information from the linear model summary.\n\n Parameters\n ----------\n model : str\n formula description of the model\n dataframe : pandas.dataframe\n dataframe where the model will be evaluated\n column_list : list[str], optional\n Names of the columns to analyze with the model.\n If None (Default) it will perform the function on all the\n eligible columns (numerical type and not in the model definition)\n model_type : model class, optional\n The type of model to be used. The default is the linear model.\n Can be any linear model (OLS, WLS, GLS, etc..)\n method: str, optional\n the method used to perform the pvalue correction for multiple testing.\n default is the Benjamini/Hochberg, other available methods are:\n\n `bonferroni` : one-step correction\n `sidak` : on-step correction\n `holm-sidak` :\n `holm` :\n `simes-hochberg` :\n `hommel` :\n `fdr_bh` : Benjamini/Hochberg\n `fdr_by` : Benjamini/Yekutieli\n\n alpha: float, optional\n the significance level used for the pvalue correction (default 0.05)\n subset: bool array\n the selected rows to be used in the regression\n\n all the other parameters will be directed to the model creation.\n\n Returns\n -------\n summary : pandas.DataFrame\n a dataframe containing an extract from the summary of the model\n obtained for each columns. 
It will give the model complexive f test\n result and p-value, and the regression value and standard deviarion\n for each of the regressors. The DataFrame has a hierachical column\n structure, divided as:\n\n - params: contains the parameters resulting from the models. Has\n an additional column named _f_test containing the result of the\n F test.\n - pval: the pvalue results of the models. Has the _f_test column\n for the significativity of the whole test.\n - adj_pval: the corrected pvalues via the multitest function.\n - std: uncertainties of the model parameters\n - statistics: contains the r squared statistics and the adjusted\n r squared.\n\n Notes\n -----\n The main application of this function is on system biology to perform\n a linear model testing of a lot of different parameters, like the\n different genetic expression of several genes.\n\n See Also\n --------\n statsmodels.stats.multitest\n contains several functions to perform the multiple p-value correction\n\n Examples\n --------\n Using the longley data as dataframe example\n\n >>> import statsmodels.api as sm\n >>> data = sm.datasets.longley.load_pandas()\n >>> df = data.exog\n >>> df['TOTEMP'] = data.endog\n\n This will perform the specified linear model on all the\n other columns of the dataframe\n >>> multiOLS('GNP + 1', df)\n\n This select only a certain subset of the columns\n >>> multiOLS('GNP + 0', df, ['GNPDEFL', 'TOTEMP', 'POP'])\n\n It is possible to specify a trasformation also on the target column,\n conforming to the patsy formula specification\n >>> multiOLS('GNP + 0', df, ['I(GNPDEFL**2)', 'center(TOTEMP)'])\n\n It is possible to specify the subset of the dataframe\n on which perform the analysis\n >> multiOLS('GNP + 1', df, subset=df.GNPDEFL > 90)\n\n Even a single column name can be given without enclosing it in a list\n >>> multiOLS('GNP + 0', df, 'GNPDEFL')\n \"\"\"\n # data normalization\n # if None take all the numerical columns that are not present in the model\n # it's not waterproof but is a good enough criterion for everyday use\n if column_list is None:\n column_list = [name for name in dataframe.columns\n if dataframe[name].dtype != object and name not in model]\n # if it's a single string transform it in a single element list\n if isinstance(column_list, str):\n column_list = [column_list]\n if subset is not None:\n dataframe = dataframe.loc[subset]\n # perform each model and retrieve the statistics\n col_results = {}\n # as the model will use always the same endogenous variables\n # we can create them once and reuse\n model_exog = dmatrix(model, data=dataframe, return_type=\"dataframe\")\n for col_name in column_list:\n # it will try to interpret the column name as a valid dataframe\n # index as it can be several times faster. 
If it fails it\n # interpret it as a patsy formula (for example for centering)\n try:\n model_endog = dataframe[col_name]\n except KeyError:\n model_endog = dmatrix(col_name + ' + 0', data=dataframe)\n # retrieve the result and store them\n res = _model2dataframe(model_endog, model_exog, model_type, **kwargs)\n col_results[col_name] = res\n # mangle them togheter and sort by complexive p-value\n summary = pd.DataFrame(col_results)\n # order by the p-value: the most useful model first!\n summary = summary.T.sort_values([('pvals', '_f_test')])\n summary.index.name = 'endogenous vars'\n # implementing the pvalue correction method\n smt = stats.multipletests\n for (key1, key2) in summary:\n if key1 != 'pvals':\n continue\n p_values = summary[key1, key2]\n corrected = smt(p_values, method=method, alpha=alpha)[1]\n # extend the dataframe of results with the column\n # of the corrected p_values\n summary['adj_' + key1, key2] = corrected\n return summary\n\n\ndef _test_group(pvalues, group_name, group, exact=True):\n \"\"\"test if the objects in the group are different from the general set.\n\n The test is performed on the pvalues set (ad a pandas series) over\n the group specified via a fisher exact test.\n \"\"\"\n from scipy.stats import fisher_exact, chi2_contingency\n\n totals = 1.0 * len(pvalues)\n total_significant = 1.0 * np.sum(pvalues)\n cross_index = [c for c in group if c in pvalues.index]\n missing = [c for c in group if c not in pvalues.index]\n if missing:\n s = ('the test is not well defined if the group '\n 'has elements not presents in the significativity '\n 'array. group name: {}, missing elements: {}')\n logging.warning(s.format(group_name, missing))\n # how many are significant and not in the group\n group_total = 1.0 * len(cross_index)\n group_sign = 1.0 * len([c for c in cross_index if pvalues[c]])\n group_nonsign = 1.0 * (group_total - group_sign)\n # how many are significant and not outside the group\n extern_sign = 1.0 * (total_significant - group_sign)\n extern_nonsign = 1.0 * (totals - total_significant - group_nonsign)\n # make the fisher test or the chi squared\n test = fisher_exact if exact else chi2_contingency\n table = [[extern_nonsign, extern_sign], [group_nonsign, group_sign]]\n pvalue = test(np.array(table))[1]\n # is the group more represented or less?\n part = group_sign, group_nonsign, extern_sign, extern_nonsign\n #increase = (group_sign / group_total) > (total_significant / totals)\n increase = np.log((totals * group_sign)\n / (total_significant * group_total))\n return pvalue, increase, part\n\n\ndef multigroup(pvals, groups, exact=True, keep_all=True, alpha=0.05):\n \"\"\"Test if the given groups are different from the total partition.\n\n Given a boolean array test if each group has a proportion of positives\n different than the complexive proportion.\n The test can be done as an exact Fisher test or approximated as a\n Chi squared test for more speed.\n\n Parameters\n ----------\n pvals: pandas series of boolean\n the significativity of the variables under analysis\n groups: dict of list\n the name of each category of variables under exam.\n each one is a list of the variables included\n exact: bool, optional\n If True (default) use the fisher exact test, otherwise\n use the chi squared test for contingencies tables.\n For high number of elements in the array the fisher test can\n be significantly slower than the chi squared.\n keep_all: bool, optional\n if False it will drop those groups where the fraction\n of positive is below the expected result. 
If True (default)\n it will keep all the significant results.\n alpha: float, optional\n the significativity level for the pvalue correction\n on the whole set of groups (not inside the groups themselves).\n\n Returns\n -------\n result_df: pandas dataframe\n for each group returns:\n\n pvals - the fisher p value of the test\n adj_pvals - the adjusted pvals\n increase - the log of the odd ratio between the\n internal significant ratio versus the external one\n _in_sign - significative elements inside the group\n _in_non - non significative elements inside the group\n _out_sign - significative elements outside the group\n _out_non - non significative elements outside the group\n\n Notes\n -----\n This test allow to see if a category of variables is generally better\n suited to be described for the model. For example to see if a predictor\n gives more information on demographic or economical parameters,\n by creating two groups containing the endogenous variables of each\n category.\n\n This function is conceived for medical dataset with a lot of variables\n that can be easily grouped into functional groups. This is because\n The significativity of a group require a rather large number of\n composing elements.\n\n Examples\n --------\n A toy example on a real dataset, the Guerry dataset from R\n >>> url = \"https://raw.githubusercontent.com/vincentarelbundock/\"\n >>> url = url + \"Rdatasets/csv/HistData/Guerry.csv\"\n >>> df = pd.read_csv(url, index_col='dept')\n\n evaluate the relationship between the various paramenters whith the Wealth\n >>> pvals = multiOLS('Wealth', df)['adj_pvals', '_f_test']\n\n define the groups\n >>> groups = {}\n >>> groups['crime'] = ['Crime_prop', 'Infanticide',\n ... 'Crime_parents', 'Desertion', 'Crime_pers']\n >>> groups['religion'] = ['Donation_clergy', 'Clergy', 'Donations']\n >>> groups['wealth'] = ['Commerce', 'Lottery', 'Instruction', 'Literacy']\n\n do the analysis of the significativity\n >>> multigroup(pvals < 0.05, groups)\n \"\"\"\n pvals = pd.Series(pvals)\n if not (set(pvals.unique()) <= set([False, True])):\n raise ValueError(\"the series should be binary\")\n if hasattr(pvals.index, 'is_unique') and not pvals.index.is_unique:\n raise ValueError(\"series with duplicated index is not accepted\")\n results = {'pvals': {},\n 'increase': {},\n '_in_sign': {},\n '_in_non': {},\n '_out_sign': {},\n '_out_non': {}}\n for group_name, group_list in iteritems(groups):\n res = _test_group(pvals, group_name, group_list, exact)\n results['pvals'][group_name] = res[0]\n results['increase'][group_name] = res[1]\n results['_in_sign'][group_name] = res[2][0]\n results['_in_non'][group_name] = res[2][1]\n results['_out_sign'][group_name] = res[2][2]\n results['_out_non'][group_name] = res[2][3]\n result_df = pd.DataFrame(results).sort_values('pvals')\n if not keep_all:\n result_df = result_df[result_df.increase]\n smt = stats.multipletests\n corrected = smt(result_df['pvals'], method='fdr_bh', alpha=alpha)[1]\n result_df['adj_pvals'] = corrected\n return result_df\n", "path": "statsmodels/sandbox/multilinear.py" } ]
diff --git a/statsmodels/sandbox/multilinear.py b/statsmodels/sandbox/multilinear.py index 73012273927..4ac86aff154 100644 --- a/statsmodels/sandbox/multilinear.py +++ b/statsmodels/sandbox/multilinear.py @@ -284,7 +284,7 @@ def multigroup(pvals, groups, exact=True, keep_all=True, alpha=0.05): >>> url = url + "Rdatasets/csv/HistData/Guerry.csv" >>> df = pd.read_csv(url, index_col='dept') - evaluate the relationship between the variuos paramenters whith the Wealth + evaluate the relationship between the various paramenters whith the Wealth >>> pvals = multiOLS('Wealth', df)['adj_pvals', '_f_test'] define the groups
Fix simple typo: variuos -> various

# Issue Type

[x] Bug (Typo)

# Steps to Replicate

1. Examine statsmodels/sandbox/multilinear.py.
2. Search for `variuos`.

# Expected Behaviour

1. Should read `various`.
holoviz__panel-645
[ { "content": "\"\"\"\nUtilities for creating bokeh Server instances.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport signal\nimport threading\n\nfrom functools import partial\n\nfrom bokeh.server.server import Server\n\nfrom .state import state\n\n\n#---------------------------------------------------------------------\n# Private API\n#---------------------------------------------------------------------\n\ndef _origin_url(url):\n if url.startswith(\"http\"):\n url = url.split(\"//\")[1]\n return url\n\n\ndef _server_url(url, port):\n if url.startswith(\"http\"):\n return '%s:%d%s' % (url.rsplit(':', 1)[0], port, \"/\")\n else:\n return 'http://%s:%d%s' % (url.split(':')[0], port, \"/\")\n\n#---------------------------------------------------------------------\n# Public API\n#---------------------------------------------------------------------\n\ndef get_server(panel, port=0, websocket_origin=None, loop=None,\n show=False, start=False, **kwargs):\n \"\"\"\n Returns a Server instance with this panel attached as the root\n app.\n\n Arguments\n ---------\n port: int (optional, default=0)\n Allows specifying a specific port\n websocket_origin: str or list(str) (optional)\n A list of hosts that can connect to the websocket.\n\n This is typically required when embedding a server app in\n an external web site.\n\n If None, \"localhost\" is used.\n loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())\n The tornado IOLoop to run the Server on\n show : boolean (optional, default=False)\n Whether to open the server in a new browser tab on start\n start : boolean(optional, default=False)\n Whether to start the Server\n kwargs: dict\n Additional keyword arguments to pass to Server instance\n\n Returns\n -------\n server : bokeh.server.server.Server\n Bokeh Server instance running this panel\n \"\"\"\n from tornado.ioloop import IOLoop\n opts = dict(kwargs)\n if loop:\n loop.make_current()\n opts['io_loop'] = loop\n else:\n opts['io_loop'] = IOLoop.current()\n\n if websocket_origin:\n if not isinstance(websocket_origin, list):\n websocket_origin = [websocket_origin]\n opts['allow_websocket_origin'] = websocket_origin\n\n server_id = kwargs.pop('server_id', None)\n server = Server({'/': partial(panel._modify_doc, server_id)}, port=port, **opts)\n if server_id:\n state._servers[server_id] = (server, panel, [])\n\n if show:\n def show_callback():\n server.show('/')\n server.io_loop.add_callback(show_callback)\n\n def sig_exit(*args, **kwargs):\n server.io_loop.add_callback_from_signal(do_stop)\n\n def do_stop(*args, **kwargs):\n server.io_loop.stop()\n\n try:\n signal.signal(signal.SIGINT, sig_exit)\n except ValueError:\n pass # Can't use signal on a thread\n\n if start:\n server.start()\n try:\n server.io_loop.start()\n except RuntimeError:\n pass\n return server\n\n\nclass StoppableThread(threading.Thread):\n \"\"\"Thread class with a stop() method.\"\"\"\n\n def __init__(self, io_loop=None, timeout=1000, **kwargs):\n from tornado import ioloop\n super(StoppableThread, self).__init__(**kwargs)\n self._stop_event = threading.Event()\n self.io_loop = io_loop\n self._cb = ioloop.PeriodicCallback(self._check_stopped, timeout)\n self._cb.start()\n\n def _check_stopped(self):\n if self.stopped:\n self._cb.stop()\n self.io_loop.stop()\n\n def stop(self):\n self._stop_event.set()\n\n @property\n def stopped(self):\n return self._stop_event.is_set()\n", "path": "panel/io/server.py" } ]
[ { "content": "\"\"\"\nUtilities for creating bokeh Server instances.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport signal\nimport threading\n\nfrom functools import partial\n\nfrom bokeh.server.server import Server\n\nfrom .state import state\n\n\n#---------------------------------------------------------------------\n# Private API\n#---------------------------------------------------------------------\n\ndef _origin_url(url):\n if url.startswith(\"http\"):\n url = url.split(\"//\")[1]\n return url\n\n\ndef _server_url(url, port):\n if url.startswith(\"http\"):\n return '%s:%d%s' % (url.rsplit(':', 1)[0], port, \"/\")\n else:\n return 'http://%s:%d%s' % (url.split(':')[0], port, \"/\")\n\n#---------------------------------------------------------------------\n# Public API\n#---------------------------------------------------------------------\n\ndef get_server(panel, port=0, websocket_origin=None, loop=None,\n show=False, start=False, **kwargs):\n \"\"\"\n Returns a Server instance with this panel attached as the root\n app.\n\n Arguments\n ---------\n port: int (optional, default=0)\n Allows specifying a specific port\n websocket_origin: str or list(str) (optional)\n A list of hosts that can connect to the websocket.\n\n This is typically required when embedding a server app in\n an external web site.\n\n If None, \"localhost\" is used.\n loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())\n The tornado IOLoop to run the Server on\n show : boolean (optional, default=False)\n Whether to open the server in a new browser tab on start\n start : boolean(optional, default=False)\n Whether to start the Server\n kwargs: dict\n Additional keyword arguments to pass to Server instance\n\n Returns\n -------\n server : bokeh.server.server.Server\n Bokeh Server instance running this panel\n \"\"\"\n from tornado.ioloop import IOLoop\n opts = dict(kwargs)\n if loop:\n loop.make_current()\n opts['io_loop'] = loop\n else:\n opts['io_loop'] = IOLoop.current()\n\n if websocket_origin:\n if not isinstance(websocket_origin, list):\n websocket_origin = [websocket_origin]\n opts['allow_websocket_origin'] = websocket_origin\n\n server_id = kwargs.pop('server_id', None)\n server = Server({'/': partial(panel._modify_doc, server_id)}, port=port, **opts)\n if server_id:\n state._servers[server_id] = (server, panel, [])\n\n if show:\n def show_callback():\n server.show('/')\n server.io_loop.add_callback(show_callback)\n\n def sig_exit(*args, **kwargs):\n server.io_loop.add_callback_from_signal(do_stop)\n\n def do_stop(*args, **kwargs):\n server.io_loop.stop()\n\n try:\n signal.signal(signal.SIGINT, sig_exit)\n except ValueError:\n pass # Can't use signal on a thread\n\n if start:\n server.start()\n try:\n server.io_loop.start()\n except RuntimeError:\n pass\n return server\n\n\nclass StoppableThread(threading.Thread):\n \"\"\"Thread class with a stop() method.\"\"\"\n\n def __init__(self, io_loop=None, timeout=1000, **kwargs):\n from tornado import ioloop\n super(StoppableThread, self).__init__(**kwargs)\n self._stop_event = threading.Event()\n self.io_loop = io_loop\n self._cb = ioloop.PeriodicCallback(self._check_stopped, timeout)\n self._cb.start()\n\n def _check_stopped(self):\n if self.stopped:\n self._cb.stop()\n self.io_loop.stop()\n \n def run(self):\n try:\n if self._target:\n bokeh_server = self._target(*self._args, **self._kwargs)\n finally:\n if isinstance(bokeh_server, Server):\n bokeh_server.stop()\n del self._target, self._args, self._kwargs\n\n 
def stop(self):\n self._stop_event.set()\n\n @property\n def stopped(self):\n return self._stop_event.is_set()\n", "path": "panel/io/server.py" } ]
diff --git a/panel/io/server.py b/panel/io/server.py index 1231230592..784bbfc67c 100644 --- a/panel/io/server.py +++ b/panel/io/server.py @@ -122,6 +122,15 @@ def _check_stopped(self): if self.stopped: self._cb.stop() self.io_loop.stop() + + def run(self): + try: + if self._target: + bokeh_server = self._target(*self._args, **self._kwargs) + finally: + if isinstance(bokeh_server, Server): + bokeh_server.stop() + del self._target, self._args, self._kwargs def stop(self): self._stop_event.set()
Port not being released after stopping threaded holoviz panel app server

Closing a threaded panel app holds on to the port it started on. This is different behavior than closing an app initialized without threading.

```
usgs_logo = pn.panel('../assets/usgs_logo.png', height=130)
column = pn.Column(usgs_logo)
app = column.show(port=8889)
app.stop()
```

Port 8889 is released.

```
app = row.show(port=8889, threaded=True)
app.stop()
```

Port 8889 is not released.
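The patch in the diff above addresses this by overriding `StoppableThread.run` so that the bokeh `Server` built by the thread's target is stopped when the target returns, which shuts down the underlying HTTP server and frees the port. A simplified, hedged sketch of that pattern follows; the class and `server_factory` names are illustrative, not Panel's actual API.

```python
import threading


class StoppableServerThread(threading.Thread):
    """Illustration of the fix: guarantee the server is stopped when the
    thread's work finishes, so the port is released for threaded apps too."""

    def __init__(self, server_factory, **kwargs):
        # server_factory plays the role of the thread target that builds and
        # runs the bokeh Server (e.g. a get_server(..., start=True) call).
        super().__init__(**kwargs)
        self._server_factory = server_factory
        self._stop_event = threading.Event()

    def run(self):
        server = None
        try:
            server = self._server_factory()
        finally:
            # Stopping the server closes its listeners, which is what
            # actually releases the bound port.
            if server is not None and hasattr(server, "stop"):
                server.stop()

    def stop(self):
        self._stop_event.set()

    @property
    def stopped(self):
        return self._stop_event.is_set()
```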
hylang__hy-2328
[ { "content": "\"Character reader for parsing Hy source.\"\n\nimport hy\nfrom hy.models import (\n Bytes,\n Complex,\n Dict,\n Expression,\n FComponent,\n Float,\n FString,\n Integer,\n Keyword,\n List,\n Set,\n String,\n Symbol,\n Tuple,\n as_model,\n)\n\nfrom .exceptions import LexException, PrematureEndOfInput\nfrom .mangling import mangle\nfrom .reader import Reader, isnormalizedspace\n\n\ndef sym(name):\n return Symbol(name, from_parser=True)\n\n\n# Note: This is subtly different from\n# the `mkexpr` in hy/compiler.py !\ndef mkexpr(root, *args):\n return Expression((sym(root) if isinstance(root, str) else root, *args))\n\n\ndef symbol_like(ident, reader=None):\n \"\"\"Generate a Hy model from an identifier-like string.\n\n Also verifies the syntax of dot notation and validity of symbol names.\n\n Parameters\n ----------\n ident : str\n Text to convert.\n\n reader : Reader, optional\n The reader to use, if any; used for generating position data for errors.\n\n Returns\n -------\n out : a hy.models.Object subtype corresponding to the parsed text.\n \"\"\"\n try:\n return Integer(ident)\n except ValueError:\n pass\n try:\n return Float(ident)\n except ValueError:\n pass\n if ident not in (\"j\", \"J\"):\n try:\n return Complex(ident)\n except ValueError:\n pass\n\n if \".\" in ident:\n for chunk in ident.split(\".\"):\n if chunk and not isinstance(symbol_like(chunk, reader=reader), Symbol):\n msg = (\n \"Cannot access attribute on anything other\"\n \" than a name (in order to get attributes of expressions,\"\n \" use `(. <expression> <attr>)` or `(.<attr> <expression>)`)\"\n )\n if reader is None:\n raise ValueError(msg)\n else:\n raise LexException.from_reader(msg, reader)\n\n if reader is None:\n if (\n not ident\n or ident[:1] == \":\"\n or any(isnormalizedspace(c) for c in ident)\n or HyReader.NON_IDENT.intersection(ident)\n ):\n raise ValueError(f\"Syntactically illegal symbol: {ident!r}\")\n\n return sym(ident)\n\n\nclass HyReader(Reader):\n \"\"\"A modular reader for Hy source.\"\"\"\n\n ###\n # Components necessary for Reader implementation\n ###\n\n NON_IDENT = set(\"()[]{};\\\"'\")\n\n def fill_pos(self, model, start):\n \"\"\"Attach line/col information to a model.\n\n Sets the end location of `model` to the current cursor position.\n\n Args:\n model (hy.models.Object): model to set line/col info for.\n start (tuple[int, int]): (line, column) tuple indicating the start\n location to assign to `model`.\n \"\"\"\n model.start_line, model.start_column = start\n model.end_line, model.end_column = self.pos\n return model\n\n def read_default(self, key):\n \"\"\"Default reader handler when nothing in the table matches.\n\n Try to read an identifier/symbol. If there's a double-quote immediately\n following, then parse it as a string with the given prefix (e.g.,\n `r\"...\"`). Otherwise, parse it as a symbol-like.\n \"\"\"\n ident = key + self.read_ident()\n if self.peek_and_getc('\"'):\n return self.prefixed_string('\"', ident)\n return symbol_like(ident, reader=self)\n\n def parse(self, stream, filename=None):\n \"\"\"Yields all `hy.models.Object`'s in `source`\n\n Additionally exposes `self` as ``hy.&reader`` during read/compile time.\n\n Args:\n source:\n Hy source to be parsed.\n filename (str | None):\n Filename to use for error messages. 
If `None` then previously\n set filename is used.\n \"\"\"\n self._set_source(stream, filename)\n rname = mangle(\"&reader\")\n old_reader = getattr(hy, rname, None)\n setattr(hy, rname, self)\n\n try:\n yield from self.parse_forms_until(\"\")\n finally:\n if old_reader is None:\n delattr(hy, rname)\n else:\n setattr(hy, rname, old_reader)\n\n ###\n # Reading forms\n ###\n\n def try_parse_one_form(self):\n \"\"\"Attempt to parse a single Hy form.\n\n Read one (non-space) character from the stream, then call the\n corresponding handler.\n\n Returns:\n hy.models.Object | None:\n Model optionally returned by the called handler. Handlers may\n return `None` to signify no parsed form (e.g., for comments).\n\n Raises:\n PrematureEndOfInput: If the reader hits the end of the file before\n fully parsing a form.\n LexException: If there is an error during form parsing.\n \"\"\"\n try:\n self.slurp_space()\n c = self.getc()\n start = self._pos\n if not c:\n raise PrematureEndOfInput.from_reader(\n \"Premature end of input while attempting to parse one form\", self\n )\n handler = self.reader_table.get(c)\n model = handler(self, c) if handler else self.read_default(c)\n return self.fill_pos(model, start) if model is not None else None\n except LexException:\n raise\n except Exception as e:\n raise LexException.from_reader(\n str(e) or \"Exception thrown attempting to parse one form\", self\n )\n\n def parse_one_form(self):\n \"\"\"Read from the stream until a form is parsed.\n\n Guaranteed to return a model (i.e., skips over comments).\n\n Returns:\n hy.models.Object\n \"\"\"\n model = None\n while model is None:\n model = self.try_parse_one_form()\n return model\n\n def parse_forms_until(self, closer):\n \"\"\"Yields `hy.models.Object`'s until character `closer` is seen.\n\n Useful for reading a sequence such as s-exprs or lists.\n \"\"\"\n while True:\n self.slurp_space()\n if self.peek_and_getc(closer):\n break\n model = self.try_parse_one_form()\n if model is not None:\n yield model\n\n ###\n # Basic atoms\n ###\n\n @reader_for(\")\")\n @reader_for(\"]\")\n @reader_for(\"}\")\n def INVALID(self, key):\n raise LexException.from_reader(\n f\"Ran into a '{key}' where it wasn't expected.\", self\n )\n\n @reader_for(\";\")\n def line_comment(self, _):\n any(c == \"\\n\" for c in self.chars(eof_ok=True))\n return None\n\n @reader_for(\":\")\n def keyword(self, _):\n ident = self.read_ident()\n if \".\" in ident:\n raise LexException.from_reader(\n \"Cannot access attribute on anything other\"\n \" than a name (in order to get attributes of expressions,\"\n \" use `(. 
<expression> <attr>)` or `(.<attr> <expression>)`)\",\n self,\n )\n return Keyword(ident, from_parser=True)\n\n @reader_for('\"')\n def prefixed_string(self, _, prefix=\"\"):\n prefix_chars = set(prefix)\n if (\n len(prefix_chars) != len(prefix)\n or prefix_chars - set(\"bfr\")\n or set(\"bf\") <= prefix_chars\n ):\n raise LexException.from_reader(f\"invalid string prefix {prefix!r}\", self)\n\n escaping = False\n\n def quote_closing(c):\n nonlocal escaping\n if c == \"\\\\\":\n escaping = not escaping\n return 0\n if c == '\"' and not escaping:\n return 1\n if (\n escaping\n and \"r\" not in prefix\n and\n # https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals\n c\n not in (\"\\n\\r\\\\'\\\"abfnrtv01234567x\" + (\"\" if \"b\" in prefix else \"NuU\"))\n ):\n raise LexException.from_reader(\"invalid escape sequence \\\\\" + c, self)\n escaping = False\n return 0\n\n return self.read_string_until(quote_closing, prefix, \"f\" in prefix.lower())\n\n ###\n # Special annotations\n ###\n\n @reader_for(\"'\", (\"quote\",))\n @reader_for(\"`\", (\"quasiquote\",))\n def tag_as(root):\n def _tag_as(self, _):\n nc = self.peekc()\n if (\n not nc\n or isnormalizedspace(nc)\n or self.reader_table.get(nc) == self.INVALID\n ):\n raise LexException.from_reader(\n \"Could not identify the next token.\", self\n )\n model = self.parse_one_form()\n return mkexpr(root, model)\n\n return _tag_as\n\n @reader_for(\"~\")\n def unquote(self, key):\n nc = self.peekc()\n if not nc or isnormalizedspace(nc) or self.reader_table.get(nc) == self.INVALID:\n return sym(key)\n return mkexpr(\n \"unquote\" + (\"-splice\" if self.peek_and_getc(\"@\") else \"\"),\n self.parse_one_form(),\n )\n\n ###\n # Sequences\n ###\n\n @reader_for(\"(\", (Expression, \")\"))\n @reader_for(\"[\", (List, \"]\"))\n @reader_for(\"{\", (Dict, \"}\"))\n @reader_for(\"#{\", (Set, \"}\"))\n @reader_for(\"#(\", (Tuple, \")\"))\n def sequence(seq_type, closer):\n return lambda self, _: seq_type(self.parse_forms_until(closer))\n\n ###\n # Reader tag-macros\n ###\n\n @reader_for(\"#\")\n def tag_dispatch(self, key):\n \"\"\"General handler for reader macros (and tag macros).\n\n Reads a full identifier after the `#` and calls the corresponding handler\n (this allows, e.g., `#reads-multiple-forms foo bar baz`).\n\n Failing that, reads a single character after the `#` and immediately\n calls the corresponding handler (this allows, e.g., `#*args` to parse\n as `#*` followed by `args`).\n \"\"\"\n\n if not self.peekc():\n raise PrematureEndOfInput.from_reader(\n \"Premature end of input while attempting dispatch\", self\n )\n\n if self.peek_and_getc(\"^\"):\n typ = self.parse_one_form()\n target = self.parse_one_form()\n return mkexpr(\"annotate\", target, typ)\n\n tag = None\n # try dispatching tagged ident\n ident = self.read_ident(just_peeking=True)\n if ident and mangle(key + ident) in self.reader_table:\n self.getn(len(ident))\n tag = mangle(key + ident)\n # failing that, dispatch tag + single character\n elif key + self.peekc() in self.reader_table:\n tag = key + self.getc()\n if tag:\n tree = self.dispatch(tag)\n return as_model(tree) if tree is not None else None\n\n raise LexException.from_reader(\n f\"reader macro '{key + self.read_ident()}' is not defined\", self\n )\n\n @reader_for(\"#_\")\n def discard(self, _):\n \"\"\"Discards the next parsed form.\"\"\"\n self.parse_one_form()\n return None\n\n @reader_for(\"#*\")\n def hash_star(self, _):\n \"\"\"Unpacking forms `#*` and `#**`, corresponding to `*` and `**` in 
Python.\"\"\"\n num_stars = 1\n while self.peek_and_getc(\"*\"):\n num_stars += 1\n if num_stars > 2:\n raise LexException.from_reader(\"too many stars\", self)\n return mkexpr(\n \"unpack-\" + (\"iterable\", \"mapping\")[num_stars - 1],\n self.parse_one_form(),\n )\n\n ###\n # Strings\n # (these are more complicated because f-strings\n # form their own sublanguage)\n ###\n\n @reader_for(\"#[\")\n def bracketed_string(self, _):\n \"\"\"Bracketed strings. See the Hy docs for full details.\"\"\"\n delim = []\n for c in self.chars():\n if c == \"[\":\n break\n elif c == \"]\":\n raise LexException.from_reader(\n \"Ran into a ']' where it wasn't expected.\", self\n )\n delim.append(c)\n delim = \"\".join(delim)\n is_fstring = delim == \"f\" or delim.startswith(\"f-\")\n\n # discard single initial newline, if any, accounting for all\n # three styles of newline\n self.peek_and_getc(\"\\x0d\")\n self.peek_and_getc(\"\\x0a\")\n\n index = -1\n\n def delim_closing(c):\n nonlocal index\n if c == \"]\":\n if index == len(delim):\n # this is the second bracket at the end of the delim\n return len(delim) + 2\n else:\n # reset state, this may be the first bracket of closing delim\n index = 0\n elif 0 <= index <= len(delim):\n # we're inside a possible closing delim\n if index < len(delim) and c == delim[index]:\n index += 1\n else:\n # failed delim, reset state\n index = -1\n return 0\n\n return self.read_string_until(delim_closing, None, is_fstring, brackets=delim)\n\n def read_string_until(self, closing, prefix, is_fstring, **kwargs):\n if is_fstring:\n components = self.read_fcomponents_until(closing, prefix)\n return FString(components, **kwargs)\n s = self.read_chars_until(closing, prefix, is_fstring=False)\n return (Bytes if isinstance(s, bytes) else String)(s, **kwargs)\n\n def read_chars_until(self, closing, prefix, is_fstring):\n s = []\n for c in self.chars():\n s.append(c)\n # check if c is closing\n n_closing_chars = closing(c)\n if n_closing_chars:\n # string has ended\n s = s[:-n_closing_chars]\n break\n # check if c is start of component\n if is_fstring and c == \"{\":\n # check and handle \"{{\"\n if self.peek_and_getc(\"{\"):\n s.append(\"{\")\n else:\n # remove \"{\" from end of string component\n s.pop()\n break\n res = \"\".join(s).replace(\"\\x0d\\x0a\", \"\\x0a\").replace(\"\\x0d\", \"\\x0a\")\n\n if prefix is not None:\n res = eval(f'{prefix}\"\"\"{res}\"\"\"')\n if is_fstring:\n return res, n_closing_chars\n return res\n\n def read_fcomponents_until(self, closing, prefix):\n components = []\n start = self.pos\n while True:\n s, closed = self.read_chars_until(closing, prefix, is_fstring=True)\n if s:\n components.append(self.fill_pos(String(s), start))\n if closed:\n break\n components.extend(self.read_fcomponent(prefix))\n return components\n\n def read_fcomponent(self, prefix):\n \"\"\"May return one or two components, since the `=` debugging syntax\n will create a String component.\"\"\"\n start = self.pos\n values = []\n conversion = None\n has_debug = False\n\n # read the expression, saving the text verbatim\n # in case we encounter debug `=`\n space_before = self.slurp_space()\n with self.saving_chars() as form_text:\n model = self.parse_one_form()\n space_between = self.slurp_space()\n\n # check for and handle debug syntax:\n # we emt the verbatim text before we emit the value\n if self.peek_and_getc(\"=\"):\n has_debug = True\n space_after = self.slurp_space()\n dbg_prefix = (\n space_before + \"\".join(form_text) + space_between + \"=\" + space_after\n )\n 
values.append(self.fill_pos(String(dbg_prefix), start))\n\n # handle conversion code\n if self.peek_and_getc(\"!\"):\n conversion = self.getc()\n self.slurp_space()\n\n def component_closing(c):\n if c == \"}\":\n return 1\n return 0\n\n # handle formatting options\n format_components = []\n if self.peek_and_getc(\":\"):\n format_components = self.read_fcomponents_until(component_closing, prefix)\n else:\n if has_debug and conversion is None:\n conversion = \"r\"\n if not self.getc() == \"}\":\n raise LexException.from_reader(\"f-string: trailing junk in field\", self)\n return values + [\n self.fill_pos(FComponent((model, *format_components), conversion), start)\n ]\n", "path": "hy/reader/hy_reader.py" } ]
[ { "content": "\"Character reader for parsing Hy source.\"\n\nimport hy\nfrom hy.models import (\n Bytes,\n Complex,\n Dict,\n Expression,\n FComponent,\n Float,\n FString,\n Integer,\n Keyword,\n List,\n Set,\n String,\n Symbol,\n Tuple,\n as_model,\n)\n\nfrom .exceptions import LexException, PrematureEndOfInput\nfrom .mangling import mangle\nfrom .reader import Reader, isnormalizedspace\n\n\ndef sym(name):\n return Symbol(name, from_parser=True)\n\n\n# Note: This is subtly different from\n# the `mkexpr` in hy/compiler.py !\ndef mkexpr(root, *args):\n return Expression((sym(root) if isinstance(root, str) else root, *args))\n\n\ndef symbol_like(ident, reader=None):\n \"\"\"Generate a Hy model from an identifier-like string.\n\n Also verifies the syntax of dot notation and validity of symbol names.\n\n Parameters\n ----------\n ident : str\n Text to convert.\n\n reader : Reader, optional\n The reader to use, if any; used for generating position data for errors.\n\n Returns\n -------\n out : a hy.models.Object subtype corresponding to the parsed text.\n \"\"\"\n try:\n return Integer(ident)\n except ValueError:\n pass\n try:\n return Float(ident)\n except ValueError:\n pass\n if ident not in (\"j\", \"J\"):\n try:\n return Complex(ident)\n except ValueError:\n pass\n\n if \".\" in ident:\n for chunk in ident.split(\".\"):\n if chunk and not isinstance(symbol_like(chunk, reader=reader), Symbol):\n msg = (\n \"Cannot access attribute on anything other\"\n \" than a name (in order to get attributes of expressions,\"\n \" use `(. <expression> <attr>)` or `(.<attr> <expression>)`)\"\n )\n if reader is None:\n raise ValueError(msg)\n else:\n raise LexException.from_reader(msg, reader)\n\n if reader is None:\n if (\n not ident\n or ident[:1] == \":\"\n or any(isnormalizedspace(c) for c in ident)\n or HyReader.NON_IDENT.intersection(ident)\n ):\n raise ValueError(f\"Syntactically illegal symbol: {ident!r}\")\n\n return sym(ident)\n\n\nclass HyReader(Reader):\n \"\"\"A modular reader for Hy source.\"\"\"\n\n ###\n # Components necessary for Reader implementation\n ###\n\n NON_IDENT = set(\"()[]{};\\\"'\")\n\n def fill_pos(self, model, start):\n \"\"\"Attach line/col information to a model.\n\n Sets the end location of `model` to the current cursor position.\n\n Args:\n model (hy.models.Object): model to set line/col info for.\n start (tuple[int, int]): (line, column) tuple indicating the start\n location to assign to `model`.\n \"\"\"\n model.start_line, model.start_column = start\n model.end_line, model.end_column = self.pos\n return model\n\n def read_default(self, key):\n \"\"\"Default reader handler when nothing in the table matches.\n\n Try to read an identifier/symbol. If there's a double-quote immediately\n following, then parse it as a string with the given prefix (e.g.,\n `r\"...\"`). Otherwise, parse it as a symbol-like.\n \"\"\"\n ident = key + self.read_ident()\n if self.peek_and_getc('\"'):\n return self.prefixed_string('\"', ident)\n return symbol_like(ident, reader=self)\n\n def parse(self, stream, filename=None):\n \"\"\"Yields all `hy.models.Object`'s in `source`\n\n Additionally exposes `self` as ``hy.&reader`` during read/compile time.\n\n Args:\n source:\n Hy source to be parsed.\n filename (str | None):\n Filename to use for error messages. 
If `None` then previously\n set filename is used.\n \"\"\"\n self._set_source(stream, filename)\n rname = mangle(\"&reader\")\n old_reader = getattr(hy, rname, None)\n setattr(hy, rname, self)\n\n try:\n yield from self.parse_forms_until(\"\")\n finally:\n if old_reader is None:\n delattr(hy, rname)\n else:\n setattr(hy, rname, old_reader)\n\n ###\n # Reading forms\n ###\n\n def try_parse_one_form(self):\n \"\"\"Attempt to parse a single Hy form.\n\n Read one (non-space) character from the stream, then call the\n corresponding handler.\n\n Returns:\n hy.models.Object | None:\n Model optionally returned by the called handler. Handlers may\n return `None` to signify no parsed form (e.g., for comments).\n\n Raises:\n PrematureEndOfInput: If the reader hits the end of the file before\n fully parsing a form.\n LexException: If there is an error during form parsing.\n \"\"\"\n try:\n self.slurp_space()\n c = self.getc()\n start = self._pos\n if not c:\n raise PrematureEndOfInput.from_reader(\n \"Premature end of input while attempting to parse one form\", self\n )\n handler = self.reader_table.get(c)\n model = handler(self, c) if handler else self.read_default(c)\n return self.fill_pos(model, start) if model is not None else None\n except LexException:\n raise\n except Exception as e:\n raise LexException.from_reader(\n str(e) or \"Exception thrown attempting to parse one form\", self\n )\n\n def parse_one_form(self):\n \"\"\"Read from the stream until a form is parsed.\n\n Guaranteed to return a model (i.e., skips over comments).\n\n Returns:\n hy.models.Object\n \"\"\"\n model = None\n while model is None:\n model = self.try_parse_one_form()\n return model\n\n def parse_forms_until(self, closer):\n \"\"\"Yields `hy.models.Object`'s until character `closer` is seen.\n\n Useful for reading a sequence such as s-exprs or lists.\n \"\"\"\n while True:\n self.slurp_space()\n if self.peek_and_getc(closer):\n break\n model = self.try_parse_one_form()\n if model is not None:\n yield model\n\n ###\n # Basic atoms\n ###\n\n @reader_for(\")\")\n @reader_for(\"]\")\n @reader_for(\"}\")\n def INVALID(self, key):\n raise LexException.from_reader(\n f\"Ran into a '{key}' where it wasn't expected.\", self\n )\n\n @reader_for(\";\")\n def line_comment(self, _):\n any(c == \"\\n\" for c in self.chars(eof_ok=True))\n return None\n\n @reader_for(\":\")\n def keyword(self, _):\n ident = self.read_ident()\n if \".\" in ident:\n raise LexException.from_reader(\n \"Cannot access attribute on anything other\"\n \" than a name (in order to get attributes of expressions,\"\n \" use `(. 
<expression> <attr>)` or `(.<attr> <expression>)`)\",\n self,\n )\n return Keyword(ident, from_parser=True)\n\n @reader_for('\"')\n def prefixed_string(self, _, prefix=\"\"):\n prefix_chars = set(prefix)\n if (\n len(prefix_chars) != len(prefix)\n or prefix_chars - set(\"bfr\")\n or set(\"bf\") <= prefix_chars\n ):\n raise LexException.from_reader(f\"invalid string prefix {prefix!r}\", self)\n\n escaping = False\n\n def quote_closing(c):\n nonlocal escaping\n if c == \"\\\\\":\n escaping = not escaping\n return 0\n if c == '\"' and not escaping:\n return 1\n if (\n escaping\n and \"r\" not in prefix\n and\n # https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals\n c\n not in (\"\\n\\r\\\\'\\\"abfnrtv01234567x\" + (\"\" if \"b\" in prefix else \"NuU\"))\n ):\n raise LexException.from_reader(\"invalid escape sequence \\\\\" + c, self)\n escaping = False\n return 0\n\n return self.read_string_until(quote_closing, prefix, \"f\" in prefix.lower())\n\n ###\n # Special annotations\n ###\n\n @reader_for(\"'\", (\"quote\",))\n @reader_for(\"`\", (\"quasiquote\",))\n def tag_as(root):\n def _tag_as(self, _):\n nc = self.peekc()\n if (\n not nc\n or isnormalizedspace(nc)\n or self.reader_table.get(nc) == self.INVALID\n ):\n raise LexException.from_reader(\n \"Could not identify the next token.\", self\n )\n model = self.parse_one_form()\n return mkexpr(root, model)\n\n return _tag_as\n\n @reader_for(\"~\")\n def unquote(self, key):\n nc = self.peekc()\n if not nc or isnormalizedspace(nc) or self.reader_table.get(nc) == self.INVALID:\n return sym(key)\n return mkexpr(\n \"unquote\" + (\"-splice\" if self.peek_and_getc(\"@\") else \"\"),\n self.parse_one_form(),\n )\n\n ###\n # Sequences\n ###\n\n @reader_for(\"(\", (Expression, \")\"))\n @reader_for(\"[\", (List, \"]\"))\n @reader_for(\"{\", (Dict, \"}\"))\n @reader_for(\"#{\", (Set, \"}\"))\n @reader_for(\"#(\", (Tuple, \")\"))\n def sequence(seq_type, closer):\n return lambda self, _: seq_type(self.parse_forms_until(closer))\n\n ###\n # Reader tag-macros\n ###\n\n @reader_for(\"#\")\n def tag_dispatch(self, key):\n \"\"\"General handler for reader macros (and tag macros).\n\n Reads a full identifier after the `#` and calls the corresponding handler\n (this allows, e.g., `#reads-multiple-forms foo bar baz`).\n\n Failing that, reads a single character after the `#` and immediately\n calls the corresponding handler (this allows, e.g., `#*args` to parse\n as `#*` followed by `args`).\n \"\"\"\n\n if not self.peekc():\n raise PrematureEndOfInput.from_reader(\n \"Premature end of input while attempting dispatch\", self\n )\n\n if self.peek_and_getc(\"^\"):\n typ = self.parse_one_form()\n target = self.parse_one_form()\n return mkexpr(\"annotate\", target, typ)\n\n tag = None\n # try dispatching tagged ident\n ident = self.read_ident(just_peeking=True)\n if ident and mangle(key + ident) in self.reader_table:\n self.getn(len(ident))\n tag = mangle(key + ident)\n # failing that, dispatch tag + single character\n elif key + self.peekc() in self.reader_table:\n tag = key + self.getc()\n if tag:\n tree = self.dispatch(tag)\n return as_model(tree) if tree is not None else None\n\n raise LexException.from_reader(\n f\"reader macro '{key + self.read_ident()}' is not defined\", self\n )\n\n @reader_for(\"#_\")\n def discard(self, _):\n \"\"\"Discards the next parsed form.\"\"\"\n self.parse_one_form()\n return None\n\n @reader_for(\"#*\")\n def hash_star(self, _):\n \"\"\"Unpacking forms `#*` and `#**`, corresponding to `*` and `**` in 
Python.\"\"\"\n num_stars = 1\n while self.peek_and_getc(\"*\"):\n num_stars += 1\n if num_stars > 2:\n raise LexException.from_reader(\"too many stars\", self)\n return mkexpr(\n \"unpack-\" + (\"iterable\", \"mapping\")[num_stars - 1],\n self.parse_one_form(),\n )\n\n ###\n # Strings\n # (these are more complicated because f-strings\n # form their own sublanguage)\n ###\n\n @reader_for(\"#[\")\n def bracketed_string(self, _):\n \"\"\"Bracketed strings. See the Hy docs for full details.\"\"\"\n delim = []\n for c in self.chars():\n if c == \"[\":\n break\n elif c == \"]\":\n raise LexException.from_reader(\n \"Ran into a ']' where it wasn't expected.\", self\n )\n delim.append(c)\n delim = \"\".join(delim)\n is_fstring = delim == \"f\" or delim.startswith(\"f-\")\n\n # discard single initial newline, if any, accounting for all\n # three styles of newline\n self.peek_and_getc(\"\\x0d\")\n self.peek_and_getc(\"\\x0a\")\n\n index = -1\n\n def delim_closing(c):\n nonlocal index\n if c == \"]\":\n if index == len(delim):\n # this is the second bracket at the end of the delim\n return len(delim) + 2\n else:\n # reset state, this may be the first bracket of closing delim\n index = 0\n elif 0 <= index <= len(delim):\n # we're inside a possible closing delim\n if index < len(delim) and c == delim[index]:\n index += 1\n else:\n # failed delim, reset state\n index = -1\n return 0\n\n return self.read_string_until(delim_closing, None, is_fstring, brackets=delim)\n\n def read_string_until(self, closing, prefix, is_fstring, **kwargs):\n if is_fstring:\n components = self.read_fcomponents_until(closing, prefix)\n return FString(components, **kwargs)\n s = self.read_chars_until(closing, prefix, is_fstring=False)\n return (Bytes if isinstance(s, bytes) else String)(s, **kwargs)\n\n def read_chars_until(self, closing, prefix, is_fstring):\n s = []\n for c in self.chars():\n s.append(c)\n # check if c is closing\n n_closing_chars = closing(c)\n if n_closing_chars:\n # string has ended\n s = s[:-n_closing_chars]\n break\n # check if c is start of component\n if is_fstring and c == \"{\" and s[-3:] != [\"\\\\\", \"N\", \"{\"]:\n # check and handle \"{{\"\n if self.peek_and_getc(\"{\"):\n s.append(\"{\")\n else:\n # remove \"{\" from end of string component\n s.pop()\n break\n res = \"\".join(s).replace(\"\\x0d\\x0a\", \"\\x0a\").replace(\"\\x0d\", \"\\x0a\")\n\n if prefix is not None:\n res = eval(f'{prefix}\"\"\"{res}\"\"\"')\n if is_fstring:\n return res, n_closing_chars\n return res\n\n def read_fcomponents_until(self, closing, prefix):\n components = []\n start = self.pos\n while True:\n s, closed = self.read_chars_until(closing, prefix, is_fstring=True)\n if s:\n components.append(self.fill_pos(String(s), start))\n if closed:\n break\n components.extend(self.read_fcomponent(prefix))\n return components\n\n def read_fcomponent(self, prefix):\n \"\"\"May return one or two components, since the `=` debugging syntax\n will create a String component.\"\"\"\n start = self.pos\n values = []\n conversion = None\n has_debug = False\n\n # read the expression, saving the text verbatim\n # in case we encounter debug `=`\n space_before = self.slurp_space()\n with self.saving_chars() as form_text:\n model = self.parse_one_form()\n space_between = self.slurp_space()\n\n # check for and handle debug syntax:\n # we emt the verbatim text before we emit the value\n if self.peek_and_getc(\"=\"):\n has_debug = True\n space_after = self.slurp_space()\n dbg_prefix = (\n space_before + \"\".join(form_text) + space_between + 
\"=\" + space_after\n )\n values.append(self.fill_pos(String(dbg_prefix), start))\n\n # handle conversion code\n if self.peek_and_getc(\"!\"):\n conversion = self.getc()\n self.slurp_space()\n\n def component_closing(c):\n if c == \"}\":\n return 1\n return 0\n\n # handle formatting options\n format_components = []\n if self.peek_and_getc(\":\"):\n format_components = self.read_fcomponents_until(component_closing, prefix)\n else:\n if has_debug and conversion is None:\n conversion = \"r\"\n if not self.getc() == \"}\":\n raise LexException.from_reader(\"f-string: trailing junk in field\", self)\n return values + [\n self.fill_pos(FComponent((model, *format_components), conversion), start)\n ]\n", "path": "hy/reader/hy_reader.py" } ]
diff --git a/NEWS.rst b/NEWS.rst index 288169f3f..0df14fc63 100644 --- a/NEWS.rst +++ b/NEWS.rst @@ -15,6 +15,7 @@ Bug Fixes * `__file__` should now be set the same way as in Python. * Fixed a bug with `python -O` where assertions were still partly evaluated. +* `\N{…}` escape sequences are now recognized in f-strings. Misc. Improvements ------------------------------ diff --git a/hy/reader/hy_reader.py b/hy/reader/hy_reader.py index b077c9e6d..0e80b2a3a 100644 --- a/hy/reader/hy_reader.py +++ b/hy/reader/hy_reader.py @@ -448,7 +448,7 @@ def read_chars_until(self, closing, prefix, is_fstring): s = s[:-n_closing_chars] break # check if c is start of component - if is_fstring and c == "{": + if is_fstring and c == "{" and s[-3:] != ["\\", "N", "{"]: # check and handle "{{" if self.peek_and_getc("{"): s.append("{") diff --git a/tests/native_tests/language.hy b/tests/native_tests/language.hy index af18716f4..a88ff672a 100644 --- a/tests/native_tests/language.hy +++ b/tests/native_tests/language.hy @@ -1109,6 +1109,11 @@ cee"} dee" "ey bee\ncee dee")) (+ "C[" format-spec "]"))) (assert (= f"{(C) : {(str (+ 1 1)) !r :x<5}}" "C[ '2'xx]")) + ; \N sequences + ; https://github.com/hylang/hy/issues/2321 + (setv ampersand "wich") + (assert (= f"sand{ampersand} \N{ampersand} chips" "sandwich & chips")) + ; Format bracket strings (assert (= #[f[a{p !r :9}]f] "a'xyzzy' ")) (assert (= #[f-string[result: {value :{width}.{precision}}]f-string]
F-strings don't allow `\N{…}` ``` Hy 0.24.0 using CPython(default) 3.9.6 on Darwin => (print "\N{slightly smiling face}") 🙂 => (print f"\N{slightly smiling face}") Traceback (most recent call last): File "stdin-eda06fe2e57521e633661e996d6216e5bab61d9b", line 1 (print f"\N{slightly smiling face}") ^ hy.reader.exceptions.LexException: (unicode error) 'unicodeescape' codec can't decode bytes in position 0-1: malformed \N character escape (<string>, line 1) ``` Python 3.9 works fine: ``` >>> print("\N{slightly smiling face}") 🙂 >>> print(f"\N{slightly smiling face}") 🙂 ```
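The entire fix above is the extra `s[-3:] != ["\\", "N", "{"]` guard in `read_chars_until`: a `{` that is part of a `\N{NAME}` named escape must not be taken as the start of a replacement field. A standalone toy sketch of that scanning rule (not Hy's actual reader; it ignores nesting and `}}`) shows the intended behaviour:

```python
def split_fstring_body(text):
    """Toy scanner mirroring the guard added in the diff above: '{' does
    not open a replacement field when escaped as '{{' or when it belongs
    to a \\N{...} named-escape sequence."""
    out, literal, i = [], [], 0
    while i < len(text):
        c = text[i]
        if c == "{":
            if text[i + 1:i + 2] == "{":       # '{{' -> literal '{'
                literal.append("{")
                i += 2
                continue
            if text[i - 2:i] == "\\N":         # part of \N{...}: keep as text
                literal.append(c)
                i += 1
                continue
            end = text.index("}", i)           # naive: no nested fields
            out.append(("literal", "".join(literal)))
            out.append(("field", text[i + 1:end]))
            literal, i = [], end + 1
            continue
        literal.append(c)
        i += 1
    out.append(("literal", "".join(literal)))
    return out

# The failing input from the issue, plus a real field for contrast:
print(split_fstring_body(r"\N{slightly smiling face} and {mood}"))
# [('literal', '\\N{slightly smiling face} and '), ('field', 'mood'), ('literal', '')]
```

The new test in the diff (`f"sand{ampersand} \N{ampersand} chips"`) relies on exactly this distinction: the first braces are a replacement field, the second pair belongs to the named escape.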
mosaicml__composer-182
[ { "content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nimport os\nimport sys\nimport textwrap\n\nimport setuptools\nfrom setuptools import setup\n\n\ndef package_files(directory: str):\n # from https://stackoverflow.com/a/36693250\n paths = []\n for (path, _, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\ninstall_requires = [\n \"pyyaml>=5.4.1\",\n \"tqdm>=4.62.3\",\n \"torchmetrics>=0.6.0\",\n \"torch_optimizer==0.1.0\",\n \"torchvision>=0.9.0\",\n \"torch>=1.9\",\n \"yahp>=0.0.14\",\n \"numpy==1.21.5\",\n]\nextra_deps = {}\n\nextra_deps['base'] = []\n\nextra_deps['dev'] = [\n \"custom_inherit==2.3.2\",\n 'junitparser>=2.1.1',\n 'coverage[toml]>=6.1.1',\n 'fasteners>=0.16.3', # run_directory_uploader tests require fasteners\n 'pytest>=6.2.0',\n 'yapf>=0.13.0',\n 'isort>=5.9.3',\n 'ipython>=7.29.0',\n 'ipykernel>=6.5.0',\n 'jupyter>=1.0.0',\n 'yamllint>=1.26.2',\n 'pytest-timeout>=1.4.2',\n 'recommonmark>=0.7.1',\n 'sphinx>=4.2.0',\n 'sphinx_copybutton>=0.4.0',\n 'sphinx_markdown_tables>=0.0.15',\n 'sphinx-argparse>=0.3.1',\n 'sphinxcontrib.katex>=0.8.6',\n 'sphinxext.opengraph>=0.4.2',\n 'sphinxemoji>=0.2.0',\n 'sphinx_rtd_theme>=1.0.0',\n 'testbook>=0.4.2',\n 'myst-parser>=0.15.2',\n]\nextra_deps['logging'] = ['wandb>=0.12.2', 'apache-libcloud>=3.4.1']\n\nextra_deps['nlp'] = [\n 'transformers>=4.11.3',\n 'datasets>=1.14.0',\n]\n\nextra_deps['unet'] = [\n 'monai>=0.7.0',\n 'scikit-learn>=1.0.1',\n]\n\nextra_deps['deepspeed'] = [\n 'deepspeed>=0.5.5',\n]\n\nextra_deps['all'] = set(dep for deps in extra_deps.values() for dep in deps)\n\nsetup(\n name=\"mosaicml\",\n version=\"0.3.1\",\n author=\"MosaicML\",\n author_email=\"[email protected]\",\n description=\"composing methods for ML training efficiency\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/mosaicml/composer\",\n include_package_data=True,\n package_data={\n \"composer\": ['py.typed'],\n \"\": package_files('composer/yamls'),\n },\n packages=setuptools.find_packages(exclude=[\"tests*\"]),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n ],\n install_requires=install_requires,\n entry_points={\n 'console_scripts': ['composer = composer.cli.launcher:main',],\n },\n extras_require=extra_deps,\n dependency_links=['https://developer.download.nvidia.com/compute/redist'],\n python_requires='>=3.7',\n ext_package=\"composer\",\n)\n\n# only visible if user installs with verbose -v flag\n# Printing to stdout as not to interfere with setup.py CLI flags (e.g. --version)\nprint(\"*\" * 20, file=sys.stderr)\nprint(textwrap.dedent(\"\"\"NOTE: For best performance, we recommend installing Pillow-SIMD\n for accelerated image processing operations. To install:\n \\t pip uninstall pillow && pip install pillow-simd\"\"\"),\n file=sys.stderr)\nprint(\"*\" * 20, file=sys.stderr)\n", "path": "setup.py" } ]
[ { "content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nimport os\nimport site\nimport sys\nimport textwrap\n\nimport setuptools\nfrom setuptools import setup\n\n# From https://github.com/pypa/pip/issues/7953#issuecomment-645133255\nsite.ENABLE_USER_SITE = \"--user\" in sys.argv[1:]\n\n\ndef package_files(directory: str):\n # from https://stackoverflow.com/a/36693250\n paths = []\n for (path, _, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\ninstall_requires = [\n \"pyyaml>=5.4.1\",\n \"tqdm>=4.62.3\",\n \"torchmetrics>=0.6.0\",\n \"torch_optimizer==0.1.0\",\n \"torchvision>=0.9.0\",\n \"torch>=1.9\",\n \"yahp>=0.0.14\",\n \"numpy==1.21.5\",\n]\nextra_deps = {}\n\nextra_deps['base'] = []\n\nextra_deps['dev'] = [\n \"custom_inherit==2.3.2\",\n 'junitparser>=2.1.1',\n 'coverage[toml]>=6.1.1',\n 'fasteners>=0.16.3', # run_directory_uploader tests require fasteners\n 'pytest>=6.2.0',\n 'yapf>=0.13.0',\n 'isort>=5.9.3',\n 'ipython>=7.29.0',\n 'ipykernel>=6.5.0',\n 'jupyter>=1.0.0',\n 'yamllint>=1.26.2',\n 'pytest-timeout>=1.4.2',\n 'recommonmark>=0.7.1',\n 'sphinx>=4.2.0',\n 'sphinx_copybutton>=0.4.0',\n 'sphinx_markdown_tables>=0.0.15',\n 'sphinx-argparse>=0.3.1',\n 'sphinxcontrib.katex>=0.8.6',\n 'sphinxext.opengraph>=0.4.2',\n 'sphinxemoji>=0.2.0',\n 'sphinx_rtd_theme>=1.0.0',\n 'testbook>=0.4.2',\n 'myst-parser>=0.15.2',\n]\nextra_deps['logging'] = ['wandb>=0.12.2', 'apache-libcloud>=3.4.1']\n\nextra_deps['nlp'] = [\n 'transformers>=4.11.3',\n 'datasets>=1.14.0',\n]\n\nextra_deps['unet'] = [\n 'monai>=0.7.0',\n 'scikit-learn>=1.0.1',\n]\n\nextra_deps['deepspeed'] = [\n 'deepspeed>=0.5.5',\n]\n\nextra_deps['all'] = set(dep for deps in extra_deps.values() for dep in deps)\n\nsetup(\n name=\"mosaicml\",\n version=\"0.3.1\",\n author=\"MosaicML\",\n author_email=\"[email protected]\",\n description=\"composing methods for ML training efficiency\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/mosaicml/composer\",\n include_package_data=True,\n package_data={\n \"composer\": ['py.typed'],\n \"\": package_files('composer/yamls'),\n },\n packages=setuptools.find_packages(exclude=[\"tests*\"]),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n ],\n install_requires=install_requires,\n entry_points={\n 'console_scripts': ['composer = composer.cli.launcher:main',],\n },\n extras_require=extra_deps,\n dependency_links=['https://developer.download.nvidia.com/compute/redist'],\n python_requires='>=3.7',\n ext_package=\"composer\",\n)\n\n# only visible if user installs with verbose -v flag\n# Printing to stdout as not to interfere with setup.py CLI flags (e.g. --version)\nprint(\"*\" * 20, file=sys.stderr)\nprint(textwrap.dedent(\"\"\"NOTE: For best performance, we recommend installing Pillow-SIMD\n for accelerated image processing operations. To install:\n \\t pip uninstall pillow && pip install pillow-simd\"\"\"),\n file=sys.stderr)\nprint(\"*\" * 20, file=sys.stderr)\n", "path": "setup.py" } ]
diff --git a/docker/Makefile b/docker/Makefile index ecbc390280..f37ff50afc 100644 --- a/docker/Makefile +++ b/docker/Makefile @@ -32,11 +32,11 @@ COMPOSER_EXTRA_DEPS ?= base # Resulting docker tag for the composer dockerimage COMPOSER_TAG ?= mosaicml/composer:$(COMPOSER_EXTRA_DEPS) -.PHONY: build +.PHONY: build pytorch composer build: composer -base: +pytorch: cd pytorch && docker build -t $(PYTORCH_TAG) \ --build-arg BASE_IMAGE=$(BASE_IMAGE) \ --build-arg CUDA_VERSION_TAG=$(CUDA_VERSION_TAG) \ @@ -45,7 +45,7 @@ base: --build-arg TORCHVISION_VERSION=$(TORCHVISION_VERSION) \ . -composer: base +composer: pytorch cd .. && docker build -t $(COMPOSER_TAG) \ --build-arg BASE_IMAGE=$(PYTORCH_TAG) \ --build-arg COMPOSER_EXTRA_DEPS=$(COMPOSER_EXTRA_DEPS) \ diff --git a/docker/pytorch/Dockerfile b/docker/pytorch/Dockerfile index de1fad13b6..66f35b2459 100644 --- a/docker/pytorch/Dockerfile +++ b/docker/pytorch/Dockerfile @@ -3,12 +3,18 @@ ARG BASE_IMAGE FROM ${BASE_IMAGE} ARG DEBIAN_FRONTEND=noninteractive +# remove a bad symlink from the base composer image +# If this file is present after the first command, kaniko +# won't be able to build the docker image. +RUN rm -f /usr/local/cuda-11.3/cuda-11.3 + RUN apt-get update && \ apt-get install -y --no-install-recommends \ libgomp1 \ curl \ sudo \ build-essential \ + git \ software-properties-common \ # For PILLOW: zlib1g-dev \ @@ -21,25 +27,27 @@ RUN apt-get update && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* -# # Upgrade NCCL to fix NVLink errors -# # No Need to install, as the cuda 11.3 image already contains a late enough version -# RUN DISTRO="$(lsb_release -si | tr '[:upper:]' '[:lower:]')$(lsb_release -sr | sed 's/\.//')" && \ -# apt-get update && \ -# apt-get install -y --no-install-recommends \ -# dirmngr \ -# gpg-agent && \ -# apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/${DISTRO}/x86_64/7fa2af80.pub && \ -# add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/${DISTRO}/x86_64/ /" && \ -# apt-get install -y --no-install-recommends --allow-change-held-packages \ -# libnccl2 \ -# libnccl-dev && \ -# apt-get autoclean && \ -# apt-get clean && \ -# rm -rf /var/lib/apt/lists/* - -# # Use system installed NCCL per update above, point to library -# ENV USE_SYSTEM_NCCL=1 -# ENV LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libnccl.so.2.9.6 +################################################## +# Change the NCCL version to fix NVLink errors +# This is required to train on Nvidia A100s in GCP +################################################## +RUN DISTRO="$(lsb_release -si | tr '[:upper:]' '[:lower:]')$(lsb_release -sr | sed 's/\.//')" && \ + apt-get update && \ + apt-get install -y --no-install-recommends \ + dirmngr \ + gpg-agent && \ + apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/${DISTRO}/x86_64/7fa2af80.pub && \ + add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/${DISTRO}/x86_64/ /" && \ + apt-get install -y --no-install-recommends --allow-change-held-packages --allow-downgrades \ + libnccl2=2.9.6-1+cuda11.0 \ + libnccl-dev=2.9.6-1+cuda11.0 && \ + apt-get autoclean && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +# Use system installed NCCL per update above, point to library +ENV USE_SYSTEM_NCCL=1 +ENV LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libnccl.so.2.9.6 ############################## # Install NodeJS (for Pyright) @@ -64,14 +72,12 @@ RUN add-apt-repository ppa:deadsnakes/ppa && \ 
python${PYTHON_VERSION}-dev \ python${PYTHON_VERSION}-distutils \ python${PYTHON_VERSION}-venv && \ - update-alternatives --install /usr/bin/python python /usr/bin/python${PYTHON_VERSION} 1 && \ - update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 && \ apt-get autoclean && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* -RUN curl -fsSL https://bootstrap.pypa.io/get-pip.py | python - && \ - pip install --no-cache-dir --upgrade pip +RUN curl -fsSL https://bootstrap.pypa.io/get-pip.py | python${PYTHON_VERSION} - && \ + pip${PYTHON_VERSION} install --no-cache-dir --upgrade pip setuptools ##################### # Install pillow-simd @@ -83,8 +89,8 @@ ARG PILLOW_SIMD_VERSION=7.0.0.post3 # so when pillow_simd is installed, other packages won't later override it COPY pillow_stub /tmp/pillow_stub -RUN pip install --no-cache-dir --upgrade /tmp/pillow_stub && \ - pip install --no-cache-dir --upgrade pillow_simd==${PILLOW_SIMD_VERSION} +RUN pip${PYTHON_VERSION} install --no-cache-dir --upgrade /tmp/pillow_stub && \ + pip${PYTHON_VERSION} install --no-cache-dir --upgrade pillow_simd==${PILLOW_SIMD_VERSION} ################# # Install Pytorch @@ -93,7 +99,7 @@ ARG PYTORCH_VERSION ARG TORCHVISION_VERSION ARG CUDA_VERSION_TAG -RUN pip install --no-cache-dir --find-links https://download.pytorch.org/whl/torch_stable.html \ +RUN pip${PYTHON_VERSION} install --no-cache-dir --find-links https://download.pytorch.org/whl/torch_stable.html \ torch==${PYTORCH_VERSION}+${CUDA_VERSION_TAG} \ torchvision==${TORCHVISION_VERSION}+${CUDA_VERSION_TAG} @@ -102,11 +108,41 @@ RUN pip install --no-cache-dir --find-links https://download.pytorch.org/whl/tor ######### RUN rm -rf /tmp/* + +####################### +# Set the shell to bash +####################### +SHELL ["/bin/bash", "-c"] + + +################################ +# Use the correct python version +################################ + +# Set the default python by creating our own folder and hacking the path +# We don't want to use upgrade-alternatives as that will break system packages + +ARG COMPOSER_PYTHON_BIN=/composer-python + +RUN mkdir -p ${COMPOSER_PYTHON_BIN} && \ + ln -s $(which python${PYTHON_VERSION}) ${COMPOSER_PYTHON_BIN}/python && \ + ln -s $(which python${PYTHON_VERSION}) ${COMPOSER_PYTHON_BIN}/python3 && \ + ln -s $(which python${PYTHON_VERSION}) ${COMPOSER_PYTHON_BIN}/python${PYTHON_VERSION} && \ + ln -s $(which pip${PYTHON_VERSION}) ${COMPOSER_PYTHON_BIN}/pip && \ + ln -s $(which pip${PYTHON_VERSION}) ${COMPOSER_PYTHON_BIN}/pip3 && \ + ln -s $(which pip${PYTHON_VERSION}) ${COMPOSER_PYTHON_BIN}/pip${PYTHON_VERSION} && \ + # Include this folder, and the local bin folder, on the path + echo "export PATH=~/.local/bin:$COMPOSER_PYTHON_BIN:$PATH" >> /etc/profile && \ + echo "export PATH=~/.local/bin:$COMPOSER_PYTHON_BIN:$PATH" >> /etc/bash.bashrc && \ + echo "export PATH=~/.local/bin:$COMPOSER_PYTHON_BIN:$PATH" >> /etc/zshenv + +# Ensure that non-interactive shells load /etc/profile +ENV BASH_ENV=/etc/profile + ######################### # Configure non-root user ######################### -RUN echo "export PATH=\$PATH:~/.local/bin" >> /etc/skel/.bashrc && \ - useradd -rm -d /home/mosaicml -s /bin/bash -u 1000 -U -s /bin/bash mosaicml && \ +RUN useradd -rm -d /home/mosaicml -s /bin/bash -u 1000 -U -s /bin/bash mosaicml && \ usermod -a -G sudo mosaicml && \ echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers diff --git a/setup.py b/setup.py index 9015603417..6d521f2c8f 100755 --- a/setup.py +++ b/setup.py @@ -1,12 +1,16 
@@ # Copyright 2021 MosaicML. All Rights Reserved. import os +import site import sys import textwrap import setuptools from setuptools import setup +# From https://github.com/pypa/pip/issues/7953#issuecomment-645133255 +site.ENABLE_USER_SITE = "--user" in sys.argv[1:] + def package_files(directory: str): # from https://stackoverflow.com/a/36693250
Add venv into docker image to enable editable `pip install` When trying to install composer with `pip install -e .` from within the docker image, we are seeing this error: ``` Traceback (most recent call last): File "/usr/bin/composer", line 33, in <module> sys.exit(load_entry_point('mosaicml', 'console_scripts', 'composer')()) File "/usr/bin/composer", line 22, in importlib_load_entry_point for entry_point in distribution(dist_name).entry_points File "/usr/lib/python3.8/importlib/metadata.py", line 445, in distribution return Distribution.from_name(distribution_name) File "/usr/lib/python3.8/importlib/metadata.py", line 169, in from_name raise PackageNotFoundError(name) importlib.metadata.PackageNotFoundError: mosaicml ``` This seems to be remedied by running the `pip install` from within a virtualenv. Can we bake a virtualenv into the docker image as a workaround?
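The traceback in this issue is the stdlib `importlib.metadata` lookup inside the generated `composer` console script failing to locate the `mosaicml` distribution after an editable install against the image's system interpreter. A minimal, stdlib-only check (a sketch; nothing here is composer-specific beyond the distribution name) makes that failure easy to reproduce from inside the container:

```python
# Sketch of the metadata lookup the `composer` entry point performs,
# per the traceback above. Pure stdlib, Python 3.8+.
from importlib import metadata

try:
    dist = metadata.distribution("mosaicml")
    print("found", dist.metadata["Name"], dist.version)
except metadata.PackageNotFoundError:
    print("mosaicml metadata not found -- the same failure mode the "
          "`composer` console script hits in the traceback above")
```

The diff above approaches the problem from two sides: the `site.ENABLE_USER_SITE` toggle in `setup.py` (the workaround referenced from pip issue #7953) and a dedicated `/composer-python` directory of `python`/`pip` symlinks prepended to `PATH`, replacing the earlier `update-alternatives` calls.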
python__mypy-4106
[ { "content": "\"\"\"Type inference constraints.\"\"\"\n\nfrom typing import Iterable, List, Optional, Sequence\n\nfrom mypy import experiments\nfrom mypy.types import (\n CallableType, Type, TypeVisitor, UnboundType, AnyType, NoneTyp, TypeVarType, Instance,\n TupleType, TypedDictType, UnionType, Overloaded, ErasedType, PartialType, DeletedType,\n UninhabitedType, TypeType, TypeVarId, TypeQuery, is_named_instance, TypeOfAny\n)\nfrom mypy.maptype import map_instance_to_supertype\nfrom mypy import nodes\nimport mypy.subtypes\nfrom mypy.sametypes import is_same_type\nfrom mypy.erasetype import erase_typevars\n\n\nSUBTYPE_OF = 0 # type: int\nSUPERTYPE_OF = 1 # type: int\n\n\nclass Constraint:\n \"\"\"A representation of a type constraint.\n\n It can be either T <: type or T :> type (T is a type variable).\n \"\"\"\n\n type_var = None # type: TypeVarId\n op = 0 # SUBTYPE_OF or SUPERTYPE_OF\n target = None # type: Type\n\n def __init__(self, type_var: TypeVarId, op: int, target: Type) -> None:\n self.type_var = type_var\n self.op = op\n self.target = target\n\n def __repr__(self) -> str:\n op_str = '<:'\n if self.op == SUPERTYPE_OF:\n op_str = ':>'\n return '{} {} {}'.format(self.type_var, op_str, self.target)\n\n\ndef infer_constraints_for_callable(\n callee: CallableType, arg_types: Sequence[Optional[Type]], arg_kinds: List[int],\n formal_to_actual: List[List[int]]) -> List[Constraint]:\n \"\"\"Infer type variable constraints for a callable and actual arguments.\n\n Return a list of constraints.\n \"\"\"\n constraints = [] # type: List[Constraint]\n tuple_counter = [0]\n\n for i, actuals in enumerate(formal_to_actual):\n for actual in actuals:\n actual_arg_type = arg_types[actual]\n if actual_arg_type is None:\n continue\n\n actual_type = get_actual_type(actual_arg_type, arg_kinds[actual],\n tuple_counter)\n c = infer_constraints(callee.arg_types[i], actual_type,\n SUPERTYPE_OF)\n constraints.extend(c)\n\n return constraints\n\n\ndef get_actual_type(arg_type: Type, kind: int,\n tuple_counter: List[int]) -> Type:\n \"\"\"Return the type of an actual argument with the given kind.\n\n If the argument is a *arg, return the individual argument item.\n \"\"\"\n\n if kind == nodes.ARG_STAR:\n if isinstance(arg_type, Instance):\n if arg_type.type.fullname() == 'builtins.list':\n # List *arg.\n return arg_type.args[0]\n elif arg_type.args:\n # TODO try to map type arguments to Iterable\n return arg_type.args[0]\n else:\n return AnyType(TypeOfAny.from_error)\n elif isinstance(arg_type, TupleType):\n # Get the next tuple item of a tuple *arg.\n tuple_counter[0] += 1\n return arg_type.items[tuple_counter[0] - 1]\n else:\n return AnyType(TypeOfAny.from_error)\n elif kind == nodes.ARG_STAR2:\n if isinstance(arg_type, Instance) and (arg_type.type.fullname() == 'builtins.dict'):\n # Dict **arg. TODO more general (Mapping)\n return arg_type.args[1]\n else:\n return AnyType(TypeOfAny.from_error)\n else:\n # No translation for other kinds.\n return arg_type\n\n\ndef infer_constraints(template: Type, actual: Type,\n direction: int) -> List[Constraint]:\n \"\"\"Infer type constraints.\n\n Match a template type, which may contain type variable references,\n recursively against a type which does not contain (the same) type\n variable references. The result is a list of type constrains of\n form 'T is a supertype/subtype of x', where T is a type variable\n present in the template and x is a type without reference to type\n variables present in the template.\n\n Assume T and S are type variables. 
Now the following results can be\n calculated (read as '(template, actual) --> result'):\n\n (T, X) --> T :> X\n (X[T], X[Y]) --> T <: Y and T :> Y\n ((T, T), (X, Y)) --> T :> X and T :> Y\n ((T, S), (X, Y)) --> T :> X and S :> Y\n (X[T], Any) --> T <: Any and T :> Any\n\n The constraints are represented as Constraint objects.\n \"\"\"\n\n # If the template is simply a type variable, emit a Constraint directly.\n # We need to handle this case before handling Unions for two reasons:\n # 1. \"T <: Union[U1, U2]\" is not equivalent to \"T <: U1 or T <: U2\",\n # because T can itself be a union (notably, Union[U1, U2] itself).\n # 2. \"T :> Union[U1, U2]\" is logically equivalent to \"T :> U1 and\n # T :> U2\", but they are not equivalent to the constraint solver,\n # which never introduces new Union types (it uses join() instead).\n if isinstance(template, TypeVarType):\n return [Constraint(template.id, direction, actual)]\n\n # Now handle the case of either template or actual being a Union.\n # For a Union to be a subtype of another type, every item of the Union\n # must be a subtype of that type, so concatenate the constraints.\n if direction == SUBTYPE_OF and isinstance(template, UnionType):\n res = []\n for t_item in template.items:\n res.extend(infer_constraints(t_item, actual, direction))\n return res\n if direction == SUPERTYPE_OF and isinstance(actual, UnionType):\n res = []\n for a_item in actual.items:\n res.extend(infer_constraints(template, a_item, direction))\n return res\n\n # Now the potential subtype is known not to be a Union or a type\n # variable that we are solving for. In that case, for a Union to\n # be a supertype of the potential subtype, some item of the Union\n # must be a supertype of it.\n if direction == SUBTYPE_OF and isinstance(actual, UnionType):\n # If some of items is not a complete type, disregard that.\n items = simplify_away_incomplete_types(actual.items)\n # We infer constraints eagerly -- try to find constraints for a type\n # variable if possible. This seems to help with some real-world\n # use cases.\n return any_constraints(\n [infer_constraints_if_possible(template, a_item, direction)\n for a_item in items],\n eager=True)\n if direction == SUPERTYPE_OF and isinstance(template, UnionType):\n # When the template is a union, we are okay with leaving some\n # type variables indeterminate. 
This helps with some special\n # cases, though this isn't very principled.\n return any_constraints(\n [infer_constraints_if_possible(t_item, actual, direction)\n for t_item in template.items],\n eager=False)\n\n # Remaining cases are handled by ConstraintBuilderVisitor.\n return template.accept(ConstraintBuilderVisitor(actual, direction))\n\n\ndef infer_constraints_if_possible(template: Type, actual: Type,\n direction: int) -> Optional[List[Constraint]]:\n \"\"\"Like infer_constraints, but return None if the input relation is\n known to be unsatisfiable, for example if template=List[T] and actual=int.\n (In this case infer_constraints would return [], just like it would for\n an automatically satisfied relation like template=List[T] and actual=object.)\n \"\"\"\n if (direction == SUBTYPE_OF and\n not mypy.subtypes.is_subtype(erase_typevars(template), actual)):\n return None\n if (direction == SUPERTYPE_OF and\n not mypy.subtypes.is_subtype(actual, erase_typevars(template))):\n return None\n return infer_constraints(template, actual, direction)\n\n\ndef any_constraints(options: List[Optional[List[Constraint]]], eager: bool) -> List[Constraint]:\n \"\"\"Deduce what we can from a collection of constraint lists.\n\n It's a given that at least one of the lists must be satisfied. A\n None element in the list of options represents an unsatisfiable\n constraint and is ignored. Ignore empty constraint lists if eager\n is true -- they are always trivially satisfiable.\n \"\"\"\n if eager:\n valid_options = [option for option in options if option]\n else:\n valid_options = [option for option in options if option is not None]\n if len(valid_options) == 1:\n return valid_options[0]\n elif (len(valid_options) > 1 and\n all(is_same_constraints(valid_options[0], c)\n for c in valid_options[1:])):\n # Multiple sets of constraints that are all the same. Just pick any one of them.\n # TODO: More generally, if a given (variable, direction) pair appears in\n # every option, combine the bounds with meet/join.\n return valid_options[0]\n\n # Otherwise, there are either no valid options or multiple, inconsistent valid\n # options. Give up and deduce nothing.\n return []\n\n\ndef is_same_constraints(x: List[Constraint], y: List[Constraint]) -> bool:\n for c1 in x:\n if not any(is_same_constraint(c1, c2) for c2 in y):\n return False\n for c1 in y:\n if not any(is_same_constraint(c1, c2) for c2 in x):\n return False\n return True\n\n\ndef is_same_constraint(c1: Constraint, c2: Constraint) -> bool:\n return (c1.type_var == c2.type_var\n and c1.op == c2.op\n and is_same_type(c1.target, c2.target))\n\n\ndef simplify_away_incomplete_types(types: List[Type]) -> List[Type]:\n complete = [typ for typ in types if is_complete_type(typ)]\n if complete:\n return complete\n else:\n return types\n\n\ndef is_complete_type(typ: Type) -> bool:\n \"\"\"Is a type complete?\n\n A complete doesn't have uninhabited type components or (when not in strict\n optional mode) None components.\n \"\"\"\n return typ.accept(CompleteTypeVisitor())\n\n\nclass CompleteTypeVisitor(TypeQuery[bool]):\n def __init__(self) -> None:\n super().__init__(all)\n\n def visit_uninhabited_type(self, t: UninhabitedType) -> bool:\n return False\n\n\nclass ConstraintBuilderVisitor(TypeVisitor[List[Constraint]]):\n \"\"\"Visitor class for inferring type constraints.\"\"\"\n\n # The type that is compared against a template\n # TODO: The value may be None. 
Is that actually correct?\n actual = None # type: Type\n\n def __init__(self, actual: Type, direction: int) -> None:\n # Direction must be SUBTYPE_OF or SUPERTYPE_OF.\n self.actual = actual\n self.direction = direction\n\n # Trivial leaf types\n\n def visit_unbound_type(self, template: UnboundType) -> List[Constraint]:\n return []\n\n def visit_any(self, template: AnyType) -> List[Constraint]:\n return []\n\n def visit_none_type(self, template: NoneTyp) -> List[Constraint]:\n return []\n\n def visit_uninhabited_type(self, template: UninhabitedType) -> List[Constraint]:\n return []\n\n def visit_erased_type(self, template: ErasedType) -> List[Constraint]:\n return []\n\n def visit_deleted_type(self, template: DeletedType) -> List[Constraint]:\n return []\n\n # Errors\n\n def visit_partial_type(self, template: PartialType) -> List[Constraint]:\n # We can't do anything useful with a partial type here.\n assert False, \"Internal error\"\n\n # Non-trivial leaf type\n\n def visit_type_var(self, template: TypeVarType) -> List[Constraint]:\n assert False, (\"Unexpected TypeVarType in ConstraintBuilderVisitor\"\n \" (should have been handled in infer_constraints)\")\n\n # Non-leaf types\n\n def visit_instance(self, template: Instance) -> List[Constraint]:\n original_actual = actual = self.actual\n res = [] # type: List[Constraint]\n if isinstance(actual, CallableType) and actual.fallback is not None:\n actual = actual.fallback\n if isinstance(actual, TypedDictType):\n actual = actual.as_anonymous().fallback\n if isinstance(actual, Instance):\n instance = actual\n # We always try nominal inference if possible,\n # it is much faster than the structural one.\n if (self.direction == SUBTYPE_OF and\n template.type.has_base(instance.type.fullname())):\n mapped = map_instance_to_supertype(template, instance.type)\n for i in range(len(instance.args)):\n # The constraints for generic type parameters are\n # invariant. 
Include constraints from both directions\n # to achieve the effect.\n res.extend(infer_constraints(\n mapped.args[i], instance.args[i], self.direction))\n res.extend(infer_constraints(\n mapped.args[i], instance.args[i], neg_op(self.direction)))\n return res\n elif (self.direction == SUPERTYPE_OF and\n instance.type.has_base(template.type.fullname())):\n mapped = map_instance_to_supertype(instance, template.type)\n for j in range(len(template.args)):\n # The constraints for generic type parameters are\n # invariant.\n res.extend(infer_constraints(\n template.args[j], mapped.args[j], self.direction))\n res.extend(infer_constraints(\n template.args[j], mapped.args[j], neg_op(self.direction)))\n return res\n if (template.type.is_protocol and self.direction == SUPERTYPE_OF and\n # We avoid infinite recursion for structural subtypes by checking\n # whether this type already appeared in the inference chain.\n # This is a conservative way break the inference cycles.\n # It never produces any \"false\" constraints but gives up soon\n # on purely structural inference cycles, see #3829.\n not any(is_same_type(template, t) for t in template.type.inferring) and\n mypy.subtypes.is_subtype(instance, erase_typevars(template))):\n template.type.inferring.append(template)\n self.infer_constraints_from_protocol_members(res, instance, template,\n original_actual, template)\n template.type.inferring.pop()\n return res\n elif (instance.type.is_protocol and self.direction == SUBTYPE_OF and\n # We avoid infinite recursion for structural subtypes also here.\n not any(is_same_type(instance, i) for i in instance.type.inferring) and\n mypy.subtypes.is_subtype(erase_typevars(template), instance)):\n instance.type.inferring.append(instance)\n self.infer_constraints_from_protocol_members(res, instance, template,\n template, instance)\n instance.type.inferring.pop()\n return res\n if isinstance(actual, AnyType):\n # IDEA: Include both ways, i.e. add negation as well?\n return self.infer_against_any(template.args, actual)\n if (isinstance(actual, TupleType) and\n (is_named_instance(template, 'typing.Iterable') or\n is_named_instance(template, 'typing.Container') or\n is_named_instance(template, 'typing.Sequence') or\n is_named_instance(template, 'typing.Reversible'))\n and self.direction == SUPERTYPE_OF):\n for item in actual.items:\n cb = infer_constraints(template.args[0], item, SUPERTYPE_OF)\n res.extend(cb)\n return res\n elif (isinstance(actual, TupleType) and template.type.is_protocol and\n self.direction == SUPERTYPE_OF):\n if mypy.subtypes.is_subtype(actual.fallback, erase_typevars(template)):\n res.extend(infer_constraints(template, actual.fallback, self.direction))\n return res\n return []\n else:\n return []\n\n def infer_constraints_from_protocol_members(self, res: List[Constraint],\n instance: Instance, template: Instance,\n subtype: Type, protocol: Instance) -> None:\n \"\"\"Infer constraints for situations where either 'template' or 'instance' is a protocol.\n\n The 'protocol' is the one of two that is an instance of protocol type, 'subtype'\n is the type used to bind self during inference. 
Currently, we just infer constrains for\n every protocol member type (both ways for settable members).\n \"\"\"\n for member in protocol.type.protocol_members:\n inst = mypy.subtypes.find_member(member, instance, subtype)\n temp = mypy.subtypes.find_member(member, template, subtype)\n assert inst is not None and temp is not None\n # The above is safe since at this point we know that 'instance' is a subtype\n # of (erased) 'template', therefore it defines all protocol members\n res.extend(infer_constraints(temp, inst, self.direction))\n if (mypy.subtypes.IS_SETTABLE in\n mypy.subtypes.get_member_flags(member, protocol.type)):\n # Settable members are invariant, add opposite constraints\n res.extend(infer_constraints(temp, inst, neg_op(self.direction)))\n\n def visit_callable_type(self, template: CallableType) -> List[Constraint]:\n if isinstance(self.actual, CallableType):\n cactual = self.actual\n # FIX verify argument counts\n # FIX what if one of the functions is generic\n res = [] # type: List[Constraint]\n\n # We can't infer constraints from arguments if the template is Callable[..., T] (with\n # literal '...').\n if not template.is_ellipsis_args:\n # The lengths should match, but don't crash (it will error elsewhere).\n for t, a in zip(template.arg_types, cactual.arg_types):\n # Negate direction due to function argument type contravariance.\n res.extend(infer_constraints(t, a, neg_op(self.direction)))\n res.extend(infer_constraints(template.ret_type, cactual.ret_type,\n self.direction))\n return res\n elif isinstance(self.actual, AnyType):\n # FIX what if generic\n res = self.infer_against_any(template.arg_types, self.actual)\n any_type = AnyType(TypeOfAny.from_another_any, source_any=self.actual)\n res.extend(infer_constraints(template.ret_type, any_type, self.direction))\n return res\n elif isinstance(self.actual, Overloaded):\n return self.infer_against_overloaded(self.actual, template)\n elif isinstance(self.actual, TypeType):\n return infer_constraints(template.ret_type, self.actual.item, self.direction)\n elif isinstance(self.actual, Instance):\n # Instances with __call__ method defined are considered structural\n # subtypes of Callable with a compatible signature.\n call = mypy.subtypes.find_member('__call__', self.actual, self.actual)\n if call:\n return infer_constraints(template, call, self.direction)\n else:\n return []\n else:\n return []\n\n def infer_against_overloaded(self, overloaded: Overloaded,\n template: CallableType) -> List[Constraint]:\n # Create constraints by matching an overloaded type against a template.\n # This is tricky to do in general. We cheat by only matching against\n # the first overload item, and by only matching the return type. 
This\n # seems to work somewhat well, but we should really use a more\n # reliable technique.\n item = find_matching_overload_item(overloaded, template)\n return infer_constraints(template.ret_type, item.ret_type,\n self.direction)\n\n def visit_tuple_type(self, template: TupleType) -> List[Constraint]:\n actual = self.actual\n if isinstance(actual, TupleType) and len(actual.items) == len(template.items):\n res = [] # type: List[Constraint]\n for i in range(len(template.items)):\n res.extend(infer_constraints(template.items[i],\n actual.items[i],\n self.direction))\n return res\n elif isinstance(actual, AnyType):\n return self.infer_against_any(template.items, actual)\n else:\n return []\n\n def visit_typeddict_type(self, template: TypedDictType) -> List[Constraint]:\n actual = self.actual\n if isinstance(actual, TypedDictType):\n res = [] # type: List[Constraint]\n # NOTE: Non-matching keys are ignored. Compatibility is checked\n # elsewhere so this shouldn't be unsafe.\n for (item_name, template_item_type, actual_item_type) in template.zip(actual):\n res.extend(infer_constraints(template_item_type,\n actual_item_type,\n self.direction))\n return res\n elif isinstance(actual, AnyType):\n return self.infer_against_any(template.items.values(), actual)\n else:\n return []\n\n def visit_union_type(self, template: UnionType) -> List[Constraint]:\n assert False, (\"Unexpected UnionType in ConstraintBuilderVisitor\"\n \" (should have been handled in infer_constraints)\")\n\n def infer_against_any(self, types: Iterable[Type], any_type: AnyType) -> List[Constraint]:\n res = [] # type: List[Constraint]\n for t in types:\n res.extend(infer_constraints(t, any_type, self.direction))\n return res\n\n def visit_overloaded(self, template: Overloaded) -> List[Constraint]:\n res = [] # type: List[Constraint]\n for t in template.items():\n res.extend(infer_constraints(t, self.actual, self.direction))\n return res\n\n def visit_type_type(self, template: TypeType) -> List[Constraint]:\n if isinstance(self.actual, CallableType):\n return infer_constraints(template.item, self.actual.ret_type, self.direction)\n elif isinstance(self.actual, Overloaded):\n return infer_constraints(template.item, self.actual.items()[0].ret_type,\n self.direction)\n elif isinstance(self.actual, TypeType):\n return infer_constraints(template.item, self.actual.item, self.direction)\n else:\n return []\n\n\ndef neg_op(op: int) -> int:\n \"\"\"Map SubtypeOf to SupertypeOf and vice versa.\"\"\"\n\n if op == SUBTYPE_OF:\n return SUPERTYPE_OF\n elif op == SUPERTYPE_OF:\n return SUBTYPE_OF\n else:\n raise ValueError('Invalid operator {}'.format(op))\n\n\ndef find_matching_overload_item(overloaded: Overloaded, template: CallableType) -> CallableType:\n \"\"\"Disambiguate overload item against a template.\"\"\"\n items = overloaded.items()\n for item in items:\n # Return type may be indeterminate in the template, so ignore it when performing a\n # subtype check.\n if mypy.subtypes.is_callable_subtype(item, template, ignore_return=True):\n return item\n # Fall back to the first item if we can't find a match. This is totally arbitrary --\n # maybe we should just bail out at this point.\n return items[0]\n", "path": "mypy/constraints.py" } ]
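`infer_constraints` above does the real work of this module: it matches a template type that may contain type variables against an actual type and records `T <: X` / `T :> X` constraints, with `visit_instance` treating type arguments as invariant (constraints are added in both directions via `neg_op`). A self-contained toy sketch of that idea, using stand-in classes rather than mypy's real `Type` and `Constraint` machinery, makes the docstring's `(X[T], X[Y]) --> T <: Y and T :> Y` example concrete:

```python
# Toy illustration only: the classes below are stand-ins, not mypy's.
from dataclasses import dataclass

SUBTYPE_OF, SUPERTYPE_OF = 0, 1

@dataclass(frozen=True)
class TypeVar_:
    name: str

@dataclass(frozen=True)
class Instance_:
    name: str
    args: tuple = ()

@dataclass(frozen=True)
class Constraint_:
    var: str
    op: int          # SUBTYPE_OF or SUPERTYPE_OF
    target: object

def infer(template, actual, direction):
    # A bare type variable in the template yields one constraint directly.
    if isinstance(template, TypeVar_):
        return [Constraint_(template.name, direction, actual)]
    # Same generic class on both sides: type arguments are invariant, so
    # constrain each argument in both directions (what neg_op is for).
    if (isinstance(template, Instance_) and isinstance(actual, Instance_)
            and template.name == actual.name):
        out = []
        for t_arg, a_arg in zip(template.args, actual.args):
            out += infer(t_arg, a_arg, direction)
            out += infer(t_arg, a_arg, 1 - direction)
        return out
    return []

# "(X[T], X[Y]) --> T <: Y and T :> Y" from the docstring above:
T = TypeVar_("T")
print(infer(Instance_("X", (T,)), Instance_("X", (Instance_("Y"),)), SUPERTYPE_OF))
```

The real visitor naturally covers far more ground (unions, callables, protocols, overloads), but the overall shape is the same: recurse structurally over the two types and collect `Constraint` objects for a later solving step.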
[ { "content": "\"\"\"Type inference constraints.\"\"\"\n\nfrom typing import Iterable, List, Optional, Sequence\n\nfrom mypy import experiments\nfrom mypy.types import (\n CallableType, Type, TypeVisitor, UnboundType, AnyType, NoneTyp, TypeVarType, Instance,\n TupleType, TypedDictType, UnionType, Overloaded, ErasedType, PartialType, DeletedType,\n UninhabitedType, TypeType, TypeVarId, TypeQuery, is_named_instance, TypeOfAny\n)\nfrom mypy.maptype import map_instance_to_supertype\nfrom mypy import nodes\nimport mypy.subtypes\nfrom mypy.sametypes import is_same_type\nfrom mypy.erasetype import erase_typevars\n\n\nSUBTYPE_OF = 0 # type: int\nSUPERTYPE_OF = 1 # type: int\n\n\nclass Constraint:\n \"\"\"A representation of a type constraint.\n\n It can be either T <: type or T :> type (T is a type variable).\n \"\"\"\n\n type_var = None # type: TypeVarId\n op = 0 # SUBTYPE_OF or SUPERTYPE_OF\n target = None # type: Type\n\n def __init__(self, type_var: TypeVarId, op: int, target: Type) -> None:\n self.type_var = type_var\n self.op = op\n self.target = target\n\n def __repr__(self) -> str:\n op_str = '<:'\n if self.op == SUPERTYPE_OF:\n op_str = ':>'\n return '{} {} {}'.format(self.type_var, op_str, self.target)\n\n\ndef infer_constraints_for_callable(\n callee: CallableType, arg_types: Sequence[Optional[Type]], arg_kinds: List[int],\n formal_to_actual: List[List[int]]) -> List[Constraint]:\n \"\"\"Infer type variable constraints for a callable and actual arguments.\n\n Return a list of constraints.\n \"\"\"\n constraints = [] # type: List[Constraint]\n tuple_counter = [0]\n\n for i, actuals in enumerate(formal_to_actual):\n for actual in actuals:\n actual_arg_type = arg_types[actual]\n if actual_arg_type is None:\n continue\n\n actual_type = get_actual_type(actual_arg_type, arg_kinds[actual],\n tuple_counter)\n c = infer_constraints(callee.arg_types[i], actual_type,\n SUPERTYPE_OF)\n constraints.extend(c)\n\n return constraints\n\n\ndef get_actual_type(arg_type: Type, kind: int,\n tuple_counter: List[int]) -> Type:\n \"\"\"Return the type of an actual argument with the given kind.\n\n If the argument is a *arg, return the individual argument item.\n \"\"\"\n\n if kind == nodes.ARG_STAR:\n if isinstance(arg_type, Instance):\n if arg_type.type.fullname() == 'builtins.list':\n # List *arg.\n return arg_type.args[0]\n elif arg_type.args:\n # TODO try to map type arguments to Iterable\n return arg_type.args[0]\n else:\n return AnyType(TypeOfAny.from_error)\n elif isinstance(arg_type, TupleType):\n # Get the next tuple item of a tuple *arg.\n tuple_counter[0] += 1\n return arg_type.items[tuple_counter[0] - 1]\n else:\n return AnyType(TypeOfAny.from_error)\n elif kind == nodes.ARG_STAR2:\n if isinstance(arg_type, Instance) and (arg_type.type.fullname() == 'builtins.dict'):\n # Dict **arg. TODO more general (Mapping)\n return arg_type.args[1]\n else:\n return AnyType(TypeOfAny.from_error)\n else:\n # No translation for other kinds.\n return arg_type\n\n\ndef infer_constraints(template: Type, actual: Type,\n direction: int) -> List[Constraint]:\n \"\"\"Infer type constraints.\n\n Match a template type, which may contain type variable references,\n recursively against a type which does not contain (the same) type\n variable references. The result is a list of type constrains of\n form 'T is a supertype/subtype of x', where T is a type variable\n present in the template and x is a type without reference to type\n variables present in the template.\n\n Assume T and S are type variables. 
Now the following results can be\n calculated (read as '(template, actual) --> result'):\n\n (T, X) --> T :> X\n (X[T], X[Y]) --> T <: Y and T :> Y\n ((T, T), (X, Y)) --> T :> X and T :> Y\n ((T, S), (X, Y)) --> T :> X and S :> Y\n (X[T], Any) --> T <: Any and T :> Any\n\n The constraints are represented as Constraint objects.\n \"\"\"\n\n # If the template is simply a type variable, emit a Constraint directly.\n # We need to handle this case before handling Unions for two reasons:\n # 1. \"T <: Union[U1, U2]\" is not equivalent to \"T <: U1 or T <: U2\",\n # because T can itself be a union (notably, Union[U1, U2] itself).\n # 2. \"T :> Union[U1, U2]\" is logically equivalent to \"T :> U1 and\n # T :> U2\", but they are not equivalent to the constraint solver,\n # which never introduces new Union types (it uses join() instead).\n if isinstance(template, TypeVarType):\n return [Constraint(template.id, direction, actual)]\n\n # Now handle the case of either template or actual being a Union.\n # For a Union to be a subtype of another type, every item of the Union\n # must be a subtype of that type, so concatenate the constraints.\n if direction == SUBTYPE_OF and isinstance(template, UnionType):\n res = []\n for t_item in template.items:\n res.extend(infer_constraints(t_item, actual, direction))\n return res\n if direction == SUPERTYPE_OF and isinstance(actual, UnionType):\n res = []\n for a_item in actual.items:\n res.extend(infer_constraints(template, a_item, direction))\n return res\n\n # Now the potential subtype is known not to be a Union or a type\n # variable that we are solving for. In that case, for a Union to\n # be a supertype of the potential subtype, some item of the Union\n # must be a supertype of it.\n if direction == SUBTYPE_OF and isinstance(actual, UnionType):\n # If some of items is not a complete type, disregard that.\n items = simplify_away_incomplete_types(actual.items)\n # We infer constraints eagerly -- try to find constraints for a type\n # variable if possible. This seems to help with some real-world\n # use cases.\n return any_constraints(\n [infer_constraints_if_possible(template, a_item, direction)\n for a_item in items],\n eager=True)\n if direction == SUPERTYPE_OF and isinstance(template, UnionType):\n # When the template is a union, we are okay with leaving some\n # type variables indeterminate. 
This helps with some special\n # cases, though this isn't very principled.\n return any_constraints(\n [infer_constraints_if_possible(t_item, actual, direction)\n for t_item in template.items],\n eager=False)\n\n # Remaining cases are handled by ConstraintBuilderVisitor.\n return template.accept(ConstraintBuilderVisitor(actual, direction))\n\n\ndef infer_constraints_if_possible(template: Type, actual: Type,\n direction: int) -> Optional[List[Constraint]]:\n \"\"\"Like infer_constraints, but return None if the input relation is\n known to be unsatisfiable, for example if template=List[T] and actual=int.\n (In this case infer_constraints would return [], just like it would for\n an automatically satisfied relation like template=List[T] and actual=object.)\n \"\"\"\n if (direction == SUBTYPE_OF and\n not mypy.subtypes.is_subtype(erase_typevars(template), actual)):\n return None\n if (direction == SUPERTYPE_OF and\n not mypy.subtypes.is_subtype(actual, erase_typevars(template))):\n return None\n return infer_constraints(template, actual, direction)\n\n\ndef any_constraints(options: List[Optional[List[Constraint]]], eager: bool) -> List[Constraint]:\n \"\"\"Deduce what we can from a collection of constraint lists.\n\n It's a given that at least one of the lists must be satisfied. A\n None element in the list of options represents an unsatisfiable\n constraint and is ignored. Ignore empty constraint lists if eager\n is true -- they are always trivially satisfiable.\n \"\"\"\n if eager:\n valid_options = [option for option in options if option]\n else:\n valid_options = [option for option in options if option is not None]\n if len(valid_options) == 1:\n return valid_options[0]\n elif (len(valid_options) > 1 and\n all(is_same_constraints(valid_options[0], c)\n for c in valid_options[1:])):\n # Multiple sets of constraints that are all the same. Just pick any one of them.\n # TODO: More generally, if a given (variable, direction) pair appears in\n # every option, combine the bounds with meet/join.\n return valid_options[0]\n\n # Otherwise, there are either no valid options or multiple, inconsistent valid\n # options. Give up and deduce nothing.\n return []\n\n\ndef is_same_constraints(x: List[Constraint], y: List[Constraint]) -> bool:\n for c1 in x:\n if not any(is_same_constraint(c1, c2) for c2 in y):\n return False\n for c1 in y:\n if not any(is_same_constraint(c1, c2) for c2 in x):\n return False\n return True\n\n\ndef is_same_constraint(c1: Constraint, c2: Constraint) -> bool:\n return (c1.type_var == c2.type_var\n and c1.op == c2.op\n and is_same_type(c1.target, c2.target))\n\n\ndef simplify_away_incomplete_types(types: List[Type]) -> List[Type]:\n complete = [typ for typ in types if is_complete_type(typ)]\n if complete:\n return complete\n else:\n return types\n\n\ndef is_complete_type(typ: Type) -> bool:\n \"\"\"Is a type complete?\n\n A complete doesn't have uninhabited type components or (when not in strict\n optional mode) None components.\n \"\"\"\n return typ.accept(CompleteTypeVisitor())\n\n\nclass CompleteTypeVisitor(TypeQuery[bool]):\n def __init__(self) -> None:\n super().__init__(all)\n\n def visit_uninhabited_type(self, t: UninhabitedType) -> bool:\n return False\n\n\nclass ConstraintBuilderVisitor(TypeVisitor[List[Constraint]]):\n \"\"\"Visitor class for inferring type constraints.\"\"\"\n\n # The type that is compared against a template\n # TODO: The value may be None. 
Is that actually correct?\n actual = None # type: Type\n\n def __init__(self, actual: Type, direction: int) -> None:\n # Direction must be SUBTYPE_OF or SUPERTYPE_OF.\n self.actual = actual\n self.direction = direction\n\n # Trivial leaf types\n\n def visit_unbound_type(self, template: UnboundType) -> List[Constraint]:\n return []\n\n def visit_any(self, template: AnyType) -> List[Constraint]:\n return []\n\n def visit_none_type(self, template: NoneTyp) -> List[Constraint]:\n return []\n\n def visit_uninhabited_type(self, template: UninhabitedType) -> List[Constraint]:\n return []\n\n def visit_erased_type(self, template: ErasedType) -> List[Constraint]:\n return []\n\n def visit_deleted_type(self, template: DeletedType) -> List[Constraint]:\n return []\n\n # Errors\n\n def visit_partial_type(self, template: PartialType) -> List[Constraint]:\n # We can't do anything useful with a partial type here.\n assert False, \"Internal error\"\n\n # Non-trivial leaf type\n\n def visit_type_var(self, template: TypeVarType) -> List[Constraint]:\n assert False, (\"Unexpected TypeVarType in ConstraintBuilderVisitor\"\n \" (should have been handled in infer_constraints)\")\n\n # Non-leaf types\n\n def visit_instance(self, template: Instance) -> List[Constraint]:\n original_actual = actual = self.actual\n res = [] # type: List[Constraint]\n if isinstance(actual, CallableType) and actual.fallback is not None:\n actual = actual.fallback\n if isinstance(actual, TypedDictType):\n actual = actual.as_anonymous().fallback\n if isinstance(actual, Instance):\n instance = actual\n # We always try nominal inference if possible,\n # it is much faster than the structural one.\n if (self.direction == SUBTYPE_OF and\n template.type.has_base(instance.type.fullname())):\n mapped = map_instance_to_supertype(template, instance.type)\n for i in range(len(instance.args)):\n # The constraints for generic type parameters are\n # invariant. 
Include constraints from both directions\n # to achieve the effect.\n res.extend(infer_constraints(\n mapped.args[i], instance.args[i], self.direction))\n res.extend(infer_constraints(\n mapped.args[i], instance.args[i], neg_op(self.direction)))\n return res\n elif (self.direction == SUPERTYPE_OF and\n instance.type.has_base(template.type.fullname())):\n mapped = map_instance_to_supertype(instance, template.type)\n for j in range(len(template.args)):\n # The constraints for generic type parameters are\n # invariant.\n res.extend(infer_constraints(\n template.args[j], mapped.args[j], self.direction))\n res.extend(infer_constraints(\n template.args[j], mapped.args[j], neg_op(self.direction)))\n return res\n if (template.type.is_protocol and self.direction == SUPERTYPE_OF and\n # We avoid infinite recursion for structural subtypes by checking\n # whether this type already appeared in the inference chain.\n # This is a conservative way break the inference cycles.\n # It never produces any \"false\" constraints but gives up soon\n # on purely structural inference cycles, see #3829.\n not any(is_same_type(template, t) for t in template.type.inferring) and\n mypy.subtypes.is_subtype(instance, erase_typevars(template))):\n template.type.inferring.append(template)\n self.infer_constraints_from_protocol_members(res, instance, template,\n original_actual, template)\n template.type.inferring.pop()\n return res\n elif (instance.type.is_protocol and self.direction == SUBTYPE_OF and\n # We avoid infinite recursion for structural subtypes also here.\n not any(is_same_type(instance, i) for i in instance.type.inferring) and\n mypy.subtypes.is_subtype(erase_typevars(template), instance)):\n instance.type.inferring.append(instance)\n self.infer_constraints_from_protocol_members(res, instance, template,\n template, instance)\n instance.type.inferring.pop()\n return res\n if isinstance(actual, AnyType):\n # IDEA: Include both ways, i.e. add negation as well?\n return self.infer_against_any(template.args, actual)\n if (isinstance(actual, TupleType) and\n (is_named_instance(template, 'typing.Iterable') or\n is_named_instance(template, 'typing.Container') or\n is_named_instance(template, 'typing.Sequence') or\n is_named_instance(template, 'typing.Reversible'))\n and self.direction == SUPERTYPE_OF):\n for item in actual.items:\n cb = infer_constraints(template.args[0], item, SUPERTYPE_OF)\n res.extend(cb)\n return res\n elif (isinstance(actual, TupleType) and template.type.is_protocol and\n self.direction == SUPERTYPE_OF):\n if mypy.subtypes.is_subtype(actual.fallback, erase_typevars(template)):\n res.extend(infer_constraints(template, actual.fallback, self.direction))\n return res\n return []\n else:\n return []\n\n def infer_constraints_from_protocol_members(self, res: List[Constraint],\n instance: Instance, template: Instance,\n subtype: Type, protocol: Instance) -> None:\n \"\"\"Infer constraints for situations where either 'template' or 'instance' is a protocol.\n\n The 'protocol' is the one of two that is an instance of protocol type, 'subtype'\n is the type used to bind self during inference. 
Currently, we just infer constrains for\n every protocol member type (both ways for settable members).\n \"\"\"\n for member in protocol.type.protocol_members:\n inst = mypy.subtypes.find_member(member, instance, subtype)\n temp = mypy.subtypes.find_member(member, template, subtype)\n assert inst is not None and temp is not None\n # The above is safe since at this point we know that 'instance' is a subtype\n # of (erased) 'template', therefore it defines all protocol members\n res.extend(infer_constraints(temp, inst, self.direction))\n if (mypy.subtypes.IS_SETTABLE in\n mypy.subtypes.get_member_flags(member, protocol.type)):\n # Settable members are invariant, add opposite constraints\n res.extend(infer_constraints(temp, inst, neg_op(self.direction)))\n\n def visit_callable_type(self, template: CallableType) -> List[Constraint]:\n if isinstance(self.actual, CallableType):\n cactual = self.actual\n # FIX verify argument counts\n # FIX what if one of the functions is generic\n res = [] # type: List[Constraint]\n\n # We can't infer constraints from arguments if the template is Callable[..., T] (with\n # literal '...').\n if not template.is_ellipsis_args:\n # The lengths should match, but don't crash (it will error elsewhere).\n for t, a in zip(template.arg_types, cactual.arg_types):\n # Negate direction due to function argument type contravariance.\n res.extend(infer_constraints(t, a, neg_op(self.direction)))\n res.extend(infer_constraints(template.ret_type, cactual.ret_type,\n self.direction))\n return res\n elif isinstance(self.actual, AnyType):\n # FIX what if generic\n res = self.infer_against_any(template.arg_types, self.actual)\n any_type = AnyType(TypeOfAny.from_another_any, source_any=self.actual)\n res.extend(infer_constraints(template.ret_type, any_type, self.direction))\n return res\n elif isinstance(self.actual, Overloaded):\n return self.infer_against_overloaded(self.actual, template)\n elif isinstance(self.actual, TypeType):\n return infer_constraints(template.ret_type, self.actual.item, self.direction)\n elif isinstance(self.actual, Instance):\n # Instances with __call__ method defined are considered structural\n # subtypes of Callable with a compatible signature.\n call = mypy.subtypes.find_member('__call__', self.actual, self.actual)\n if call:\n return infer_constraints(template, call, self.direction)\n else:\n return []\n else:\n return []\n\n def infer_against_overloaded(self, overloaded: Overloaded,\n template: CallableType) -> List[Constraint]:\n # Create constraints by matching an overloaded type against a template.\n # This is tricky to do in general. We cheat by only matching against\n # the first overload item, and by only matching the return type. 
This\n # seems to work somewhat well, but we should really use a more\n # reliable technique.\n item = find_matching_overload_item(overloaded, template)\n return infer_constraints(template.ret_type, item.ret_type,\n self.direction)\n\n def visit_tuple_type(self, template: TupleType) -> List[Constraint]:\n actual = self.actual\n if isinstance(actual, TupleType) and len(actual.items) == len(template.items):\n res = [] # type: List[Constraint]\n for i in range(len(template.items)):\n res.extend(infer_constraints(template.items[i],\n actual.items[i],\n self.direction))\n return res\n elif isinstance(actual, AnyType):\n return self.infer_against_any(template.items, actual)\n else:\n return []\n\n def visit_typeddict_type(self, template: TypedDictType) -> List[Constraint]:\n actual = self.actual\n if isinstance(actual, TypedDictType):\n res = [] # type: List[Constraint]\n # NOTE: Non-matching keys are ignored. Compatibility is checked\n # elsewhere so this shouldn't be unsafe.\n for (item_name, template_item_type, actual_item_type) in template.zip(actual):\n res.extend(infer_constraints(template_item_type,\n actual_item_type,\n self.direction))\n return res\n elif isinstance(actual, AnyType):\n return self.infer_against_any(template.items.values(), actual)\n else:\n return []\n\n def visit_union_type(self, template: UnionType) -> List[Constraint]:\n assert False, (\"Unexpected UnionType in ConstraintBuilderVisitor\"\n \" (should have been handled in infer_constraints)\")\n\n def infer_against_any(self, types: Iterable[Type], any_type: AnyType) -> List[Constraint]:\n res = [] # type: List[Constraint]\n for t in types:\n res.extend(infer_constraints(t, any_type, self.direction))\n return res\n\n def visit_overloaded(self, template: Overloaded) -> List[Constraint]:\n res = [] # type: List[Constraint]\n for t in template.items():\n res.extend(infer_constraints(t, self.actual, self.direction))\n return res\n\n def visit_type_type(self, template: TypeType) -> List[Constraint]:\n if isinstance(self.actual, CallableType):\n return infer_constraints(template.item, self.actual.ret_type, self.direction)\n elif isinstance(self.actual, Overloaded):\n return infer_constraints(template.item, self.actual.items()[0].ret_type,\n self.direction)\n elif isinstance(self.actual, TypeType):\n return infer_constraints(template.item, self.actual.item, self.direction)\n elif isinstance(self.actual, AnyType):\n return infer_constraints(template.item, self.actual, self.direction)\n else:\n return []\n\n\ndef neg_op(op: int) -> int:\n \"\"\"Map SubtypeOf to SupertypeOf and vice versa.\"\"\"\n\n if op == SUBTYPE_OF:\n return SUPERTYPE_OF\n elif op == SUPERTYPE_OF:\n return SUBTYPE_OF\n else:\n raise ValueError('Invalid operator {}'.format(op))\n\n\ndef find_matching_overload_item(overloaded: Overloaded, template: CallableType) -> CallableType:\n \"\"\"Disambiguate overload item against a template.\"\"\"\n items = overloaded.items()\n for item in items:\n # Return type may be indeterminate in the template, so ignore it when performing a\n # subtype check.\n if mypy.subtypes.is_callable_subtype(item, template, ignore_return=True):\n return item\n # Fall back to the first item if we can't find a match. This is totally arbitrary --\n # maybe we should just bail out at this point.\n return items[0]\n", "path": "mypy/constraints.py" } ]
diff --git a/mypy/constraints.py b/mypy/constraints.py
index 4e7c2ecba269..0a79483b235b 100644
--- a/mypy/constraints.py
+++ b/mypy/constraints.py
@@ -506,6 +506,8 @@ def visit_type_type(self, template: TypeType) -> List[Constraint]:
                                      self.direction)
         elif isinstance(self.actual, TypeType):
             return infer_constraints(template.item, self.actual.item, self.direction)
+        elif isinstance(self.actual, AnyType):
+            return infer_constraints(template.item, self.actual, self.direction)
         else:
             return []
 
diff --git a/test-data/unit/check-generics.test b/test-data/unit/check-generics.test
index 6e8fcd43e6e4..b4a5050c431a 100644
--- a/test-data/unit/check-generics.test
+++ b/test-data/unit/check-generics.test
@@ -1747,3 +1747,13 @@ class D:
     def __or__(self, x: G[X]) -> G[X]: pass
     def __ior__(self, x: G[S2]) -> G[S2]: pass \
 # E: Signatures of "__ior__" and "__or__" are incompatible
+
+[case testConstraintInferenceForAnyAgainstTypeT]
+from typing import Type, Any, TypeVar
+
+T = TypeVar('T')
+
+def f(c: Type[T]) -> T: ...
+
+x: Any
+reveal_type(f(x)) # E: Revealed type is 'Any'
No constraint inferred for Any vs. Type[T]

I'd expect the revealed type to be `Any` instead of `<nothing>` for this example:

```py
from typing import Type, Any, TypeVar

T = TypeVar('T')

def f(c: Type[T]) -> T: ...

x: Any
reveal_type(f(x)) # <nothing>
```

It looks like constraint inference doesn't work correctly when matching `Any` against `Type[T]`.
elastic__helm-charts-516
[ { "content": "import tempfile\nimport yaml\nimport os\nimport json\nfrom subprocess import check_output\n\n\ndef helm_template(config):\n with tempfile.NamedTemporaryFile() as temp:\n with open(temp.name, \"w\") as values:\n values.write(config)\n helm_cmd = \"helm template -f {0} --namespace default ./\".format(temp.name)\n result = yaml.load_all(check_output(helm_cmd.split()))\n\n results = {}\n for r in result:\n if r:\n kind = r[\"kind\"].lower()\n if kind not in results:\n results[kind] = {}\n results[kind][r[\"metadata\"][\"name\"]] = r\n\n if os.environ.get(\"DEBUG\"):\n print(json.dumps(results, indent=4, sort_keys=True))\n return results\n", "path": "helpers/helpers.py" } ]
[ { "content": "import tempfile\nimport yaml\nimport os\nimport json\nfrom subprocess import check_output\n\n\ndef helm_template(config):\n with tempfile.NamedTemporaryFile() as temp:\n with open(temp.name, \"w\") as values:\n values.write(config)\n helm_cmd = \"helm template release-name -f {0} ./\".format(temp.name)\n result = yaml.load_all(check_output(helm_cmd.split()))\n\n results = {}\n for r in result:\n if r:\n kind = r[\"kind\"].lower()\n if kind not in results:\n results[kind] = {}\n results[kind][r[\"metadata\"][\"name\"]] = r\n\n if os.environ.get(\"DEBUG\"):\n print(json.dumps(results, indent=4, sort_keys=True))\n return results\n", "path": "helpers/helpers.py" } ]
diff --git a/README.md b/README.md index d1e245f91..d40991f46 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,6 @@ - [Support Matrix](#support-matrix) - [Kubernetes Versions](#kubernetes-versions) - [Helm versions](#helm-versions) - - [Helm 3 beta](#helm-3-beta) - [ECK](#eck) <!-- END doctoc generated TOC please keep comment here to allow auto update --> @@ -77,13 +76,8 @@ exact versions are defined under `KUBERNETES_VERSIONS` in ### Helm versions While we are checking backward compatibility, the charts are only tested with -Helm version mentioned in [helm-tester Dockerfile][] (currently 2.17.0). +Helm version mentioned in [helm-tester Dockerfile][] (currently 3.4.1). -#### Helm 3 beta - -While we don't have automated tests for [Helm 3][] yet, we fixed the main -blockers to use it. We now have enough feedbacks from internal and external -users to add support in beta. ## ECK @@ -97,7 +91,6 @@ Kubernetes. [elastic cloud on kubernetes]: https://github.com/elastic/cloud-on-k8s [elastic helm repo]: https://helm.elastic.co [github releases]: https://github.com/elastic/helm-charts/releases -[helm 3]: https://v3.helm.sh [helm-tester Dockerfile]: https://github.com/elastic/helm-charts/blob/master/helpers/helm-tester/Dockerfile [helpers/matrix.yml]: https://github.com/elastic/helm-charts/blob/master/helpers/matrix.yml [operator pattern]: https://kubernetes.io/docs/concepts/extend-kubernetes/operator/ diff --git a/apm-server/README.md b/apm-server/README.md index f95914a06..d513eb3f2 100644 --- a/apm-server/README.md +++ b/apm-server/README.md @@ -48,8 +48,8 @@ See [supported configurations][] for more details. `helm repo add elastic https://helm.elastic.co` * Install it: - - with Helm 2: `helm install --name apm-server elastic/apm-server` - - with [Helm 3 (beta)][]: `helm install apm-server elastic/apm-server` + - with Helm 3: `helm install apm-server elastic/apm-server` + - with Helm 2 (deprecated): `helm install --name apm-server elastic/apm-server` ### Install development version using master branch @@ -57,8 +57,8 @@ See [supported configurations][] for more details. * Clone the git repo: `git clone [email protected]:elastic/helm-charts.git` * Install it: - - with Helm 2: `helm install --name apm-server ./helm-charts/apm-server --set imageTag=8.0.0-SNAPSHOT` - - with [Helm 3 (beta)][]: `helm install apm-server ./helm-charts/apm-server --set imageTag=8.0.0-SNAPSHOT` + - with Helm 3: `helm install apm-server ./helm-charts/apm-server --set imageTag=8.0.0-SNAPSHOT` + - with Helm 2 (deprecated): `helm install --name apm-server ./helm-charts/apm-server --set imageTag=8.0.0-SNAPSHOT` ## Upgrading @@ -160,7 +160,6 @@ about our development and testing process. 
[examples/oss]: https://github.com/elastic/helm-charts/tree/master/apm-server/examples/oss [examples/security]: https://github.com/elastic/helm-charts/tree/master/apm-server/examples/security [helm]: https://helm.sh -[helm 3 (beta)]: https://github.com/elastic/helm-charts/tree/master/README.md#helm-3-beta [horizontal pod autoscaler]: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ [imagePullPolicy]: https://kubernetes.io/docs/concepts/containers/images/#updating-images [imagePullSecrets]: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-pod-that-uses-your-secret diff --git a/apm-server/examples/default/Makefile b/apm-server/examples/default/Makefile index d4638cb2e..57c4116d2 100644 --- a/apm-server/examples/default/Makefile +++ b/apm-server/examples/default/Makefile @@ -5,9 +5,9 @@ include ../../../helpers/examples.mk RELEASE := helm-apm-server-default install: - helm upgrade --wait --timeout=900 --install $(RELEASE) ../../ + helm upgrade --wait --timeout=900s --install $(RELEASE) ../../ test: install goss purge: - helm del --purge $(RELEASE) + helm del $(RELEASE) diff --git a/apm-server/examples/oss/Makefile b/apm-server/examples/oss/Makefile index b82ea5a43..61ad62b28 100644 --- a/apm-server/examples/oss/Makefile +++ b/apm-server/examples/oss/Makefile @@ -5,9 +5,9 @@ include ../../../helpers/examples.mk RELEASE := helm-apm-server-oss install: - helm upgrade --wait --timeout=900 --install $(RELEASE) --values values.yaml ../../ + helm upgrade --wait --timeout=900s --install $(RELEASE) --values values.yaml ../../ test: install goss purge: - helm del --purge $(RELEASE) + helm del $(RELEASE) diff --git a/apm-server/examples/security/Makefile b/apm-server/examples/security/Makefile index 3afb654d7..de704f595 100644 --- a/apm-server/examples/security/Makefile +++ b/apm-server/examples/security/Makefile @@ -5,9 +5,9 @@ include ../../../helpers/examples.mk RELEASE := helm-apm-server-security install: - helm upgrade --wait --timeout=900 --install $(RELEASE) --values values.yaml ../../ + helm upgrade --wait --timeout=900s --install $(RELEASE) --values values.yaml ../../ test: install goss purge: - helm del --purge $(RELEASE) + helm del $(RELEASE) diff --git a/apm-server/templates/_helpers.tpl b/apm-server/templates/_helpers.tpl index abc1361ea..6f64c4b38 100755 --- a/apm-server/templates/_helpers.tpl +++ b/apm-server/templates/_helpers.tpl @@ -23,14 +23,14 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this Return the appropriate apiVersion for ingress. */}} {{- define "apm.ingress.apiVersion" -}} -{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.Version -}} {{- print "extensions/v1beta1" -}} {{- else -}} {{- print "networking.k8s.io/v1beta1" -}} {{- end -}} {{- end -}} {{- define "apm.autoscaling.apiVersion" -}} -{{- if semverCompare "<1.12-0" .Capabilities.KubeVersion.GitVersion -}} +{{- if semverCompare "<1.12-0" .Capabilities.KubeVersion.Version -}} {{- print "autoscaling/v2beta1" -}} {{- else -}} {{- print "autoscaling/v2beta2" -}} diff --git a/elasticsearch/README.md b/elasticsearch/README.md index b5d4f2a52..8ef7db063 100644 --- a/elasticsearch/README.md +++ b/elasticsearch/README.md @@ -59,8 +59,8 @@ See [supported configurations][] for more details. 
`helm repo add elastic https://helm.elastic.co` * Install it: - - with Helm 2: `helm install --name elasticsearch elastic/elasticsearch` - - with [Helm 3 (beta)][]: `helm install elasticsearch elastic/elasticsearch` + - with Helm 3: `helm install elasticsearch elastic/elasticsearch` + - with Helm 2 (deprecated): `helm install --name elasticsearch elastic/elasticsearch` ### Install development version using master branch @@ -68,8 +68,8 @@ See [supported configurations][] for more details. * Clone the git repo: `git clone [email protected]:elastic/helm-charts.git` * Install it: - - with Helm 2: `helm install --name elasticsearch ./helm-charts/elasticsearch --set imageTag=8.0.0-SNAPSHOT` - - with [Helm 3 (beta)][]: `helm install elasticsearch ./helm-charts/elasticsearch --set imageTag=8.0.0-SNAPSHOT` + - with Helm 3: `helm install elasticsearch ./helm-charts/elasticsearch --set imageTag=8.0.0-SNAPSHOT` + - with Helm 2 (deprecated): `helm install --name elasticsearch ./helm-charts/elasticsearch --set imageTag=8.0.0-SNAPSHOT` ## Upgrading @@ -413,7 +413,6 @@ about our development and testing process. [examples/security]: https://github.com/elastic/helm-charts/tree/master/elasticsearch/examples/security [gke]: https://cloud.google.com/kubernetes-engine [helm]: https://helm.sh -[helm 3 (beta)]: https://github.com/elastic/helm-charts/tree/master/README.md#helm-3-beta [helm/charts stable]: https://github.com/helm/charts/tree/master/stable/elasticsearch/ [how to install plugins guide]: https://github.com/elastic/helm-charts/tree/master/elasticsearch/README.md#how-to-install-plugins [how to use the keystore]: https://github.com/elastic/helm-charts/tree/master/elasticsearch/README.md#how-to-use-the-keystore diff --git a/elasticsearch/examples/config/Makefile b/elasticsearch/examples/config/Makefile index 502531378..9eee45df2 100644 --- a/elasticsearch/examples/config/Makefile +++ b/elasticsearch/examples/config/Makefile @@ -4,7 +4,7 @@ include ../../../helpers/examples.mk RELEASE := helm-es-config install: - helm upgrade --wait --timeout=900 --install $(RELEASE) --values ./values.yaml ../../ + helm upgrade --wait --timeout=900s --install $(RELEASE) --values ./values.yaml ../../ secrets: kubectl delete secret elastic-config-credentials elastic-config-secret elastic-config-slack elastic-config-custom-path || true @@ -16,4 +16,4 @@ secrets: test: secrets install goss purge: - helm del --purge $(RELEASE) + helm del $(RELEASE) diff --git a/elasticsearch/examples/default/Makefile b/elasticsearch/examples/default/Makefile index 15558c19a..beb90461d 100644 --- a/elasticsearch/examples/default/Makefile +++ b/elasticsearch/examples/default/Makefile @@ -5,12 +5,12 @@ include ../../../helpers/examples.mk RELEASE := helm-es-default install: - helm upgrade --wait --timeout=900 --install $(RELEASE) ../../ + helm upgrade --wait --timeout=900s --install $(RELEASE) ../../ restart: - helm upgrade --set terminationGracePeriod=121 --wait --timeout=900 --install $(RELEASE) ../../ + helm upgrade --set terminationGracePeriod=121 --wait --timeout=900s --install $(RELEASE) ../../ test: install goss purge: - helm del --purge $(RELEASE) + helm del $(RELEASE) diff --git a/elasticsearch/examples/docker-for-mac/Makefile b/elasticsearch/examples/docker-for-mac/Makefile index 99894063d..705b08f0b 100644 --- a/elasticsearch/examples/docker-for-mac/Makefile +++ b/elasticsearch/examples/docker-for-mac/Makefile @@ -3,10 +3,10 @@ default: test RELEASE := helm-es-docker-for-mac install: - helm upgrade --wait --timeout=1200 --install 
--values values.yaml $(RELEASE) ../../ + helm upgrade --wait --timeout=1200s --install --values values.yaml $(RELEASE) ../../ test: install helm test $(RELEASE) purge: - helm del --purge $(RELEASE) + helm del $(RELEASE) diff --git a/elasticsearch/examples/kubernetes-kind/Makefile b/elasticsearch/examples/kubernetes-kind/Makefile index f18f98668..3f6bdead9 100644 --- a/elasticsearch/examples/kubernetes-kind/Makefile +++ b/elasticsearch/examples/kubernetes-kind/Makefile @@ -3,14 +3,14 @@ default: test RELEASE := helm-es-kind install: - helm upgrade --wait --timeout=1200 --install --values values.yaml $(RELEASE) ../../ + helm upgrade --wait --timeout=1200s --install --values values.yaml $(RELEASE) ../../ install-local-path: kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml - helm upgrade --wait --timeout=1200 --install --values values-local-path.yaml $(RELEASE) ../../ + helm upgrade --wait --timeout=1200s --install --values values-local-path.yaml $(RELEASE) ../../ test: install helm test $(RELEASE) purge: - helm del --purge $(RELEASE) + helm del $(RELEASE) diff --git a/elasticsearch/examples/microk8s/Makefile b/elasticsearch/examples/microk8s/Makefile index 834b929e4..3ce3ebdef 100644 --- a/elasticsearch/examples/microk8s/Makefile +++ b/elasticsearch/examples/microk8s/Makefile @@ -3,10 +3,10 @@ default: test RELEASE := helm-es-microk8s install: - helm upgrade --wait --timeout=1200 --install --values values.yaml $(RELEASE) ../../ + helm upgrade --wait --timeout=1200s --install --values values.yaml $(RELEASE) ../../ test: install helm test $(RELEASE) purge: - helm del --purge $(RELEASE) + helm del $(RELEASE) diff --git a/elasticsearch/examples/migration/Makefile b/elasticsearch/examples/migration/Makefile index bcef97e6d..efd4abfc5 100644 --- a/elasticsearch/examples/migration/Makefile +++ b/elasticsearch/examples/migration/Makefile @@ -1,10 +1,10 @@ PREFIX := helm-es-migration data: - helm upgrade --wait --timeout=900 --install --values ./data.yml $(PREFIX)-data ../../ + helm upgrade --wait --timeout=900s --install --values ./data.yml $(PREFIX)-data ../../ master: - helm upgrade --wait --timeout=900 --install --values ./master.yml $(PREFIX)-master ../../ + helm upgrade --wait --timeout=900s --install --values ./master.yml $(PREFIX)-master ../../ client: - helm upgrade --wait --timeout=900 --install --values ./client.yml $(PREFIX)-client ../../ + helm upgrade --wait --timeout=900s --install --values ./client.yml $(PREFIX)-client ../../ diff --git a/elasticsearch/examples/minikube/Makefile b/elasticsearch/examples/minikube/Makefile index f8b7e7b6a..09b61f81d 100644 --- a/elasticsearch/examples/minikube/Makefile +++ b/elasticsearch/examples/minikube/Makefile @@ -3,10 +3,10 @@ default: test RELEASE := helm-es-minikube install: - helm upgrade --wait --timeout=1200 --install --values values.yaml $(RELEASE) ../../ + helm upgrade --wait --timeout=1200s --install --values values.yaml $(RELEASE) ../../ test: install helm test $(RELEASE) purge: - helm del --purge $(RELEASE) + helm del $(RELEASE) diff --git a/elasticsearch/examples/multi/Makefile b/elasticsearch/examples/multi/Makefile index 4e4835a03..bb3da7267 100644 --- a/elasticsearch/examples/multi/Makefile +++ b/elasticsearch/examples/multi/Makefile @@ -6,13 +6,13 @@ PREFIX := helm-es-multi RELEASE := helm-es-multi-master install: - helm upgrade --wait --timeout=900 --install --values ./master.yml $(PREFIX)-master ../../ - helm upgrade --wait --timeout=900 --install --values 
./data.yml $(PREFIX)-data ../../ - helm upgrade --wait --timeout=900 --install --values ./client.yml $(PREFIX)-client ../../ + helm upgrade --wait --timeout=900s --install --values ./master.yml $(PREFIX)-master ../../ + helm upgrade --wait --timeout=900s --install --values ./data.yml $(PREFIX)-data ../../ + helm upgrade --wait --timeout=900s --install --values ./client.yml $(PREFIX)-client ../../ test: install goss purge: - helm del --purge $(PREFIX)-master - helm del --purge $(PREFIX)-data - helm del --purge $(PREFIX)-client + helm del $(PREFIX)-master + helm del $(PREFIX)-data + helm del $(PREFIX)-client diff --git a/elasticsearch/examples/openshift/Makefile b/elasticsearch/examples/openshift/Makefile index 8d08d8959..653288854 100644 --- a/elasticsearch/examples/openshift/Makefile +++ b/elasticsearch/examples/openshift/Makefile @@ -7,9 +7,9 @@ template: helm template --values ./values.yaml ../../ install: - helm upgrade --wait --timeout=900 --install $(RELEASE) --values ./values.yaml ../../ + helm upgrade --wait --timeout=900s --install $(RELEASE) --values ./values.yaml ../../ test: install goss purge: - helm del --purge $(RELEASE) + helm del $(RELEASE) diff --git a/elasticsearch/examples/oss/Makefile b/elasticsearch/examples/oss/Makefile index fa69ddc2d..e57284176 100644 --- a/elasticsearch/examples/oss/Makefile +++ b/elasticsearch/examples/oss/Makefile @@ -4,9 +4,9 @@ include ../../../helpers/examples.mk RELEASE := helm-es-oss install: - helm upgrade --wait --timeout=900 --install $(RELEASE) --values ./values.yaml ../../ + helm upgrade --wait --timeout=900s --install $(RELEASE) --values ./values.yaml ../../ test: install goss purge: - helm del --purge $(RELEASE) + helm del $(RELEASE) diff --git a/elasticsearch/examples/security/Makefile b/elasticsearch/examples/security/Makefile index ef32cae8e..988d98e61 100644 --- a/elasticsearch/examples/security/Makefile +++ b/elasticsearch/examples/security/Makefile @@ -6,11 +6,11 @@ RELEASE := helm-es-security ELASTICSEARCH_IMAGE := docker.elastic.co/elasticsearch/elasticsearch:$(STACK_VERSION) install: - helm upgrade --wait --timeout=900 --install --values ./security.yml $(RELEASE) ../../ + helm upgrade --wait --timeout=900s --install --values ./security.yml $(RELEASE) ../../ purge: kubectl delete secrets elastic-credentials elastic-certificates elastic-certificate-pem elastic-certificate-crt|| true - helm del --purge $(RELEASE) + helm del $(RELEASE) test: secrets install goss diff --git a/elasticsearch/templates/NOTES.txt b/elasticsearch/templates/NOTES.txt index 3841adafc..73edf425a 100755 --- a/elasticsearch/templates/NOTES.txt +++ b/elasticsearch/templates/NOTES.txt @@ -1,4 +1,4 @@ 1. Watch all cluster members come up. $ kubectl get pods --namespace={{ .Release.Namespace }} -l app={{ template "elasticsearch.uname" . }} -w 2. Test cluster health using Helm test. - $ helm test {{ .Release.Name }} --cleanup + $ helm test {{ .Release.Name }} diff --git a/elasticsearch/templates/_helpers.tpl b/elasticsearch/templates/_helpers.tpl index 0b0fe9ae1..2a5d39497 100755 --- a/elasticsearch/templates/_helpers.tpl +++ b/elasticsearch/templates/_helpers.tpl @@ -68,7 +68,7 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this Return the appropriate apiVersion for statefulset. 
*/}} {{- define "elasticsearch.statefulset.apiVersion" -}} -{{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.GitVersion -}} +{{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.Version -}} {{- print "apps/v1beta2" -}} {{- else -}} {{- print "apps/v1" -}} @@ -79,7 +79,7 @@ Return the appropriate apiVersion for statefulset. Return the appropriate apiVersion for ingress. */}} {{- define "elasticsearch.ingress.apiVersion" -}} -{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.Version -}} {{- print "extensions/v1beta1" -}} {{- else -}} {{- print "networking.k8s.io/v1beta1" -}} diff --git a/elasticsearch/templates/statefulset.yaml b/elasticsearch/templates/statefulset.yaml index 0450a0cbc..892538638 100644 --- a/elasticsearch/templates/statefulset.yaml +++ b/elasticsearch/templates/statefulset.yaml @@ -28,7 +28,6 @@ spec: name: {{ template "elasticsearch.uname" . }} {{- if .Values.persistence.labels.enabled }} labels: - heritage: {{ .Release.Service | quote }} release: {{ .Release.Name | quote }} chart: "{{ .Chart.Name }}" app: "{{ template "elasticsearch.uname" . }}" @@ -47,7 +46,6 @@ spec: metadata: name: "{{ template "elasticsearch.uname" . }}" labels: - heritage: {{ .Release.Service | quote }} release: {{ .Release.Name | quote }} chart: "{{ .Chart.Name }}" app: "{{ template "elasticsearch.uname" . }}" @@ -154,7 +152,7 @@ spec: imagePullSecrets: {{ toYaml .Values.imagePullSecrets | indent 8 }} {{- end }} - {{- if semverCompare ">1.13-0" .Capabilities.KubeVersion.GitVersion }} + {{- if semverCompare ">1.13-0" .Capabilities.KubeVersion.Version }} enableServiceLinks: {{ .Values.enableServiceLinks }} {{- end }} initContainers: diff --git a/elasticsearch/tests/elasticsearch_test.py b/elasticsearch/tests/elasticsearch_test.py index 293453dd7..6ceec27db 100755 --- a/elasticsearch/tests/elasticsearch_test.py +++ b/elasticsearch/tests/elasticsearch_test.py @@ -445,7 +445,10 @@ def test_enabling_persistence_label_in_volumeclaimtemplate(): "volumeClaimTemplates" ][0]["metadata"]["labels"] statefulset_labels = r["statefulset"][uname]["metadata"]["labels"] - assert volume_claim_template_labels == statefulset_labels + expected_labels = statefulset_labels + # heritage label shouldn't be present in volumeClaimTemplates labels + expected_labels.pop("heritage") + assert volume_claim_template_labels == expected_labels def test_adding_a_secret_mount(): diff --git a/filebeat/README.md b/filebeat/README.md index bd137de32..fa82ce265 100644 --- a/filebeat/README.md +++ b/filebeat/README.md @@ -45,8 +45,8 @@ See [supported configurations][] for more details. `helm repo add elastic https://helm.elastic.co` * Install it: - - with Helm 2: `helm install --name filebeat elastic/filebeat` - - with [Helm 3 (beta)][]: `helm install filebeat elastic/filebeat` + - with Helm 3: `helm install filebeat elastic/filebeat` + - with Helm 2 (deprecated): `helm install --name filebeat elastic/filebeat` ### Install development version using master branch @@ -54,8 +54,8 @@ See [supported configurations][] for more details. 
* Clone the git repo: `git clone [email protected]:elastic/helm-charts.git` * Install it: - - with Helm 2: `helm install --name filebeat ./helm-charts/filebeat --set imageTag=8.0.0-SNAPSHOT` - - with [Helm 3 (beta)][]: `helm install filebeat ./helm-charts/filebeat --set imageTag=8.0.0-SNAPSHOT` + - with Helm 3: `helm install filebeat ./helm-charts/filebeat --set imageTag=8.0.0-SNAPSHOT` + - with Helm 2 (deprecated): `helm install --name filebeat ./helm-charts/filebeat --set imageTag=8.0.0-SNAPSHOT` ## Upgrading @@ -200,7 +200,6 @@ about our development and testing process. [filebeat oss docker image]: https://www.docker.elastic.co/r/beats/filebeat-oss [filebeat outputs]: https://www.elastic.co/guide/en/beats/filebeat/current/configuring-output.html [helm]: https://helm.sh -[helm 3 (beta)]: https://github.com/elastic/helm-charts/tree/master/README.md#helm-3-beta [hostNetwork]: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#host-namespaces [dnsConfig]: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ [hostPath]: https://kubernetes.io/docs/concepts/storage/volumes/#hostpath diff --git a/filebeat/examples/default/Makefile b/filebeat/examples/default/Makefile index 937cea681..e27affffc 100644 --- a/filebeat/examples/default/Makefile +++ b/filebeat/examples/default/Makefile @@ -5,9 +5,9 @@ include ../../../helpers/examples.mk RELEASE := helm-filebeat-default install: - helm upgrade --wait --timeout=900 --install $(RELEASE) ../../ + helm upgrade --wait --timeout=900s --install $(RELEASE) ../../ test: install goss purge: - helm del --purge $(RELEASE) + helm del $(RELEASE) diff --git a/filebeat/examples/oss/Makefile b/filebeat/examples/oss/Makefile index fa3d507af..455c2e63a 100644 --- a/filebeat/examples/oss/Makefile +++ b/filebeat/examples/oss/Makefile @@ -5,9 +5,9 @@ include ../../../helpers/examples.mk RELEASE := helm-filebeat-oss install: - helm upgrade --wait --timeout=900 --install $(RELEASE) --values values.yaml ../../ + helm upgrade --wait --timeout=900s --install $(RELEASE) --values values.yaml ../../ test: install goss purge: - helm del --purge $(RELEASE) + helm del $(RELEASE) diff --git a/filebeat/examples/security/Makefile b/filebeat/examples/security/Makefile index 602dc4ae2..50c0b5bed 100644 --- a/filebeat/examples/security/Makefile +++ b/filebeat/examples/security/Makefile @@ -5,9 +5,9 @@ include ../../../helpers/examples.mk RELEASE := helm-filebeat-security install: - helm upgrade --wait --timeout=900 --install $(RELEASE) --values values.yaml ../../ + helm upgrade --wait --timeout=900s --install $(RELEASE) --values values.yaml ../../ test: install goss purge: - helm del --purge $(RELEASE) + helm del $(RELEASE) diff --git a/helpers/common.mk b/helpers/common.mk index d98b0c61f..f7debead3 100644 --- a/helpers/common.mk +++ b/helpers/common.mk @@ -13,18 +13,10 @@ build: ## Build helm-tester docker image .PHONY: deps deps: ## Update helm charts dependencies - sed --in-place '/charts\//d' ./.helmignore helm dependency update -.PHONY: helm -helm: ## Deploy helm on k8s cluster - kubectl get cs - kubectl create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default || true - helm init --wait --upgrade - .PHONY: lint lint: ## Lint helm templates - grep 'charts/' ./.helmignore || echo 'charts/' >> ./.helmignore helm lint --strict ./ .PHONY: lint-python @@ -44,4 +36,4 @@ test: build ## Run all tests in a docker container docker run --rm -i --user "$$(id -u):$$(id -g)" -v $$(pwd)/../:/app -w 
/app/$$(basename $$(pwd)) helm-tester make test-all .PHONY: test-all ## Run all tests -test-all: lint deps template pytest +test-all: deps lint template pytest diff --git a/helpers/helm-tester/Dockerfile b/helpers/helm-tester/Dockerfile index b0cefa7b1..96ad4c483 100644 --- a/helpers/helm-tester/Dockerfile +++ b/helpers/helm-tester/Dockerfile @@ -1,13 +1,13 @@ FROM python:3.7 -ENV HELM_VERSION=2.17.0 +ENV HELM_VERSION=3.4.1 -RUN wget --no-verbose https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz && \ +RUN wget --no-verbose https://get.helm.sh/helm-v${HELM_VERSION}-linux-amd64.tar.gz && \ tar xfv helm-v${HELM_VERSION}-linux-amd64.tar.gz && \ mv linux-amd64/helm /usr/local/bin/ && \ - rm -rf linux-amd64 && \ - HOME=/ helm init --client-only && \ - chmod 777 -R /.helm + mkdir --parents --mode=777 /.config/helm && \ + HOME=/ helm repo add stable https://charts.helm.sh/stable && \ + rm -rf helm-v${HELM_VERSION}-linux-amd64.tar.gz linux-amd64 COPY requirements.txt /usr/src/app/ RUN pip install --no-cache-dir -r /usr/src/app/requirements.txt diff --git a/helpers/helpers.py b/helpers/helpers.py index 9af447a94..4cc96756e 100644 --- a/helpers/helpers.py +++ b/helpers/helpers.py @@ -9,7 +9,7 @@ def helm_template(config): with tempfile.NamedTemporaryFile() as temp: with open(temp.name, "w") as values: values.write(config) - helm_cmd = "helm template -f {0} --namespace default ./".format(temp.name) + helm_cmd = "helm template release-name -f {0} ./".format(temp.name) result = yaml.load_all(check_output(helm_cmd.split())) results = {} diff --git a/helpers/terraform/Dockerfile b/helpers/terraform/Dockerfile index 5f49abccd..3540dfb60 100644 --- a/helpers/terraform/Dockerfile +++ b/helpers/terraform/Dockerfile @@ -3,7 +3,7 @@ FROM centos:7 ENV VAULT_VERSION 0.9.3 ENV TERRAFORM_VERSION=0.11.7 ENV KUBECTL_VERSION=1.16.10 -ENV HELM_VERSION=2.17.0 +ENV HELM_VERSION=3.4.1 ENV DOCKER_VERSION=18.09.7 ENV JQ_VERSION=1.6 @@ -22,6 +22,7 @@ RUN yum -y install \ RUN curl -O https://releases.hashicorp.com/vault/${VAULT_VERSION}/vault_${VAULT_VERSION}_linux_amd64.zip && \ unzip vault_${VAULT_VERSION}_linux_amd64.zip -d /usr/local/bin/ && \ chmod +x /usr/local/bin/vault && \ + rm -f vault_${VAULT_VERSION}_linux_amd64.zip && \ vault version RUN curl -O https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip && \ @@ -34,17 +35,17 @@ RUN curl -O https://storage.googleapis.com/kubernetes-release/release/v${KUBECTL chmod a+x /usr/local/bin/kubectl && \ kubectl version --client -RUN curl -O https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz && \ +RUN curl -O https://get.helm.sh/helm-v${HELM_VERSION}-linux-amd64.tar.gz && \ tar xfv helm-v${HELM_VERSION}-linux-amd64.tar.gz && \ mv linux-amd64/helm /usr/local/bin/ && \ - rm -rf linux-amd64 && \ + rm -rf helm-v${HELM_VERSION}-linux-amd64.tar.gz linux-amd64 && \ helm version --client RUN curl -O https://download.docker.com/linux/static/stable/x86_64/docker-${DOCKER_VERSION}.tgz && \ tar xfv docker* && \ mv docker/docker /usr/local/bin && \ - rm -rf docker/ && \ - docker + rm -rf docker-${DOCKER_VERSION}.tgz docker/ && \ + docker -v RUN curl -O -L https://github.com/stedolan/jq/releases/download/jq-${JQ_VERSION}/jq-linux64 && \ mv jq-linux64 /usr/local/bin/jq && \ diff --git a/helpers/terraform/Makefile b/helpers/terraform/Makefile index a6dcff24b..8d448c3e3 100644 --- a/helpers/terraform/Makefile +++ b/helpers/terraform/Makefile @@ -65,8 +65,8 @@ creds: 
credentials.json ## Get gke credentials kubectl create namespace $(NAMESPACE) || true kubectl config set-context $$(kubectl config current-context) --namespace=$(NAMESPACE) -.PHONY: k8s -k8s: apply creds ## Configure gke cluster +.PHONY: up +up: apply creds ## Configure gke cluster kubectl get cs .PHONY: k8s-staging-registry @@ -77,15 +77,10 @@ k8s-staging-registry: creds ## Create the staging registry auth secret in k8s --docker-username="devops-ci" \ --docker-password="$$DOCKER_PASSWORD" -.PHONY: up -up: k8s ## Install helm on gke cluster - kubectl create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default || true - for i in 1 2 3 4 5; do helm init --wait --upgrade && break || sleep 5; done - .PHONY: integration integration: creds ## Deploy helm chart and run integration tests cd ../../$(CHART)/ && \ - helm init --client-only && \ + helm repo add stable https://charts.helm.sh/stable && \ helm dependency update && \ cd ./examples/$(SUITE) && \ make @@ -98,4 +93,3 @@ build: ## Build helm-charts docker image pull-private-images: ## Pull private images used in testing cd ../../elasticsearch/examples/security/ && \ make pull-elasticsearch-image - diff --git a/kibana/README.md b/kibana/README.md index ea2cc9d3a..283aa7b07 100644 --- a/kibana/README.md +++ b/kibana/README.md @@ -47,8 +47,8 @@ See [supported configurations][] for more details. `helm repo add elastic https://helm.elastic.co` * Install it: - - with Helm 2: `helm install --name kibana elastic/kibana` - - with [Helm 3 (beta)][]: `helm install kibana elastic/kibana` + - with Helm 3: `helm install kibana elastic/kibana` + - with Helm 2 (deprecated): `helm install --name kibana elastic/kibana` ### Install development version using master branch @@ -56,8 +56,8 @@ See [supported configurations][] for more details. * Clone the git repo: `git clone [email protected]:elastic/helm-charts.git` * Install it: - - with Helm 2: `helm install --name kibana ./helm-charts/kibana --set imageTag=8.0.0-SNAPSHOT` - - with [Helm 3 (beta)][]: `helm install kibana ./helm-charts/kibana --set imageTag=8.0.0-SNAPSHOT` + - with Helm 3: `helm install kibana ./helm-charts/kibana --set imageTag=8.0.0-SNAPSHOT` + - with Helm 2 (deprecated): `helm install --name kibana ./helm-charts/kibana --set imageTag=8.0.0-SNAPSHOT` ## Upgrading @@ -216,7 +216,6 @@ about our development and testing process. 
[examples/security]: https://github.com/elastic/helm-charts/tree/master/kibana/examples/security [gke]: https://cloud.google.com/kubernetes-engine [helm]: https://helm.sh -[helm 3 (beta)]: https://github.com/elastic/helm-charts/tree/master/README.md#helm-3-beta [imagePullPolicy]: https://kubernetes.io/docs/concepts/containers/images/#updating-images [imagePullSecrets]: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-pod-that-uses-your-secret [ingress]: https://kubernetes.io/docs/concepts/services-networking/ingress/ diff --git a/kibana/examples/default/Makefile b/kibana/examples/default/Makefile index ea15adfd0..4c6a6e6d8 100644 --- a/kibana/examples/default/Makefile +++ b/kibana/examples/default/Makefile @@ -5,9 +5,9 @@ RELEASE := helm-kibana-default install: echo "Goss container: $(GOSS_CONTAINER)" - helm upgrade --wait --timeout=900 --install $(RELEASE) ../../ + helm upgrade --wait --timeout=900s --install $(RELEASE) ../../ test: install goss purge: - helm del --purge $(RELEASE) + helm del $(RELEASE) diff --git a/kibana/examples/openshift/Makefile b/kibana/examples/openshift/Makefile index e1aadf2a0..8435ba2fe 100644 --- a/kibana/examples/openshift/Makefile +++ b/kibana/examples/openshift/Makefile @@ -7,9 +7,9 @@ template: helm template --values ./values.yml ../../ install: - helm upgrade --wait --timeout=900 --install --values ./values.yml $(RELEASE) ../../ + helm upgrade --wait --timeout=900s --install --values ./values.yml $(RELEASE) ../../ test: install goss purge: - helm del --purge $(RELEASE) + helm del $(RELEASE) diff --git a/kibana/examples/oss/Makefile b/kibana/examples/oss/Makefile index 16e974fcd..cbda8764d 100644 --- a/kibana/examples/oss/Makefile +++ b/kibana/examples/oss/Makefile @@ -4,9 +4,9 @@ include ../../../helpers/examples.mk RELEASE := helm-kibana-oss install: - helm upgrade --wait --timeout=900 --install --values ./values.yml $(RELEASE) ../../ + helm upgrade --wait --timeout=900s --install --values ./values.yml $(RELEASE) ../../ test: install goss purge: - helm del --purge $(RELEASE) + helm del $(RELEASE) diff --git a/kibana/examples/security/Makefile b/kibana/examples/security/Makefile index 107d8b74b..2fa8607e6 100644 --- a/kibana/examples/security/Makefile +++ b/kibana/examples/security/Makefile @@ -4,13 +4,13 @@ include ../../../helpers/examples.mk RELEASE := helm-kibana-security install: - helm upgrade --wait --timeout=900 --install --values ./security.yml $(RELEASE) ../../ + helm upgrade --wait --timeout=900s --install --values ./security.yml $(RELEASE) ../../ test: secrets install goss purge: kubectl delete secret kibana || true - helm del --purge $(RELEASE) + helm del $(RELEASE) secrets: encryptionkey=$$(docker run --rm busybox:1.31.1 /bin/sh -c "< /dev/urandom tr -dc _A-Za-z0-9 | head -c50") && \ diff --git a/kibana/templates/_helpers.tpl b/kibana/templates/_helpers.tpl index d2ab927b6..d03cc29cf 100755 --- a/kibana/templates/_helpers.tpl +++ b/kibana/templates/_helpers.tpl @@ -23,7 +23,7 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this Return the appropriate apiVersion for ingress. 
*/}} {{- define "kibana.ingress.apiVersion" -}} -{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.Version -}} {{- print "extensions/v1beta1" -}} {{- else -}} {{- print "networking.k8s.io/v1beta1" -}} diff --git a/logstash/README.md b/logstash/README.md index 6365e9429..677b3ef86 100644 --- a/logstash/README.md +++ b/logstash/README.md @@ -47,8 +47,8 @@ See [supported configurations][] for more details. `helm repo add elastic https://helm.elastic.co` * Install it: - - with Helm 2: `helm install --name logstash elastic/logstash` - - with [Helm 3 (beta)][]: `helm install logstash elastic/logstash` + - with Helm 3: `helm install logstash elastic/logstash` + - with Helm 2 (deprecated): `helm install --name logstash elastic/logstash` ### Install development version using master branch @@ -56,8 +56,8 @@ See [supported configurations][] for more details. * Clone the git repo: `git clone [email protected]:elastic/helm-charts.git` * Install it: - - with Helm 2: `helm install --name logstash ./helm-charts/logstash --set imageTag=8.0.0-SNAPSHOT` - - with [Helm 3 (beta)][]: `helm install logstash ./helm-charts/logstash --set imageTag=8.0.0-SNAPSHOT` + - with Helm 3: `helm install logstash ./helm-charts/logstash --set imageTag=8.0.0-SNAPSHOT` + - with Helm 2 (deprecated): `helm install --name logstash ./helm-charts/logstash --set imageTag=8.0.0-SNAPSHOT` ## Upgrading @@ -202,7 +202,6 @@ about our development and testing process. [examples]: https://github.com/elastic/helm-charts/tree/master/logstash/examples [examples/oss]: https://github.com/elastic/helm-charts/tree/master/logstash/examples/oss [helm]: https://helm.sh -[helm 3 (beta)]: https://github.com/elastic/helm-charts/tree/master/README.md#helm-3-beta [http input plugin]: https://www.elastic.co/guide/en/logstash/current/plugins-inputs-http.html [imagePullPolicy]: https://kubernetes.io/docs/concepts/containers/images/#updating-images [imagePullSecrets]: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-pod-that-uses-your-secret diff --git a/logstash/examples/default/Makefile b/logstash/examples/default/Makefile index 0e295d765..4c5920df5 100644 --- a/logstash/examples/default/Makefile +++ b/logstash/examples/default/Makefile @@ -5,12 +5,12 @@ include ../../../helpers/examples.mk RELEASE := helm-logstash-default install: - helm upgrade --wait --timeout=1200 --install $(RELEASE) ../../ + helm upgrade --wait --timeout=1200s --install $(RELEASE) ../../ restart: - helm upgrade --set terminationGracePeriod=121 --wait --timeout=1200 --install $(RELEASE) ../../ + helm upgrade --set terminationGracePeriod=121 --wait --timeout=1200s --install $(RELEASE) ../../ test: install goss purge: - helm del --purge $(RELEASE) + helm del $(RELEASE) diff --git a/logstash/examples/elasticsearch/Makefile b/logstash/examples/elasticsearch/Makefile index 0323fcea5..b11f714e3 100644 --- a/logstash/examples/elasticsearch/Makefile +++ b/logstash/examples/elasticsearch/Makefile @@ -5,13 +5,13 @@ include ../../../helpers/examples.mk RELEASE := helm-logstash-elasticsearch install: - helm upgrade --wait --timeout=1200 --install $(RELEASE) --values ./values.yaml ../../ + helm upgrade --wait --timeout=1200s --install $(RELEASE) --values ./values.yaml ../../ restart: - helm upgrade --set terminationGracePeriod=121 --wait --timeout=1200 --install $(RELEASE) ../../ + helm upgrade --set terminationGracePeriod=121 --wait --timeout=1200s --install $(RELEASE) ../../ 
test: install goss purge: - helm del --purge $(RELEASE) + helm del $(RELEASE) kubectl delete $$(kubectl get pvc -l release=$(RELEASE) -o name) diff --git a/logstash/examples/oss/Makefile b/logstash/examples/oss/Makefile index 541d0182f..81c83f702 100644 --- a/logstash/examples/oss/Makefile +++ b/logstash/examples/oss/Makefile @@ -5,12 +5,12 @@ include ../../../helpers/examples.mk RELEASE := helm-logstash-oss install: - helm upgrade --wait --timeout=1200 --install $(RELEASE) --values ./values.yaml ../../ + helm upgrade --wait --timeout=1200s --install $(RELEASE) --values ./values.yaml ../../ restart: - helm upgrade --set terminationGracePeriod=121 --wait --timeout=1200 --install $(RELEASE) ../../ + helm upgrade --set terminationGracePeriod=121 --wait --timeout=1200s --install $(RELEASE) ../../ test: install goss purge: - helm del --purge $(RELEASE) + helm del $(RELEASE) diff --git a/logstash/examples/security/Makefile b/logstash/examples/security/Makefile index 8bad0bc1c..ef8ebf13e 100644 --- a/logstash/examples/security/Makefile +++ b/logstash/examples/security/Makefile @@ -5,10 +5,10 @@ include ../../../helpers/examples.mk RELEASE := helm-logstash-security install: - helm upgrade --wait --timeout=1200 --install $(RELEASE) --values values.yaml ../../ + helm upgrade --wait --timeout=1200s --install $(RELEASE) --values values.yaml ../../ test: install goss purge: - helm del --purge $(RELEASE) + helm del $(RELEASE) kubectl delete $$(kubectl get pvc -l release=$(RELEASE) -o name) diff --git a/logstash/templates/_helpers.tpl b/logstash/templates/_helpers.tpl index d240a026e..13c76fde6 100755 --- a/logstash/templates/_helpers.tpl +++ b/logstash/templates/_helpers.tpl @@ -23,7 +23,7 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this Return the appropriate apiVersion for statefulset. */}} {{- define "logstash.statefulset.apiVersion" -}} -{{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.GitVersion -}} +{{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.Version -}} {{- print "apps/v1beta2" -}} {{- else -}} {{- print "apps/v1" -}} @@ -34,7 +34,7 @@ Return the appropriate apiVersion for statefulset. Return the appropriate apiVersion for ingress. */}} {{- define "logstash.ingress.apiVersion" -}} -{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.Version -}} {{- print "extensions/v1beta1" -}} {{- else -}} {{- print "networking.k8s.io/v1beta1" -}} diff --git a/logstash/templates/service.yaml b/logstash/templates/service.yaml index 6cbca1ee4..6540c8c61 100644 --- a/logstash/templates/service.yaml +++ b/logstash/templates/service.yaml @@ -16,7 +16,6 @@ spec: selector: app: "{{ template "logstash.fullname" . }}" chart: "{{ .Chart.Name }}" - heritage: {{ .Release.Service | quote }} release: {{ .Release.Name | quote }} ports: {{ toYaml .Values.service.ports | indent 4 }} diff --git a/metricbeat/README.md b/metricbeat/README.md index 061d8fe68..e68ff9c8c 100644 --- a/metricbeat/README.md +++ b/metricbeat/README.md @@ -46,8 +46,8 @@ See [supported configurations][] for more details. 
`helm repo add elastic https://helm.elastic.co` * Install it: - - with Helm 2: `helm install --name metricbeat elastic/metricbeat` - - with [Helm 3 (beta)][]: `helm install metricbeat elastic/metricbeat` + - with Helm 3: `helm install metricbeat elastic/metricbeat` + - with Helm 2 (deprecated): `helm install --name metricbeat elastic/metricbeat` ### Install development version using master branch @@ -55,8 +55,8 @@ See [supported configurations][] for more details. * Clone the git repo: `git clone [email protected]:elastic/helm-charts.git` * Install it: - - with Helm 2: `helm install --name metricbeat ./helm-charts/metricbeat --set imageTag=8.0.0-SNAPSHOT` - - with [Helm 3 (beta)][]: `helm install metricbeat ./helm-charts/metricbeat --set imageTag=8.0.0-SNAPSHOT` + - with Helm 3: `helm install metricbeat ./helm-charts/metricbeat --set imageTag=8.0.0-SNAPSHOT` + - with Helm 2 (deprecated): `helm install --name metricbeat ./helm-charts/metricbeat --set imageTag=8.0.0-SNAPSHOT` ## Upgrading @@ -224,7 +224,6 @@ about our development and testing process. [examples/oss]: https://github.com/elastic/helm-charts/tree/master/metricbeat/examples/oss [examples/security]: https://github.com/elastic/helm-charts/tree/master/metricbeat/examples/security [helm]: https://helm.sh -[helm 3 (beta)]: https://github.com/elastic/helm-charts/tree/master/README.md#helm-3-beta [hostPath]: https://kubernetes.io/docs/concepts/storage/volumes/#hostpath [hostNetwork]: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#host-namespaces [imagePullPolicy]: https://kubernetes.io/docs/concepts/containers/images/#updating-images diff --git a/metricbeat/examples/default/Makefile b/metricbeat/examples/default/Makefile index 76da6b209..6ff5a30e7 100644 --- a/metricbeat/examples/default/Makefile +++ b/metricbeat/examples/default/Makefile @@ -6,7 +6,7 @@ RELEASE = helm-metricbeat-default GOSS_SELECTOR = release=$(RELEASE),app=helm-metricbeat-default-metricbeat install: - helm upgrade --wait --timeout=900 --install $(RELEASE) ../../ + helm upgrade --wait --timeout=900s --install $(RELEASE) ../../ test-metrics: GOSS_FILE=goss-metrics.yaml make goss GOSS_SELECTOR=release=$(RELEASE),app=helm-metricbeat-default-metricbeat-metrics @@ -14,4 +14,4 @@ test-metrics: test: install goss test-metrics purge: - helm del --purge $(RELEASE) + helm del $(RELEASE) diff --git a/metricbeat/examples/oss/Makefile b/metricbeat/examples/oss/Makefile index afe3c1f2c..acb1124bc 100644 --- a/metricbeat/examples/oss/Makefile +++ b/metricbeat/examples/oss/Makefile @@ -6,10 +6,10 @@ RELEASE := helm-metricbeat-oss GOSS_SELECTOR = release=$(RELEASE),app=helm-metricbeat-oss-metricbeat install: - helm upgrade --wait --timeout=900 --install $(RELEASE) --values values.yaml ../../ + helm upgrade --wait --timeout=900s --install $(RELEASE) --values values.yaml ../../ purge: - helm del --purge $(RELEASE) + helm del $(RELEASE) test-metrics: GOSS_FILE=goss-metrics.yaml make goss GOSS_SELECTOR=release=$(RELEASE),app=helm-metricbeat-oss-metricbeat-metrics diff --git a/metricbeat/examples/security/Makefile b/metricbeat/examples/security/Makefile index acabeb20a..d57cb05be 100644 --- a/metricbeat/examples/security/Makefile +++ b/metricbeat/examples/security/Makefile @@ -6,10 +6,10 @@ RELEASE := helm-metricbeat-security GOSS_SELECTOR = release=$(RELEASE),app=helm-metricbeat-security-metricbeat install: - helm upgrade --wait --timeout=900 --install $(RELEASE) --values values.yaml ../../ + helm upgrade --wait --timeout=900s --install $(RELEASE) --values values.yaml 
../../ purge: - helm del --purge $(RELEASE) + helm del $(RELEASE) test-metrics: GOSS_FILE=goss-metrics.yaml make goss GOSS_SELECTOR=release=$(RELEASE),app=helm-metricbeat-security-metricbeat-metrics diff --git a/metricbeat/templates/deployment.yaml b/metricbeat/templates/deployment.yaml index e9d682b2e..cc97f20e5 100644 --- a/metricbeat/templates/deployment.yaml +++ b/metricbeat/templates/deployment.yaml @@ -30,7 +30,6 @@ spec: selector: matchLabels: app: '{{ template "metricbeat.fullname" . }}-metrics' - heritage: '{{ .Release.Service }}' release: '{{ .Release.Name }}' template: metadata: @@ -45,7 +44,6 @@ spec: labels: app: '{{ template "metricbeat.fullname" . }}-metrics' chart: '{{ .Chart.Name }}-{{ .Chart.Version }}' - heritage: '{{ .Release.Service }}' release: '{{ .Release.Name }}' {{- if .Values.deployment.labels }} {{- range $key, $value := .Values.deployment.labels }}
helm upgrade fails due to 'cannot patch "elasticsearch-master" with kind StatefulSet' **Chart version:** 7.6.0 **Kubernetes version:** v1.14.9-eks-c0eccc **Kubernetes provider:** E.g. GKE (Google Kubernetes Engine) EKS **Helm Version:** v3.0.2 **`helm get release` output** <details> <summary>Output of helm get release</summary> ``` NAME: elasticsearch LAST DEPLOYED: Fri Feb 21 16:30:05 2020 NAMESPACE: elasticsearch STATUS: failed REVISION: 29 USER-SUPPLIED VALUES: antiAffinity: hard antiAffinityTopologyKey: kubernetes.io/hostname clusterHealthCheckParams: wait_for_status=green&timeout=1s clusterName: elasticsearch esConfig: elasticsearch.yml: | xpack.security.enabled: true xpack.security.transport.ssl.enabled: true xpack.security.transport.ssl.verification_mode: certificate xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 xpack.security.http.ssl.enabled: true xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 network.host: 0.0.0.0 esJavaOpts: -Xmx1g -Xms1g esMajorVersion: "" extraEnvs: - name: ELASTIC_PASSWORD valueFrom: secretKeyRef: key: password name: elastic-credentials - name: ELASTIC_USERNAME valueFrom: secretKeyRef: key: username name: elastic-credentials extraInitContainers: "" extraVolumeMounts: "" extraVolumes: "" fsGroup: "" fullnameOverride: "" httpPort: 9200 image: docker.elastic.co/elasticsearch/elasticsearch imagePullPolicy: IfNotPresent imagePullSecrets: [] imageTag: 7.6.0 ingress: annotations: {} enabled: false hosts: - elasticsearch.local path: / tls: [] initResources: {} keystore: [] labels: {} lifecycle: {} masterService: "" masterTerminationFix: false maxUnavailable: 1 minimumMasterNodes: 2 nameOverride: "" networkHost: 0.0.0.0 nodeAffinity: {} nodeGroup: master nodeSelector: {} persistence: annotations: {} enabled: true podAnnotations: {} podManagementPolicy: Parallel podSecurityContext: fsGroup: 1000 runAsUser: 1000 podSecurityPolicy: create: false name: "" spec: fsGroup: rule: RunAsAny privileged: true runAsUser: rule: RunAsAny seLinux: rule: RunAsAny supplementalGroups: rule: RunAsAny volumes: - secret - configMap - persistentVolumeClaim priorityClassName: "" protocol: https rbac: create: false serviceAccountName: "" readinessProbe: failureThreshold: 3 initialDelaySeconds: 10 periodSeconds: 10 successThreshold: 3 timeoutSeconds: 5 replicas: 3 resources: limits: cpu: 1000m memory: 2Gi requests: cpu: 200m memory: 2Gi roles: data: "true" ingest: "true" master: "true" schedulerName: "" secretMounts: - name: elastic-certificates path: /usr/share/elasticsearch/config/certs secretName: elastic-certificates securityContext: capabilities: drop: - ALL runAsNonRoot: true runAsUser: 1000 service: annotations: {} httpPortName: http labels: {} labelsHeadless: {} nodePort: "" transportPortName: transport type: ClusterIP sidecarResources: {} sysctlInitContainer: enabled: true sysctlVmMaxMapCount: 262144 terminationGracePeriod: 120 tolerations: [] transportPort: 9300 updateStrategy: RollingUpdate volumeClaimTemplate: accessModes: - ReadWriteOnce resources: requests: storage: 50Gi COMPUTED VALUES: antiAffinity: hard antiAffinityTopologyKey: kubernetes.io/hostname clusterHealthCheckParams: wait_for_status=green&timeout=1s clusterName: elasticsearch esConfig: 
elasticsearch.yml: | xpack.security.enabled: true xpack.security.transport.ssl.enabled: true xpack.security.transport.ssl.verification_mode: certificate xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 xpack.security.http.ssl.enabled: true xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 network.host: 0.0.0.0 esJavaOpts: -Xmx1g -Xms1g esMajorVersion: "" extraContainers: "" extraEnvs: - name: ELASTIC_PASSWORD valueFrom: secretKeyRef: key: password name: elastic-credentials - name: ELASTIC_USERNAME valueFrom: secretKeyRef: key: username name: elastic-credentials extraInitContainers: "" extraVolumeMounts: "" extraVolumes: "" fsGroup: "" fullnameOverride: "" httpPort: 9200 image: docker.elastic.co/elasticsearch/elasticsearch imagePullPolicy: IfNotPresent imagePullSecrets: [] imageTag: 7.6.0 ingress: annotations: {} enabled: false hosts: - elasticsearch.local path: / tls: [] initResources: {} keystore: [] labels: {} lifecycle: {} masterService: "" masterTerminationFix: false maxUnavailable: 1 minimumMasterNodes: 2 nameOverride: "" networkHost: 0.0.0.0 nodeAffinity: {} nodeGroup: master nodeSelector: {} persistence: annotations: {} enabled: true podAnnotations: {} podManagementPolicy: Parallel podSecurityContext: fsGroup: 1000 runAsUser: 1000 podSecurityPolicy: create: false name: "" spec: fsGroup: rule: RunAsAny privileged: true runAsUser: rule: RunAsAny seLinux: rule: RunAsAny supplementalGroups: rule: RunAsAny volumes: - secret - configMap - persistentVolumeClaim priorityClassName: "" protocol: https rbac: create: false serviceAccountName: "" readinessProbe: failureThreshold: 3 initialDelaySeconds: 10 periodSeconds: 10 successThreshold: 3 timeoutSeconds: 5 replicas: 3 resources: limits: cpu: 1000m memory: 2Gi requests: cpu: 200m memory: 2Gi roles: data: "true" ingest: "true" master: "true" schedulerName: "" secretMounts: - name: elastic-certificates path: /usr/share/elasticsearch/config/certs secretName: elastic-certificates securityContext: capabilities: drop: - ALL runAsNonRoot: true runAsUser: 1000 service: annotations: {} httpPortName: http labels: {} labelsHeadless: {} nodePort: "" transportPortName: transport type: ClusterIP sidecarResources: {} sysctlInitContainer: enabled: true sysctlVmMaxMapCount: 262144 terminationGracePeriod: 120 tolerations: [] transportPort: 9300 updateStrategy: RollingUpdate volumeClaimTemplate: accessModes: - ReadWriteOnce resources: requests: storage: 50Gi HOOKS: --- # Source: elasticsearch/templates/test/test-elasticsearch-health.yaml apiVersion: v1 kind: Pod metadata: name: "elasticsearch-sbxrc-test" annotations: "helm.sh/hook": test-success spec: containers: - name: "elasticsearch-ualfr-test" image: "docker.elastic.co/elasticsearch/elasticsearch:7.6.0" command: - "sh" - "-c" - | #!/usr/bin/env bash -e curl -XGET --fail 'elasticsearch-master:9200/_cluster/health?wait_for_status=green&timeout=1s' restartPolicy: Never MANIFEST: --- # Source: elasticsearch/templates/poddisruptionbudget.yaml apiVersion: policy/v1beta1 kind: PodDisruptionBudget metadata: name: "elasticsearch-master-pdb" spec: maxUnavailable: 1 selector: matchLabels: app: "elasticsearch-master" --- # Source: elasticsearch/templates/configmap.yaml apiVersion: v1 kind: ConfigMap 
metadata: name: elasticsearch-master-config labels: heritage: "Helm" release: "elasticsearch" chart: "elasticsearch" app: "elasticsearch-master" data: elasticsearch.yml: | xpack.security.enabled: true xpack.security.transport.ssl.enabled: true xpack.security.transport.ssl.verification_mode: certificate xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 xpack.security.http.ssl.enabled: true xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 network.host: 0.0.0.0 --- # Source: elasticsearch/templates/service.yaml kind: Service apiVersion: v1 metadata: name: elasticsearch-master-headless labels: heritage: "Helm" release: "elasticsearch" chart: "elasticsearch" app: "elasticsearch-master" annotations: service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" spec: clusterIP: None # This is needed for statefulset hostnames like elasticsearch-0 to resolve # Create endpoints also if the related pod isn't ready publishNotReadyAddresses: true selector: app: "elasticsearch-master" ports: - name: http port: 9200 - name: transport port: 9300 --- # Source: elasticsearch/templates/service.yaml kind: Service apiVersion: v1 metadata: name: elasticsearch-master labels: heritage: "Helm" release: "elasticsearch" chart: "elasticsearch" app: "elasticsearch-master" annotations: {} spec: type: ClusterIP selector: heritage: "Helm" release: "elasticsearch" chart: "elasticsearch" app: "elasticsearch-master" ports: - name: http protocol: TCP port: 9200 - name: transport protocol: TCP port: 9300 --- # Source: elasticsearch/templates/statefulset.yaml apiVersion: apps/v1 kind: StatefulSet metadata: name: elasticsearch-master labels: heritage: "Helm" release: "elasticsearch" chart: "elasticsearch" app: "elasticsearch-master" annotations: esMajorVersion: "7" spec: serviceName: elasticsearch-master-headless selector: matchLabels: app: "elasticsearch-master" replicas: 3 podManagementPolicy: Parallel updateStrategy: type: RollingUpdate volumeClaimTemplates: - metadata: name: elasticsearch-master spec: accessModes: - ReadWriteOnce resources: requests: storage: 50Gi template: metadata: name: "elasticsearch-master" labels: heritage: "Helm" release: "elasticsearch" chart: "elasticsearch" app: "elasticsearch-master" annotations: configchecksum: a925349ed01ac0903a539d33164dabb0c174b9b602c943057c90033eee58253 spec: securityContext: fsGroup: 1000 runAsUser: 1000 affinity: podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - labelSelector: matchExpressions: - key: app operator: In values: - "elasticsearch-master" topologyKey: kubernetes.io/hostname terminationGracePeriodSeconds: 120 volumes: - name: elastic-certificates secret: secretName: elastic-certificates - name: esconfig configMap: name: elasticsearch-master-config initContainers: - name: configure-sysctl securityContext: runAsUser: 0 privileged: true image: "docker.elastic.co/elasticsearch/elasticsearch:7.6.0" imagePullPolicy: "IfNotPresent" command: ["sysctl", "-w", "vm.max_map_count=262144"] resources: {} containers: - name: "elasticsearch" securityContext: capabilities: drop: - ALL runAsNonRoot: true runAsUser: 1000 image: "docker.elastic.co/elasticsearch/elasticsearch:7.6.0" imagePullPolicy: "IfNotPresent" readinessProbe: failureThreshold: 
3 initialDelaySeconds: 10 periodSeconds: 10 successThreshold: 3 timeoutSeconds: 5 exec: command: - sh - -c - | #!/usr/bin/env bash -e # If the node is starting up wait for the cluster to be ready (request params: 'wait_for_status=green&timeout=1s' ) # Once it has started only check that the node itself is responding START_FILE=/tmp/.es_start_file http () { local path="${1}" if [ -n "${ELASTIC_USERNAME}" ] && [ -n "${ELASTIC_PASSWORD}" ]; then BASIC_AUTH="-u ${ELASTIC_USERNAME}:${ELASTIC_PASSWORD}" else BASIC_AUTH='' fi curl -XGET -s -k --fail ${BASIC_AUTH} https://127.0.0.1:9200${path} } if [ -f "${START_FILE}" ]; then echo 'Elasticsearch is already running, lets check the node is healthy and there are master nodes available' http "/_cluster/health?timeout=0s" else echo 'Waiting for elasticsearch cluster to become ready (request params: "wait_for_status=green&timeout=1s" )' if http "/_cluster/health?wait_for_status=green&timeout=1s" ; then touch ${START_FILE} exit 0 else echo 'Cluster is not yet ready (request params: "wait_for_status=green&timeout=1s" )' exit 1 fi fi ports: - name: http containerPort: 9200 - name: transport containerPort: 9300 resources: limits: cpu: 1000m memory: 2Gi requests: cpu: 200m memory: 2Gi env: - name: node.name valueFrom: fieldRef: fieldPath: metadata.name - name: cluster.initial_master_nodes value: "elasticsearch-master-0,elasticsearch-master-1,elasticsearch-master-2," - name: discovery.seed_hosts value: "elasticsearch-master-headless" - name: cluster.name value: "elasticsearch" - name: network.host value: "0.0.0.0" - name: ES_JAVA_OPTS value: "-Xmx1g -Xms1g" - name: node.data value: "true" - name: node.ingest value: "true" - name: node.master value: "true" - name: ELASTIC_PASSWORD valueFrom: secretKeyRef: key: password name: elastic-credentials - name: ELASTIC_USERNAME valueFrom: secretKeyRef: key: username name: elastic-credentials volumeMounts: - name: "elasticsearch-master" mountPath: /usr/share/elasticsearch/data - name: elastic-certificates mountPath: /usr/share/elasticsearch/config/certs - name: esconfig mountPath: /usr/share/elasticsearch/config/elasticsearch.yml subPath: elasticsearch.yml NOTES: 1. Watch all cluster members come up. $ kubectl get pods --namespace=elasticsearch -l app=elasticsearch-master -w 2. Test cluster health using Helm test. $ helm test elasticsearch ``` NOTE: the images above show 7.6.0 as I have manually updated the statefulset as a workaround. </details> **Describe the bug:** Performing 'helm upgrade' returns the following error; Error: UPGRADE FAILED: cannot patch "elasticsearch-master" with kind StatefulSet: StatefulSet.apps "elasticsearch-master" is invalid: spec: Forbidden: updates to statefulset spec for fields other than 'replicas', 'template', and 'updateStrategy' are forbidden **Steps to reproduce:** 1. helm install elasticsearch elastic/elasticsearch -n elasticsearch -f values.yaml 2. helm upgrade elasticsearch elastic/elasticsearch -n elasticsearch -f values.yaml **Expected behavior:** Successful upgrade to newer image **Provide logs and/or server output (if relevant):** ``` cat values.yaml --- clusterName: "elasticsearch" nodeGroup: "master" # The service that non master groups will try to connect to when joining the cluster # This should be set to clusterName + "-" + nodeGroup for your master group masterService: "" # Elasticsearch roles that will be applied to this nodeGroup # These will be set as environment variables. E.g. 
node.master=true roles: master: "true" ingest: "true" data: "true" replicas: 3 minimumMasterNodes: 2 esMajorVersion: "" # Allows you to add any config files in /usr/share/elasticsearch/config/ # such as elasticsearch.yml and log4j2.properties esConfig: elasticsearch.yml: | xpack.security.enabled: true xpack.security.transport.ssl.enabled: true xpack.security.transport.ssl.verification_mode: certificate xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 xpack.security.http.ssl.enabled: true xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 network.host: 0.0.0.0 # log4j2.properties: | # key = value # Extra environment variables to append to this nodeGroup # This will be appended to the current 'env:' key. You can use any of the kubernetes env # syntax here extraEnvs: - name: ELASTIC_PASSWORD valueFrom: secretKeyRef: name: elastic-credentials key: password - name: ELASTIC_USERNAME valueFrom: secretKeyRef: name: elastic-credentials key: username # A list of secrets and their paths to mount inside the pod # This is useful for mounting certificates for security and for mounting # the X-Pack license secretMounts: - name: elastic-certificates secretName: elastic-certificates path: /usr/share/elasticsearch/config/certs image: "docker.elastic.co/elasticsearch/elasticsearch" imageTag: "7.6.0" imagePullPolicy: "IfNotPresent" podAnnotations: {} # iam.amazonaws.com/role: es-cluster # additionals labels labels: {} esJavaOpts: "-Xmx1g -Xms1g" resources: requests: cpu: "200m" memory: "2Gi" limits: cpu: "1000m" memory: "2Gi" initResources: {} # limits: # cpu: "25m" # # memory: "128Mi" # requests: # cpu: "25m" # memory: "128Mi" sidecarResources: {} # limits: # cpu: "25m" # # memory: "128Mi" # requests: # cpu: "25m" # memory: "128Mi" networkHost: "0.0.0.0" volumeClaimTemplate: accessModes: [ "ReadWriteOnce" ] resources: requests: storage: 50Gi rbac: create: false serviceAccountName: "" podSecurityPolicy: create: false name: "" spec: privileged: true fsGroup: rule: RunAsAny runAsUser: rule: RunAsAny seLinux: rule: RunAsAny supplementalGroups: rule: RunAsAny volumes: - secret - configMap - persistentVolumeClaim persistence: enabled: true annotations: {} extraVolumes: "" # - name: extras # emptyDir: {} extraVolumeMounts: "" # - name: extras # mountPath: /usr/share/extras # readOnly: true extraInitContainers: "" # - name: do-something # image: busybox # command: ['do', 'something'] # This is the PriorityClass settings as defined in # https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass priorityClassName: "" # By default this will make sure two pods don't end up on the same node # Changing this to a region would allow you to spread pods across regions antiAffinityTopologyKey: "kubernetes.io/hostname" # Hard means that by default pods will only be scheduled if there are enough nodes for them # and that they will never end up on the same node. Setting this to soft will do this "best effort" antiAffinity: "hard" # This is the node affinity settings as defined in # https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature nodeAffinity: {} # The default is to deploy all pods serially. 
By setting this to parallel all pods are started at # the same time when bootstrapping the cluster podManagementPolicy: "Parallel" protocol: https httpPort: 9200 transportPort: 9300 service: labels: {} labelsHeadless: {} type: ClusterIP nodePort: "" annotations: {} httpPortName: http transportPortName: transport updateStrategy: RollingUpdate # This is the max unavailable setting for the pod disruption budget # The default value of 1 will make sure that kubernetes won't allow more than 1 # of your pods to be unavailable during maintenance maxUnavailable: 1 podSecurityContext: fsGroup: 1000 runAsUser: 1000 # The following value is deprecated, # please use the above podSecurityContext.fsGroup instead fsGroup: "" securityContext: capabilities: drop: - ALL # readOnlyRootFilesystem: true runAsNonRoot: true runAsUser: 1000 # How long to wait for elasticsearch to stop gracefully terminationGracePeriod: 120 sysctlVmMaxMapCount: 262144 readinessProbe: failureThreshold: 3 initialDelaySeconds: 10 periodSeconds: 10 successThreshold: 3 timeoutSeconds: 5 # https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params wait_for_status clusterHealthCheckParams: "wait_for_status=green&timeout=1s" ## Use an alternate scheduler. ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ ## schedulerName: "" imagePullSecrets: [] nodeSelector: {} tolerations: [] # Enabling this will publically expose your Elasticsearch instance. # Only enable this if you have security enabled on your cluster ingress: enabled: false annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" path: / hosts: - elasticsearch.local tls: [] # - secretName: chart-example-tls # hosts: # - chart-example.local nameOverride: "" fullnameOverride: "" # https://github.com/elastic/helm-charts/issues/63 masterTerminationFix: false lifecycle: {} # preStop: # exec: # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] # postStart: # exec: # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] sysctlInitContainer: enabled: true keystore: [] ``` **Any additional context:** I manually updated the statefulset to get around the problem, hoping helm would then recognise the new image version was in place, however that has gained me nothing. The upgrade still fails
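An aside on the report above (not part of the original issue text): the `Forbidden` error is Kubernetes enforcing StatefulSet spec immutability, so once the chart's rendered immutable fields (such as `spec.selector`) differ from what is already deployed, `helm upgrade` cannot patch the object in place. One workaround sometimes used in this situation is to delete only the StatefulSet object while orphaning its pods and keeping the PVCs, then re-run the upgrade so Helm recreates it with the new spec. This is a hedged sketch only, it assumes orphaning the pods is acceptable for the cluster in question, and it should be verified before use:

```console
# Older kubectl (as on Kubernetes 1.14) spells the orphaning flag --cascade=false;
# newer releases use --cascade=orphan. PVCs from volumeClaimTemplates are not deleted.
kubectl -n elasticsearch delete statefulset elasticsearch-master --cascade=false
helm upgrade elasticsearch elastic/elasticsearch -n elasticsearch -f values.yaml
```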
pre-commit__pre-commit-1709
[ { "content": "import os.path\nimport re\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import NamedTuple\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.clientlib import InvalidManifestError\nfrom pre_commit.clientlib import load_config\nfrom pre_commit.clientlib import load_manifest\nfrom pre_commit.clientlib import LOCAL\nfrom pre_commit.clientlib import META\nfrom pre_commit.commands.migrate_config import migrate_config\nfrom pre_commit.store import Store\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.util import tmpdir\nfrom pre_commit.util import yaml_dump\nfrom pre_commit.util import yaml_load\n\n\nclass RevInfo(NamedTuple):\n repo: str\n rev: str\n frozen: Optional[str]\n\n @classmethod\n def from_config(cls, config: Dict[str, Any]) -> 'RevInfo':\n return cls(config['repo'], config['rev'], None)\n\n def update(self, tags_only: bool, freeze: bool) -> 'RevInfo':\n if tags_only:\n tag_cmd = ('git', 'describe', 'FETCH_HEAD', '--tags', '--abbrev=0')\n else:\n tag_cmd = ('git', 'describe', 'FETCH_HEAD', '--tags', '--exact')\n\n with tmpdir() as tmp:\n git.init_repo(tmp, self.repo)\n cmd_output_b('git', 'fetch', 'origin', 'HEAD', '--tags', cwd=tmp)\n\n try:\n rev = cmd_output(*tag_cmd, cwd=tmp)[1].strip()\n except CalledProcessError:\n cmd = ('git', 'rev-parse', 'FETCH_HEAD')\n rev = cmd_output(*cmd, cwd=tmp)[1].strip()\n\n frozen = None\n if freeze:\n exact = cmd_output('git', 'rev-parse', rev, cwd=tmp)[1].strip()\n if exact != rev:\n rev, frozen = exact, rev\n return self._replace(rev=rev, frozen=frozen)\n\n\nclass RepositoryCannotBeUpdatedError(RuntimeError):\n pass\n\n\ndef _check_hooks_still_exist_at_rev(\n repo_config: Dict[str, Any],\n info: RevInfo,\n store: Store,\n) -> None:\n try:\n path = store.clone(repo_config['repo'], info.rev)\n manifest = load_manifest(os.path.join(path, C.MANIFEST_FILE))\n except InvalidManifestError as e:\n raise RepositoryCannotBeUpdatedError(str(e))\n\n # See if any of our hooks were deleted with the new commits\n hooks = {hook['id'] for hook in repo_config['hooks']}\n hooks_missing = hooks - {hook['id'] for hook in manifest}\n if hooks_missing:\n raise RepositoryCannotBeUpdatedError(\n f'Cannot update because the tip of HEAD is missing these hooks:\\n'\n f'{\", \".join(sorted(hooks_missing))}',\n )\n\n\nREV_LINE_RE = re.compile(r'^(\\s+)rev:(\\s*)([\\'\"]?)([^\\s#]+)(.*)(\\r?\\n)$')\n\n\ndef _original_lines(\n path: str,\n rev_infos: List[Optional[RevInfo]],\n retry: bool = False,\n) -> Tuple[List[str], List[int]]:\n \"\"\"detect `rev:` lines or reformat the file\"\"\"\n with open(path, newline='') as f:\n original = f.read()\n\n lines = original.splitlines(True)\n idxs = [i for i, line in enumerate(lines) if REV_LINE_RE.match(line)]\n if len(idxs) == len(rev_infos):\n return lines, idxs\n elif retry:\n raise AssertionError('could not find rev lines')\n else:\n with open(path, 'w') as f:\n f.write(yaml_dump(yaml_load(original)))\n return _original_lines(path, rev_infos, retry=True)\n\n\ndef _write_new_config(path: str, rev_infos: List[Optional[RevInfo]]) -> None:\n lines, idxs = _original_lines(path, rev_infos)\n\n for idx, rev_info in zip(idxs, rev_infos):\n if rev_info is None:\n continue\n match = REV_LINE_RE.match(lines[idx])\n assert match is not None\n 
new_rev_s = yaml_dump({'rev': rev_info.rev}, default_style=match[3])\n new_rev = new_rev_s.split(':', 1)[1].strip()\n if rev_info.frozen is not None:\n comment = f' # frozen: {rev_info.frozen}'\n elif match[5].strip().startswith('# frozen:'):\n comment = ''\n else:\n comment = match[5]\n lines[idx] = f'{match[1]}rev:{match[2]}{new_rev}{comment}{match[6]}'\n\n with open(path, 'w', newline='') as f:\n f.write(''.join(lines))\n\n\ndef autoupdate(\n config_file: str,\n store: Store,\n tags_only: bool,\n freeze: bool,\n repos: Sequence[str] = (),\n) -> int:\n \"\"\"Auto-update the pre-commit config to the latest versions of repos.\"\"\"\n migrate_config(config_file, quiet=True)\n retv = 0\n rev_infos: List[Optional[RevInfo]] = []\n changed = False\n\n config = load_config(config_file)\n for repo_config in config['repos']:\n if repo_config['repo'] in {LOCAL, META}:\n continue\n\n info = RevInfo.from_config(repo_config)\n if repos and info.repo not in repos:\n rev_infos.append(None)\n continue\n\n output.write(f'Updating {info.repo} ... ')\n new_info = info.update(tags_only=tags_only, freeze=freeze)\n try:\n _check_hooks_still_exist_at_rev(repo_config, new_info, store)\n except RepositoryCannotBeUpdatedError as error:\n output.write_line(error.args[0])\n rev_infos.append(None)\n retv = 1\n continue\n\n if new_info.rev != info.rev:\n changed = True\n if new_info.frozen:\n updated_to = f'{new_info.frozen} (frozen)'\n else:\n updated_to = new_info.rev\n msg = f'updating {info.rev} -> {updated_to}.'\n output.write_line(msg)\n rev_infos.append(new_info)\n else:\n output.write_line('already up to date.')\n rev_infos.append(None)\n\n if changed:\n _write_new_config(config_file, rev_infos)\n\n return retv\n", "path": "pre_commit/commands/autoupdate.py" } ]
[ { "content": "import os.path\nimport re\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import NamedTuple\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.clientlib import InvalidManifestError\nfrom pre_commit.clientlib import load_config\nfrom pre_commit.clientlib import load_manifest\nfrom pre_commit.clientlib import LOCAL\nfrom pre_commit.clientlib import META\nfrom pre_commit.commands.migrate_config import migrate_config\nfrom pre_commit.store import Store\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.util import tmpdir\nfrom pre_commit.util import yaml_dump\nfrom pre_commit.util import yaml_load\n\n\nclass RevInfo(NamedTuple):\n repo: str\n rev: str\n frozen: Optional[str]\n\n @classmethod\n def from_config(cls, config: Dict[str, Any]) -> 'RevInfo':\n return cls(config['repo'], config['rev'], None)\n\n def update(self, tags_only: bool, freeze: bool) -> 'RevInfo':\n if tags_only:\n tag_cmd = ('git', 'describe', 'FETCH_HEAD', '--tags', '--abbrev=0')\n else:\n tag_cmd = ('git', 'describe', 'FETCH_HEAD', '--tags', '--exact')\n\n with tmpdir() as tmp:\n git.init_repo(tmp, self.repo)\n cmd_output_b('git', 'fetch', 'origin', 'HEAD', '--tags', cwd=tmp)\n\n try:\n rev = cmd_output(*tag_cmd, cwd=tmp)[1].strip()\n except CalledProcessError:\n cmd = ('git', 'rev-parse', 'FETCH_HEAD')\n rev = cmd_output(*cmd, cwd=tmp)[1].strip()\n\n frozen = None\n if freeze:\n exact = cmd_output('git', 'rev-parse', rev, cwd=tmp)[1].strip()\n if exact != rev:\n rev, frozen = exact, rev\n return self._replace(rev=rev, frozen=frozen)\n\n\nclass RepositoryCannotBeUpdatedError(RuntimeError):\n pass\n\n\ndef _check_hooks_still_exist_at_rev(\n repo_config: Dict[str, Any],\n info: RevInfo,\n store: Store,\n) -> None:\n try:\n path = store.clone(repo_config['repo'], info.rev)\n manifest = load_manifest(os.path.join(path, C.MANIFEST_FILE))\n except InvalidManifestError as e:\n raise RepositoryCannotBeUpdatedError(str(e))\n\n # See if any of our hooks were deleted with the new commits\n hooks = {hook['id'] for hook in repo_config['hooks']}\n hooks_missing = hooks - {hook['id'] for hook in manifest}\n if hooks_missing:\n raise RepositoryCannotBeUpdatedError(\n f'Cannot update because the update target is missing these '\n f'hooks:\\n{\", \".join(sorted(hooks_missing))}',\n )\n\n\nREV_LINE_RE = re.compile(r'^(\\s+)rev:(\\s*)([\\'\"]?)([^\\s#]+)(.*)(\\r?\\n)$')\n\n\ndef _original_lines(\n path: str,\n rev_infos: List[Optional[RevInfo]],\n retry: bool = False,\n) -> Tuple[List[str], List[int]]:\n \"\"\"detect `rev:` lines or reformat the file\"\"\"\n with open(path, newline='') as f:\n original = f.read()\n\n lines = original.splitlines(True)\n idxs = [i for i, line in enumerate(lines) if REV_LINE_RE.match(line)]\n if len(idxs) == len(rev_infos):\n return lines, idxs\n elif retry:\n raise AssertionError('could not find rev lines')\n else:\n with open(path, 'w') as f:\n f.write(yaml_dump(yaml_load(original)))\n return _original_lines(path, rev_infos, retry=True)\n\n\ndef _write_new_config(path: str, rev_infos: List[Optional[RevInfo]]) -> None:\n lines, idxs = _original_lines(path, rev_infos)\n\n for idx, rev_info in zip(idxs, rev_infos):\n if rev_info is None:\n continue\n match = REV_LINE_RE.match(lines[idx])\n assert match is not None\n 
new_rev_s = yaml_dump({'rev': rev_info.rev}, default_style=match[3])\n new_rev = new_rev_s.split(':', 1)[1].strip()\n if rev_info.frozen is not None:\n comment = f' # frozen: {rev_info.frozen}'\n elif match[5].strip().startswith('# frozen:'):\n comment = ''\n else:\n comment = match[5]\n lines[idx] = f'{match[1]}rev:{match[2]}{new_rev}{comment}{match[6]}'\n\n with open(path, 'w', newline='') as f:\n f.write(''.join(lines))\n\n\ndef autoupdate(\n config_file: str,\n store: Store,\n tags_only: bool,\n freeze: bool,\n repos: Sequence[str] = (),\n) -> int:\n \"\"\"Auto-update the pre-commit config to the latest versions of repos.\"\"\"\n migrate_config(config_file, quiet=True)\n retv = 0\n rev_infos: List[Optional[RevInfo]] = []\n changed = False\n\n config = load_config(config_file)\n for repo_config in config['repos']:\n if repo_config['repo'] in {LOCAL, META}:\n continue\n\n info = RevInfo.from_config(repo_config)\n if repos and info.repo not in repos:\n rev_infos.append(None)\n continue\n\n output.write(f'Updating {info.repo} ... ')\n new_info = info.update(tags_only=tags_only, freeze=freeze)\n try:\n _check_hooks_still_exist_at_rev(repo_config, new_info, store)\n except RepositoryCannotBeUpdatedError as error:\n output.write_line(error.args[0])\n rev_infos.append(None)\n retv = 1\n continue\n\n if new_info.rev != info.rev:\n changed = True\n if new_info.frozen:\n updated_to = f'{new_info.frozen} (frozen)'\n else:\n updated_to = new_info.rev\n msg = f'updating {info.rev} -> {updated_to}.'\n output.write_line(msg)\n rev_infos.append(new_info)\n else:\n output.write_line('already up to date.')\n rev_infos.append(None)\n\n if changed:\n _write_new_config(config_file, rev_infos)\n\n return retv\n", "path": "pre_commit/commands/autoupdate.py" } ]
diff --git a/pre_commit/commands/autoupdate.py b/pre_commit/commands/autoupdate.py index 7320bb426..33a347302 100644 --- a/pre_commit/commands/autoupdate.py +++ b/pre_commit/commands/autoupdate.py @@ -79,8 +79,8 @@ def _check_hooks_still_exist_at_rev( hooks_missing = hooks - {hook['id'] for hook in manifest} if hooks_missing: raise RepositoryCannotBeUpdatedError( - f'Cannot update because the tip of HEAD is missing these hooks:\n' - f'{", ".join(sorted(hooks_missing))}', + f'Cannot update because the update target is missing these ' + f'hooks:\n{", ".join(sorted(hooks_missing))}', )
running `pre-commit autoupdate` fails because tip of HEAD is missing hook Hello 👋 I'm setting up `pre-commit` on a project and came across an issue when adding hook `destroyed-symlinks`. The error message suggested running `pre-commit autoupdate`. I ran that and saw that it cannot update because the tip of HEAD is missing that hook. I'm not sure what that means so posting here. ```console $ echo ' - id: destroyed-symlinks' >> .pre-commit-config.yaml $ git add -p !$ git add -p .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index bfde4717..949f3ffc 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -21,3 +21,4 @@ repos: - id: check-vcs-permalinks - id: check-xml - id: debug-statements + - id: destroyed-symlinks (1/1) Stage this hunk [y,n,q,a,d,e,?]? y $ git commit -m 'new hook destroyed-symlinks' [ERROR] `destroyed-symlinks` is not present in repository https://github.com/pre-commit/pre-commit-hooks. Typo? Perhaps it is introduced in a newer version? Often `pre-commit autoupdate` fixes this. $ git status On branch pre-commit Changes to be committed: (use "git restore --staged <file>..." to unstage) modified: .pre-commit-config.yaml Untracked files: (use "git add <file>..." to include in what will be committed) tests/__init__.py $ pre-commit autoupdate Updating https://github.com/pre-commit/pre-commit-hooks ... [INFO] Initializing environment for https://github.com/pre-commit/pre-commit-hooks. Cannot update because the tip of HEAD is missing these hooks: destroyed-symlinks $ git checkout . Updated 0 paths from the index $ pre-commit autoupdate Updating https://github.com/pre-commit/pre-commit-hooks ... Cannot update because the tip of HEAD is missing these hooks: destroyed-symlinks $ pre-commit --version pre-commit 2.9.0 ```
secdev__scapy-1007
[ { "content": "## This file is part of Scapy\n## See http://www.secdev.org/projects/scapy for more informations\n## Copyright (C) Philippe Biondi <[email protected]>\n## Modified by Maxence Tury <[email protected]>\n## Acknowledgment: Ralph Broenink\n## This program is published under a GPLv2 license\n\n\"\"\"\nBasic Encoding Rules (BER) for ASN.1\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom scapy.error import warning\nfrom scapy.compat import *\nfrom scapy.utils import binrepr,inet_aton,inet_ntoa\nfrom scapy.asn1.asn1 import ASN1_Decoding_Error,ASN1_Encoding_Error,ASN1_BadTag_Decoding_Error,ASN1_Codecs,ASN1_Class_UNIVERSAL,ASN1_Error,ASN1_DECODING_ERROR,ASN1_BADTAG\nimport scapy.modules.six as six\n\n##################\n## BER encoding ##\n##################\n\n\n\n#####[ BER tools ]#####\n\n\nclass BER_Exception(Exception):\n pass\n\nclass BER_Encoding_Error(ASN1_Encoding_Error):\n def __init__(self, msg, encoded=None, remaining=None):\n Exception.__init__(self, msg)\n self.remaining = remaining\n self.encoded = encoded\n def __str__(self):\n s = Exception.__str__(self)\n if isinstance(self.encoded, BERcodec_Object):\n s+=\"\\n### Already encoded ###\\n%s\" % self.encoded.strshow()\n else:\n s+=\"\\n### Already encoded ###\\n%r\" % self.encoded\n s+=\"\\n### Remaining ###\\n%r\" % self.remaining\n return s\n\nclass BER_Decoding_Error(ASN1_Decoding_Error):\n def __init__(self, msg, decoded=None, remaining=None):\n Exception.__init__(self, msg)\n self.remaining = remaining\n self.decoded = decoded\n def __str__(self):\n s = Exception.__str__(self)\n if isinstance(self.decoded, BERcodec_Object):\n s+=\"\\n### Already decoded ###\\n%s\" % self.decoded.strshow()\n else:\n s+=\"\\n### Already decoded ###\\n%r\" % self.decoded\n s+=\"\\n### Remaining ###\\n%r\" % self.remaining\n return s\n\nclass BER_BadTag_Decoding_Error(BER_Decoding_Error, ASN1_BadTag_Decoding_Error):\n pass\n\ndef BER_len_enc(l, size=0):\n if l <= 127 and size==0:\n return chb(l)\n s = b\"\"\n while l or size>0:\n s = chb(l&0xff)+s\n l >>= 8\n size -= 1\n if len(s) > 127:\n raise BER_Exception(\"BER_len_enc: Length too long (%i) to be encoded [%r]\" % (len(s),s))\n return chb(len(s)|0x80)+s\ndef BER_len_dec(s):\n l = orb(s[0])\n if not l & 0x80:\n return l,s[1:]\n l &= 0x7f\n if len(s) <= l:\n raise BER_Decoding_Error(\"BER_len_dec: Got %i bytes while expecting %i\" % (len(s)-1, l),remaining=s)\n ll = 0\n for c in s[1:l+1]:\n ll <<= 8\n ll |= orb(c)\n return ll,s[l+1:]\n \ndef BER_num_enc(l, size=1):\n x=[]\n while l or size>0:\n x.insert(0, l & 0x7f)\n if len(x) > 1:\n x[0] |= 0x80\n l >>= 7\n size -= 1\n return b\"\".join(chb(k) for k in x)\ndef BER_num_dec(s, cls_id=0):\n if len(s) == 0:\n raise BER_Decoding_Error(\"BER_num_dec: got empty string\", remaining=s)\n x = cls_id\n for i, c in enumerate(s):\n c = orb(c)\n x <<= 7\n x |= c&0x7f\n if not c&0x80:\n break\n if c&0x80:\n raise BER_Decoding_Error(\"BER_num_dec: unfinished number description\", remaining=s)\n return x, s[i+1:]\n\ndef BER_id_dec(s):\n # This returns the tag ALONG WITH THE PADDED CLASS+CONSTRUCTIVE INFO.\n # Let's recall that bits 8-7 from the first byte of the tag encode\n # the class information, while bit 6 means primitive or constructive.\n #\n # For instance, with low-tag-number b'\\x81', class would be 0b10\n # ('context-specific') and tag 0x01, but we return 0x81 as a whole.\n # For b'\\xff\\x22', class would be 0b11 ('private'), constructed, then\n # padding, then tag 0x22, but we return (0xff>>5)*128^1 + 0x22*128^0.\n # Why the 
5-bit-shifting? Because it provides an unequivocal encoding\n # on base 128 (note that 0xff would equal 1*128^1 + 127*128^0...),\n # as we know that bits 5 to 1 are fixed to 1 anyway.\n #\n # As long as there is no class differentiation, we have to keep this info\n # encoded in scapy's tag in order to reuse it for packet building.\n # Note that tags thus may have to be hard-coded with their extended\n # information, e.g. a SEQUENCE from asn1.py has a direct tag 0x20|16.\n x = orb(s[0])\n if x & 0x1f != 0x1f:\n # low-tag-number\n return x,s[1:]\n else:\n # high-tag-number\n return BER_num_dec(s[1:], cls_id=x>>5)\ndef BER_id_enc(n):\n if n < 256:\n # low-tag-number\n return chb(n)\n else:\n # high-tag-number\n s = BER_num_enc(n)\n tag = orb(s[0]) # first byte, as an int\n tag &= 0x07 # reset every bit from 8 to 4\n tag <<= 5 # move back the info bits on top\n tag |= 0x1f # pad with 1s every bit from 5 to 1\n return chb(tag) + s[1:]\n\n# The functions below provide implicit and explicit tagging support.\ndef BER_tagging_dec(s, hidden_tag=None, implicit_tag=None,\n explicit_tag=None, safe=False):\n # We output the 'real_tag' if it is different from the (im|ex)plicit_tag.\n real_tag = None\n if len(s) > 0:\n err_msg = \"BER_tagging_dec: observed tag does not match expected tag\"\n if implicit_tag is not None:\n ber_id,s = BER_id_dec(s)\n if ber_id != implicit_tag:\n if not safe:\n raise BER_Decoding_Error(err_msg, remaining=s)\n else:\n real_tag = ber_id\n s = chb(hash(hidden_tag)) + s\n elif explicit_tag is not None:\n ber_id,s = BER_id_dec(s)\n if ber_id != explicit_tag:\n if not safe:\n raise BER_Decoding_Error(err_msg, remaining=s)\n else:\n real_tag = ber_id\n l,s = BER_len_dec(s)\n return real_tag, s\ndef BER_tagging_enc(s, implicit_tag=None, explicit_tag=None):\n if len(s) > 0:\n if implicit_tag is not None:\n s = BER_id_enc(implicit_tag) + s[1:]\n elif explicit_tag is not None:\n s = BER_id_enc(explicit_tag) + BER_len_enc(len(s)) + s\n return s\n\n#####[ BER classes ]#####\n\nclass BERcodec_metaclass(type):\n def __new__(cls, name, bases, dct):\n c = super(BERcodec_metaclass, cls).__new__(cls, name, bases, dct)\n try:\n c.tag.register(c.codec, c)\n except:\n warning(\"Error registering %r for %r\" % (c.tag, c.codec))\n return c\n\n\nclass BERcodec_Object(six.with_metaclass(BERcodec_metaclass)):\n codec = ASN1_Codecs.BER\n tag = ASN1_Class_UNIVERSAL.ANY\n\n @classmethod\n def asn1_object(cls, val):\n return cls.tag.asn1_object(val)\n\n @classmethod\n def check_string(cls, s):\n if not s:\n raise BER_Decoding_Error(\"%s: Got empty object while expecting tag %r\" %\n (cls.__name__,cls.tag), remaining=s) \n @classmethod\n def check_type(cls, s):\n cls.check_string(s)\n tag, remainder = BER_id_dec(s)\n if cls.tag != tag:\n raise BER_BadTag_Decoding_Error(\"%s: Got tag [%i/%#x] while expecting %r\" %\n (cls.__name__, tag, tag, cls.tag), remaining=s)\n return remainder\n @classmethod\n def check_type_get_len(cls, s):\n s2 = cls.check_type(s)\n if not s2:\n raise BER_Decoding_Error(\"%s: No bytes while expecting a length\" %\n cls.__name__, remaining=s)\n return BER_len_dec(s2)\n @classmethod\n def check_type_check_len(cls, s):\n l,s3 = cls.check_type_get_len(s)\n if len(s3) < l:\n raise BER_Decoding_Error(\"%s: Got %i bytes while expecting %i\" %\n (cls.__name__, len(s3), l), remaining=s)\n return l,s3[:l],s3[l:]\n\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n if context is None:\n context = cls.tag.context\n cls.check_string(s)\n p,_ = BER_id_dec(s)\n if p not in context:\n 
t = s\n if len(t) > 18:\n t = t[:15]+b\"...\"\n raise BER_Decoding_Error(\"Unknown prefix [%02x] for [%r]\" % (p,t), remaining=s)\n codec = context[p].get_codec(ASN1_Codecs.BER)\n return codec.dec(s,context,safe)\n\n @classmethod\n def dec(cls, s, context=None, safe=False):\n if not safe:\n return cls.do_dec(s, context, safe)\n try:\n return cls.do_dec(s, context, safe)\n except BER_BadTag_Decoding_Error as e:\n o,remain = BERcodec_Object.dec(e.remaining, context, safe)\n return ASN1_BADTAG(o),remain\n except BER_Decoding_Error as e:\n return ASN1_DECODING_ERROR(s, exc=e),\"\"\n except ASN1_Error as e:\n return ASN1_DECODING_ERROR(s, exc=e),\"\"\n\n @classmethod\n def safedec(cls, s, context=None):\n return cls.dec(s, context, safe=True)\n\n\n @classmethod\n def enc(cls, s):\n if isinstance(s, six.string_types):\n return BERcodec_STRING.enc(s)\n else:\n return BERcodec_INTEGER.enc(int(s))\n\nASN1_Codecs.BER.register_stem(BERcodec_Object)\n\n\n##########################\n#### BERcodec objects ####\n##########################\n\nclass BERcodec_INTEGER(BERcodec_Object):\n tag = ASN1_Class_UNIVERSAL.INTEGER\n @classmethod\n def enc(cls, i):\n s = []\n while True:\n s.append(i&0xff)\n if -127 <= i < 0:\n break\n if 128 <= i <= 255:\n s.append(0)\n i >>= 8\n if not i:\n break\n s = [chb(hash(c)) for c in s]\n s.append(BER_len_enc(len(s)))\n s.append(chb(hash(cls.tag)))\n s.reverse()\n return b\"\".join(s)\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n l,s,t = cls.check_type_check_len(s)\n x = 0\n if s:\n if orb(s[0])&0x80: # negative int\n x = -1\n for c in s:\n x <<= 8\n x |= orb(c)\n return cls.asn1_object(x),t\n \nclass BERcodec_BOOLEAN(BERcodec_INTEGER):\n tag = ASN1_Class_UNIVERSAL.BOOLEAN\n\nclass BERcodec_BIT_STRING(BERcodec_Object):\n tag = ASN1_Class_UNIVERSAL.BIT_STRING\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n # /!\\ the unused_bits information is lost after this decoding\n l,s,t = cls.check_type_check_len(s)\n if len(s) > 0:\n unused_bits = orb(s[0])\n if safe and unused_bits > 7:\n raise BER_Decoding_Error(\"BERcodec_BIT_STRING: too many unused_bits advertised\", remaining=s)\n s = \"\".join(binrepr(orb(x)).zfill(8) for x in s[1:])\n if unused_bits > 0:\n s = s[:-unused_bits]\n return cls.tag.asn1_object(s),t\n else:\n raise BER_Decoding_Error(\"BERcodec_BIT_STRING found no content (not even unused_bits byte)\", remaining=s)\n @classmethod\n def enc(cls,s):\n # /!\\ this is DER encoding (bit strings are only zero-bit padded)\n s = raw(s)\n if len(s) % 8 == 0:\n unused_bits = 0\n else:\n unused_bits = 8 - len(s)%8\n s += b\"0\"*unused_bits\n s = b\"\".join(chb(int(b\"\".join(chb(y) for y in x),2)) for x in zip(*[iter(s)]*8))\n s = chb(unused_bits) + s\n return chb(hash(cls.tag))+BER_len_enc(len(s))+s\n\nclass BERcodec_STRING(BERcodec_Object):\n tag = ASN1_Class_UNIVERSAL.STRING\n @classmethod\n def enc(cls,s):\n return chb(hash(cls.tag))+BER_len_enc(len(s))+raw(s) # Be sure we are encoding bytes\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n l,s,t = cls.check_type_check_len(s)\n return cls.tag.asn1_object(s),t\n\nclass BERcodec_NULL(BERcodec_INTEGER):\n tag = ASN1_Class_UNIVERSAL.NULL\n @classmethod\n def enc(cls, i):\n if i == 0:\n return chb(hash(cls.tag))+b\"\\0\"\n else:\n return super(cls,cls).enc(i)\n\nclass BERcodec_OID(BERcodec_Object):\n tag = ASN1_Class_UNIVERSAL.OID\n @classmethod\n def enc(cls, oid):\n oid = raw(oid)\n lst = [int(x) for x in oid.strip(b\".\").split(b\".\")]\n if len(lst) >= 2:\n lst[1] += 
40*lst[0]\n del(lst[0])\n s = b\"\".join(BER_num_enc(k) for k in lst)\n return chb(hash(cls.tag))+BER_len_enc(len(s))+s\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n l,s,t = cls.check_type_check_len(s)\n lst = []\n while s:\n l,s = BER_num_dec(s)\n lst.append(l)\n if (len(lst) > 0):\n lst.insert(0,lst[0]//40)\n lst[1] %= 40\n return cls.asn1_object(b\".\".join(str(k).encode('ascii') for k in lst)), t\n\nclass BERcodec_ENUMERATED(BERcodec_INTEGER):\n tag = ASN1_Class_UNIVERSAL.ENUMERATED\n\nclass BERcodec_UTF8_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.UTF8_STRING\n\nclass BERcodec_NUMERIC_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.NUMERIC_STRING\n\nclass BERcodec_PRINTABLE_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.PRINTABLE_STRING\n\nclass BERcodec_T61_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.T61_STRING\n\nclass BERcodec_VIDEOTEX_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.VIDEOTEX_STRING\n\nclass BERcodec_IA5_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.IA5_STRING\n\nclass BERcodec_UTC_TIME(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.UTC_TIME\n\nclass BERcodec_GENERALIZED_TIME(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.GENERALIZED_TIME\n\nclass BERcodec_ISO646_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.ISO646_STRING\n\nclass BERcodec_UNIVERSAL_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.UNIVERSAL_STRING\n\nclass BERcodec_BMP_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.BMP_STRING\n\nclass BERcodec_SEQUENCE(BERcodec_Object):\n tag = ASN1_Class_UNIVERSAL.SEQUENCE\n @classmethod\n def enc(cls, l):\n if not isinstance(l, bytes):\n l = b\"\".join(x.enc(cls.codec) for x in l)\n return chb(hash(cls.tag))+BER_len_enc(len(l))+l\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n if context is None:\n context = cls.tag.context\n l,st = cls.check_type_get_len(s) # we may have len(s) < l\n s,t = st[:l],st[l:]\n obj = []\n while s:\n try:\n o,s = BERcodec_Object.dec(s, context, safe)\n except BER_Decoding_Error as err:\n err.remaining += t\n if err.decoded is not None:\n obj.append(err.decoded)\n err.decoded = obj\n raise \n obj.append(o)\n if len(st) < l:\n raise BER_Decoding_Error(\"Not enough bytes to decode sequence\", decoded=obj)\n return cls.asn1_object(obj),t\n\nclass BERcodec_SET(BERcodec_SEQUENCE):\n tag = ASN1_Class_UNIVERSAL.SET\n\nclass BERcodec_IPADDRESS(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.IPADDRESS\n @classmethod\n def enc(cls, ipaddr_ascii):\n try:\n s = inet_aton(ipaddr_ascii)\n except Exception:\n raise BER_Encoding_Error(\"IPv4 address could not be encoded\") \n return chb(hash(cls.tag))+BER_len_enc(len(s))+s\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n l,s,t = cls.check_type_check_len(s)\n try:\n ipaddr_ascii = inet_ntoa(s)\n except Exception:\n raise BER_Decoding_Error(\"IP address could not be decoded\", remaining=s)\n return cls.asn1_object(ipaddr_ascii), t\n\nclass BERcodec_COUNTER32(BERcodec_INTEGER):\n tag = ASN1_Class_UNIVERSAL.COUNTER32\n\nclass BERcodec_GAUGE32(BERcodec_INTEGER):\n tag = ASN1_Class_UNIVERSAL.GAUGE32\n\nclass BERcodec_TIME_TICKS(BERcodec_INTEGER):\n tag = ASN1_Class_UNIVERSAL.TIME_TICKS\n", "path": "scapy/asn1/ber.py" } ]
[ { "content": "## This file is part of Scapy\n## See http://www.secdev.org/projects/scapy for more informations\n## Copyright (C) Philippe Biondi <[email protected]>\n## Modified by Maxence Tury <[email protected]>\n## Acknowledgment: Ralph Broenink\n## This program is published under a GPLv2 license\n\n\"\"\"\nBasic Encoding Rules (BER) for ASN.1\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom scapy.error import warning\nfrom scapy.compat import *\nfrom scapy.utils import binrepr,inet_aton,inet_ntoa\nfrom scapy.asn1.asn1 import ASN1_Decoding_Error,ASN1_Encoding_Error,ASN1_BadTag_Decoding_Error,ASN1_Codecs,ASN1_Class_UNIVERSAL,ASN1_Error,ASN1_DECODING_ERROR,ASN1_BADTAG\nimport scapy.modules.six as six\n\n##################\n## BER encoding ##\n##################\n\n\n\n#####[ BER tools ]#####\n\n\nclass BER_Exception(Exception):\n pass\n\nclass BER_Encoding_Error(ASN1_Encoding_Error):\n def __init__(self, msg, encoded=None, remaining=None):\n Exception.__init__(self, msg)\n self.remaining = remaining\n self.encoded = encoded\n def __str__(self):\n s = Exception.__str__(self)\n if isinstance(self.encoded, BERcodec_Object):\n s+=\"\\n### Already encoded ###\\n%s\" % self.encoded.strshow()\n else:\n s+=\"\\n### Already encoded ###\\n%r\" % self.encoded\n s+=\"\\n### Remaining ###\\n%r\" % self.remaining\n return s\n\nclass BER_Decoding_Error(ASN1_Decoding_Error):\n def __init__(self, msg, decoded=None, remaining=None):\n Exception.__init__(self, msg)\n self.remaining = remaining\n self.decoded = decoded\n def __str__(self):\n s = Exception.__str__(self)\n if isinstance(self.decoded, BERcodec_Object):\n s+=\"\\n### Already decoded ###\\n%s\" % self.decoded.strshow()\n else:\n s+=\"\\n### Already decoded ###\\n%r\" % self.decoded\n s+=\"\\n### Remaining ###\\n%r\" % self.remaining\n return s\n\nclass BER_BadTag_Decoding_Error(BER_Decoding_Error, ASN1_BadTag_Decoding_Error):\n pass\n\ndef BER_len_enc(l, size=0):\n if l <= 127 and size==0:\n return chb(l)\n s = b\"\"\n while l or size>0:\n s = chb(l&0xff)+s\n l >>= 8\n size -= 1\n if len(s) > 127:\n raise BER_Exception(\"BER_len_enc: Length too long (%i) to be encoded [%r]\" % (len(s),s))\n return chb(len(s)|0x80)+s\ndef BER_len_dec(s):\n l = orb(s[0])\n if not l & 0x80:\n return l,s[1:]\n l &= 0x7f\n if len(s) <= l:\n raise BER_Decoding_Error(\"BER_len_dec: Got %i bytes while expecting %i\" % (len(s)-1, l),remaining=s)\n ll = 0\n for c in s[1:l+1]:\n ll <<= 8\n ll |= orb(c)\n return ll,s[l+1:]\n \ndef BER_num_enc(l, size=1):\n x=[]\n while l or size>0:\n x.insert(0, l & 0x7f)\n if len(x) > 1:\n x[0] |= 0x80\n l >>= 7\n size -= 1\n return b\"\".join(chb(k) for k in x)\ndef BER_num_dec(s, cls_id=0):\n if len(s) == 0:\n raise BER_Decoding_Error(\"BER_num_dec: got empty string\", remaining=s)\n x = cls_id\n for i, c in enumerate(s):\n c = orb(c)\n x <<= 7\n x |= c&0x7f\n if not c&0x80:\n break\n if c&0x80:\n raise BER_Decoding_Error(\"BER_num_dec: unfinished number description\", remaining=s)\n return x, s[i+1:]\n\ndef BER_id_dec(s):\n # This returns the tag ALONG WITH THE PADDED CLASS+CONSTRUCTIVE INFO.\n # Let's recall that bits 8-7 from the first byte of the tag encode\n # the class information, while bit 6 means primitive or constructive.\n #\n # For instance, with low-tag-number b'\\x81', class would be 0b10\n # ('context-specific') and tag 0x01, but we return 0x81 as a whole.\n # For b'\\xff\\x22', class would be 0b11 ('private'), constructed, then\n # padding, then tag 0x22, but we return (0xff>>5)*128^1 + 0x22*128^0.\n # Why the 
5-bit-shifting? Because it provides an unequivocal encoding\n # on base 128 (note that 0xff would equal 1*128^1 + 127*128^0...),\n # as we know that bits 5 to 1 are fixed to 1 anyway.\n #\n # As long as there is no class differentiation, we have to keep this info\n # encoded in scapy's tag in order to reuse it for packet building.\n # Note that tags thus may have to be hard-coded with their extended\n # information, e.g. a SEQUENCE from asn1.py has a direct tag 0x20|16.\n x = orb(s[0])\n if x & 0x1f != 0x1f:\n # low-tag-number\n return x,s[1:]\n else:\n # high-tag-number\n return BER_num_dec(s[1:], cls_id=x>>5)\ndef BER_id_enc(n):\n if n < 256:\n # low-tag-number\n return chb(n)\n else:\n # high-tag-number\n s = BER_num_enc(n)\n tag = orb(s[0]) # first byte, as an int\n tag &= 0x07 # reset every bit from 8 to 4\n tag <<= 5 # move back the info bits on top\n tag |= 0x1f # pad with 1s every bit from 5 to 1\n return chb(tag) + s[1:]\n\n# The functions below provide implicit and explicit tagging support.\ndef BER_tagging_dec(s, hidden_tag=None, implicit_tag=None,\n explicit_tag=None, safe=False):\n # We output the 'real_tag' if it is different from the (im|ex)plicit_tag.\n real_tag = None\n if len(s) > 0:\n err_msg = \"BER_tagging_dec: observed tag does not match expected tag\"\n if implicit_tag is not None:\n ber_id,s = BER_id_dec(s)\n if ber_id != implicit_tag:\n if not safe:\n raise BER_Decoding_Error(err_msg, remaining=s)\n else:\n real_tag = ber_id\n s = chb(hash(hidden_tag)) + s\n elif explicit_tag is not None:\n ber_id,s = BER_id_dec(s)\n if ber_id != explicit_tag:\n if not safe:\n raise BER_Decoding_Error(err_msg, remaining=s)\n else:\n real_tag = ber_id\n l,s = BER_len_dec(s)\n return real_tag, s\ndef BER_tagging_enc(s, implicit_tag=None, explicit_tag=None):\n if len(s) > 0:\n if implicit_tag is not None:\n s = BER_id_enc(implicit_tag) + s[1:]\n elif explicit_tag is not None:\n s = BER_id_enc(explicit_tag) + BER_len_enc(len(s)) + s\n return s\n\n#####[ BER classes ]#####\n\nclass BERcodec_metaclass(type):\n def __new__(cls, name, bases, dct):\n c = super(BERcodec_metaclass, cls).__new__(cls, name, bases, dct)\n try:\n c.tag.register(c.codec, c)\n except:\n warning(\"Error registering %r for %r\" % (c.tag, c.codec))\n return c\n\n\nclass BERcodec_Object(six.with_metaclass(BERcodec_metaclass)):\n codec = ASN1_Codecs.BER\n tag = ASN1_Class_UNIVERSAL.ANY\n\n @classmethod\n def asn1_object(cls, val):\n return cls.tag.asn1_object(val)\n\n @classmethod\n def check_string(cls, s):\n if not s:\n raise BER_Decoding_Error(\"%s: Got empty object while expecting tag %r\" %\n (cls.__name__,cls.tag), remaining=s) \n @classmethod\n def check_type(cls, s):\n cls.check_string(s)\n tag, remainder = BER_id_dec(s)\n if cls.tag != tag:\n raise BER_BadTag_Decoding_Error(\"%s: Got tag [%i/%#x] while expecting %r\" %\n (cls.__name__, tag, tag, cls.tag), remaining=s)\n return remainder\n @classmethod\n def check_type_get_len(cls, s):\n s2 = cls.check_type(s)\n if not s2:\n raise BER_Decoding_Error(\"%s: No bytes while expecting a length\" %\n cls.__name__, remaining=s)\n return BER_len_dec(s2)\n @classmethod\n def check_type_check_len(cls, s):\n l,s3 = cls.check_type_get_len(s)\n if len(s3) < l:\n raise BER_Decoding_Error(\"%s: Got %i bytes while expecting %i\" %\n (cls.__name__, len(s3), l), remaining=s)\n return l,s3[:l],s3[l:]\n\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n if context is None:\n context = cls.tag.context\n cls.check_string(s)\n p,_ = BER_id_dec(s)\n if p not in context:\n 
t = s\n if len(t) > 18:\n t = t[:15]+b\"...\"\n raise BER_Decoding_Error(\"Unknown prefix [%02x] for [%r]\" % (p,t), remaining=s)\n codec = context[p].get_codec(ASN1_Codecs.BER)\n return codec.dec(s,context,safe)\n\n @classmethod\n def dec(cls, s, context=None, safe=False):\n if not safe:\n return cls.do_dec(s, context, safe)\n try:\n return cls.do_dec(s, context, safe)\n except BER_BadTag_Decoding_Error as e:\n o,remain = BERcodec_Object.dec(e.remaining, context, safe)\n return ASN1_BADTAG(o),remain\n except BER_Decoding_Error as e:\n return ASN1_DECODING_ERROR(s, exc=e),\"\"\n except ASN1_Error as e:\n return ASN1_DECODING_ERROR(s, exc=e),\"\"\n\n @classmethod\n def safedec(cls, s, context=None):\n return cls.dec(s, context, safe=True)\n\n\n @classmethod\n def enc(cls, s):\n if isinstance(s, six.string_types):\n return BERcodec_STRING.enc(s)\n else:\n return BERcodec_INTEGER.enc(int(s))\n\nASN1_Codecs.BER.register_stem(BERcodec_Object)\n\n\n##########################\n#### BERcodec objects ####\n##########################\n\nclass BERcodec_INTEGER(BERcodec_Object):\n tag = ASN1_Class_UNIVERSAL.INTEGER\n @classmethod\n def enc(cls, i):\n s = []\n while True:\n s.append(i&0xff)\n if -127 <= i < 0:\n break\n if 128 <= i <= 255:\n s.append(0)\n i >>= 8\n if not i:\n break\n s = [chb(hash(c)) for c in s]\n s.append(BER_len_enc(len(s)))\n s.append(chb(hash(cls.tag)))\n s.reverse()\n return b\"\".join(s)\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n l,s,t = cls.check_type_check_len(s)\n x = 0\n if s:\n if orb(s[0])&0x80: # negative int\n x = -1\n for c in s:\n x <<= 8\n x |= orb(c)\n return cls.asn1_object(x),t\n \nclass BERcodec_BOOLEAN(BERcodec_INTEGER):\n tag = ASN1_Class_UNIVERSAL.BOOLEAN\n\nclass BERcodec_BIT_STRING(BERcodec_Object):\n tag = ASN1_Class_UNIVERSAL.BIT_STRING\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n # /!\\ the unused_bits information is lost after this decoding\n l,s,t = cls.check_type_check_len(s)\n if len(s) > 0:\n unused_bits = orb(s[0])\n if safe and unused_bits > 7:\n raise BER_Decoding_Error(\"BERcodec_BIT_STRING: too many unused_bits advertised\", remaining=s)\n s = \"\".join(binrepr(orb(x)).zfill(8) for x in s[1:])\n if unused_bits > 0:\n s = s[:-unused_bits]\n return cls.tag.asn1_object(s),t\n else:\n raise BER_Decoding_Error(\"BERcodec_BIT_STRING found no content (not even unused_bits byte)\", remaining=s)\n @classmethod\n def enc(cls,s):\n # /!\\ this is DER encoding (bit strings are only zero-bit padded)\n s = raw(s)\n if len(s) % 8 == 0:\n unused_bits = 0\n else:\n unused_bits = 8 - len(s)%8\n s += b\"0\"*unused_bits\n s = b\"\".join(chb(int(b\"\".join(chb(y) for y in x),2)) for x in zip(*[iter(s)]*8))\n s = chb(unused_bits) + s\n return chb(hash(cls.tag))+BER_len_enc(len(s))+s\n\nclass BERcodec_STRING(BERcodec_Object):\n tag = ASN1_Class_UNIVERSAL.STRING\n @classmethod\n def enc(cls,s):\n s = raw(s)\n return chb(hash(cls.tag))+BER_len_enc(len(s))+s # Be sure we are encoding bytes\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n l,s,t = cls.check_type_check_len(s)\n return cls.tag.asn1_object(s),t\n\nclass BERcodec_NULL(BERcodec_INTEGER):\n tag = ASN1_Class_UNIVERSAL.NULL\n @classmethod\n def enc(cls, i):\n if i == 0:\n return chb(hash(cls.tag))+b\"\\0\"\n else:\n return super(cls,cls).enc(i)\n\nclass BERcodec_OID(BERcodec_Object):\n tag = ASN1_Class_UNIVERSAL.OID\n @classmethod\n def enc(cls, oid):\n oid = raw(oid)\n lst = [int(x) for x in oid.strip(b\".\").split(b\".\")]\n if len(lst) >= 2:\n lst[1] += 
40*lst[0]\n del(lst[0])\n s = b\"\".join(BER_num_enc(k) for k in lst)\n return chb(hash(cls.tag))+BER_len_enc(len(s))+s\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n l,s,t = cls.check_type_check_len(s)\n lst = []\n while s:\n l,s = BER_num_dec(s)\n lst.append(l)\n if (len(lst) > 0):\n lst.insert(0,lst[0]//40)\n lst[1] %= 40\n return cls.asn1_object(b\".\".join(str(k).encode('ascii') for k in lst)), t\n\nclass BERcodec_ENUMERATED(BERcodec_INTEGER):\n tag = ASN1_Class_UNIVERSAL.ENUMERATED\n\nclass BERcodec_UTF8_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.UTF8_STRING\n\nclass BERcodec_NUMERIC_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.NUMERIC_STRING\n\nclass BERcodec_PRINTABLE_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.PRINTABLE_STRING\n\nclass BERcodec_T61_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.T61_STRING\n\nclass BERcodec_VIDEOTEX_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.VIDEOTEX_STRING\n\nclass BERcodec_IA5_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.IA5_STRING\n\nclass BERcodec_UTC_TIME(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.UTC_TIME\n\nclass BERcodec_GENERALIZED_TIME(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.GENERALIZED_TIME\n\nclass BERcodec_ISO646_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.ISO646_STRING\n\nclass BERcodec_UNIVERSAL_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.UNIVERSAL_STRING\n\nclass BERcodec_BMP_STRING(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.BMP_STRING\n\nclass BERcodec_SEQUENCE(BERcodec_Object):\n tag = ASN1_Class_UNIVERSAL.SEQUENCE\n @classmethod\n def enc(cls, l):\n if not isinstance(l, bytes):\n l = b\"\".join(x.enc(cls.codec) for x in l)\n return chb(hash(cls.tag))+BER_len_enc(len(l))+l\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n if context is None:\n context = cls.tag.context\n l,st = cls.check_type_get_len(s) # we may have len(s) < l\n s,t = st[:l],st[l:]\n obj = []\n while s:\n try:\n o,s = BERcodec_Object.dec(s, context, safe)\n except BER_Decoding_Error as err:\n err.remaining += t\n if err.decoded is not None:\n obj.append(err.decoded)\n err.decoded = obj\n raise \n obj.append(o)\n if len(st) < l:\n raise BER_Decoding_Error(\"Not enough bytes to decode sequence\", decoded=obj)\n return cls.asn1_object(obj),t\n\nclass BERcodec_SET(BERcodec_SEQUENCE):\n tag = ASN1_Class_UNIVERSAL.SET\n\nclass BERcodec_IPADDRESS(BERcodec_STRING):\n tag = ASN1_Class_UNIVERSAL.IPADDRESS\n @classmethod\n def enc(cls, ipaddr_ascii):\n try:\n s = inet_aton(ipaddr_ascii)\n except Exception:\n raise BER_Encoding_Error(\"IPv4 address could not be encoded\") \n return chb(hash(cls.tag))+BER_len_enc(len(s))+s\n @classmethod\n def do_dec(cls, s, context=None, safe=False):\n l,s,t = cls.check_type_check_len(s)\n try:\n ipaddr_ascii = inet_ntoa(s)\n except Exception:\n raise BER_Decoding_Error(\"IP address could not be decoded\", remaining=s)\n return cls.asn1_object(ipaddr_ascii), t\n\nclass BERcodec_COUNTER32(BERcodec_INTEGER):\n tag = ASN1_Class_UNIVERSAL.COUNTER32\n\nclass BERcodec_GAUGE32(BERcodec_INTEGER):\n tag = ASN1_Class_UNIVERSAL.GAUGE32\n\nclass BERcodec_TIME_TICKS(BERcodec_INTEGER):\n tag = ASN1_Class_UNIVERSAL.TIME_TICKS\n", "path": "scapy/asn1/ber.py" } ]
diff --git a/scapy/asn1/ber.py b/scapy/asn1/ber.py index c2b0cf3a5cd..deadf770f5e 100644 --- a/scapy/asn1/ber.py +++ b/scapy/asn1/ber.py @@ -337,7 +337,8 @@ class BERcodec_STRING(BERcodec_Object): tag = ASN1_Class_UNIVERSAL.STRING @classmethod def enc(cls,s): - return chb(hash(cls.tag))+BER_len_enc(len(s))+raw(s) # Be sure we are encoding bytes + s = raw(s) + return chb(hash(cls.tag))+BER_len_enc(len(s))+s # Be sure we are encoding bytes @classmethod def do_dec(cls, s, context=None, safe=False): l,s,t = cls.check_type_check_len(s)
BERcodec_STRING with RandString causes an incorrect length

Scapy Version: 2.3.3.dev929
System: OSX 10.13.1
Python Version: 2.7.13

Using RandString with ASN1_STRING causes the encoded length to be incorrect. The following code reproduces the problem.

```python
from scapy.all import *

for i in range(10):
    data = str(ASN1_STRING(RandString()))
    enc_len = ord(data[1])
    str_len = len(data[2:])
    if enc_len != str_len:
        print("Got enc length incorrect, enc_length:%s, data_length:%s" % (enc_len, str_len))
        print("Hex string is: %s" % data[2:].encode('hex'))
```

BERcodec_STRING expects a string as input but receives a RandString instance; the calls to len(s) and raw(s) in BERcodec_STRING each trigger RandString._fix() and therefore produce two different strings. A simple fix may look like this.

```python
class BERcodec_STRING(BERcodec_Object):
    tag = ASN1_Class_UNIVERSAL.STRING
    @classmethod
    def enc(cls,s):
        s=str(s)
        return chb(hash(cls.tag))+BER_len_enc(len(s))+raw(s) # Be sure we are encoding bytes
```
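The core of the bug described above is double evaluation of a volatile value: the length prefix and the payload come from two different draws. Below is a minimal, scapy-independent sketch of that pattern and of the fix applied in the diff; `VolatileString` is a hypothetical stand-in for `RandString`, not scapy's actual class.

```python
import random


class VolatileString:
    """Hypothetical stand-in for RandString: each str() call draws a new value."""
    def __str__(self):
        return "".join(random.choice("abcdef") for _ in range(random.randint(1, 10)))


def enc_buggy(s):
    # len(str(s)) and str(s) each re-evaluate the volatile value, so the
    # length byte may not match the payload that follows it.
    return bytes([len(str(s))]) + str(s).encode()


def enc_fixed(s):
    # Materialise the value once, then derive both the length prefix and the
    # payload from the same string (the same pattern the patch uses with raw(s)).
    s = str(s)
    return bytes([len(s)]) + s.encode()


encoded = enc_fixed(VolatileString())
assert encoded[0] == len(encoded[1:])  # the length prefix now always matches
```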
pytorch__audio-1339
[ { "content": "class AudioMetaData:\n \"\"\"Return type of ``torchaudio.info`` function.\n\n This class is used by :ref:`\"sox_io\" backend<sox_io_backend>` and\n :ref:`\"soundfile\" backend with the new interface<soundfile_backend>`.\n\n :ivar int sample_rate: Sample rate\n :ivar int num_frames: The number of frames\n :ivar int num_channels: The number of channels\n :ivar int bits_per_sample: The number of bits per sample. This is 0 for lossy formats,\n or when it cannot be accurately inferred.\n :ivar str encoding: Audio encoding\n The values encoding can take are one of the following:\n\n * ``PCM_S``: Signed integer linear PCM\n * ``PCM_U``: Unsigned integer linear PCM\n * ``PCM_F``: Floating point linear PCM\n * ``FLAC``: Flac, Free Lossless Audio Codec\n * ``ULAW``: Mu-law\n * ``ALAW``: A-law\n * ``MP3`` : MP3, MPEG-1 Audio Layer III\n * ``VORBIS``: OGG Vorbis\n * ``AMR_WB``: Adaptive Multi-Rate\n * ``AMR_NB``: Adaptive Multi-Rate Wideband\n * ``OPUS``: Opus\n * ``UNKNOWN`` : None of above\n \"\"\"\n def __init__(\n self,\n sample_rate: int,\n num_frames: int,\n num_channels: int,\n bits_per_sample: int,\n encoding: str,\n ):\n self.sample_rate = sample_rate\n self.num_frames = num_frames\n self.num_channels = num_channels\n self.bits_per_sample = bits_per_sample\n self.encoding = encoding\n", "path": "torchaudio/backend/common.py" } ]
[ { "content": "class AudioMetaData:\n \"\"\"Return type of ``torchaudio.info`` function.\n\n This class is used by :ref:`\"sox_io\" backend<sox_io_backend>` and\n :ref:`\"soundfile\" backend with the new interface<soundfile_backend>`.\n\n :ivar int sample_rate: Sample rate\n :ivar int num_frames: The number of frames\n :ivar int num_channels: The number of channels\n :ivar int bits_per_sample: The number of bits per sample. This is 0 for lossy formats,\n or when it cannot be accurately inferred.\n :ivar str encoding: Audio encoding\n The values encoding can take are one of the following:\n\n * ``PCM_S``: Signed integer linear PCM\n * ``PCM_U``: Unsigned integer linear PCM\n * ``PCM_F``: Floating point linear PCM\n * ``FLAC``: Flac, Free Lossless Audio Codec\n * ``ULAW``: Mu-law\n * ``ALAW``: A-law\n * ``MP3`` : MP3, MPEG-1 Audio Layer III\n * ``VORBIS``: OGG Vorbis\n * ``AMR_WB``: Adaptive Multi-Rate\n * ``AMR_NB``: Adaptive Multi-Rate Wideband\n * ``OPUS``: Opus\n * ``UNKNOWN`` : None of above\n \"\"\"\n def __init__(\n self,\n sample_rate: int,\n num_frames: int,\n num_channels: int,\n bits_per_sample: int,\n encoding: str,\n ):\n self.sample_rate = sample_rate\n self.num_frames = num_frames\n self.num_channels = num_channels\n self.bits_per_sample = bits_per_sample\n self.encoding = encoding\n\n def __str__(self):\n return (\n f\"AudioMetaData(\"\n f\"sample_rate={self.sample_rate}, \"\n f\"num_frames={self.num_frames}, \"\n f\"num_channels={self.num_channels}, \"\n f\"bits_per_sample={self.bits_per_sample}, \"\n f\"encoding={self.encoding}\"\n f\")\"\n )\n", "path": "torchaudio/backend/common.py" } ]
diff --git a/torchaudio/backend/common.py b/torchaudio/backend/common.py index 722ce75879..f944b23d74 100644 --- a/torchaudio/backend/common.py +++ b/torchaudio/backend/common.py @@ -38,3 +38,14 @@ def __init__( self.num_channels = num_channels self.bits_per_sample = bits_per_sample self.encoding = encoding + + def __str__(self): + return ( + f"AudioMetaData(" + f"sample_rate={self.sample_rate}, " + f"num_frames={self.num_frames}, " + f"num_channels={self.num_channels}, " + f"bits_per_sample={self.bits_per_sample}, " + f"encoding={self.encoding}" + f")" + )
Making `AudioMetaData` print friendly

The `AudioMetaData` class reports the metadata of an audio source. However, it is not print-friendly.

```python
print(torchaudio.info(src))
>>> <torchaudio.backend.common.AudioMetaData object at 0x7f1bc5cd2890>
```

It would be nice if we could simply print the attributes the way `dataclass` objects do.

```python
print(torchaudio.info(src))
>>> AudioMetaData(sample_rate=900, encoding="PCM", ...)
```

## Steps

There are two approaches I can think of:

1. Add a `__str__` method.
2. Use `dataclasses.dataclass`.

For approach 2, the `info` function has to remain TorchScript-compatible, which means its return type `AudioMetaData` has to be TorchScript-able. For this reason, `dataclass` might not be applicable. This can be checked with the following test:

```bash
(cd test && pytest torchaudio_unittest/backend/sox_io/torchscript_test.py)
```

## Build and test

Please refer to the [contribution guide](https://github.com/pytorch/audio/blob/master/CONTRIBUTING.md) for how to set up the development environment. To test:

```bash
(cd test && pytest torchaudio_unittest/backend/sox_io/torchscript_test.py torchaudio_unittest/backend/sox_io/info_test.py torchaudio_unittest/backend/soundfile_io/info_test.py)
```
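A small, torchaudio-independent sketch of the two approaches listed in the issue (the class names `MetaSketch` and `MetaManual` are illustrative only): a dataclass generates the friendly repr automatically, while a plain class needs a hand-written `__str__`, which is the route the diff above takes.

```python
from dataclasses import dataclass


# Approach 2: a dataclass generates the readable repr for free, but the class
# would then also have to remain TorchScript-able (the open question above).
@dataclass
class MetaSketch:
    sample_rate: int
    num_frames: int


print(MetaSketch(sample_rate=900, num_frames=16000))
# -> MetaSketch(sample_rate=900, num_frames=16000)


# Approach 1: keep a plain class and write __str__ by hand, as the PR does.
class MetaManual:
    def __init__(self, sample_rate: int, num_frames: int):
        self.sample_rate = sample_rate
        self.num_frames = num_frames

    def __str__(self):
        return (
            f"MetaManual("
            f"sample_rate={self.sample_rate}, "
            f"num_frames={self.num_frames})"
        )


print(MetaManual(900, 16000))
# -> MetaManual(sample_rate=900, num_frames=16000)
```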
cython__cython-2497
[ { "content": "#\n# Cython -- Things that don't belong\n# anywhere else in particular\n#\n\nfrom __future__ import absolute_import\n\ntry:\n from __builtin__ import basestring\nexcept ImportError:\n basestring = str\n\nimport os\nimport sys\nimport re\nimport io\nimport codecs\nimport shutil\nfrom contextlib import contextmanager\n\nmodification_time = os.path.getmtime\n\n_function_caches = []\ndef clear_function_caches():\n for cache in _function_caches:\n cache.clear()\n\ndef cached_function(f):\n cache = {}\n _function_caches.append(cache)\n uncomputed = object()\n def wrapper(*args):\n res = cache.get(args, uncomputed)\n if res is uncomputed:\n res = cache[args] = f(*args)\n return res\n wrapper.uncached = f\n return wrapper\n\ndef cached_method(f):\n cache_name = '__%s_cache' % f.__name__\n def wrapper(self, *args):\n cache = getattr(self, cache_name, None)\n if cache is None:\n cache = {}\n setattr(self, cache_name, cache)\n if args in cache:\n return cache[args]\n res = cache[args] = f(self, *args)\n return res\n return wrapper\n\ndef replace_suffix(path, newsuf):\n base, _ = os.path.splitext(path)\n return base + newsuf\n\n\ndef open_new_file(path):\n if os.path.exists(path):\n # Make sure to create a new file here so we can\n # safely hard link the output files.\n os.unlink(path)\n\n # we use the ISO-8859-1 encoding here because we only write pure\n # ASCII strings or (e.g. for file names) byte encoded strings as\n # Unicode, so we need a direct mapping from the first 256 Unicode\n # characters to a byte sequence, which ISO-8859-1 provides\n\n # note: can't use io.open() in Py2 as we may be writing str objects\n return codecs.open(path, \"w\", encoding=\"ISO-8859-1\")\n\n\ndef castrate_file(path, st):\n # Remove junk contents from an output file after a\n # failed compilation.\n # Also sets access and modification times back to\n # those specified by st (a stat struct).\n try:\n f = open_new_file(path)\n except EnvironmentError:\n pass\n else:\n f.write(\n \"#error Do not use this file, it is the result of a failed Cython compilation.\\n\")\n f.close()\n if st:\n os.utime(path, (st.st_atime, st.st_mtime-1))\n\ndef file_newer_than(path, time):\n ftime = modification_time(path)\n return ftime > time\n\n\ndef safe_makedirs(path):\n try:\n os.makedirs(path)\n except OSError:\n if not os.path.isdir(path):\n raise\n\n\ndef copy_file_to_dir_if_newer(sourcefile, destdir):\n \"\"\"\n Copy file sourcefile to directory destdir (creating it if needed),\n preserving metadata. If the destination file exists and is not\n older than the source file, the copying is skipped.\n \"\"\"\n destfile = os.path.join(destdir, os.path.basename(sourcefile))\n try:\n desttime = modification_time(destfile)\n except OSError:\n # New file does not exist, destdir may or may not exist\n safe_makedirs(destdir)\n else:\n # New file already exists\n if not file_newer_than(sourcefile, desttime):\n return\n shutil.copy2(sourcefile, destfile)\n\n\n@cached_function\ndef search_include_directories(dirs, qualified_name, suffix, pos,\n include=False, sys_path=False):\n # Search the list of include directories for the given\n # file name. If a source file position is given, first\n # searches the directory containing that file. 
Returns\n # None if not found, but does not report an error.\n # The 'include' option will disable package dereferencing.\n # If 'sys_path' is True, also search sys.path.\n if sys_path:\n dirs = dirs + tuple(sys.path)\n if pos:\n file_desc = pos[0]\n from Cython.Compiler.Scanning import FileSourceDescriptor\n if not isinstance(file_desc, FileSourceDescriptor):\n raise RuntimeError(\"Only file sources for code supported\")\n if include:\n dirs = (os.path.dirname(file_desc.filename),) + dirs\n else:\n dirs = (find_root_package_dir(file_desc.filename),) + dirs\n\n dotted_filename = qualified_name\n if suffix:\n dotted_filename += suffix\n if not include:\n names = qualified_name.split('.')\n package_names = tuple(names[:-1])\n module_name = names[-1]\n module_filename = module_name + suffix\n package_filename = \"__init__\" + suffix\n\n for dir in dirs:\n path = os.path.join(dir, dotted_filename)\n if path_exists(path):\n return path\n if not include:\n package_dir = check_package_dir(dir, package_names)\n if package_dir is not None:\n path = os.path.join(package_dir, module_filename)\n if path_exists(path):\n return path\n path = os.path.join(dir, package_dir, module_name,\n package_filename)\n if path_exists(path):\n return path\n return None\n\n\n@cached_function\ndef find_root_package_dir(file_path):\n dir = os.path.dirname(file_path)\n if file_path == dir:\n return dir\n elif is_package_dir(dir):\n return find_root_package_dir(dir)\n else:\n return dir\n\n@cached_function\ndef check_package_dir(dir, package_names):\n for dirname in package_names:\n dir = os.path.join(dir, dirname)\n if not is_package_dir(dir):\n return None\n return dir\n\n@cached_function\ndef is_package_dir(dir_path):\n for filename in (\"__init__.py\",\n \"__init__.pyc\",\n \"__init__.pyx\",\n \"__init__.pxd\"):\n path = os.path.join(dir_path, filename)\n if path_exists(path):\n return 1\n\n@cached_function\ndef path_exists(path):\n # try on the filesystem first\n if os.path.exists(path):\n return True\n # figure out if a PEP 302 loader is around\n try:\n loader = __loader__\n # XXX the code below assumes a 'zipimport.zipimporter' instance\n # XXX should be easy to generalize, but too lazy right now to write it\n archive_path = getattr(loader, 'archive', None)\n if archive_path:\n normpath = os.path.normpath(path)\n if normpath.startswith(archive_path):\n arcname = normpath[len(archive_path)+1:]\n try:\n loader.get_data(arcname)\n return True\n except IOError:\n return False\n except NameError:\n pass\n return False\n\n# file name encodings\n\ndef decode_filename(filename):\n if isinstance(filename, bytes):\n try:\n filename_encoding = sys.getfilesystemencoding()\n if filename_encoding is None:\n filename_encoding = sys.getdefaultencoding()\n filename = filename.decode(filename_encoding)\n except UnicodeDecodeError:\n pass\n return filename\n\n# support for source file encoding detection\n\n_match_file_encoding = re.compile(u\"coding[:=]\\s*([-\\w.]+)\").search\n\n\ndef detect_file_encoding(source_filename):\n f = open_source_file(source_filename, encoding=\"UTF-8\", error_handling='ignore')\n try:\n return detect_opened_file_encoding(f)\n finally:\n f.close()\n\n\ndef detect_opened_file_encoding(f):\n # PEPs 263 and 3120\n # Most of the time the first two lines fall in the first 250 chars,\n # and this bulk read/split is much faster.\n lines = f.read(250).split(u\"\\n\")\n if len(lines) > 1:\n m = _match_file_encoding(lines[0])\n if m:\n return m.group(1)\n elif len(lines) > 2:\n m = _match_file_encoding(lines[1])\n 
if m:\n return m.group(1)\n else:\n return \"UTF-8\"\n # Fallback to one-char-at-a-time detection.\n f.seek(0)\n chars = []\n for i in range(2):\n c = f.read(1)\n while c and c != u'\\n':\n chars.append(c)\n c = f.read(1)\n encoding = _match_file_encoding(u''.join(chars))\n if encoding:\n return encoding.group(1)\n return \"UTF-8\"\n\n\ndef skip_bom(f):\n \"\"\"\n Read past a BOM at the beginning of a source file.\n This could be added to the scanner, but it's *substantially* easier\n to keep it at this level.\n \"\"\"\n if f.read(1) != u'\\uFEFF':\n f.seek(0)\n\n\ndef open_source_file(source_filename, mode=\"r\",\n encoding=None, error_handling=None):\n if encoding is None:\n # Most of the time the coding is unspecified, so be optimistic that\n # it's UTF-8.\n f = open_source_file(source_filename, encoding=\"UTF-8\", mode=mode, error_handling='ignore')\n encoding = detect_opened_file_encoding(f)\n if encoding == \"UTF-8\" and error_handling == 'ignore':\n f.seek(0)\n skip_bom(f)\n return f\n else:\n f.close()\n\n if not os.path.exists(source_filename):\n try:\n loader = __loader__\n if source_filename.startswith(loader.archive):\n return open_source_from_loader(\n loader, source_filename,\n encoding, error_handling)\n except (NameError, AttributeError):\n pass\n\n stream = io.open(source_filename, mode=mode,\n encoding=encoding, errors=error_handling)\n skip_bom(stream)\n return stream\n\n\ndef open_source_from_loader(loader,\n source_filename,\n encoding=None, error_handling=None):\n nrmpath = os.path.normpath(source_filename)\n arcname = nrmpath[len(loader.archive)+1:]\n data = loader.get_data(arcname)\n return io.TextIOWrapper(io.BytesIO(data),\n encoding=encoding,\n errors=error_handling)\n\n\ndef str_to_number(value):\n # note: this expects a string as input that was accepted by the\n # parser already, with an optional \"-\" sign in front\n is_neg = False\n if value[:1] == '-':\n is_neg = True\n value = value[1:]\n if len(value) < 2:\n value = int(value, 0)\n elif value[0] == '0':\n literal_type = value[1] # 0'o' - 0'b' - 0'x'\n if literal_type in 'xX':\n # hex notation ('0x1AF')\n value = int(value[2:], 16)\n elif literal_type in 'oO':\n # Py3 octal notation ('0o136')\n value = int(value[2:], 8)\n elif literal_type in 'bB':\n # Py3 binary notation ('0b101')\n value = int(value[2:], 2)\n else:\n # Py2 octal notation ('0136')\n value = int(value, 8)\n else:\n value = int(value, 0)\n return -value if is_neg else value\n\n\ndef long_literal(value):\n if isinstance(value, basestring):\n value = str_to_number(value)\n return not -2**31 <= value < 2**31\n\n\n@cached_function\ndef get_cython_cache_dir():\n r\"\"\"\n Return the base directory containing Cython's caches.\n\n Priority:\n\n 1. CYTHON_CACHE_DIR\n 2. (OS X): ~/Library/Caches/Cython\n (posix not OS X): XDG_CACHE_HOME/cython if XDG_CACHE_HOME defined\n 3. 
~/.cython\n\n \"\"\"\n if 'CYTHON_CACHE_DIR' in os.environ:\n return os.environ['CYTHON_CACHE_DIR']\n\n parent = None\n if os.name == 'posix':\n if sys.platform == 'darwin':\n parent = os.path.expanduser('~/Library/Caches')\n else:\n # this could fallback on ~/.cache\n parent = os.environ.get('XDG_CACHE_HOME')\n\n if parent and os.path.isdir(parent):\n return os.path.join(parent, 'cython')\n\n # last fallback: ~/.cython\n return os.path.expanduser(os.path.join('~', '.cython'))\n\n\n@contextmanager\ndef captured_fd(stream=2, encoding=None):\n pipe_in = t = None\n orig_stream = os.dup(stream) # keep copy of original stream\n try:\n pipe_in, pipe_out = os.pipe()\n os.dup2(pipe_out, stream) # replace stream by copy of pipe\n try:\n os.close(pipe_out) # close original pipe-out stream\n data = []\n\n def copy():\n try:\n while True:\n d = os.read(pipe_in, 1000)\n if d:\n data.append(d)\n else:\n break\n finally:\n os.close(pipe_in)\n\n def get_output():\n output = b''.join(data)\n if encoding:\n output = output.decode(encoding)\n return output\n\n from threading import Thread\n t = Thread(target=copy)\n t.daemon = True # just in case\n t.start()\n yield get_output\n finally:\n os.dup2(orig_stream, stream) # restore original stream\n if t is not None:\n t.join()\n finally:\n os.close(orig_stream)\n\n\ndef print_bytes(s, end=b'\\n', file=sys.stdout, flush=True):\n file.flush()\n try:\n out = file.buffer # Py3\n except AttributeError:\n out = file # Py2\n out.write(s)\n if end:\n out.write(end)\n if flush:\n out.flush()\n\nclass LazyStr:\n def __init__(self, callback):\n self.callback = callback\n def __str__(self):\n return self.callback()\n def __repr__(self):\n return self.callback()\n def __add__(self, right):\n return self.callback() + right\n def __radd__(self, left):\n return left + self.callback()\n\n\nclass OrderedSet(object):\n def __init__(self, elements=()):\n self._list = []\n self._set = set()\n self.update(elements)\n def __iter__(self):\n return iter(self._list)\n def update(self, elements):\n for e in elements:\n self.add(e)\n def add(self, e):\n if e not in self._set:\n self._list.append(e)\n self._set.add(e)\n\n\n# Class decorator that adds a metaclass and recreates the class with it.\n# Copied from 'six'.\ndef add_metaclass(metaclass):\n \"\"\"Class decorator for creating a class with a metaclass.\"\"\"\n def wrapper(cls):\n orig_vars = cls.__dict__.copy()\n slots = orig_vars.get('__slots__')\n if slots is not None:\n if isinstance(slots, str):\n slots = [slots]\n for slots_var in slots:\n orig_vars.pop(slots_var)\n orig_vars.pop('__dict__', None)\n orig_vars.pop('__weakref__', None)\n return metaclass(cls.__name__, cls.__bases__, orig_vars)\n return wrapper\n\n\ndef raise_error_if_module_name_forbidden(full_module_name):\n #it is bad idea to call the pyx-file cython.pyx, so fail early\n if full_module_name == 'cython' or full_module_name.endswith('.cython'):\n raise ValueError('cython is a special module, cannot be used as a module name')\n", "path": "Cython/Utils.py" } ]
[ { "content": "#\n# Cython -- Things that don't belong\n# anywhere else in particular\n#\n\nfrom __future__ import absolute_import\n\ntry:\n from __builtin__ import basestring\nexcept ImportError:\n basestring = str\n\nimport os\nimport sys\nimport re\nimport io\nimport codecs\nimport shutil\nfrom contextlib import contextmanager\n\nmodification_time = os.path.getmtime\n\n_function_caches = []\ndef clear_function_caches():\n for cache in _function_caches:\n cache.clear()\n\ndef cached_function(f):\n cache = {}\n _function_caches.append(cache)\n uncomputed = object()\n def wrapper(*args):\n res = cache.get(args, uncomputed)\n if res is uncomputed:\n res = cache[args] = f(*args)\n return res\n wrapper.uncached = f\n return wrapper\n\ndef cached_method(f):\n cache_name = '__%s_cache' % f.__name__\n def wrapper(self, *args):\n cache = getattr(self, cache_name, None)\n if cache is None:\n cache = {}\n setattr(self, cache_name, cache)\n if args in cache:\n return cache[args]\n res = cache[args] = f(self, *args)\n return res\n return wrapper\n\ndef replace_suffix(path, newsuf):\n base, _ = os.path.splitext(path)\n return base + newsuf\n\n\ndef open_new_file(path):\n if os.path.exists(path):\n # Make sure to create a new file here so we can\n # safely hard link the output files.\n os.unlink(path)\n\n # we use the ISO-8859-1 encoding here because we only write pure\n # ASCII strings or (e.g. for file names) byte encoded strings as\n # Unicode, so we need a direct mapping from the first 256 Unicode\n # characters to a byte sequence, which ISO-8859-1 provides\n\n # note: can't use io.open() in Py2 as we may be writing str objects\n return codecs.open(path, \"w\", encoding=\"ISO-8859-1\")\n\n\ndef castrate_file(path, st):\n # Remove junk contents from an output file after a\n # failed compilation.\n # Also sets access and modification times back to\n # those specified by st (a stat struct).\n try:\n f = open_new_file(path)\n except EnvironmentError:\n pass\n else:\n f.write(\n \"#error Do not use this file, it is the result of a failed Cython compilation.\\n\")\n f.close()\n if st:\n os.utime(path, (st.st_atime, st.st_mtime-1))\n\ndef file_newer_than(path, time):\n ftime = modification_time(path)\n return ftime > time\n\n\ndef safe_makedirs(path):\n try:\n os.makedirs(path)\n except OSError:\n if not os.path.isdir(path):\n raise\n\n\ndef copy_file_to_dir_if_newer(sourcefile, destdir):\n \"\"\"\n Copy file sourcefile to directory destdir (creating it if needed),\n preserving metadata. If the destination file exists and is not\n older than the source file, the copying is skipped.\n \"\"\"\n destfile = os.path.join(destdir, os.path.basename(sourcefile))\n try:\n desttime = modification_time(destfile)\n except OSError:\n # New file does not exist, destdir may or may not exist\n safe_makedirs(destdir)\n else:\n # New file already exists\n if not file_newer_than(sourcefile, desttime):\n return\n shutil.copy2(sourcefile, destfile)\n\n\n@cached_function\ndef search_include_directories(dirs, qualified_name, suffix, pos,\n include=False, sys_path=False):\n # Search the list of include directories for the given\n # file name. If a source file position is given, first\n # searches the directory containing that file. 
Returns\n # None if not found, but does not report an error.\n # The 'include' option will disable package dereferencing.\n # If 'sys_path' is True, also search sys.path.\n if sys_path:\n dirs = dirs + tuple(sys.path)\n if pos:\n file_desc = pos[0]\n from Cython.Compiler.Scanning import FileSourceDescriptor\n if not isinstance(file_desc, FileSourceDescriptor):\n raise RuntimeError(\"Only file sources for code supported\")\n if include:\n dirs = (os.path.dirname(file_desc.filename),) + dirs\n else:\n dirs = (find_root_package_dir(file_desc.filename),) + dirs\n\n dotted_filename = qualified_name\n if suffix:\n dotted_filename += suffix\n if not include:\n names = qualified_name.split('.')\n package_names = tuple(names[:-1])\n module_name = names[-1]\n module_filename = module_name + suffix\n package_filename = \"__init__\" + suffix\n\n for dir in dirs:\n path = os.path.join(dir, dotted_filename)\n if path_exists(path):\n return path\n if not include:\n package_dir = check_package_dir(dir, package_names)\n if package_dir is not None:\n path = os.path.join(package_dir, module_filename)\n if path_exists(path):\n return path\n path = os.path.join(dir, package_dir, module_name,\n package_filename)\n if path_exists(path):\n return path\n return None\n\n\n@cached_function\ndef find_root_package_dir(file_path):\n dir = os.path.dirname(file_path)\n if file_path == dir:\n return dir\n elif is_package_dir(dir):\n return find_root_package_dir(dir)\n else:\n return dir\n\n@cached_function\ndef check_package_dir(dir, package_names):\n for dirname in package_names:\n dir = os.path.join(dir, dirname)\n if not is_package_dir(dir):\n return None\n return dir\n\n@cached_function\ndef is_package_dir(dir_path):\n for filename in (\"__init__.py\",\n \"__init__.pyc\",\n \"__init__.pyx\",\n \"__init__.pxd\"):\n path = os.path.join(dir_path, filename)\n if path_exists(path):\n return 1\n\n@cached_function\ndef path_exists(path):\n # try on the filesystem first\n if os.path.exists(path):\n return True\n # figure out if a PEP 302 loader is around\n try:\n loader = __loader__\n # XXX the code below assumes a 'zipimport.zipimporter' instance\n # XXX should be easy to generalize, but too lazy right now to write it\n archive_path = getattr(loader, 'archive', None)\n if archive_path:\n normpath = os.path.normpath(path)\n if normpath.startswith(archive_path):\n arcname = normpath[len(archive_path)+1:]\n try:\n loader.get_data(arcname)\n return True\n except IOError:\n return False\n except NameError:\n pass\n return False\n\n# file name encodings\n\ndef decode_filename(filename):\n if isinstance(filename, bytes):\n try:\n filename_encoding = sys.getfilesystemencoding()\n if filename_encoding is None:\n filename_encoding = sys.getdefaultencoding()\n filename = filename.decode(filename_encoding)\n except UnicodeDecodeError:\n pass\n return filename\n\n# support for source file encoding detection\n\n_match_file_encoding = re.compile(u\"coding[:=]\\s*([-\\w.]+)\").search\n\n\ndef detect_file_encoding(source_filename):\n f = open_source_file(source_filename, encoding=\"UTF-8\", error_handling='ignore')\n try:\n return detect_opened_file_encoding(f)\n finally:\n f.close()\n\n\ndef detect_opened_file_encoding(f):\n # PEPs 263 and 3120\n # Most of the time the first two lines fall in the first 250 chars,\n # and this bulk read/split is much faster.\n lines = f.read(250).split(u\"\\n\")\n if len(lines) > 1:\n m = _match_file_encoding(lines[0])\n if m:\n return m.group(1)\n elif len(lines) > 2:\n m = _match_file_encoding(lines[1])\n 
if m:\n return m.group(1)\n else:\n return \"UTF-8\"\n # Fallback to one-char-at-a-time detection.\n f.seek(0)\n chars = []\n for i in range(2):\n c = f.read(1)\n while c and c != u'\\n':\n chars.append(c)\n c = f.read(1)\n encoding = _match_file_encoding(u''.join(chars))\n if encoding:\n return encoding.group(1)\n return \"UTF-8\"\n\n\ndef skip_bom(f):\n \"\"\"\n Read past a BOM at the beginning of a source file.\n This could be added to the scanner, but it's *substantially* easier\n to keep it at this level.\n \"\"\"\n if f.read(1) != u'\\uFEFF':\n f.seek(0)\n\n\ndef open_source_file(source_filename, mode=\"r\",\n encoding=None, error_handling=None):\n if encoding is None:\n # Most of the time the coding is unspecified, so be optimistic that\n # it's UTF-8.\n f = open_source_file(source_filename, encoding=\"UTF-8\", mode=mode, error_handling='ignore')\n encoding = detect_opened_file_encoding(f)\n if encoding == \"UTF-8\" and error_handling == 'ignore':\n f.seek(0)\n skip_bom(f)\n return f\n else:\n f.close()\n\n if not os.path.exists(source_filename):\n try:\n loader = __loader__\n if source_filename.startswith(loader.archive):\n return open_source_from_loader(\n loader, source_filename,\n encoding, error_handling)\n except (NameError, AttributeError):\n pass\n\n stream = io.open(source_filename, mode=mode,\n encoding=encoding, errors=error_handling)\n skip_bom(stream)\n return stream\n\n\ndef open_source_from_loader(loader,\n source_filename,\n encoding=None, error_handling=None):\n nrmpath = os.path.normpath(source_filename)\n arcname = nrmpath[len(loader.archive)+1:]\n data = loader.get_data(arcname)\n return io.TextIOWrapper(io.BytesIO(data),\n encoding=encoding,\n errors=error_handling)\n\n\ndef str_to_number(value):\n # note: this expects a string as input that was accepted by the\n # parser already, with an optional \"-\" sign in front\n is_neg = False\n if value[:1] == '-':\n is_neg = True\n value = value[1:]\n if len(value) < 2:\n value = int(value, 0)\n elif value[0] == '0':\n literal_type = value[1] # 0'o' - 0'b' - 0'x'\n if literal_type in 'xX':\n # hex notation ('0x1AF')\n value = int(value[2:], 16)\n elif literal_type in 'oO':\n # Py3 octal notation ('0o136')\n value = int(value[2:], 8)\n elif literal_type in 'bB':\n # Py3 binary notation ('0b101')\n value = int(value[2:], 2)\n else:\n # Py2 octal notation ('0136')\n value = int(value, 8)\n else:\n value = int(value, 0)\n return -value if is_neg else value\n\n\ndef long_literal(value):\n if isinstance(value, basestring):\n value = str_to_number(value)\n return not -2**31 <= value < 2**31\n\n\n@cached_function\ndef get_cython_cache_dir():\n r\"\"\"\n Return the base directory containing Cython's caches.\n\n Priority:\n\n 1. CYTHON_CACHE_DIR\n 2. (OS X): ~/Library/Caches/Cython\n (posix not OS X): XDG_CACHE_HOME/cython if XDG_CACHE_HOME defined\n 3. 
~/.cython\n\n \"\"\"\n if 'CYTHON_CACHE_DIR' in os.environ:\n return os.environ['CYTHON_CACHE_DIR']\n\n parent = None\n if os.name == 'posix':\n if sys.platform == 'darwin':\n parent = os.path.expanduser('~/Library/Caches')\n else:\n # this could fallback on ~/.cache\n parent = os.environ.get('XDG_CACHE_HOME')\n\n if parent and os.path.isdir(parent):\n return os.path.join(parent, 'cython')\n\n # last fallback: ~/.cython\n return os.path.expanduser(os.path.join('~', '.cython'))\n\n\n@contextmanager\ndef captured_fd(stream=2, encoding=None):\n pipe_in = t = None\n orig_stream = os.dup(stream) # keep copy of original stream\n try:\n pipe_in, pipe_out = os.pipe()\n os.dup2(pipe_out, stream) # replace stream by copy of pipe\n try:\n os.close(pipe_out) # close original pipe-out stream\n data = []\n\n def copy():\n try:\n while True:\n d = os.read(pipe_in, 1000)\n if d:\n data.append(d)\n else:\n break\n finally:\n os.close(pipe_in)\n\n def get_output():\n output = b''.join(data)\n if encoding:\n output = output.decode(encoding)\n return output\n\n from threading import Thread\n t = Thread(target=copy)\n t.daemon = True # just in case\n t.start()\n yield get_output\n finally:\n os.dup2(orig_stream, stream) # restore original stream\n if t is not None:\n t.join()\n finally:\n os.close(orig_stream)\n\n\ndef print_bytes(s, end=b'\\n', file=sys.stdout, flush=True):\n file.flush()\n try:\n out = file.buffer # Py3\n except AttributeError:\n out = file # Py2\n out.write(s)\n if end:\n out.write(end)\n if flush:\n out.flush()\n\nclass LazyStr:\n def __init__(self, callback):\n self.callback = callback\n def __str__(self):\n return self.callback()\n def __repr__(self):\n return self.callback()\n def __add__(self, right):\n return self.callback() + right\n def __radd__(self, left):\n return left + self.callback()\n\n\nclass OrderedSet(object):\n def __init__(self, elements=()):\n self._list = []\n self._set = set()\n self.update(elements)\n def __iter__(self):\n return iter(self._list)\n def update(self, elements):\n for e in elements:\n self.add(e)\n def add(self, e):\n if e not in self._set:\n self._list.append(e)\n self._set.add(e)\n\n\n# Class decorator that adds a metaclass and recreates the class with it.\n# Copied from 'six'.\ndef add_metaclass(metaclass):\n \"\"\"Class decorator for creating a class with a metaclass.\"\"\"\n def wrapper(cls):\n orig_vars = cls.__dict__.copy()\n slots = orig_vars.get('__slots__')\n if slots is not None:\n if isinstance(slots, str):\n slots = [slots]\n for slots_var in slots:\n orig_vars.pop(slots_var)\n orig_vars.pop('__dict__', None)\n orig_vars.pop('__weakref__', None)\n return metaclass(cls.__name__, cls.__bases__, orig_vars)\n return wrapper\n\n\ndef raise_error_if_module_name_forbidden(full_module_name):\n #it is bad idea to call the pyx-file cython.pyx, so fail early\n if full_module_name == 'cython' or full_module_name.startswith('cython.'):\n raise ValueError('cython is a special module, cannot be used as a module name')\n", "path": "Cython/Utils.py" } ]
diff --git a/Cython/Utils.py b/Cython/Utils.py index b3af7865135..1808e64fcce 100644 --- a/Cython/Utils.py +++ b/Cython/Utils.py @@ -491,5 +491,5 @@ def wrapper(cls): def raise_error_if_module_name_forbidden(full_module_name): #it is bad idea to call the pyx-file cython.pyx, so fail early - if full_module_name == 'cython' or full_module_name.endswith('.cython'): + if full_module_name == 'cython' or full_module_name.startswith('cython.'): raise ValueError('cython is a special module, cannot be used as a module name') diff --git a/tests/build/cythonize_cython.srctree b/tests/build/cythonize_cython.srctree index 88bbb8496eb..1712436e361 100644 --- a/tests/build/cythonize_cython.srctree +++ b/tests/build/cythonize_cython.srctree @@ -5,7 +5,7 @@ PYTHON -c "import cython_tests" from Cython.Build.Cythonize import main as cythonize -for test_case in ["cython.pyx", "src/cython.pyx", "src2/cython.pyx"]: +for test_case in ["cython.pyx", "src2/cython.pyx", "src/cython/helper.pyx"]: try: cythonize([test_case]) except ValueError: @@ -13,12 +13,13 @@ for test_case in ["cython.pyx", "src/cython.pyx", "src2/cython.pyx"]: else: assert False, "ValueError not raised - forbidding cythonize "+test_case+" doesn't work" -try: - cythonize(["notcython.pys"]) -except ValueError: - assert False, "ValueError raised - forbidding cythonize notcython.pyx should work" -else: - pass +for test_case in ["notcython.pyx", "my_module/cython.pyx", "cythontest/helper.pyx"]: + try: + cythonize([test_case]) + except ValueError: + assert False, "ValueError raised - cythonize "+test_case+" should work" + else: + pass ######## cython_tests.py ######## @@ -26,7 +27,7 @@ else: from Cython.Compiler.Main import main as cython import sys -for test_case in ["cython.pyx", "src/cython.pyx", "src2/cython.pyx"]: +for test_case in ["cython.pyx", "scr2/cython.pyx", "src/cython/helper.pyx"]: sys.argv=["cython", test_case] #cython.py will extract parameters from sys.argv try: cython(command_line=1) @@ -35,16 +36,23 @@ for test_case in ["cython.pyx", "src/cython.pyx", "src2/cython.pyx"]: else: assert False, "ValueError not raised - forbidding cython "+test_case+" doesn't work" -sys.argv=["cython", "notcython.pyx"] #cython.py will extract parameters from sys.argv -try: - cython(["notcython.pys"]) -except ValueError: - assert False, "ValueError raised - forbidding cythonize notcython.pyx should work" -else: - pass + +for test_case in ["notcython.pyx", "my_module/cython.pyx", "cythontest/helper.pyx"]: + sys.argv=["cython", test_case] #cython.py will extract parameters from sys.argv + try: + cython([test_case]) + except ValueError: + assert False, "ValueError raised - cython "+test_case+" should work" + else: + pass + ######## cython.pyx ######## -######## src/__init__.py ######## -######## src/cython.pyx ######## +######## my_module/__init__.py ######## +######## my_module/cython.pyx ######## ######## notcython.pyx ######## ######## src2/cython.pyx ######## +######## src/cython/__init__.py ######## +######## src/cython/helper.pyx ######## +######## cythontest/__init__.py ######## +######## cythontest/helper.pyx ########
Submodule named "cython.pyx" doesn't build

The [fix for #2422](https://github.com/cython/cython/commit/6c91bf8e5bc99b625405919f9318d5626ecfa782#diff-26945d164aa2d5cb24bbe2cb4b8903ed) introduced a regression: submodules called cython.pyx are no longer built, i.e. for a test case such as:

```
######## my_module/__init__.py ########
######## mymodule/cython.pyx ########
```

It might be a little inconsistent to build cython.pyx in one case (as a submodule) but not in the other, but it is probably better not to break existing workflows.
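To make the regression concrete, here is a standalone comparison of the two conditions from the diff above (copied out of their original function so they can run on their own): the old `endswith('.cython')` check rejects any submodule that happens to be called `cython`, while the new `startswith('cython.')` check only rejects modules placed under the special top-level `cython` package.

```python
def forbidden_before(full_module_name):
    # condition introduced by the fix for #2422 (too broad)
    return full_module_name == 'cython' or full_module_name.endswith('.cython')


def forbidden_after(full_module_name):
    # condition after this PR
    return full_module_name == 'cython' or full_module_name.startswith('cython.')


for name in ('cython', 'cython.helper', 'my_module.cython', 'cythontest.helper'):
    print(name, forbidden_before(name), forbidden_after(name))

# Expected output:
#   cython True True
#   cython.helper False True
#   my_module.cython True False    <- the regression: a harmless submodule was rejected
#   cythontest.helper False False
```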
vyperlang__vyper-1465
[ { "content": "import hashlib\n\nfrom vyper import ast\nfrom vyper.exceptions import (\n ConstancyViolationException,\n InvalidLiteralException,\n ParserException,\n StructureException,\n TypeMismatchException,\n)\nfrom vyper.parser.expr import (\n Expr,\n)\nfrom vyper.parser.parser_utils import (\n LLLnode,\n add_variable_offset,\n byte_array_to_num,\n get_length,\n get_number_as_fraction,\n getpos,\n make_byte_array_copier,\n make_byte_slice_copier,\n unwrap_location,\n)\nfrom vyper.signatures.function_signature import (\n VariableRecord,\n)\nfrom vyper.types import (\n BaseType,\n ByteArrayLike,\n ByteArrayType,\n ListType,\n StringType,\n TupleType,\n are_units_compatible,\n get_size_of_type,\n is_base_type,\n)\nfrom vyper.types.convert import (\n convert,\n)\nfrom vyper.utils import (\n DECIMAL_DIVISOR,\n RLP_DECODER_ADDRESS,\n MemoryPositions,\n SizeLimits,\n bytes_to_int,\n fourbytes_to_int,\n sha3,\n)\n\nfrom .signatures import (\n Optional,\n signature,\n)\n\nSHA256_ADDRESS = 2\nSHA256_BASE_GAS = 60\nSHA256_PER_WORD_GAS = 12\n\n\ndef enforce_units(typ, obj, expected):\n if not are_units_compatible(typ, expected):\n raise TypeMismatchException(\"Invalid units\", obj)\n\n\ndef get_keyword(expr, keyword):\n for kw in expr.keywords:\n if kw.arg == keyword:\n return kw.value\n # This should never happen, as kwargs['value'] will KeyError first.\n # Leaving exception for other use cases.\n raise Exception(\"Keyword %s not found\" % keyword) # pragma: no cover\n\n\n@signature('decimal')\ndef floor(expr, args, kwargs, context):\n return LLLnode.from_list(\n [\n 'if',\n ['slt', args[0], 0],\n ['sdiv', ['sub', args[0], DECIMAL_DIVISOR - 1], DECIMAL_DIVISOR],\n ['sdiv', args[0], DECIMAL_DIVISOR]\n ],\n typ=BaseType('int128', args[0].typ.unit, args[0].typ.positional),\n pos=getpos(expr)\n )\n\n\n@signature('decimal')\ndef ceil(expr, args, kwards, context):\n return LLLnode.from_list(\n [\n 'if',\n ['slt', args[0], 0],\n ['sdiv', args[0], DECIMAL_DIVISOR],\n ['sdiv', ['add', args[0], DECIMAL_DIVISOR - 1], DECIMAL_DIVISOR]\n ],\n typ=BaseType('int128', args[0].typ.unit, args[0].typ.positional),\n pos=getpos(expr)\n )\n\n\n@signature(('uint256', 'int128', 'decimal'))\ndef as_unitless_number(expr, args, kwargs, context):\n return LLLnode(\n value=args[0].value,\n args=args[0].args,\n typ=BaseType(args[0].typ.typ, {}),\n pos=getpos(expr),\n )\n\n\ndef _convert(expr, context):\n return convert(expr, context)\n\n\n@signature(('bytes32', 'bytes', 'string'), start='int128', len='int128')\ndef _slice(expr, args, kwargs, context):\n\n sub, start, length = args[0], kwargs['start'], kwargs['len']\n if not are_units_compatible(start.typ, BaseType('int128')):\n raise TypeMismatchException(\"Type for slice start index must be a unitless number\", expr)\n # Expression representing the length of the slice\n if not are_units_compatible(length.typ, BaseType('int128')):\n raise TypeMismatchException(\"Type for slice length must be a unitless number\", expr)\n\n if is_base_type(sub.typ, 'bytes32'):\n if (start.typ.is_literal and length.typ.is_literal) and \\\n not (0 <= start.value + length.value <= 32):\n raise InvalidLiteralException(\n 'Invalid start / length values needs to be between 0 and 32.',\n expr,\n )\n sub_typ_maxlen = 32\n else:\n sub_typ_maxlen = sub.typ.maxlen\n\n # Get returntype string or bytes\n if isinstance(args[0].typ, ByteArrayType) or is_base_type(sub.typ, 'bytes32'):\n ReturnType = ByteArrayType\n else:\n ReturnType = StringType\n\n # Node representing the position of the output in memory\n 
np = context.new_placeholder(ReturnType(maxlen=sub_typ_maxlen + 32))\n placeholder_node = LLLnode.from_list(np, typ=sub.typ, location='memory')\n placeholder_plus_32_node = LLLnode.from_list(np + 32, typ=sub.typ, location='memory')\n # Copies over bytearray data\n if sub.location == 'storage':\n adj_sub = LLLnode.from_list(\n ['add', ['sha3_32', sub], ['add', ['div', '_start', 32], 1]],\n typ=sub.typ,\n location=sub.location,\n )\n else:\n adj_sub = LLLnode.from_list(\n ['add', sub, ['add', ['sub', '_start', ['mod', '_start', 32]], 32]],\n typ=sub.typ,\n location=sub.location,\n )\n\n if is_base_type(sub.typ, 'bytes32'):\n adj_sub = LLLnode.from_list(\n sub.args[0], typ=sub.typ, location=\"memory\"\n )\n\n copier = make_byte_slice_copier(\n placeholder_plus_32_node,\n adj_sub,\n ['add', '_length', 32],\n sub_typ_maxlen,\n pos=getpos(expr),\n )\n # New maximum length in the type of the result\n newmaxlen = length.value if not len(length.args) else sub_typ_maxlen\n if is_base_type(sub.typ, 'bytes32'):\n maxlen = 32\n else:\n maxlen = ['mload', Expr(sub, context=context).lll_node] # Retrieve length of the bytes.\n\n out = [\n 'with', '_start', start, [\n 'with', '_length', length, [\n 'with', '_opos', ['add', placeholder_node, ['mod', '_start', 32]], [\n 'seq',\n ['assert', ['le', ['add', '_start', '_length'], maxlen]],\n copier,\n ['mstore', '_opos', '_length'],\n '_opos'\n ],\n ],\n ],\n ]\n return LLLnode.from_list(out, typ=ReturnType(newmaxlen), location='memory', pos=getpos(expr))\n\n\n@signature(('bytes', 'string'))\ndef _len(expr, args, kwargs, context):\n return get_length(args[0])\n\n\ndef concat(expr, context):\n args = [Expr(arg, context).lll_node for arg in expr.args]\n if len(args) < 2:\n raise StructureException(\"Concat expects at least two arguments\", expr)\n\n prev_type = ''\n for _, (expr_arg, arg) in enumerate(zip(expr.args, args)):\n if not isinstance(arg.typ, ByteArrayLike) and not is_base_type(arg.typ, 'bytes32'):\n raise TypeMismatchException(\"Concat expects string, bytes or bytes32 objects\", expr_arg)\n\n current_type = (\n 'bytes'\n if isinstance(arg.typ, ByteArrayType) or is_base_type(arg.typ, 'bytes32')\n else 'string'\n )\n if prev_type and current_type != prev_type:\n raise TypeMismatchException(\n (\n \"Concat expects consistant use of string or byte types, \"\n \"user either bytes or string.\"\n ),\n expr_arg,\n )\n prev_type = current_type\n\n if current_type == 'string':\n ReturnType = StringType\n else:\n ReturnType = ByteArrayType\n\n # Maximum length of the output\n total_maxlen = sum([\n arg.typ.maxlen if isinstance(arg.typ, ByteArrayType) else 32 for arg in args\n ])\n # Node representing the position of the output in memory\n placeholder = context.new_placeholder(ReturnType(total_maxlen))\n # Object representing the output\n seq = []\n # For each argument we are concatenating...\n for arg in args:\n # Start pasting into a position the starts at zero, and keeps\n # incrementing as we concatenate arguments\n placeholder_node = LLLnode.from_list(\n ['add', placeholder, '_poz'],\n typ=ReturnType(total_maxlen),\n location='memory',\n )\n placeholder_node_plus_32 = LLLnode.from_list(\n ['add', ['add', placeholder, '_poz'], 32],\n typ=ReturnType(total_maxlen),\n location='memory',\n )\n if isinstance(arg.typ, ReturnType):\n # Ignore empty strings\n if arg.typ.maxlen == 0:\n continue\n # Get the length of the current argument\n if arg.location == \"memory\":\n length = LLLnode.from_list(['mload', '_arg'], typ=BaseType('int128'))\n argstart = 
LLLnode.from_list(\n ['add', '_arg', 32],\n typ=arg.typ,\n location=arg.location,\n )\n elif arg.location == \"storage\":\n length = LLLnode.from_list(['sload', ['sha3_32', '_arg']], typ=BaseType('int128'))\n argstart = LLLnode.from_list(\n ['add', ['sha3_32', '_arg'], 1],\n typ=arg.typ,\n location=arg.location,\n )\n # Make a copier to copy over data from that argument\n seq.append([\n 'with', '_arg', arg, [\n 'seq',\n make_byte_slice_copier(\n placeholder_node_plus_32,\n argstart,\n length,\n arg.typ.maxlen, pos=getpos(expr),\n ),\n # Change the position to start at the correct\n # place to paste the next value\n ['set', '_poz', ['add', '_poz', length]],\n ],\n ])\n else:\n seq.append([\n 'seq',\n ['mstore', ['add', placeholder_node, 32], unwrap_location(arg)],\n ['set', '_poz', ['add', '_poz', 32]],\n ])\n # The position, after all arguments are processing, equals the total\n # length. Paste this in to make the output a proper bytearray\n seq.append(['mstore', placeholder, '_poz'])\n # Memory location of the output\n seq.append(placeholder)\n return LLLnode.from_list(\n ['with', '_poz', 0, ['seq'] + seq],\n typ=ReturnType(total_maxlen),\n location='memory',\n pos=getpos(expr),\n annotation='concat',\n )\n\n\n@signature(('bytes_literal', 'str_literal', 'bytes', 'string', 'bytes32'))\ndef _sha3(expr, args, kwargs, context):\n sub = args[0]\n # Can hash literals\n if isinstance(sub, bytes):\n return LLLnode.from_list(\n bytes_to_int(sha3(sub)),\n typ=BaseType('bytes32'),\n pos=getpos(expr)\n )\n # Can hash bytes32 objects\n if is_base_type(sub.typ, 'bytes32'):\n return LLLnode.from_list(\n [\n 'seq',\n ['mstore', MemoryPositions.FREE_VAR_SPACE, sub],\n ['sha3', MemoryPositions.FREE_VAR_SPACE, 32]\n ],\n typ=BaseType('bytes32'),\n pos=getpos(expr),\n )\n # Copy the data to an in-memory array\n if sub.location == \"memory\":\n # If we are hashing a value in memory, no need to copy it, just hash in-place\n return LLLnode.from_list(\n ['with', '_sub', sub, ['sha3', ['add', '_sub', 32], ['mload', '_sub']]],\n typ=BaseType('bytes32'),\n pos=getpos(expr),\n )\n elif sub.location == \"storage\":\n lengetter = LLLnode.from_list(['sload', ['sha3_32', '_sub']], typ=BaseType('int128'))\n else:\n # This should never happen, but just left here for future compiler-writers.\n raise Exception(\"Unsupported location: %s\" % sub.location) # pragma: no test\n placeholder = context.new_placeholder(sub.typ)\n placeholder_node = LLLnode.from_list(placeholder, typ=sub.typ, location='memory')\n copier = make_byte_array_copier(\n placeholder_node,\n LLLnode.from_list('_sub', typ=sub.typ, location=sub.location),\n )\n return LLLnode.from_list(\n [\n 'with', '_sub', sub, [\n 'seq',\n copier,\n ['sha3', ['add', placeholder, 32], lengetter]\n ],\n ],\n typ=BaseType('bytes32'),\n pos=getpos(expr)\n )\n\n\ndef _make_sha256_call(inp_start, inp_len, out_start, out_len):\n return [\n 'assert', [\n 'call',\n ['gas'], # gas\n SHA256_ADDRESS, # address\n 0, # value\n inp_start,\n inp_len,\n out_start,\n out_len\n ]\n ]\n\n\n@signature(('bytes_literal', 'str_literal', 'bytes', 'string', 'bytes32'))\ndef sha256(expr, args, kwargs, context):\n sub = args[0]\n # Literal input\n if isinstance(sub, bytes):\n return LLLnode.from_list(\n bytes_to_int(hashlib.sha256(sub).digest()),\n typ=BaseType('bytes32'),\n pos=getpos(expr)\n )\n # bytes32 input\n elif is_base_type(sub.typ, 'bytes32'):\n return LLLnode.from_list(\n [\n 'seq',\n ['mstore', MemoryPositions.FREE_VAR_SPACE, sub],\n _make_sha256_call(\n 
inp_start=MemoryPositions.FREE_VAR_SPACE,\n inp_len=32,\n out_start=MemoryPositions.FREE_VAR_SPACE,\n out_len=32\n ),\n ['mload', MemoryPositions.FREE_VAR_SPACE] # push value onto stack\n ],\n typ=BaseType('bytes32'),\n pos=getpos(expr),\n add_gas_estimate=SHA256_BASE_GAS + 1 * SHA256_PER_WORD_GAS\n )\n # bytearay-like input\n if sub.location == \"storage\":\n # Copy storage to memory\n placeholder = context.new_placeholder(sub.typ)\n placeholder_node = LLLnode.from_list(placeholder, typ=sub.typ, location='memory')\n copier = make_byte_array_copier(\n placeholder_node,\n LLLnode.from_list('_sub', typ=sub.typ, location=sub.location),\n )\n return LLLnode.from_list(\n [\n 'with', '_sub', sub, [\n 'seq',\n copier,\n _make_sha256_call(\n inp_start=['add', placeholder, 32],\n inp_len=['mload', placeholder],\n out_start=MemoryPositions.FREE_VAR_SPACE,\n out_len=32\n ),\n ['mload', MemoryPositions.FREE_VAR_SPACE]\n ],\n ],\n typ=BaseType('bytes32'),\n pos=getpos(expr),\n add_gas_estimate=SHA256_BASE_GAS + sub.typ.maxlen * SHA256_PER_WORD_GAS\n )\n elif sub.location == \"memory\":\n return LLLnode.from_list(\n [\n 'with', '_sub', sub, [\n 'seq',\n _make_sha256_call(\n inp_start=['add', '_sub', 32],\n inp_len=['mload', '_sub'],\n out_start=MemoryPositions.FREE_VAR_SPACE,\n out_len=32\n ),\n ['mload', MemoryPositions.FREE_VAR_SPACE]\n ]\n ],\n typ=BaseType('bytes32'),\n pos=getpos(expr),\n add_gas_estimate=SHA256_BASE_GAS + sub.typ.maxlen * SHA256_PER_WORD_GAS\n )\n else:\n # This should never happen, but just left here for future compiler-writers.\n raise Exception(\"Unsupported location: %s\" % sub.location) # pragma: no test\n\n\n@signature('str_literal', 'name_literal')\ndef method_id(expr, args, kwargs, context):\n if b' ' in args[0]:\n raise TypeMismatchException('Invalid function signature no spaces allowed.')\n method_id = fourbytes_to_int(sha3(args[0])[:4])\n if args[1] == 'bytes32':\n return LLLnode(method_id, typ=BaseType('bytes32'), pos=getpos(expr))\n elif args[1] == 'bytes[4]':\n placeholder = LLLnode.from_list(context.new_placeholder(ByteArrayType(4)))\n return LLLnode.from_list(\n ['seq',\n ['mstore', ['add', placeholder, 4], method_id],\n ['mstore', placeholder, 4], placeholder],\n typ=ByteArrayType(4), location='memory', pos=getpos(expr))\n else:\n raise StructureException('Can only produce bytes32 or bytes[4] as outputs')\n\n\n@signature('bytes32', 'uint256', 'uint256', 'uint256')\ndef ecrecover(expr, args, kwargs, context):\n placeholder_node = LLLnode.from_list(\n context.new_placeholder(ByteArrayType(128)), typ=ByteArrayType(128), location='memory'\n )\n return LLLnode.from_list([\n 'seq',\n ['mstore', placeholder_node, args[0]],\n ['mstore', ['add', placeholder_node, 32], args[1]],\n ['mstore', ['add', placeholder_node, 64], args[2]],\n ['mstore', ['add', placeholder_node, 96], args[3]],\n ['pop', ['call', 3000, 1, 0, placeholder_node, 128, MemoryPositions.FREE_VAR_SPACE, 32]],\n ['mload', MemoryPositions.FREE_VAR_SPACE],\n ], typ=BaseType('address'), pos=getpos(expr))\n\n\ndef avo(arg, ind, pos):\n return unwrap_location(add_variable_offset(arg, LLLnode.from_list(ind, 'int128'), pos=pos))\n\n\n@signature('uint256[2]', 'uint256[2]')\ndef ecadd(expr, args, kwargs, context):\n placeholder_node = LLLnode.from_list(\n context.new_placeholder(ByteArrayType(128)), typ=ByteArrayType(128), location='memory'\n )\n pos = getpos(expr)\n o = LLLnode.from_list([\n 'seq',\n ['mstore', placeholder_node, avo(args[0], 0, pos)],\n ['mstore', ['add', placeholder_node, 32], avo(args[0], 1, 
pos)],\n ['mstore', ['add', placeholder_node, 64], avo(args[1], 0, pos)],\n ['mstore', ['add', placeholder_node, 96], avo(args[1], 1, pos)],\n ['assert', ['call', 500, 6, 0, placeholder_node, 128, placeholder_node, 64]],\n placeholder_node,\n ], typ=ListType(BaseType('uint256'), 2), pos=getpos(expr), location='memory')\n return o\n\n\n@signature('uint256[2]', 'uint256')\ndef ecmul(expr, args, kwargs, context):\n placeholder_node = LLLnode.from_list(\n context.new_placeholder(ByteArrayType(128)), typ=ByteArrayType(128), location='memory'\n )\n pos = getpos(expr)\n o = LLLnode.from_list([\n 'seq',\n ['mstore', placeholder_node, avo(args[0], 0, pos)],\n ['mstore', ['add', placeholder_node, 32], avo(args[0], 1, pos)],\n ['mstore', ['add', placeholder_node, 64], args[1]],\n ['assert', ['call', 40000, 7, 0, placeholder_node, 96, placeholder_node, 64]],\n placeholder_node,\n ], typ=ListType(BaseType('uint256'), 2), pos=pos, location='memory')\n return o\n\n\ndef _memory_element_getter(index):\n return LLLnode.from_list(\n ['mload', ['add', '_sub', ['add', 32, ['mul', 32, index]]]],\n typ=BaseType('int128'),\n )\n\n\ndef _storage_element_getter(index):\n return LLLnode.from_list(\n ['sload', ['add', ['sha3_32', '_sub'], ['add', 1, index]]],\n typ=BaseType('int128'),\n )\n\n\n@signature('bytes', 'int128', type=Optional('name_literal', 'bytes32'))\ndef extract32(expr, args, kwargs, context):\n sub, index = args\n ret_type = kwargs['type']\n # Get length and specific element\n if sub.location == \"memory\":\n lengetter = LLLnode.from_list(['mload', '_sub'], typ=BaseType('int128'))\n elementgetter = _memory_element_getter\n elif sub.location == \"storage\":\n lengetter = LLLnode.from_list(['sload', ['sha3_32', '_sub']], typ=BaseType('int128'))\n elementgetter = _storage_element_getter\n # TODO: unclosed if/elif clause. 
Undefined behavior if `sub.location`\n # isn't one of `memory`/`storage`\n\n # Special case: index known to be a multiple of 32\n if isinstance(index.value, int) and not index.value % 32:\n o = LLLnode.from_list(\n [\n 'with', '_sub', sub,\n elementgetter(['div', ['clamp', 0, index, ['sub', lengetter, 32]], 32])\n ],\n typ=BaseType(ret_type),\n annotation='extracting 32 bytes',\n )\n # General case\n else:\n o = LLLnode.from_list([\n 'with', '_sub', sub, [\n 'with', '_len', lengetter, [\n 'with', '_index', ['clamp', 0, index, ['sub', '_len', 32]], [\n 'with', '_mi32', ['mod', '_index', 32], [\n 'with', '_di32', ['div', '_index', 32],\n [\n 'if',\n '_mi32',\n [\n 'add',\n ['mul', elementgetter('_di32'), ['exp', 256, '_mi32']],\n [\n 'div',\n elementgetter(['add', '_di32', 1]),\n ['exp', 256, ['sub', 32, '_mi32']],\n ],\n ],\n elementgetter('_di32'),\n ],\n ],\n ],\n ],\n ],\n ], typ=BaseType(ret_type), pos=getpos(expr), annotation='extracting 32 bytes')\n if ret_type == 'int128':\n return LLLnode.from_list(\n ['clamp', ['mload', MemoryPositions.MINNUM], o, ['mload', MemoryPositions.MAXNUM]],\n typ=BaseType('int128'),\n pos=getpos(expr),\n )\n elif ret_type == 'address':\n return LLLnode.from_list(\n ['uclamplt', o, ['mload', MemoryPositions.ADDRSIZE]],\n typ=BaseType(ret_type),\n pos=getpos(expr),\n )\n else:\n return o\n\n\n@signature(('num_literal', 'int128', 'uint256', 'decimal'), 'str_literal')\ndef as_wei_value(expr, args, kwargs, context):\n # Denominations\n names_denom = {\n (b\"wei\", ): 1,\n (b\"femtoether\", b\"kwei\", b\"babbage\"): 10**3,\n (b\"picoether\", b\"mwei\", b\"lovelace\"): 10**6,\n (b\"nanoether\", b\"gwei\", b\"shannon\"): 10**9,\n (b\"microether\", b\"szabo\", ): 10**12,\n (b\"milliether\", b\"finney\", ): 10**15,\n (b\"ether\", ): 10**18,\n (b\"kether\", b\"grand\"): 10**21,\n }\n\n for names, denom in names_denom.items():\n if args[1] in names:\n denomination = denom\n break\n else:\n raise InvalidLiteralException(\n \"Invalid denomination: %s, valid denominations are: %s\" % (\n args[1],\n \",\".join(x[0].decode() for x in names_denom)\n ),\n expr.args[1]\n )\n # Compute the amount of wei and return that value\n if isinstance(args[0], (int, float)):\n expr_args_0 = expr.args[0]\n # On constant reference fetch value node of constant assignment.\n if context.constants.ast_is_constant(expr.args[0]):\n expr_args_0 = context.constants._constants_ast[expr.args[0].id]\n numstring, num, den = get_number_as_fraction(expr_args_0, context)\n if denomination % den:\n raise InvalidLiteralException(\"Too many decimal places: %s\" % numstring, expr.args[0])\n sub = num * denomination // den\n elif args[0].typ.is_literal:\n if args[0].value <= 0:\n raise InvalidLiteralException(\"Negative wei value not allowed\", expr)\n sub = ['mul', args[0].value, denomination]\n elif args[0].typ.typ == 'uint256':\n sub = ['mul', args[0], denomination]\n else:\n sub = ['div', ['mul', args[0], denomination], DECIMAL_DIVISOR]\n\n return LLLnode.from_list(\n sub,\n typ=BaseType('uint256', {'wei': 1}),\n location=None,\n pos=getpos(expr),\n )\n\n\nzero_value = LLLnode.from_list(0, typ=BaseType('uint256', {'wei': 1}))\nfalse_value = LLLnode.from_list(0, typ=BaseType('bool', is_literal=True))\n\n\n@signature(\n 'address',\n 'bytes',\n outsize='num_literal',\n gas='uint256',\n value=Optional('uint256', zero_value),\n delegate_call=Optional('bool', false_value),\n)\ndef raw_call(expr, args, kwargs, context):\n to, data = args\n gas, value, outsize, delegate_call = (\n kwargs['gas'],\n 
kwargs['value'],\n kwargs['outsize'],\n kwargs['delegate_call'],\n )\n if delegate_call.typ.is_literal is False:\n raise TypeMismatchException(\n 'The delegate_call parameter has to be a static/literal boolean value.'\n )\n if context.is_constant():\n raise ConstancyViolationException(\n \"Cannot make calls from %s\" % context.pp_constancy(),\n expr,\n )\n if value != zero_value:\n enforce_units(\n value.typ,\n get_keyword(expr, 'value'),\n BaseType('uint256', {'wei': 1}),\n )\n placeholder = context.new_placeholder(data.typ)\n placeholder_node = LLLnode.from_list(placeholder, typ=data.typ, location='memory')\n copier = make_byte_array_copier(placeholder_node, data, pos=getpos(expr))\n output_placeholder = context.new_placeholder(ByteArrayType(outsize))\n output_node = LLLnode.from_list(\n output_placeholder,\n typ=ByteArrayType(outsize),\n location='memory',\n )\n\n if delegate_call.value == 1:\n z = LLLnode.from_list(\n [\n 'seq',\n copier,\n [\n 'assert',\n [\n 'delegatecall',\n gas,\n to,\n ['add', placeholder_node, 32],\n ['mload', placeholder_node],\n ['add', output_node, 32],\n outsize,\n ],\n ],\n ['mstore', output_node, outsize],\n output_node,\n ],\n typ=ByteArrayType(outsize),\n location='memory',\n pos=getpos(expr),\n )\n else:\n z = LLLnode.from_list(\n [\n 'seq',\n copier,\n [\n 'assert',\n [\n 'call',\n gas,\n to,\n value,\n ['add', placeholder_node, 32],\n ['mload', placeholder_node],\n ['add', output_node, 32],\n outsize,\n ],\n ],\n ['mstore', output_node, outsize],\n output_node,\n ],\n typ=ByteArrayType(outsize), location='memory', pos=getpos(expr)\n )\n return z\n\n\n@signature('address', 'uint256')\ndef send(expr, args, kwargs, context):\n to, value = args\n if context.is_constant():\n raise ConstancyViolationException(\n \"Cannot send ether inside %s!\" % context.pp_constancy(),\n expr,\n )\n enforce_units(value.typ, expr.args[1], BaseType('uint256', {'wei': 1}))\n return LLLnode.from_list(\n ['assert', ['call', 0, to, value, 0, 0, 0, 0]],\n typ=None,\n pos=getpos(expr),\n )\n\n\n@signature('address')\ndef selfdestruct(expr, args, kwargs, context):\n if context.is_constant():\n raise ConstancyViolationException(\n \"Cannot %s inside %s!\" % (expr.func.id, context.pp_constancy()),\n expr.func,\n )\n return LLLnode.from_list(['selfdestruct', args[0]], typ=None, pos=getpos(expr))\n\n\n@signature(('uint256'))\ndef blockhash(expr, args, kwargs, contact):\n return LLLnode.from_list(\n ['blockhash', ['uclamplt', ['clampge', args[0], ['sub', ['number'], 256]], 'number']],\n typ=BaseType('bytes32'),\n pos=getpos(expr),\n )\n\n\n@signature('bytes', '*')\ndef _RLPlist(expr, args, kwargs, context):\n # Second argument must be a list of types\n if not isinstance(args[1], ast.List):\n raise TypeMismatchException(\"Expecting list of types for second argument\", args[1])\n if len(args[1].elts) == 0:\n raise TypeMismatchException(\"RLP list must have at least one item\", expr)\n if len(args[1].elts) > 32:\n raise TypeMismatchException(\"RLP list must have at most 32 items\", expr)\n # Get the output format\n _format = []\n for arg in args[1].elts:\n if isinstance(arg, ast.Name) and arg.id == \"bytes\":\n subtyp = ByteArrayType(args[0].typ.maxlen)\n else:\n subtyp = context.parse_type(arg, 'memory')\n if not isinstance(subtyp, BaseType):\n raise TypeMismatchException(\"RLP lists only accept BaseTypes and byte arrays\", arg)\n if not is_base_type(subtyp, ('int128', 'uint256', 'bytes32', 'address', 'bool')):\n raise TypeMismatchException(\"Unsupported base type: %s\" % subtyp.typ, 
arg)\n _format.append(subtyp)\n output_type = TupleType(_format)\n output_placeholder_type = ByteArrayType(\n (2 * len(_format) + 1 + get_size_of_type(output_type)) * 32,\n )\n output_placeholder = context.new_placeholder(output_placeholder_type)\n output_node = LLLnode.from_list(\n output_placeholder,\n typ=output_placeholder_type,\n location='memory',\n )\n # Create a decoder for each element in the tuple\n decoder = []\n for i, typ in enumerate(_format):\n # Decoder for bytes32\n if is_base_type(typ, 'bytes32'):\n decoder.append(LLLnode.from_list(\n [\n 'seq',\n [\n 'assert',\n [\n 'eq',\n [\n 'mload',\n [\n 'add',\n output_node,\n ['mload', ['add', output_node, 32 * i]],\n ],\n ],\n 32,\n ],\n ],\n [\n 'mload',\n [\n 'add',\n 32,\n [\n 'add',\n output_node,\n ['mload', ['add', output_node, 32 * i]],\n ],\n ],\n ],\n ],\n typ,\n annotation='getting and checking bytes32 item',\n ))\n # Decoder for address\n elif is_base_type(typ, 'address'):\n decoder.append(LLLnode.from_list(\n [\n 'seq',\n [\n 'assert',\n [\n 'eq',\n [\n 'mload',\n [\n 'add',\n output_node,\n ['mload', ['add', output_node, 32 * i]],\n ],\n ],\n 20,\n ]\n ],\n [\n 'mod',\n [\n 'mload',\n [\n 'add',\n 20,\n ['add', output_node, ['mload', ['add', output_node, 32 * i]]],\n ],\n ],\n ['mload', MemoryPositions.ADDRSIZE],\n ]\n ],\n typ,\n annotation='getting and checking address item',\n ))\n # Decoder for bytes\n elif isinstance(typ, ByteArrayType):\n decoder.append(LLLnode.from_list(\n [\n 'add',\n output_node,\n ['mload', ['add', output_node, 32 * i]],\n ],\n typ,\n location='memory',\n annotation='getting byte array',\n ))\n # Decoder for num and uint256\n elif is_base_type(typ, ('int128', 'uint256')):\n bytez = LLLnode.from_list(\n [\n 'add',\n output_node,\n ['mload', ['add', output_node, 32 * i]],\n ],\n typ,\n location='memory',\n annotation='getting and checking %s' % typ.typ,\n )\n decoder.append(byte_array_to_num(bytez, expr, typ.typ))\n # Decoder for bools\n elif is_base_type(typ, ('bool')):\n # This is basically a really clever way to test for a\n # length-prefixed one or zero. 
We take the 32 bytes starting one\n # byte *after* the start of the length declaration; this includes\n # the last 31 bytes of the length and the first byte of the value.\n # 0 corresponds to length 0, first byte 0, and 257 corresponds to\n # length 1, first byte \\x01\n decoder.append(LLLnode.from_list(\n [\n 'with', '_ans', [\n 'mload',\n [\n 'add',\n 1,\n ['add', output_node, ['mload', ['add', output_node, 32 * i]]]\n ],\n ],\n [\n 'seq',\n ['assert', ['or', ['eq', '_ans', 0], ['eq', '_ans', 257]]],\n ['div', '_ans', 257],\n ],\n ],\n typ,\n annotation='getting and checking bool',\n ))\n else:\n # Should never reach because of top level base level check.\n raise Exception(\"Type not yet supported\") # pragma: no cover\n # Copy the input data to memory\n if args[0].location == \"memory\":\n variable_pointer = args[0]\n elif args[0].location == \"storage\":\n placeholder = context.new_placeholder(args[0].typ)\n placeholder_node = LLLnode.from_list(placeholder, typ=args[0].typ, location='memory')\n copier = make_byte_array_copier(\n placeholder_node,\n LLLnode.from_list('_ptr', typ=args[0].typ, location=args[0].location),\n )\n variable_pointer = ['with', '_ptr', args[0], ['seq', copier, placeholder_node]]\n else:\n # Should never reach because of top level base level check.\n raise Exception(\"Location not yet supported\") # pragma: no cover\n # Decode the input data\n initial_setter = LLLnode.from_list(\n ['seq',\n ['with', '_sub', variable_pointer,\n ['pop', ['call',\n 1500 + 400 * len(_format) + 10 * len(args),\n LLLnode.from_list(RLP_DECODER_ADDRESS, annotation='RLP decoder'),\n 0,\n ['add', '_sub', 32],\n ['mload', '_sub'],\n output_node,\n 64 * len(_format) + 32 + 32 * get_size_of_type(output_type)]]],\n ['assert', ['eq', ['mload', output_node], 32 * len(_format) + 32]]],\n typ=None)\n # Shove the input data decoder in front of the first variable decoder\n decoder[0] = LLLnode.from_list(\n ['seq', initial_setter, decoder[0]],\n typ=decoder[0].typ,\n location=decoder[0].location,\n )\n return LLLnode.from_list(\n [\"multi\"] + decoder,\n typ=output_type,\n location='memory',\n pos=getpos(expr),\n )\n\n\n@signature('*', 'bytes')\ndef raw_log(expr, args, kwargs, context):\n if not isinstance(args[0], ast.List) or len(args[0].elts) > 4:\n raise StructureException(\"Expecting a list of 0-4 topics as first argument\", args[0])\n topics = []\n for elt in args[0].elts:\n arg = Expr.parse_value_expr(elt, context)\n if not is_base_type(arg.typ, 'bytes32'):\n raise TypeMismatchException(\"Expecting a bytes32 argument as topic\", elt)\n topics.append(arg)\n if args[1].location == \"memory\":\n return LLLnode.from_list([\n \"with\", \"_arr\", args[1], [\n \"log\" + str(len(topics)),\n [\"add\", \"_arr\", 32],\n [\"mload\", \"_arr\"],\n ] + topics\n ], typ=None, pos=getpos(expr))\n placeholder = context.new_placeholder(args[1].typ)\n placeholder_node = LLLnode.from_list(placeholder, typ=args[1].typ, location='memory')\n copier = make_byte_array_copier(\n placeholder_node,\n LLLnode.from_list('_sub', typ=args[1].typ, location=args[1].location),\n pos=getpos(expr),\n )\n return LLLnode.from_list(\n [\n \"with\", \"_sub\", args[1],\n [\n \"seq\",\n copier,\n [\n \"log\" + str(len(topics)),\n [\"add\", placeholder_node, 32],\n [\"mload\", placeholder_node],\n ] + topics\n ],\n ],\n typ=None,\n pos=getpos(expr),\n )\n\n\n@signature('uint256', 'uint256')\ndef bitwise_and(expr, args, kwargs, context):\n return LLLnode.from_list(['and', args[0], args[1]], typ=BaseType('uint256'), 
pos=getpos(expr))\n\n\n@signature('uint256', 'uint256')\ndef bitwise_or(expr, args, kwargs, context):\n return LLLnode.from_list(['or', args[0], args[1]], typ=BaseType('uint256'), pos=getpos(expr))\n\n\n@signature('uint256', 'uint256')\ndef bitwise_xor(expr, args, kwargs, context):\n return LLLnode.from_list(['xor', args[0], args[1]], typ=BaseType('uint256'), pos=getpos(expr))\n\n\n@signature('uint256', 'uint256', 'uint256')\ndef uint256_addmod(expr, args, kwargs, context):\n return LLLnode.from_list(\n [\n 'seq',\n ['assert', args[2]],\n ['assert', ['or', ['iszero', args[1]], ['gt', ['add', args[0], args[1]], args[0]]]],\n ['addmod', args[0], args[1], args[2]],\n ],\n typ=BaseType('uint256'),\n pos=getpos(expr),\n )\n\n\n@signature('uint256', 'uint256', 'uint256')\ndef uint256_mulmod(expr, args, kwargs, context):\n return LLLnode.from_list(\n [\n 'seq',\n ['assert', args[2]],\n ['assert', [\n 'or',\n ['iszero', args[0]],\n ['eq', ['div', ['mul', args[0], args[1]], args[0]], args[1]],\n ]],\n ['mulmod', args[0], args[1], args[2]],\n ],\n typ=BaseType('uint256'),\n pos=getpos(expr),\n )\n\n\n@signature('uint256')\ndef bitwise_not(expr, args, kwargs, context):\n return LLLnode.from_list(['not', args[0]], typ=BaseType('uint256'), pos=getpos(expr))\n\n\n@signature('uint256', 'int128')\ndef shift(expr, args, kwargs, context):\n return LLLnode.from_list(\n [\n 'with', '_v', args[0], [\n 'with', '_s', args[1], [\n # If second argument is positive, left-shift so multiply by a power of two\n # If it is negative, divide by a power of two\n # node that if the abs of the second argument >= 256, then in the EVM\n # 2**(second arg) = 0, and multiplying OR dividing by 0 gives 0\n 'if',\n ['slt', '_s', 0],\n ['div', '_v', ['exp', 2, ['sub', 0, '_s']]],\n ['mul', '_v', ['exp', 2, '_s']]\n ],\n ],\n ],\n typ=BaseType('uint256'),\n pos=getpos(expr),\n )\n\n\ndef get_create_forwarder_to_bytecode():\n from vyper.compile_lll import (\n assembly_to_evm,\n num_to_bytearray\n )\n code_a = [\n 'PUSH1', 0x33,\n 'PUSH1', 0x0c,\n 'PUSH1', 0x00,\n 'CODECOPY',\n 'PUSH1', 0x33,\n 'PUSH1', 0x00,\n 'RETURN',\n 'CALLDATASIZE',\n 'PUSH1', 0x00,\n 'PUSH1', 0x00,\n 'CALLDATACOPY',\n 'PUSH2', num_to_bytearray(0x1000),\n 'PUSH1', 0x00,\n 'CALLDATASIZE',\n 'PUSH1', 0x00,\n 'PUSH20', # [address to delegate to]\n ]\n code_b = [\n 'GAS',\n 'DELEGATECALL',\n 'PUSH1', 0x2c, # jumpdest of whole program.\n 'JUMPI',\n 'PUSH1', 0x0,\n 'DUP1',\n 'REVERT',\n 'JUMPDEST',\n 'PUSH2', num_to_bytearray(0x1000),\n 'PUSH1', 0x00,\n 'RETURN'\n ]\n return assembly_to_evm(code_a)[0] + (b'\\x00' * 20) + assembly_to_evm(code_b)[0]\n\n\n@signature('address', value=Optional('uint256', zero_value))\ndef create_forwarder_to(expr, args, kwargs, context):\n\n value = kwargs['value']\n if value != zero_value:\n enforce_units(value.typ, get_keyword(expr, 'value'),\n BaseType('uint256', {'wei': 1}))\n if context.is_constant():\n raise ConstancyViolationException(\n \"Cannot make calls from %s\" % context.pp_constancy(),\n expr,\n )\n placeholder = context.new_placeholder(ByteArrayType(96))\n\n kode = get_create_forwarder_to_bytecode()\n high = bytes_to_int(kode[:32])\n low = bytes_to_int((kode + b'\\x00' * 32)[47:79])\n\n return LLLnode.from_list(\n [\n 'seq',\n ['mstore', placeholder, high],\n ['mstore', ['add', placeholder, 27], ['mul', args[0], 2**96]],\n ['mstore', ['add', placeholder, 47], low],\n ['clamp_nonzero', ['create', value, placeholder, 96]],\n ],\n typ=BaseType('address'),\n pos=getpos(expr),\n add_gas_estimate=11000,\n 
)\n\n\n@signature(('int128', 'decimal', 'uint256'), ('int128', 'decimal', 'uint256'))\ndef _min(expr, args, kwargs, context):\n return minmax(expr, args, kwargs, context, True)\n\n\n@signature(('int128', 'decimal', 'uint256'), ('int128', 'decimal', 'uint256'))\ndef _max(expr, args, kwargs, context):\n return minmax(expr, args, kwargs, context, False)\n\n\ndef minmax(expr, args, kwargs, context, is_min):\n def _can_compare_with_uint256(operand):\n if operand.typ.typ == 'uint256':\n return True\n elif operand.typ.typ == 'int128' and operand.typ.is_literal and SizeLimits.in_bounds('uint256', operand.value): # noqa: E501\n return True\n return False\n\n left, right = args[0], args[1]\n if not are_units_compatible(left.typ, right.typ) and not are_units_compatible(right.typ, left.typ): # noqa: E501\n raise TypeMismatchException(\"Units must be compatible\", expr)\n if left.typ.typ == 'uint256':\n comparator = 'gt' if is_min else 'lt'\n else:\n comparator = 'sgt' if is_min else 'slt'\n if left.typ.typ == right.typ.typ:\n o = ['if', [comparator, '_l', '_r'], '_r', '_l']\n otyp = left.typ\n otyp.is_literal = False\n elif _can_compare_with_uint256(left) and _can_compare_with_uint256(right):\n o = ['if', [comparator, '_l', '_r'], '_r', '_l']\n if right.typ.typ == 'uint256':\n otyp = right.typ\n else:\n otyp = left.typ\n otyp.is_literal = False\n else:\n raise TypeMismatchException(\n \"Minmax types incompatible: %s %s\" % (left.typ.typ, right.typ.typ)\n )\n return LLLnode.from_list(\n ['with', '_l', left, ['with', '_r', right, o]],\n typ=otyp,\n pos=getpos(expr),\n )\n\n\n@signature('decimal')\ndef sqrt(expr, args, kwargs, context):\n from vyper.functions.utils import (\n generate_inline_function,\n )\n arg = args[0]\n sqrt_code = \"\"\"\nassert x >= 0.0\nz: decimal\n\nif x == 0.0:\n z = 0.0\nelse:\n z = (x + 1.0) / 2.0\n y: decimal = x\n\n for i in range(256):\n if z == y:\n break\n y = z\n z = (x / z + z) / 2.0\n \"\"\"\n\n x_type = BaseType('decimal')\n placeholder_copy = ['pass']\n # Steal current position if variable is already allocated.\n if arg.value == 'mload':\n new_var_pos = arg.args[0]\n # Other locations need to be copied.\n else:\n new_var_pos = context.new_placeholder(x_type)\n placeholder_copy = ['mstore', new_var_pos, arg]\n # Create input variables.\n variables = {\n 'x': VariableRecord(\n name='x',\n pos=new_var_pos,\n typ=x_type,\n mutable=False\n )\n }\n # Generate inline LLL.\n new_ctx, sqrt_lll = generate_inline_function(\n code=sqrt_code,\n variables=variables,\n memory_allocator=context.memory_allocator\n )\n return LLLnode.from_list(\n [\n 'seq_unchecked',\n placeholder_copy, # load x variable\n sqrt_lll,\n ['mload', new_ctx.vars['z'].pos] # unload z variable into the stack,\n ],\n typ=BaseType('decimal'),\n pos=getpos(expr),\n )\n\n\ndef _clear():\n raise ParserException(\n \"This function should never be called! `clear()` is currently handled \"\n \"differently than other functions as it self modifies its input argument \"\n \"statement. 
Please see `_clear()` in `stmt.py`\"\n )\n\n\ndispatch_table = {\n 'floor': floor,\n 'ceil': ceil,\n 'as_unitless_number': as_unitless_number,\n 'convert': _convert,\n 'slice': _slice,\n 'len': _len,\n 'concat': concat,\n 'sha3': _sha3,\n 'sha256': sha256,\n 'method_id': method_id,\n 'keccak256': _sha3,\n 'ecrecover': ecrecover,\n 'ecadd': ecadd,\n 'ecmul': ecmul,\n 'extract32': extract32,\n 'as_wei_value': as_wei_value,\n 'raw_call': raw_call,\n 'RLPList': _RLPlist,\n 'blockhash': blockhash,\n 'bitwise_and': bitwise_and,\n 'bitwise_or': bitwise_or,\n 'bitwise_xor': bitwise_xor,\n 'bitwise_not': bitwise_not,\n 'uint256_addmod': uint256_addmod,\n 'uint256_mulmod': uint256_mulmod,\n 'sqrt': sqrt,\n 'shift': shift,\n 'create_forwarder_to': create_forwarder_to,\n 'min': _min,\n 'max': _max,\n}\n\nstmt_dispatch_table = {\n 'clear': _clear,\n 'send': send,\n 'selfdestruct': selfdestruct,\n 'raw_call': raw_call,\n 'raw_log': raw_log,\n 'create_forwarder_to': create_forwarder_to,\n}\n\nbuilt_in_functions = [\n x for x in stmt_dispatch_table.keys()\n] + [\n x for x in dispatch_table.keys()\n]\n", "path": "vyper/functions/functions.py" } ]
[ { "content": "import hashlib\n\nfrom vyper import ast\nfrom vyper.exceptions import (\n ConstancyViolationException,\n InvalidLiteralException,\n ParserException,\n StructureException,\n TypeMismatchException,\n)\nfrom vyper.parser.expr import (\n Expr,\n)\nfrom vyper.parser.parser_utils import (\n LLLnode,\n add_variable_offset,\n byte_array_to_num,\n get_length,\n get_number_as_fraction,\n getpos,\n make_byte_array_copier,\n make_byte_slice_copier,\n unwrap_location,\n)\nfrom vyper.signatures.function_signature import (\n VariableRecord,\n)\nfrom vyper.types import (\n BaseType,\n ByteArrayLike,\n ByteArrayType,\n ListType,\n StringType,\n TupleType,\n are_units_compatible,\n get_size_of_type,\n is_base_type,\n)\nfrom vyper.types.convert import (\n convert,\n)\nfrom vyper.utils import (\n DECIMAL_DIVISOR,\n RLP_DECODER_ADDRESS,\n MemoryPositions,\n SizeLimits,\n bytes_to_int,\n fourbytes_to_int,\n sha3,\n)\n\nfrom .signatures import (\n Optional,\n signature,\n)\n\nSHA256_ADDRESS = 2\nSHA256_BASE_GAS = 60\nSHA256_PER_WORD_GAS = 12\n\n\ndef enforce_units(typ, obj, expected):\n if not are_units_compatible(typ, expected):\n raise TypeMismatchException(\"Invalid units\", obj)\n\n\ndef get_keyword(expr, keyword):\n for kw in expr.keywords:\n if kw.arg == keyword:\n return kw.value\n # This should never happen, as kwargs['value'] will KeyError first.\n # Leaving exception for other use cases.\n raise Exception(\"Keyword %s not found\" % keyword) # pragma: no cover\n\n\n@signature('decimal')\ndef floor(expr, args, kwargs, context):\n return LLLnode.from_list(\n [\n 'if',\n ['slt', args[0], 0],\n ['sdiv', ['sub', args[0], DECIMAL_DIVISOR - 1], DECIMAL_DIVISOR],\n ['sdiv', args[0], DECIMAL_DIVISOR]\n ],\n typ=BaseType('int128', args[0].typ.unit, args[0].typ.positional),\n pos=getpos(expr)\n )\n\n\n@signature('decimal')\ndef ceil(expr, args, kwards, context):\n return LLLnode.from_list(\n [\n 'if',\n ['slt', args[0], 0],\n ['sdiv', args[0], DECIMAL_DIVISOR],\n ['sdiv', ['add', args[0], DECIMAL_DIVISOR - 1], DECIMAL_DIVISOR]\n ],\n typ=BaseType('int128', args[0].typ.unit, args[0].typ.positional),\n pos=getpos(expr)\n )\n\n\n@signature(('uint256', 'int128', 'decimal'))\ndef as_unitless_number(expr, args, kwargs, context):\n return LLLnode(\n value=args[0].value,\n args=args[0].args,\n typ=BaseType(args[0].typ.typ, {}),\n pos=getpos(expr),\n )\n\n\ndef _convert(expr, context):\n return convert(expr, context)\n\n\n@signature(('bytes32', 'bytes', 'string'), start='int128', len='int128')\ndef _slice(expr, args, kwargs, context):\n\n sub, start, length = args[0], kwargs['start'], kwargs['len']\n if not are_units_compatible(start.typ, BaseType('int128')):\n raise TypeMismatchException(\"Type for slice start index must be a unitless number\", expr)\n # Expression representing the length of the slice\n if not are_units_compatible(length.typ, BaseType('int128')):\n raise TypeMismatchException(\"Type for slice length must be a unitless number\", expr)\n\n if is_base_type(sub.typ, 'bytes32'):\n if (start.typ.is_literal and length.typ.is_literal) and \\\n not (0 <= start.value + length.value <= 32):\n raise InvalidLiteralException(\n 'Invalid start / length values needs to be between 0 and 32.',\n expr,\n )\n sub_typ_maxlen = 32\n else:\n sub_typ_maxlen = sub.typ.maxlen\n\n # Get returntype string or bytes\n if isinstance(args[0].typ, ByteArrayType) or is_base_type(sub.typ, 'bytes32'):\n ReturnType = ByteArrayType\n else:\n ReturnType = StringType\n\n # Node representing the position of the output in memory\n 
np = context.new_placeholder(ReturnType(maxlen=sub_typ_maxlen + 32))\n placeholder_node = LLLnode.from_list(np, typ=sub.typ, location='memory')\n placeholder_plus_32_node = LLLnode.from_list(np + 32, typ=sub.typ, location='memory')\n # Copies over bytearray data\n if sub.location == 'storage':\n adj_sub = LLLnode.from_list(\n ['add', ['sha3_32', sub], ['add', ['div', '_start', 32], 1]],\n typ=sub.typ,\n location=sub.location,\n )\n else:\n adj_sub = LLLnode.from_list(\n ['add', sub, ['add', ['sub', '_start', ['mod', '_start', 32]], 32]],\n typ=sub.typ,\n location=sub.location,\n )\n\n if is_base_type(sub.typ, 'bytes32'):\n adj_sub = LLLnode.from_list(\n sub.args[0], typ=sub.typ, location=\"memory\"\n )\n\n copier = make_byte_slice_copier(\n placeholder_plus_32_node,\n adj_sub,\n ['add', '_length', 32],\n sub_typ_maxlen,\n pos=getpos(expr),\n )\n # New maximum length in the type of the result\n newmaxlen = length.value if not len(length.args) else sub_typ_maxlen\n if is_base_type(sub.typ, 'bytes32'):\n maxlen = 32\n else:\n maxlen = ['mload', Expr(sub, context=context).lll_node] # Retrieve length of the bytes.\n\n out = [\n 'with', '_start', start, [\n 'with', '_length', length, [\n 'with', '_opos', ['add', placeholder_node, ['mod', '_start', 32]], [\n 'seq',\n ['assert', ['le', ['add', '_start', '_length'], maxlen]],\n copier,\n ['mstore', '_opos', '_length'],\n '_opos'\n ],\n ],\n ],\n ]\n return LLLnode.from_list(out, typ=ReturnType(newmaxlen), location='memory', pos=getpos(expr))\n\n\n@signature(('bytes', 'string'))\ndef _len(expr, args, kwargs, context):\n return get_length(args[0])\n\n\ndef concat(expr, context):\n args = [Expr(arg, context).lll_node for arg in expr.args]\n if len(args) < 2:\n raise StructureException(\"Concat expects at least two arguments\", expr)\n\n prev_type = ''\n for _, (expr_arg, arg) in enumerate(zip(expr.args, args)):\n if not isinstance(arg.typ, ByteArrayLike) and not is_base_type(arg.typ, 'bytes32'):\n raise TypeMismatchException(\"Concat expects string, bytes or bytes32 objects\", expr_arg)\n\n current_type = (\n 'bytes'\n if isinstance(arg.typ, ByteArrayType) or is_base_type(arg.typ, 'bytes32')\n else 'string'\n )\n if prev_type and current_type != prev_type:\n raise TypeMismatchException(\n (\n \"Concat expects consistant use of string or byte types, \"\n \"user either bytes or string.\"\n ),\n expr_arg,\n )\n prev_type = current_type\n\n if current_type == 'string':\n ReturnType = StringType\n else:\n ReturnType = ByteArrayType\n\n # Maximum length of the output\n total_maxlen = sum([\n arg.typ.maxlen if isinstance(arg.typ, ByteArrayLike) else 32 for arg in args\n ])\n # Node representing the position of the output in memory\n placeholder = context.new_placeholder(ReturnType(total_maxlen))\n # Object representing the output\n seq = []\n # For each argument we are concatenating...\n for arg in args:\n # Start pasting into a position the starts at zero, and keeps\n # incrementing as we concatenate arguments\n placeholder_node = LLLnode.from_list(\n ['add', placeholder, '_poz'],\n typ=ReturnType(total_maxlen),\n location='memory',\n )\n placeholder_node_plus_32 = LLLnode.from_list(\n ['add', ['add', placeholder, '_poz'], 32],\n typ=ReturnType(total_maxlen),\n location='memory',\n )\n if isinstance(arg.typ, ReturnType):\n # Ignore empty strings\n if arg.typ.maxlen == 0:\n continue\n # Get the length of the current argument\n if arg.location == \"memory\":\n length = LLLnode.from_list(['mload', '_arg'], typ=BaseType('int128'))\n argstart = 
LLLnode.from_list(\n ['add', '_arg', 32],\n typ=arg.typ,\n location=arg.location,\n )\n elif arg.location == \"storage\":\n length = LLLnode.from_list(['sload', ['sha3_32', '_arg']], typ=BaseType('int128'))\n argstart = LLLnode.from_list(\n ['add', ['sha3_32', '_arg'], 1],\n typ=arg.typ,\n location=arg.location,\n )\n # Make a copier to copy over data from that argument\n seq.append([\n 'with', '_arg', arg, [\n 'seq',\n make_byte_slice_copier(\n placeholder_node_plus_32,\n argstart,\n length,\n arg.typ.maxlen, pos=getpos(expr),\n ),\n # Change the position to start at the correct\n # place to paste the next value\n ['set', '_poz', ['add', '_poz', length]],\n ],\n ])\n else:\n seq.append([\n 'seq',\n ['mstore', ['add', placeholder_node, 32], unwrap_location(arg)],\n ['set', '_poz', ['add', '_poz', 32]],\n ])\n # The position, after all arguments are processing, equals the total\n # length. Paste this in to make the output a proper bytearray\n seq.append(['mstore', placeholder, '_poz'])\n # Memory location of the output\n seq.append(placeholder)\n return LLLnode.from_list(\n ['with', '_poz', 0, ['seq'] + seq],\n typ=ReturnType(total_maxlen),\n location='memory',\n pos=getpos(expr),\n annotation='concat',\n )\n\n\n@signature(('bytes_literal', 'str_literal', 'bytes', 'string', 'bytes32'))\ndef _sha3(expr, args, kwargs, context):\n sub = args[0]\n # Can hash literals\n if isinstance(sub, bytes):\n return LLLnode.from_list(\n bytes_to_int(sha3(sub)),\n typ=BaseType('bytes32'),\n pos=getpos(expr)\n )\n # Can hash bytes32 objects\n if is_base_type(sub.typ, 'bytes32'):\n return LLLnode.from_list(\n [\n 'seq',\n ['mstore', MemoryPositions.FREE_VAR_SPACE, sub],\n ['sha3', MemoryPositions.FREE_VAR_SPACE, 32]\n ],\n typ=BaseType('bytes32'),\n pos=getpos(expr),\n )\n # Copy the data to an in-memory array\n if sub.location == \"memory\":\n # If we are hashing a value in memory, no need to copy it, just hash in-place\n return LLLnode.from_list(\n ['with', '_sub', sub, ['sha3', ['add', '_sub', 32], ['mload', '_sub']]],\n typ=BaseType('bytes32'),\n pos=getpos(expr),\n )\n elif sub.location == \"storage\":\n lengetter = LLLnode.from_list(['sload', ['sha3_32', '_sub']], typ=BaseType('int128'))\n else:\n # This should never happen, but just left here for future compiler-writers.\n raise Exception(\"Unsupported location: %s\" % sub.location) # pragma: no test\n placeholder = context.new_placeholder(sub.typ)\n placeholder_node = LLLnode.from_list(placeholder, typ=sub.typ, location='memory')\n copier = make_byte_array_copier(\n placeholder_node,\n LLLnode.from_list('_sub', typ=sub.typ, location=sub.location),\n )\n return LLLnode.from_list(\n [\n 'with', '_sub', sub, [\n 'seq',\n copier,\n ['sha3', ['add', placeholder, 32], lengetter]\n ],\n ],\n typ=BaseType('bytes32'),\n pos=getpos(expr)\n )\n\n\ndef _make_sha256_call(inp_start, inp_len, out_start, out_len):\n return [\n 'assert', [\n 'call',\n ['gas'], # gas\n SHA256_ADDRESS, # address\n 0, # value\n inp_start,\n inp_len,\n out_start,\n out_len\n ]\n ]\n\n\n@signature(('bytes_literal', 'str_literal', 'bytes', 'string', 'bytes32'))\ndef sha256(expr, args, kwargs, context):\n sub = args[0]\n # Literal input\n if isinstance(sub, bytes):\n return LLLnode.from_list(\n bytes_to_int(hashlib.sha256(sub).digest()),\n typ=BaseType('bytes32'),\n pos=getpos(expr)\n )\n # bytes32 input\n elif is_base_type(sub.typ, 'bytes32'):\n return LLLnode.from_list(\n [\n 'seq',\n ['mstore', MemoryPositions.FREE_VAR_SPACE, sub],\n _make_sha256_call(\n 
inp_start=MemoryPositions.FREE_VAR_SPACE,\n inp_len=32,\n out_start=MemoryPositions.FREE_VAR_SPACE,\n out_len=32\n ),\n ['mload', MemoryPositions.FREE_VAR_SPACE] # push value onto stack\n ],\n typ=BaseType('bytes32'),\n pos=getpos(expr),\n add_gas_estimate=SHA256_BASE_GAS + 1 * SHA256_PER_WORD_GAS\n )\n # bytearay-like input\n if sub.location == \"storage\":\n # Copy storage to memory\n placeholder = context.new_placeholder(sub.typ)\n placeholder_node = LLLnode.from_list(placeholder, typ=sub.typ, location='memory')\n copier = make_byte_array_copier(\n placeholder_node,\n LLLnode.from_list('_sub', typ=sub.typ, location=sub.location),\n )\n return LLLnode.from_list(\n [\n 'with', '_sub', sub, [\n 'seq',\n copier,\n _make_sha256_call(\n inp_start=['add', placeholder, 32],\n inp_len=['mload', placeholder],\n out_start=MemoryPositions.FREE_VAR_SPACE,\n out_len=32\n ),\n ['mload', MemoryPositions.FREE_VAR_SPACE]\n ],\n ],\n typ=BaseType('bytes32'),\n pos=getpos(expr),\n add_gas_estimate=SHA256_BASE_GAS + sub.typ.maxlen * SHA256_PER_WORD_GAS\n )\n elif sub.location == \"memory\":\n return LLLnode.from_list(\n [\n 'with', '_sub', sub, [\n 'seq',\n _make_sha256_call(\n inp_start=['add', '_sub', 32],\n inp_len=['mload', '_sub'],\n out_start=MemoryPositions.FREE_VAR_SPACE,\n out_len=32\n ),\n ['mload', MemoryPositions.FREE_VAR_SPACE]\n ]\n ],\n typ=BaseType('bytes32'),\n pos=getpos(expr),\n add_gas_estimate=SHA256_BASE_GAS + sub.typ.maxlen * SHA256_PER_WORD_GAS\n )\n else:\n # This should never happen, but just left here for future compiler-writers.\n raise Exception(\"Unsupported location: %s\" % sub.location) # pragma: no test\n\n\n@signature('str_literal', 'name_literal')\ndef method_id(expr, args, kwargs, context):\n if b' ' in args[0]:\n raise TypeMismatchException('Invalid function signature no spaces allowed.')\n method_id = fourbytes_to_int(sha3(args[0])[:4])\n if args[1] == 'bytes32':\n return LLLnode(method_id, typ=BaseType('bytes32'), pos=getpos(expr))\n elif args[1] == 'bytes[4]':\n placeholder = LLLnode.from_list(context.new_placeholder(ByteArrayType(4)))\n return LLLnode.from_list(\n ['seq',\n ['mstore', ['add', placeholder, 4], method_id],\n ['mstore', placeholder, 4], placeholder],\n typ=ByteArrayType(4), location='memory', pos=getpos(expr))\n else:\n raise StructureException('Can only produce bytes32 or bytes[4] as outputs')\n\n\n@signature('bytes32', 'uint256', 'uint256', 'uint256')\ndef ecrecover(expr, args, kwargs, context):\n placeholder_node = LLLnode.from_list(\n context.new_placeholder(ByteArrayType(128)), typ=ByteArrayType(128), location='memory'\n )\n return LLLnode.from_list([\n 'seq',\n ['mstore', placeholder_node, args[0]],\n ['mstore', ['add', placeholder_node, 32], args[1]],\n ['mstore', ['add', placeholder_node, 64], args[2]],\n ['mstore', ['add', placeholder_node, 96], args[3]],\n ['pop', ['call', 3000, 1, 0, placeholder_node, 128, MemoryPositions.FREE_VAR_SPACE, 32]],\n ['mload', MemoryPositions.FREE_VAR_SPACE],\n ], typ=BaseType('address'), pos=getpos(expr))\n\n\ndef avo(arg, ind, pos):\n return unwrap_location(add_variable_offset(arg, LLLnode.from_list(ind, 'int128'), pos=pos))\n\n\n@signature('uint256[2]', 'uint256[2]')\ndef ecadd(expr, args, kwargs, context):\n placeholder_node = LLLnode.from_list(\n context.new_placeholder(ByteArrayType(128)), typ=ByteArrayType(128), location='memory'\n )\n pos = getpos(expr)\n o = LLLnode.from_list([\n 'seq',\n ['mstore', placeholder_node, avo(args[0], 0, pos)],\n ['mstore', ['add', placeholder_node, 32], avo(args[0], 1, 
pos)],\n ['mstore', ['add', placeholder_node, 64], avo(args[1], 0, pos)],\n ['mstore', ['add', placeholder_node, 96], avo(args[1], 1, pos)],\n ['assert', ['call', 500, 6, 0, placeholder_node, 128, placeholder_node, 64]],\n placeholder_node,\n ], typ=ListType(BaseType('uint256'), 2), pos=getpos(expr), location='memory')\n return o\n\n\n@signature('uint256[2]', 'uint256')\ndef ecmul(expr, args, kwargs, context):\n placeholder_node = LLLnode.from_list(\n context.new_placeholder(ByteArrayType(128)), typ=ByteArrayType(128), location='memory'\n )\n pos = getpos(expr)\n o = LLLnode.from_list([\n 'seq',\n ['mstore', placeholder_node, avo(args[0], 0, pos)],\n ['mstore', ['add', placeholder_node, 32], avo(args[0], 1, pos)],\n ['mstore', ['add', placeholder_node, 64], args[1]],\n ['assert', ['call', 40000, 7, 0, placeholder_node, 96, placeholder_node, 64]],\n placeholder_node,\n ], typ=ListType(BaseType('uint256'), 2), pos=pos, location='memory')\n return o\n\n\ndef _memory_element_getter(index):\n return LLLnode.from_list(\n ['mload', ['add', '_sub', ['add', 32, ['mul', 32, index]]]],\n typ=BaseType('int128'),\n )\n\n\ndef _storage_element_getter(index):\n return LLLnode.from_list(\n ['sload', ['add', ['sha3_32', '_sub'], ['add', 1, index]]],\n typ=BaseType('int128'),\n )\n\n\n@signature('bytes', 'int128', type=Optional('name_literal', 'bytes32'))\ndef extract32(expr, args, kwargs, context):\n sub, index = args\n ret_type = kwargs['type']\n # Get length and specific element\n if sub.location == \"memory\":\n lengetter = LLLnode.from_list(['mload', '_sub'], typ=BaseType('int128'))\n elementgetter = _memory_element_getter\n elif sub.location == \"storage\":\n lengetter = LLLnode.from_list(['sload', ['sha3_32', '_sub']], typ=BaseType('int128'))\n elementgetter = _storage_element_getter\n # TODO: unclosed if/elif clause. 
Undefined behavior if `sub.location`\n # isn't one of `memory`/`storage`\n\n # Special case: index known to be a multiple of 32\n if isinstance(index.value, int) and not index.value % 32:\n o = LLLnode.from_list(\n [\n 'with', '_sub', sub,\n elementgetter(['div', ['clamp', 0, index, ['sub', lengetter, 32]], 32])\n ],\n typ=BaseType(ret_type),\n annotation='extracting 32 bytes',\n )\n # General case\n else:\n o = LLLnode.from_list([\n 'with', '_sub', sub, [\n 'with', '_len', lengetter, [\n 'with', '_index', ['clamp', 0, index, ['sub', '_len', 32]], [\n 'with', '_mi32', ['mod', '_index', 32], [\n 'with', '_di32', ['div', '_index', 32],\n [\n 'if',\n '_mi32',\n [\n 'add',\n ['mul', elementgetter('_di32'), ['exp', 256, '_mi32']],\n [\n 'div',\n elementgetter(['add', '_di32', 1]),\n ['exp', 256, ['sub', 32, '_mi32']],\n ],\n ],\n elementgetter('_di32'),\n ],\n ],\n ],\n ],\n ],\n ], typ=BaseType(ret_type), pos=getpos(expr), annotation='extracting 32 bytes')\n if ret_type == 'int128':\n return LLLnode.from_list(\n ['clamp', ['mload', MemoryPositions.MINNUM], o, ['mload', MemoryPositions.MAXNUM]],\n typ=BaseType('int128'),\n pos=getpos(expr),\n )\n elif ret_type == 'address':\n return LLLnode.from_list(\n ['uclamplt', o, ['mload', MemoryPositions.ADDRSIZE]],\n typ=BaseType(ret_type),\n pos=getpos(expr),\n )\n else:\n return o\n\n\n@signature(('num_literal', 'int128', 'uint256', 'decimal'), 'str_literal')\ndef as_wei_value(expr, args, kwargs, context):\n # Denominations\n names_denom = {\n (b\"wei\", ): 1,\n (b\"femtoether\", b\"kwei\", b\"babbage\"): 10**3,\n (b\"picoether\", b\"mwei\", b\"lovelace\"): 10**6,\n (b\"nanoether\", b\"gwei\", b\"shannon\"): 10**9,\n (b\"microether\", b\"szabo\", ): 10**12,\n (b\"milliether\", b\"finney\", ): 10**15,\n (b\"ether\", ): 10**18,\n (b\"kether\", b\"grand\"): 10**21,\n }\n\n for names, denom in names_denom.items():\n if args[1] in names:\n denomination = denom\n break\n else:\n raise InvalidLiteralException(\n \"Invalid denomination: %s, valid denominations are: %s\" % (\n args[1],\n \",\".join(x[0].decode() for x in names_denom)\n ),\n expr.args[1]\n )\n # Compute the amount of wei and return that value\n if isinstance(args[0], (int, float)):\n expr_args_0 = expr.args[0]\n # On constant reference fetch value node of constant assignment.\n if context.constants.ast_is_constant(expr.args[0]):\n expr_args_0 = context.constants._constants_ast[expr.args[0].id]\n numstring, num, den = get_number_as_fraction(expr_args_0, context)\n if denomination % den:\n raise InvalidLiteralException(\"Too many decimal places: %s\" % numstring, expr.args[0])\n sub = num * denomination // den\n elif args[0].typ.is_literal:\n if args[0].value <= 0:\n raise InvalidLiteralException(\"Negative wei value not allowed\", expr)\n sub = ['mul', args[0].value, denomination]\n elif args[0].typ.typ == 'uint256':\n sub = ['mul', args[0], denomination]\n else:\n sub = ['div', ['mul', args[0], denomination], DECIMAL_DIVISOR]\n\n return LLLnode.from_list(\n sub,\n typ=BaseType('uint256', {'wei': 1}),\n location=None,\n pos=getpos(expr),\n )\n\n\nzero_value = LLLnode.from_list(0, typ=BaseType('uint256', {'wei': 1}))\nfalse_value = LLLnode.from_list(0, typ=BaseType('bool', is_literal=True))\n\n\n@signature(\n 'address',\n 'bytes',\n outsize='num_literal',\n gas='uint256',\n value=Optional('uint256', zero_value),\n delegate_call=Optional('bool', false_value),\n)\ndef raw_call(expr, args, kwargs, context):\n to, data = args\n gas, value, outsize, delegate_call = (\n kwargs['gas'],\n 
kwargs['value'],\n kwargs['outsize'],\n kwargs['delegate_call'],\n )\n if delegate_call.typ.is_literal is False:\n raise TypeMismatchException(\n 'The delegate_call parameter has to be a static/literal boolean value.'\n )\n if context.is_constant():\n raise ConstancyViolationException(\n \"Cannot make calls from %s\" % context.pp_constancy(),\n expr,\n )\n if value != zero_value:\n enforce_units(\n value.typ,\n get_keyword(expr, 'value'),\n BaseType('uint256', {'wei': 1}),\n )\n placeholder = context.new_placeholder(data.typ)\n placeholder_node = LLLnode.from_list(placeholder, typ=data.typ, location='memory')\n copier = make_byte_array_copier(placeholder_node, data, pos=getpos(expr))\n output_placeholder = context.new_placeholder(ByteArrayType(outsize))\n output_node = LLLnode.from_list(\n output_placeholder,\n typ=ByteArrayType(outsize),\n location='memory',\n )\n\n if delegate_call.value == 1:\n z = LLLnode.from_list(\n [\n 'seq',\n copier,\n [\n 'assert',\n [\n 'delegatecall',\n gas,\n to,\n ['add', placeholder_node, 32],\n ['mload', placeholder_node],\n ['add', output_node, 32],\n outsize,\n ],\n ],\n ['mstore', output_node, outsize],\n output_node,\n ],\n typ=ByteArrayType(outsize),\n location='memory',\n pos=getpos(expr),\n )\n else:\n z = LLLnode.from_list(\n [\n 'seq',\n copier,\n [\n 'assert',\n [\n 'call',\n gas,\n to,\n value,\n ['add', placeholder_node, 32],\n ['mload', placeholder_node],\n ['add', output_node, 32],\n outsize,\n ],\n ],\n ['mstore', output_node, outsize],\n output_node,\n ],\n typ=ByteArrayType(outsize), location='memory', pos=getpos(expr)\n )\n return z\n\n\n@signature('address', 'uint256')\ndef send(expr, args, kwargs, context):\n to, value = args\n if context.is_constant():\n raise ConstancyViolationException(\n \"Cannot send ether inside %s!\" % context.pp_constancy(),\n expr,\n )\n enforce_units(value.typ, expr.args[1], BaseType('uint256', {'wei': 1}))\n return LLLnode.from_list(\n ['assert', ['call', 0, to, value, 0, 0, 0, 0]],\n typ=None,\n pos=getpos(expr),\n )\n\n\n@signature('address')\ndef selfdestruct(expr, args, kwargs, context):\n if context.is_constant():\n raise ConstancyViolationException(\n \"Cannot %s inside %s!\" % (expr.func.id, context.pp_constancy()),\n expr.func,\n )\n return LLLnode.from_list(['selfdestruct', args[0]], typ=None, pos=getpos(expr))\n\n\n@signature(('uint256'))\ndef blockhash(expr, args, kwargs, contact):\n return LLLnode.from_list(\n ['blockhash', ['uclamplt', ['clampge', args[0], ['sub', ['number'], 256]], 'number']],\n typ=BaseType('bytes32'),\n pos=getpos(expr),\n )\n\n\n@signature('bytes', '*')\ndef _RLPlist(expr, args, kwargs, context):\n # Second argument must be a list of types\n if not isinstance(args[1], ast.List):\n raise TypeMismatchException(\"Expecting list of types for second argument\", args[1])\n if len(args[1].elts) == 0:\n raise TypeMismatchException(\"RLP list must have at least one item\", expr)\n if len(args[1].elts) > 32:\n raise TypeMismatchException(\"RLP list must have at most 32 items\", expr)\n # Get the output format\n _format = []\n for arg in args[1].elts:\n if isinstance(arg, ast.Name) and arg.id == \"bytes\":\n subtyp = ByteArrayType(args[0].typ.maxlen)\n else:\n subtyp = context.parse_type(arg, 'memory')\n if not isinstance(subtyp, BaseType):\n raise TypeMismatchException(\"RLP lists only accept BaseTypes and byte arrays\", arg)\n if not is_base_type(subtyp, ('int128', 'uint256', 'bytes32', 'address', 'bool')):\n raise TypeMismatchException(\"Unsupported base type: %s\" % subtyp.typ, 
arg)\n _format.append(subtyp)\n output_type = TupleType(_format)\n output_placeholder_type = ByteArrayType(\n (2 * len(_format) + 1 + get_size_of_type(output_type)) * 32,\n )\n output_placeholder = context.new_placeholder(output_placeholder_type)\n output_node = LLLnode.from_list(\n output_placeholder,\n typ=output_placeholder_type,\n location='memory',\n )\n # Create a decoder for each element in the tuple\n decoder = []\n for i, typ in enumerate(_format):\n # Decoder for bytes32\n if is_base_type(typ, 'bytes32'):\n decoder.append(LLLnode.from_list(\n [\n 'seq',\n [\n 'assert',\n [\n 'eq',\n [\n 'mload',\n [\n 'add',\n output_node,\n ['mload', ['add', output_node, 32 * i]],\n ],\n ],\n 32,\n ],\n ],\n [\n 'mload',\n [\n 'add',\n 32,\n [\n 'add',\n output_node,\n ['mload', ['add', output_node, 32 * i]],\n ],\n ],\n ],\n ],\n typ,\n annotation='getting and checking bytes32 item',\n ))\n # Decoder for address\n elif is_base_type(typ, 'address'):\n decoder.append(LLLnode.from_list(\n [\n 'seq',\n [\n 'assert',\n [\n 'eq',\n [\n 'mload',\n [\n 'add',\n output_node,\n ['mload', ['add', output_node, 32 * i]],\n ],\n ],\n 20,\n ]\n ],\n [\n 'mod',\n [\n 'mload',\n [\n 'add',\n 20,\n ['add', output_node, ['mload', ['add', output_node, 32 * i]]],\n ],\n ],\n ['mload', MemoryPositions.ADDRSIZE],\n ]\n ],\n typ,\n annotation='getting and checking address item',\n ))\n # Decoder for bytes\n elif isinstance(typ, ByteArrayType):\n decoder.append(LLLnode.from_list(\n [\n 'add',\n output_node,\n ['mload', ['add', output_node, 32 * i]],\n ],\n typ,\n location='memory',\n annotation='getting byte array',\n ))\n # Decoder for num and uint256\n elif is_base_type(typ, ('int128', 'uint256')):\n bytez = LLLnode.from_list(\n [\n 'add',\n output_node,\n ['mload', ['add', output_node, 32 * i]],\n ],\n typ,\n location='memory',\n annotation='getting and checking %s' % typ.typ,\n )\n decoder.append(byte_array_to_num(bytez, expr, typ.typ))\n # Decoder for bools\n elif is_base_type(typ, ('bool')):\n # This is basically a really clever way to test for a\n # length-prefixed one or zero. 
We take the 32 bytes starting one\n # byte *after* the start of the length declaration; this includes\n # the last 31 bytes of the length and the first byte of the value.\n # 0 corresponds to length 0, first byte 0, and 257 corresponds to\n # length 1, first byte \\x01\n decoder.append(LLLnode.from_list(\n [\n 'with', '_ans', [\n 'mload',\n [\n 'add',\n 1,\n ['add', output_node, ['mload', ['add', output_node, 32 * i]]]\n ],\n ],\n [\n 'seq',\n ['assert', ['or', ['eq', '_ans', 0], ['eq', '_ans', 257]]],\n ['div', '_ans', 257],\n ],\n ],\n typ,\n annotation='getting and checking bool',\n ))\n else:\n # Should never reach because of top level base level check.\n raise Exception(\"Type not yet supported\") # pragma: no cover\n # Copy the input data to memory\n if args[0].location == \"memory\":\n variable_pointer = args[0]\n elif args[0].location == \"storage\":\n placeholder = context.new_placeholder(args[0].typ)\n placeholder_node = LLLnode.from_list(placeholder, typ=args[0].typ, location='memory')\n copier = make_byte_array_copier(\n placeholder_node,\n LLLnode.from_list('_ptr', typ=args[0].typ, location=args[0].location),\n )\n variable_pointer = ['with', '_ptr', args[0], ['seq', copier, placeholder_node]]\n else:\n # Should never reach because of top level base level check.\n raise Exception(\"Location not yet supported\") # pragma: no cover\n # Decode the input data\n initial_setter = LLLnode.from_list(\n ['seq',\n ['with', '_sub', variable_pointer,\n ['pop', ['call',\n 1500 + 400 * len(_format) + 10 * len(args),\n LLLnode.from_list(RLP_DECODER_ADDRESS, annotation='RLP decoder'),\n 0,\n ['add', '_sub', 32],\n ['mload', '_sub'],\n output_node,\n 64 * len(_format) + 32 + 32 * get_size_of_type(output_type)]]],\n ['assert', ['eq', ['mload', output_node], 32 * len(_format) + 32]]],\n typ=None)\n # Shove the input data decoder in front of the first variable decoder\n decoder[0] = LLLnode.from_list(\n ['seq', initial_setter, decoder[0]],\n typ=decoder[0].typ,\n location=decoder[0].location,\n )\n return LLLnode.from_list(\n [\"multi\"] + decoder,\n typ=output_type,\n location='memory',\n pos=getpos(expr),\n )\n\n\n@signature('*', 'bytes')\ndef raw_log(expr, args, kwargs, context):\n if not isinstance(args[0], ast.List) or len(args[0].elts) > 4:\n raise StructureException(\"Expecting a list of 0-4 topics as first argument\", args[0])\n topics = []\n for elt in args[0].elts:\n arg = Expr.parse_value_expr(elt, context)\n if not is_base_type(arg.typ, 'bytes32'):\n raise TypeMismatchException(\"Expecting a bytes32 argument as topic\", elt)\n topics.append(arg)\n if args[1].location == \"memory\":\n return LLLnode.from_list([\n \"with\", \"_arr\", args[1], [\n \"log\" + str(len(topics)),\n [\"add\", \"_arr\", 32],\n [\"mload\", \"_arr\"],\n ] + topics\n ], typ=None, pos=getpos(expr))\n placeholder = context.new_placeholder(args[1].typ)\n placeholder_node = LLLnode.from_list(placeholder, typ=args[1].typ, location='memory')\n copier = make_byte_array_copier(\n placeholder_node,\n LLLnode.from_list('_sub', typ=args[1].typ, location=args[1].location),\n pos=getpos(expr),\n )\n return LLLnode.from_list(\n [\n \"with\", \"_sub\", args[1],\n [\n \"seq\",\n copier,\n [\n \"log\" + str(len(topics)),\n [\"add\", placeholder_node, 32],\n [\"mload\", placeholder_node],\n ] + topics\n ],\n ],\n typ=None,\n pos=getpos(expr),\n )\n\n\n@signature('uint256', 'uint256')\ndef bitwise_and(expr, args, kwargs, context):\n return LLLnode.from_list(['and', args[0], args[1]], typ=BaseType('uint256'), 
pos=getpos(expr))\n\n\n@signature('uint256', 'uint256')\ndef bitwise_or(expr, args, kwargs, context):\n return LLLnode.from_list(['or', args[0], args[1]], typ=BaseType('uint256'), pos=getpos(expr))\n\n\n@signature('uint256', 'uint256')\ndef bitwise_xor(expr, args, kwargs, context):\n return LLLnode.from_list(['xor', args[0], args[1]], typ=BaseType('uint256'), pos=getpos(expr))\n\n\n@signature('uint256', 'uint256', 'uint256')\ndef uint256_addmod(expr, args, kwargs, context):\n return LLLnode.from_list(\n [\n 'seq',\n ['assert', args[2]],\n ['assert', ['or', ['iszero', args[1]], ['gt', ['add', args[0], args[1]], args[0]]]],\n ['addmod', args[0], args[1], args[2]],\n ],\n typ=BaseType('uint256'),\n pos=getpos(expr),\n )\n\n\n@signature('uint256', 'uint256', 'uint256')\ndef uint256_mulmod(expr, args, kwargs, context):\n return LLLnode.from_list(\n [\n 'seq',\n ['assert', args[2]],\n ['assert', [\n 'or',\n ['iszero', args[0]],\n ['eq', ['div', ['mul', args[0], args[1]], args[0]], args[1]],\n ]],\n ['mulmod', args[0], args[1], args[2]],\n ],\n typ=BaseType('uint256'),\n pos=getpos(expr),\n )\n\n\n@signature('uint256')\ndef bitwise_not(expr, args, kwargs, context):\n return LLLnode.from_list(['not', args[0]], typ=BaseType('uint256'), pos=getpos(expr))\n\n\n@signature('uint256', 'int128')\ndef shift(expr, args, kwargs, context):\n return LLLnode.from_list(\n [\n 'with', '_v', args[0], [\n 'with', '_s', args[1], [\n # If second argument is positive, left-shift so multiply by a power of two\n # If it is negative, divide by a power of two\n # node that if the abs of the second argument >= 256, then in the EVM\n # 2**(second arg) = 0, and multiplying OR dividing by 0 gives 0\n 'if',\n ['slt', '_s', 0],\n ['div', '_v', ['exp', 2, ['sub', 0, '_s']]],\n ['mul', '_v', ['exp', 2, '_s']]\n ],\n ],\n ],\n typ=BaseType('uint256'),\n pos=getpos(expr),\n )\n\n\ndef get_create_forwarder_to_bytecode():\n from vyper.compile_lll import (\n assembly_to_evm,\n num_to_bytearray\n )\n code_a = [\n 'PUSH1', 0x33,\n 'PUSH1', 0x0c,\n 'PUSH1', 0x00,\n 'CODECOPY',\n 'PUSH1', 0x33,\n 'PUSH1', 0x00,\n 'RETURN',\n 'CALLDATASIZE',\n 'PUSH1', 0x00,\n 'PUSH1', 0x00,\n 'CALLDATACOPY',\n 'PUSH2', num_to_bytearray(0x1000),\n 'PUSH1', 0x00,\n 'CALLDATASIZE',\n 'PUSH1', 0x00,\n 'PUSH20', # [address to delegate to]\n ]\n code_b = [\n 'GAS',\n 'DELEGATECALL',\n 'PUSH1', 0x2c, # jumpdest of whole program.\n 'JUMPI',\n 'PUSH1', 0x0,\n 'DUP1',\n 'REVERT',\n 'JUMPDEST',\n 'PUSH2', num_to_bytearray(0x1000),\n 'PUSH1', 0x00,\n 'RETURN'\n ]\n return assembly_to_evm(code_a)[0] + (b'\\x00' * 20) + assembly_to_evm(code_b)[0]\n\n\n@signature('address', value=Optional('uint256', zero_value))\ndef create_forwarder_to(expr, args, kwargs, context):\n\n value = kwargs['value']\n if value != zero_value:\n enforce_units(value.typ, get_keyword(expr, 'value'),\n BaseType('uint256', {'wei': 1}))\n if context.is_constant():\n raise ConstancyViolationException(\n \"Cannot make calls from %s\" % context.pp_constancy(),\n expr,\n )\n placeholder = context.new_placeholder(ByteArrayType(96))\n\n kode = get_create_forwarder_to_bytecode()\n high = bytes_to_int(kode[:32])\n low = bytes_to_int((kode + b'\\x00' * 32)[47:79])\n\n return LLLnode.from_list(\n [\n 'seq',\n ['mstore', placeholder, high],\n ['mstore', ['add', placeholder, 27], ['mul', args[0], 2**96]],\n ['mstore', ['add', placeholder, 47], low],\n ['clamp_nonzero', ['create', value, placeholder, 96]],\n ],\n typ=BaseType('address'),\n pos=getpos(expr),\n add_gas_estimate=11000,\n 
)\n\n\n@signature(('int128', 'decimal', 'uint256'), ('int128', 'decimal', 'uint256'))\ndef _min(expr, args, kwargs, context):\n return minmax(expr, args, kwargs, context, True)\n\n\n@signature(('int128', 'decimal', 'uint256'), ('int128', 'decimal', 'uint256'))\ndef _max(expr, args, kwargs, context):\n return minmax(expr, args, kwargs, context, False)\n\n\ndef minmax(expr, args, kwargs, context, is_min):\n def _can_compare_with_uint256(operand):\n if operand.typ.typ == 'uint256':\n return True\n elif operand.typ.typ == 'int128' and operand.typ.is_literal and SizeLimits.in_bounds('uint256', operand.value): # noqa: E501\n return True\n return False\n\n left, right = args[0], args[1]\n if not are_units_compatible(left.typ, right.typ) and not are_units_compatible(right.typ, left.typ): # noqa: E501\n raise TypeMismatchException(\"Units must be compatible\", expr)\n if left.typ.typ == 'uint256':\n comparator = 'gt' if is_min else 'lt'\n else:\n comparator = 'sgt' if is_min else 'slt'\n if left.typ.typ == right.typ.typ:\n o = ['if', [comparator, '_l', '_r'], '_r', '_l']\n otyp = left.typ\n otyp.is_literal = False\n elif _can_compare_with_uint256(left) and _can_compare_with_uint256(right):\n o = ['if', [comparator, '_l', '_r'], '_r', '_l']\n if right.typ.typ == 'uint256':\n otyp = right.typ\n else:\n otyp = left.typ\n otyp.is_literal = False\n else:\n raise TypeMismatchException(\n \"Minmax types incompatible: %s %s\" % (left.typ.typ, right.typ.typ)\n )\n return LLLnode.from_list(\n ['with', '_l', left, ['with', '_r', right, o]],\n typ=otyp,\n pos=getpos(expr),\n )\n\n\n@signature('decimal')\ndef sqrt(expr, args, kwargs, context):\n from vyper.functions.utils import (\n generate_inline_function,\n )\n arg = args[0]\n sqrt_code = \"\"\"\nassert x >= 0.0\nz: decimal\n\nif x == 0.0:\n z = 0.0\nelse:\n z = (x + 1.0) / 2.0\n y: decimal = x\n\n for i in range(256):\n if z == y:\n break\n y = z\n z = (x / z + z) / 2.0\n \"\"\"\n\n x_type = BaseType('decimal')\n placeholder_copy = ['pass']\n # Steal current position if variable is already allocated.\n if arg.value == 'mload':\n new_var_pos = arg.args[0]\n # Other locations need to be copied.\n else:\n new_var_pos = context.new_placeholder(x_type)\n placeholder_copy = ['mstore', new_var_pos, arg]\n # Create input variables.\n variables = {\n 'x': VariableRecord(\n name='x',\n pos=new_var_pos,\n typ=x_type,\n mutable=False\n )\n }\n # Generate inline LLL.\n new_ctx, sqrt_lll = generate_inline_function(\n code=sqrt_code,\n variables=variables,\n memory_allocator=context.memory_allocator\n )\n return LLLnode.from_list(\n [\n 'seq_unchecked',\n placeholder_copy, # load x variable\n sqrt_lll,\n ['mload', new_ctx.vars['z'].pos] # unload z variable into the stack,\n ],\n typ=BaseType('decimal'),\n pos=getpos(expr),\n )\n\n\ndef _clear():\n raise ParserException(\n \"This function should never be called! `clear()` is currently handled \"\n \"differently than other functions as it self modifies its input argument \"\n \"statement. 
Please see `_clear()` in `stmt.py`\"\n )\n\n\ndispatch_table = {\n 'floor': floor,\n 'ceil': ceil,\n 'as_unitless_number': as_unitless_number,\n 'convert': _convert,\n 'slice': _slice,\n 'len': _len,\n 'concat': concat,\n 'sha3': _sha3,\n 'sha256': sha256,\n 'method_id': method_id,\n 'keccak256': _sha3,\n 'ecrecover': ecrecover,\n 'ecadd': ecadd,\n 'ecmul': ecmul,\n 'extract32': extract32,\n 'as_wei_value': as_wei_value,\n 'raw_call': raw_call,\n 'RLPList': _RLPlist,\n 'blockhash': blockhash,\n 'bitwise_and': bitwise_and,\n 'bitwise_or': bitwise_or,\n 'bitwise_xor': bitwise_xor,\n 'bitwise_not': bitwise_not,\n 'uint256_addmod': uint256_addmod,\n 'uint256_mulmod': uint256_mulmod,\n 'sqrt': sqrt,\n 'shift': shift,\n 'create_forwarder_to': create_forwarder_to,\n 'min': _min,\n 'max': _max,\n}\n\nstmt_dispatch_table = {\n 'clear': _clear,\n 'send': send,\n 'selfdestruct': selfdestruct,\n 'raw_call': raw_call,\n 'raw_log': raw_log,\n 'create_forwarder_to': create_forwarder_to,\n}\n\nbuilt_in_functions = [\n x for x in stmt_dispatch_table.keys()\n] + [\n x for x in dispatch_table.keys()\n]\n", "path": "vyper/functions/functions.py" } ]
diff --git a/tests/parser/functions/test_concat.py b/tests/parser/functions/test_concat.py index 60a2b0b9d2..f7156c4bf5 100644 --- a/tests/parser/functions/test_concat.py +++ b/tests/parser/functions/test_concat.py @@ -1,3 +1,8 @@ +from vyper.exceptions import ( + TypeMismatchException, +) + + def test_concat(get_contract_with_gas_estimation): test_concat = """ @public @@ -99,3 +104,29 @@ def hoo(x: bytes32, y: bytes32) -> bytes[64]: assert c.hoo(b'\x35' * 32, b'\x00' * 32) == b'\x35' * 32 + b'\x00' * 32 print('Passed second concat tests') + + +def test_small_output(get_contract_with_gas_estimation): + code = """ +@public +def small_output(a: string[5], b: string[4]) -> string[9]: + c: string[9] = concat(a, b) + return c + """ + c = get_contract_with_gas_estimation(code) + assert c.small_output('abcde', 'fghi') == 'abcdefghi' + assert c.small_output('', '') == '' + + +def test_large_output(get_contract_with_gas_estimation, assert_compile_failed): + code = """ +@public +def large_output(a: string[33], b: string[33]) -> string[64]: + c: string[64] = concat(a, b) + return c + """ + + assert_compile_failed( + lambda: get_contract_with_gas_estimation(code), + TypeMismatchException + ) diff --git a/tests/parser/types/test_string.py b/tests/parser/types/test_string.py index e7f870a778..38cd818043 100644 --- a/tests/parser/types/test_string.py +++ b/tests/parser/types/test_string.py @@ -22,8 +22,8 @@ def test_string_concat(get_contract_with_gas_estimation): @public def testb(inp: string[10]) -> string[128]: a: string[100] = "return message:" - a = concat(a, " ", inp) - return a + b: string[128] = concat(a, " ", inp) + return b @public def testa(inp: string[10]) -> string[160]: diff --git a/vyper/functions/functions.py b/vyper/functions/functions.py index ab3751ef8e..25508dae29 100644 --- a/vyper/functions/functions.py +++ b/vyper/functions/functions.py @@ -232,7 +232,7 @@ def concat(expr, context): # Maximum length of the output total_maxlen = sum([ - arg.typ.maxlen if isinstance(arg.typ, ByteArrayType) else 32 for arg in args + arg.typ.maxlen if isinstance(arg.typ, ByteArrayLike) else 32 for arg in args ]) # Node representing the position of the output in memory placeholder = context.new_placeholder(ReturnType(total_maxlen))
Vyper treats strings as having length 32
### Version Information

* vyper Version: 0.1.0b9
* OS: osx

### What's your issue about?
When using `concat`, Vyper treats every string argument as having a maximum length of 32. As a result, this

```
@public
def conc(a: string[33], b: string[33]) -> string[64]:
    c: string[64] = concat(a, b)
    return c
```

compiles and even runs (returning a string of length 66), while the following

```
@public
def conc_fail(a: string[5], b: string[4]) -> string[9]:
    c: string[9] = concat(a, b)
    return c
```

fails with the compiler error "Cannot cast from greater max-length 64 to shorter max-length 9".
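The root cause is the one-line change to `concat` in the diff above: the output's maximum length was summed with `isinstance(arg.typ, ByteArrayType)`, which is false for `string[N]` arguments, so every string fell into the flat `else 32` branch; switching the check to `ByteArrayLike` counts the declared maximum of both `bytes[N]` and `string[N]`. Below is a minimal Python sketch of that calculation only — the stub classes merely mirror the subclass relationship implied by the imports in `vyper/functions/functions.py` and are not the real Vyper type classes.

```
# Stub types: assume string[N] and bytes[N] both derive from ByteArrayLike,
# as the imports in vyper/functions/functions.py imply (not the real classes).
class ByteArrayLike:
    def __init__(self, maxlen):
        self.maxlen = maxlen          # declared maximum length, e.g. string[33] -> 33

class ByteArrayType(ByteArrayLike):   # models bytes[N]
    pass

class StringType(ByteArrayLike):      # models string[N]
    pass

def total_maxlen(arg_types, check):
    # Mirrors concat()'s sum: use the declared maxlen only for types that
    # pass the isinstance check, otherwise count a flat 32 bytes.
    return sum(t.maxlen if isinstance(t, check) else 32 for t in arg_types)

two_long = [StringType(33), StringType(33)]   # concat(a, b) with two string[33]
print(total_maxlen(two_long, ByteArrayType))  # 64 -- old check: each string counted as 32
print(total_maxlen(two_long, ByteArrayLike))  # 66 -- fixed check: room for the real result

two_short = [StringType(5), StringType(4)]    # the string[5] + string[4] example
print(total_maxlen(two_short, ByteArrayType)) # 64 -- old check: rejected when assigned to string[9]
print(total_maxlen(two_short, ByteArrayLike)) # 9  -- fixed check: fits string[9]
```

This matches both examples in the issue: with the old check, two `string[33]` inputs were typed as at most 64 bytes even though the result can be 66 characters long, while `string[5]` plus `string[4]` was typed as 64 and rejected when assigned to `string[9]`. It is presumably also why the diff's test change in `tests/parser/types/test_string.py` now assigns the concatenation to a new `string[128]` variable rather than back into the `string[100]` input.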
python-discord__bot-822
[ { "content": "import asyncio\nimport difflib\nimport itertools\nimport logging\nimport typing as t\nfrom datetime import datetime\nfrom itertools import zip_longest\n\nimport discord\nfrom dateutil.relativedelta import relativedelta\nfrom deepdiff import DeepDiff\nfrom discord import Colour\nfrom discord.abc import GuildChannel\nfrom discord.ext.commands import Cog, Context\n\nfrom bot.bot import Bot\nfrom bot.constants import Channels, Colours, Emojis, Event, Guild as GuildConstant, Icons, URLs\nfrom bot.utils.time import humanize_delta\n\nlog = logging.getLogger(__name__)\n\nGUILD_CHANNEL = t.Union[discord.CategoryChannel, discord.TextChannel, discord.VoiceChannel]\n\nCHANNEL_CHANGES_UNSUPPORTED = (\"permissions\",)\nCHANNEL_CHANGES_SUPPRESSED = (\"_overwrites\", \"position\")\nMEMBER_CHANGES_SUPPRESSED = (\"status\", \"activities\", \"_client_status\", \"nick\")\nROLE_CHANGES_UNSUPPORTED = (\"colour\", \"permissions\")\n\nVOICE_STATE_ATTRIBUTES = {\n \"channel.name\": \"Channel\",\n \"self_stream\": \"Streaming\",\n \"self_video\": \"Broadcasting\",\n}\n\n\nclass ModLog(Cog, name=\"ModLog\"):\n \"\"\"Logging for server events and staff actions.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self._ignored = {event: [] for event in Event}\n\n self._cached_deletes = []\n self._cached_edits = []\n\n async def upload_log(\n self,\n messages: t.Iterable[discord.Message],\n actor_id: int,\n attachments: t.Iterable[t.List[str]] = None\n ) -> str:\n \"\"\"Upload message logs to the database and return a URL to a page for viewing the logs.\"\"\"\n if attachments is None:\n attachments = []\n\n response = await self.bot.api_client.post(\n 'bot/deleted-messages',\n json={\n 'actor': actor_id,\n 'creation': datetime.utcnow().isoformat(),\n 'deletedmessage_set': [\n {\n 'id': message.id,\n 'author': message.author.id,\n 'channel_id': message.channel.id,\n 'content': message.content,\n 'embeds': [embed.to_dict() for embed in message.embeds],\n 'attachments': attachment,\n }\n for message, attachment in zip_longest(messages, attachments)\n ]\n }\n )\n\n return f\"{URLs.site_logs_view}/{response['id']}\"\n\n def ignore(self, event: Event, *items: int) -> None:\n \"\"\"Add event to ignored events to suppress log emission.\"\"\"\n for item in items:\n if item not in self._ignored[event]:\n self._ignored[event].append(item)\n\n async def send_log_message(\n self,\n icon_url: t.Optional[str],\n colour: t.Union[discord.Colour, int],\n title: t.Optional[str],\n text: str,\n thumbnail: t.Optional[t.Union[str, discord.Asset]] = None,\n channel_id: int = Channels.mod_log,\n ping_everyone: bool = False,\n files: t.Optional[t.List[discord.File]] = None,\n content: t.Optional[str] = None,\n additional_embeds: t.Optional[t.List[discord.Embed]] = None,\n additional_embeds_msg: t.Optional[str] = None,\n timestamp_override: t.Optional[datetime] = None,\n footer: t.Optional[str] = None,\n ) -> Context:\n \"\"\"Generate log embed and send to logging channel.\"\"\"\n embed = discord.Embed(description=text)\n\n if title and icon_url:\n embed.set_author(name=title, icon_url=icon_url)\n\n embed.colour = colour\n embed.timestamp = timestamp_override or datetime.utcnow()\n\n if footer:\n embed.set_footer(text=footer)\n\n if thumbnail:\n embed.set_thumbnail(url=thumbnail)\n\n if ping_everyone:\n if content:\n content = f\"@everyone\\n{content}\"\n else:\n content = \"@everyone\"\n\n channel = self.bot.get_channel(channel_id)\n log_message = await channel.send(content=content, embed=embed, files=files)\n\n if 
additional_embeds:\n if additional_embeds_msg:\n await channel.send(additional_embeds_msg)\n for additional_embed in additional_embeds:\n await channel.send(embed=additional_embed)\n\n return await self.bot.get_context(log_message) # Optionally return for use with antispam\n\n @Cog.listener()\n async def on_guild_channel_create(self, channel: GUILD_CHANNEL) -> None:\n \"\"\"Log channel create event to mod log.\"\"\"\n if channel.guild.id != GuildConstant.id:\n return\n\n if isinstance(channel, discord.CategoryChannel):\n title = \"Category created\"\n message = f\"{channel.name} (`{channel.id}`)\"\n elif isinstance(channel, discord.VoiceChannel):\n title = \"Voice channel created\"\n\n if channel.category:\n message = f\"{channel.category}/{channel.name} (`{channel.id}`)\"\n else:\n message = f\"{channel.name} (`{channel.id}`)\"\n else:\n title = \"Text channel created\"\n\n if channel.category:\n message = f\"{channel.category}/{channel.name} (`{channel.id}`)\"\n else:\n message = f\"{channel.name} (`{channel.id}`)\"\n\n await self.send_log_message(Icons.hash_green, Colours.soft_green, title, message)\n\n @Cog.listener()\n async def on_guild_channel_delete(self, channel: GUILD_CHANNEL) -> None:\n \"\"\"Log channel delete event to mod log.\"\"\"\n if channel.guild.id != GuildConstant.id:\n return\n\n if isinstance(channel, discord.CategoryChannel):\n title = \"Category deleted\"\n elif isinstance(channel, discord.VoiceChannel):\n title = \"Voice channel deleted\"\n else:\n title = \"Text channel deleted\"\n\n if channel.category and not isinstance(channel, discord.CategoryChannel):\n message = f\"{channel.category}/{channel.name} (`{channel.id}`)\"\n else:\n message = f\"{channel.name} (`{channel.id}`)\"\n\n await self.send_log_message(\n Icons.hash_red, Colours.soft_red,\n title, message\n )\n\n @Cog.listener()\n async def on_guild_channel_update(self, before: GUILD_CHANNEL, after: GuildChannel) -> None:\n \"\"\"Log channel update event to mod log.\"\"\"\n if before.guild.id != GuildConstant.id:\n return\n\n if before.id in self._ignored[Event.guild_channel_update]:\n self._ignored[Event.guild_channel_update].remove(before.id)\n return\n\n diff = DeepDiff(before, after)\n changes = []\n done = []\n\n diff_values = diff.get(\"values_changed\", {})\n diff_values.update(diff.get(\"type_changes\", {}))\n\n for key, value in diff_values.items():\n if not key: # Not sure why, but it happens\n continue\n\n key = key[5:] # Remove \"root.\" prefix\n\n if \"[\" in key:\n key = key.split(\"[\", 1)[0]\n\n if \".\" in key:\n key = key.split(\".\", 1)[0]\n\n if key in done or key in CHANNEL_CHANGES_SUPPRESSED:\n continue\n\n if key in CHANNEL_CHANGES_UNSUPPORTED:\n changes.append(f\"**{key.title()}** updated\")\n else:\n new = value[\"new_value\"]\n old = value[\"old_value\"]\n\n changes.append(f\"**{key.title()}:** `{old}` **→** `{new}`\")\n\n done.append(key)\n\n if not changes:\n return\n\n message = \"\"\n\n for item in sorted(changes):\n message += f\"{Emojis.bullet} {item}\\n\"\n\n if after.category:\n message = f\"**{after.category}/#{after.name} (`{after.id}`)**\\n{message}\"\n else:\n message = f\"**#{after.name}** (`{after.id}`)\\n{message}\"\n\n await self.send_log_message(\n Icons.hash_blurple, Colour.blurple(),\n \"Channel updated\", message\n )\n\n @Cog.listener()\n async def on_guild_role_create(self, role: discord.Role) -> None:\n \"\"\"Log role create event to mod log.\"\"\"\n if role.guild.id != GuildConstant.id:\n return\n\n await self.send_log_message(\n Icons.crown_green, 
Colours.soft_green,\n \"Role created\", f\"`{role.id}`\"\n )\n\n @Cog.listener()\n async def on_guild_role_delete(self, role: discord.Role) -> None:\n \"\"\"Log role delete event to mod log.\"\"\"\n if role.guild.id != GuildConstant.id:\n return\n\n await self.send_log_message(\n Icons.crown_red, Colours.soft_red,\n \"Role removed\", f\"{role.name} (`{role.id}`)\"\n )\n\n @Cog.listener()\n async def on_guild_role_update(self, before: discord.Role, after: discord.Role) -> None:\n \"\"\"Log role update event to mod log.\"\"\"\n if before.guild.id != GuildConstant.id:\n return\n\n diff = DeepDiff(before, after)\n changes = []\n done = []\n\n diff_values = diff.get(\"values_changed\", {})\n diff_values.update(diff.get(\"type_changes\", {}))\n\n for key, value in diff_values.items():\n if not key: # Not sure why, but it happens\n continue\n\n key = key[5:] # Remove \"root.\" prefix\n\n if \"[\" in key:\n key = key.split(\"[\", 1)[0]\n\n if \".\" in key:\n key = key.split(\".\", 1)[0]\n\n if key in done or key == \"color\":\n continue\n\n if key in ROLE_CHANGES_UNSUPPORTED:\n changes.append(f\"**{key.title()}** updated\")\n else:\n new = value[\"new_value\"]\n old = value[\"old_value\"]\n\n changes.append(f\"**{key.title()}:** `{old}` **→** `{new}`\")\n\n done.append(key)\n\n if not changes:\n return\n\n message = \"\"\n\n for item in sorted(changes):\n message += f\"{Emojis.bullet} {item}\\n\"\n\n message = f\"**{after.name}** (`{after.id}`)\\n{message}\"\n\n await self.send_log_message(\n Icons.crown_blurple, Colour.blurple(),\n \"Role updated\", message\n )\n\n @Cog.listener()\n async def on_guild_update(self, before: discord.Guild, after: discord.Guild) -> None:\n \"\"\"Log guild update event to mod log.\"\"\"\n if before.id != GuildConstant.id:\n return\n\n diff = DeepDiff(before, after)\n changes = []\n done = []\n\n diff_values = diff.get(\"values_changed\", {})\n diff_values.update(diff.get(\"type_changes\", {}))\n\n for key, value in diff_values.items():\n if not key: # Not sure why, but it happens\n continue\n\n key = key[5:] # Remove \"root.\" prefix\n\n if \"[\" in key:\n key = key.split(\"[\", 1)[0]\n\n if \".\" in key:\n key = key.split(\".\", 1)[0]\n\n if key in done:\n continue\n\n new = value[\"new_value\"]\n old = value[\"old_value\"]\n\n changes.append(f\"**{key.title()}:** `{old}` **→** `{new}`\")\n\n done.append(key)\n\n if not changes:\n return\n\n message = \"\"\n\n for item in sorted(changes):\n message += f\"{Emojis.bullet} {item}\\n\"\n\n message = f\"**{after.name}** (`{after.id}`)\\n{message}\"\n\n await self.send_log_message(\n Icons.guild_update, Colour.blurple(),\n \"Guild updated\", message,\n thumbnail=after.icon_url_as(format=\"png\")\n )\n\n @Cog.listener()\n async def on_member_ban(self, guild: discord.Guild, member: discord.Member) -> None:\n \"\"\"Log ban event to user log.\"\"\"\n if guild.id != GuildConstant.id:\n return\n\n if member.id in self._ignored[Event.member_ban]:\n self._ignored[Event.member_ban].remove(member.id)\n return\n\n await self.send_log_message(\n Icons.user_ban, Colours.soft_red,\n \"User banned\", f\"{member} (`{member.id}`)\",\n thumbnail=member.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.user_log\n )\n\n @Cog.listener()\n async def on_member_join(self, member: discord.Member) -> None:\n \"\"\"Log member join event to user log.\"\"\"\n if member.guild.id != GuildConstant.id:\n return\n\n message = f\"{member} (`{member.id}`)\"\n now = datetime.utcnow()\n difference = abs(relativedelta(now, member.created_at))\n\n 
message += \"\\n\\n**Account age:** \" + humanize_delta(difference)\n\n if difference.days < 1 and difference.months < 1 and difference.years < 1: # New user account!\n message = f\"{Emojis.new} {message}\"\n\n await self.send_log_message(\n Icons.sign_in, Colours.soft_green,\n \"User joined\", message,\n thumbnail=member.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.user_log\n )\n\n @Cog.listener()\n async def on_member_remove(self, member: discord.Member) -> None:\n \"\"\"Log member leave event to user log.\"\"\"\n if member.guild.id != GuildConstant.id:\n return\n\n if member.id in self._ignored[Event.member_remove]:\n self._ignored[Event.member_remove].remove(member.id)\n return\n\n await self.send_log_message(\n Icons.sign_out, Colours.soft_red,\n \"User left\", f\"{member} (`{member.id}`)\",\n thumbnail=member.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.user_log\n )\n\n @Cog.listener()\n async def on_member_unban(self, guild: discord.Guild, member: discord.User) -> None:\n \"\"\"Log member unban event to mod log.\"\"\"\n if guild.id != GuildConstant.id:\n return\n\n if member.id in self._ignored[Event.member_unban]:\n self._ignored[Event.member_unban].remove(member.id)\n return\n\n await self.send_log_message(\n Icons.user_unban, Colour.blurple(),\n \"User unbanned\", f\"{member} (`{member.id}`)\",\n thumbnail=member.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.mod_log\n )\n\n @Cog.listener()\n async def on_member_update(self, before: discord.Member, after: discord.Member) -> None:\n \"\"\"Log member update event to user log.\"\"\"\n if before.guild.id != GuildConstant.id:\n return\n\n if before.id in self._ignored[Event.member_update]:\n self._ignored[Event.member_update].remove(before.id)\n return\n\n diff = DeepDiff(before, after)\n changes = []\n done = []\n\n diff_values = {}\n\n diff_values.update(diff.get(\"values_changed\", {}))\n diff_values.update(diff.get(\"type_changes\", {}))\n diff_values.update(diff.get(\"iterable_item_removed\", {}))\n diff_values.update(diff.get(\"iterable_item_added\", {}))\n\n diff_user = DeepDiff(before._user, after._user)\n\n diff_values.update(diff_user.get(\"values_changed\", {}))\n diff_values.update(diff_user.get(\"type_changes\", {}))\n diff_values.update(diff_user.get(\"iterable_item_removed\", {}))\n diff_values.update(diff_user.get(\"iterable_item_added\", {}))\n\n for key, value in diff_values.items():\n if not key: # Not sure why, but it happens\n continue\n\n key = key[5:] # Remove \"root.\" prefix\n\n if \"[\" in key:\n key = key.split(\"[\", 1)[0]\n\n if \".\" in key:\n key = key.split(\".\", 1)[0]\n\n if key in done or key in MEMBER_CHANGES_SUPPRESSED:\n continue\n\n if key == \"_roles\":\n new_roles = after.roles\n old_roles = before.roles\n\n for role in old_roles:\n if role not in new_roles:\n changes.append(f\"**Role removed:** {role.name} (`{role.id}`)\")\n\n for role in new_roles:\n if role not in old_roles:\n changes.append(f\"**Role added:** {role.name} (`{role.id}`)\")\n\n else:\n new = value.get(\"new_value\")\n old = value.get(\"old_value\")\n\n if new and old:\n changes.append(f\"**{key.title()}:** `{old}` **→** `{new}`\")\n\n done.append(key)\n\n if before.name != after.name:\n changes.append(\n f\"**Username:** `{before.name}` **→** `{after.name}`\"\n )\n\n if before.discriminator != after.discriminator:\n changes.append(\n f\"**Discriminator:** `{before.discriminator}` **→** `{after.discriminator}`\"\n )\n\n if before.display_name != after.display_name:\n changes.append(\n 
f\"**Display name:** `{before.display_name}` **→** `{after.display_name}`\"\n )\n\n if not changes:\n return\n\n message = \"\"\n\n for item in sorted(changes):\n message += f\"{Emojis.bullet} {item}\\n\"\n\n message = f\"**{after}** (`{after.id}`)\\n{message}\"\n\n await self.send_log_message(\n Icons.user_update, Colour.blurple(),\n \"Member updated\", message,\n thumbnail=after.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.user_log\n )\n\n @Cog.listener()\n async def on_message_delete(self, message: discord.Message) -> None:\n \"\"\"Log message delete event to message change log.\"\"\"\n channel = message.channel\n author = message.author\n\n if message.guild.id != GuildConstant.id or channel.id in GuildConstant.modlog_blacklist:\n return\n\n self._cached_deletes.append(message.id)\n\n if message.id in self._ignored[Event.message_delete]:\n self._ignored[Event.message_delete].remove(message.id)\n return\n\n if author.bot:\n return\n\n if channel.category:\n response = (\n f\"**Author:** {author} (`{author.id}`)\\n\"\n f\"**Channel:** {channel.category}/#{channel.name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{message.id}`\\n\"\n \"\\n\"\n )\n else:\n response = (\n f\"**Author:** {author} (`{author.id}`)\\n\"\n f\"**Channel:** #{channel.name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{message.id}`\\n\"\n \"\\n\"\n )\n\n if message.attachments:\n # Prepend the message metadata with the number of attachments\n response = f\"**Attachments:** {len(message.attachments)}\\n\" + response\n\n # Shorten the message content if necessary\n content = message.clean_content\n remaining_chars = 2040 - len(response)\n\n if len(content) > remaining_chars:\n botlog_url = await self.upload_log(messages=[message], actor_id=message.author.id)\n ending = f\"\\n\\nMessage truncated, [full message here]({botlog_url}).\"\n truncation_point = remaining_chars - len(ending)\n content = f\"{content[:truncation_point]}...{ending}\"\n\n response += f\"{content}\"\n\n await self.send_log_message(\n Icons.message_delete, Colours.soft_red,\n \"Message deleted\",\n response,\n channel_id=Channels.message_log\n )\n\n @Cog.listener()\n async def on_raw_message_delete(self, event: discord.RawMessageDeleteEvent) -> None:\n \"\"\"Log raw message delete event to message change log.\"\"\"\n if event.guild_id != GuildConstant.id or event.channel_id in GuildConstant.modlog_blacklist:\n return\n\n await asyncio.sleep(1) # Wait here in case the normal event was fired\n\n if event.message_id in self._cached_deletes:\n # It was in the cache and the normal event was fired, so we can just ignore it\n self._cached_deletes.remove(event.message_id)\n return\n\n if event.message_id in self._ignored[Event.message_delete]:\n self._ignored[Event.message_delete].remove(event.message_id)\n return\n\n channel = self.bot.get_channel(event.channel_id)\n\n if channel.category:\n response = (\n f\"**Channel:** {channel.category}/#{channel.name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{event.message_id}`\\n\"\n \"\\n\"\n \"This message was not cached, so the message content cannot be displayed.\"\n )\n else:\n response = (\n f\"**Channel:** #{channel.name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{event.message_id}`\\n\"\n \"\\n\"\n \"This message was not cached, so the message content cannot be displayed.\"\n )\n\n await self.send_log_message(\n Icons.message_delete, Colours.soft_red,\n \"Message deleted\",\n response,\n channel_id=Channels.message_log\n )\n\n @Cog.listener()\n async def on_message_edit(self, msg_before: 
discord.Message, msg_after: discord.Message) -> None:\n \"\"\"Log message edit event to message change log.\"\"\"\n if (\n not msg_before.guild\n or msg_before.guild.id != GuildConstant.id\n or msg_before.channel.id in GuildConstant.modlog_blacklist\n or msg_before.author.bot\n ):\n return\n\n self._cached_edits.append(msg_before.id)\n\n if msg_before.content == msg_after.content:\n return\n\n author = msg_before.author\n channel = msg_before.channel\n channel_name = f\"{channel.category}/#{channel.name}\" if channel.category else f\"#{channel.name}\"\n\n # Getting the difference per words and group them by type - add, remove, same\n # Note that this is intended grouping without sorting\n diff = difflib.ndiff(msg_before.clean_content.split(), msg_after.clean_content.split())\n diff_groups = tuple(\n (diff_type, tuple(s[2:] for s in diff_words))\n for diff_type, diff_words in itertools.groupby(diff, key=lambda s: s[0])\n )\n\n content_before: t.List[str] = []\n content_after: t.List[str] = []\n\n for index, (diff_type, words) in enumerate(diff_groups):\n sub = ' '.join(words)\n if diff_type == '-':\n content_before.append(f\"[{sub}](http://o.hi)\")\n elif diff_type == '+':\n content_after.append(f\"[{sub}](http://o.hi)\")\n elif diff_type == ' ':\n if len(words) > 2:\n sub = (\n f\"{words[0] if index > 0 else ''}\"\n \" ... \"\n f\"{words[-1] if index < len(diff_groups) - 1 else ''}\"\n )\n content_before.append(sub)\n content_after.append(sub)\n\n response = (\n f\"**Author:** {author} (`{author.id}`)\\n\"\n f\"**Channel:** {channel_name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{msg_before.id}`\\n\"\n \"\\n\"\n f\"**Before**:\\n{' '.join(content_before)}\\n\"\n f\"**After**:\\n{' '.join(content_after)}\\n\"\n \"\\n\"\n f\"[Jump to message]({msg_after.jump_url})\"\n )\n\n if msg_before.edited_at:\n # Message was previously edited, to assist with self-bot detection, use the edited_at\n # datetime as the baseline and create a human-readable delta between this edit event\n # and the last time the message was edited\n timestamp = msg_before.edited_at\n delta = humanize_delta(relativedelta(msg_after.edited_at, msg_before.edited_at))\n footer = f\"Last edited {delta} ago\"\n else:\n # Message was not previously edited, use the created_at datetime as the baseline, no\n # delta calculation needed\n timestamp = msg_before.created_at\n footer = None\n\n await self.send_log_message(\n Icons.message_edit, Colour.blurple(), \"Message edited\", response,\n channel_id=Channels.message_log, timestamp_override=timestamp, footer=footer\n )\n\n @Cog.listener()\n async def on_raw_message_edit(self, event: discord.RawMessageUpdateEvent) -> None:\n \"\"\"Log raw message edit event to message change log.\"\"\"\n try:\n channel = self.bot.get_channel(int(event.data[\"channel_id\"]))\n message = await channel.fetch_message(event.message_id)\n except discord.NotFound: # Was deleted before we got the event\n return\n\n if (\n not message.guild\n or message.guild.id != GuildConstant.id\n or message.channel.id in GuildConstant.modlog_blacklist\n or message.author.bot\n ):\n return\n\n await asyncio.sleep(1) # Wait here in case the normal event was fired\n\n if event.message_id in self._cached_edits:\n # It was in the cache and the normal event was fired, so we can just ignore it\n self._cached_edits.remove(event.message_id)\n return\n\n author = message.author\n channel = message.channel\n channel_name = f\"{channel.category}/#{channel.name}\" if channel.category else f\"#{channel.name}\"\n\n before_response = (\n 
f\"**Author:** {author} (`{author.id}`)\\n\"\n f\"**Channel:** {channel_name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{message.id}`\\n\"\n \"\\n\"\n \"This message was not cached, so the message content cannot be displayed.\"\n )\n\n after_response = (\n f\"**Author:** {author} (`{author.id}`)\\n\"\n f\"**Channel:** {channel_name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{message.id}`\\n\"\n \"\\n\"\n f\"{message.clean_content}\"\n )\n\n await self.send_log_message(\n Icons.message_edit, Colour.blurple(), \"Message edited (Before)\",\n before_response, channel_id=Channels.message_log\n )\n\n await self.send_log_message(\n Icons.message_edit, Colour.blurple(), \"Message edited (After)\",\n after_response, channel_id=Channels.message_log\n )\n\n @Cog.listener()\n async def on_voice_state_update(\n self,\n member: discord.Member,\n before: discord.VoiceState,\n after: discord.VoiceState\n ) -> None:\n \"\"\"Log member voice state changes to the voice log channel.\"\"\"\n if (\n member.guild.id != GuildConstant.id\n or (before.channel and before.channel.id in GuildConstant.modlog_blacklist)\n ):\n return\n\n if member.id in self._ignored[Event.voice_state_update]:\n self._ignored[Event.voice_state_update].remove(member.id)\n return\n\n # Exclude all channel attributes except the name.\n diff = DeepDiff(\n before,\n after,\n exclude_paths=(\"root.session_id\", \"root.afk\"),\n exclude_regex_paths=r\"root\\.channel\\.(?!name)\",\n )\n\n # A type change seems to always take precedent over a value change. Furthermore, it will\n # include the value change along with the type change anyway. Therefore, it's OK to\n # \"overwrite\" values_changed; in practice there will never even be anything to overwrite.\n diff_values = {**diff.get(\"values_changed\", {}), **diff.get(\"type_changes\", {})}\n\n icon = Icons.voice_state_blue\n colour = Colour.blurple()\n changes = []\n\n for attr, values in diff_values.items():\n if not attr: # Not sure why, but it happens.\n continue\n\n old = values[\"old_value\"]\n new = values[\"new_value\"]\n\n attr = attr[5:] # Remove \"root.\" prefix.\n attr = VOICE_STATE_ATTRIBUTES.get(attr, attr.replace(\"_\", \" \").capitalize())\n\n changes.append(f\"**{attr}:** `{old}` **→** `{new}`\")\n\n # Set the embed icon and colour depending on which attribute changed.\n if any(name in attr for name in (\"Channel\", \"deaf\", \"mute\")):\n if new is None or new is True:\n # Left a channel or was muted/deafened.\n icon = Icons.voice_state_red\n colour = Colours.soft_red\n elif old is None or old is True:\n # Joined a channel or was unmuted/undeafened.\n icon = Icons.voice_state_green\n colour = Colours.soft_green\n\n if not changes:\n return\n\n message = \"\\n\".join(f\"{Emojis.bullet} {item}\" for item in sorted(changes))\n message = f\"**{member}** (`{member.id}`)\\n{message}\"\n\n await self.send_log_message(\n icon_url=icon,\n colour=colour,\n title=\"Voice state updated\",\n text=message,\n thumbnail=member.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.voice_log\n )\n", "path": "bot/cogs/moderation/modlog.py" } ]
[ { "content": "import asyncio\nimport difflib\nimport itertools\nimport logging\nimport typing as t\nfrom datetime import datetime\nfrom itertools import zip_longest\n\nimport discord\nfrom dateutil.relativedelta import relativedelta\nfrom deepdiff import DeepDiff\nfrom discord import Colour\nfrom discord.abc import GuildChannel\nfrom discord.ext.commands import Cog, Context\n\nfrom bot.bot import Bot\nfrom bot.constants import Channels, Colours, Emojis, Event, Guild as GuildConstant, Icons, URLs\nfrom bot.utils.time import humanize_delta\n\nlog = logging.getLogger(__name__)\n\nGUILD_CHANNEL = t.Union[discord.CategoryChannel, discord.TextChannel, discord.VoiceChannel]\n\nCHANNEL_CHANGES_UNSUPPORTED = (\"permissions\",)\nCHANNEL_CHANGES_SUPPRESSED = (\"_overwrites\", \"position\")\nMEMBER_CHANGES_SUPPRESSED = (\"status\", \"activities\", \"_client_status\", \"nick\")\nROLE_CHANGES_UNSUPPORTED = (\"colour\", \"permissions\")\n\nVOICE_STATE_ATTRIBUTES = {\n \"channel.name\": \"Channel\",\n \"self_stream\": \"Streaming\",\n \"self_video\": \"Broadcasting\",\n}\n\n\nclass ModLog(Cog, name=\"ModLog\"):\n \"\"\"Logging for server events and staff actions.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self._ignored = {event: [] for event in Event}\n\n self._cached_deletes = []\n self._cached_edits = []\n\n async def upload_log(\n self,\n messages: t.Iterable[discord.Message],\n actor_id: int,\n attachments: t.Iterable[t.List[str]] = None\n ) -> str:\n \"\"\"Upload message logs to the database and return a URL to a page for viewing the logs.\"\"\"\n if attachments is None:\n attachments = []\n\n response = await self.bot.api_client.post(\n 'bot/deleted-messages',\n json={\n 'actor': actor_id,\n 'creation': datetime.utcnow().isoformat(),\n 'deletedmessage_set': [\n {\n 'id': message.id,\n 'author': message.author.id,\n 'channel_id': message.channel.id,\n 'content': message.content,\n 'embeds': [embed.to_dict() for embed in message.embeds],\n 'attachments': attachment,\n }\n for message, attachment in zip_longest(messages, attachments, fillvalue=[])\n ]\n }\n )\n\n return f\"{URLs.site_logs_view}/{response['id']}\"\n\n def ignore(self, event: Event, *items: int) -> None:\n \"\"\"Add event to ignored events to suppress log emission.\"\"\"\n for item in items:\n if item not in self._ignored[event]:\n self._ignored[event].append(item)\n\n async def send_log_message(\n self,\n icon_url: t.Optional[str],\n colour: t.Union[discord.Colour, int],\n title: t.Optional[str],\n text: str,\n thumbnail: t.Optional[t.Union[str, discord.Asset]] = None,\n channel_id: int = Channels.mod_log,\n ping_everyone: bool = False,\n files: t.Optional[t.List[discord.File]] = None,\n content: t.Optional[str] = None,\n additional_embeds: t.Optional[t.List[discord.Embed]] = None,\n additional_embeds_msg: t.Optional[str] = None,\n timestamp_override: t.Optional[datetime] = None,\n footer: t.Optional[str] = None,\n ) -> Context:\n \"\"\"Generate log embed and send to logging channel.\"\"\"\n embed = discord.Embed(description=text)\n\n if title and icon_url:\n embed.set_author(name=title, icon_url=icon_url)\n\n embed.colour = colour\n embed.timestamp = timestamp_override or datetime.utcnow()\n\n if footer:\n embed.set_footer(text=footer)\n\n if thumbnail:\n embed.set_thumbnail(url=thumbnail)\n\n if ping_everyone:\n if content:\n content = f\"@everyone\\n{content}\"\n else:\n content = \"@everyone\"\n\n channel = self.bot.get_channel(channel_id)\n log_message = await channel.send(content=content, embed=embed, 
files=files)\n\n if additional_embeds:\n if additional_embeds_msg:\n await channel.send(additional_embeds_msg)\n for additional_embed in additional_embeds:\n await channel.send(embed=additional_embed)\n\n return await self.bot.get_context(log_message) # Optionally return for use with antispam\n\n @Cog.listener()\n async def on_guild_channel_create(self, channel: GUILD_CHANNEL) -> None:\n \"\"\"Log channel create event to mod log.\"\"\"\n if channel.guild.id != GuildConstant.id:\n return\n\n if isinstance(channel, discord.CategoryChannel):\n title = \"Category created\"\n message = f\"{channel.name} (`{channel.id}`)\"\n elif isinstance(channel, discord.VoiceChannel):\n title = \"Voice channel created\"\n\n if channel.category:\n message = f\"{channel.category}/{channel.name} (`{channel.id}`)\"\n else:\n message = f\"{channel.name} (`{channel.id}`)\"\n else:\n title = \"Text channel created\"\n\n if channel.category:\n message = f\"{channel.category}/{channel.name} (`{channel.id}`)\"\n else:\n message = f\"{channel.name} (`{channel.id}`)\"\n\n await self.send_log_message(Icons.hash_green, Colours.soft_green, title, message)\n\n @Cog.listener()\n async def on_guild_channel_delete(self, channel: GUILD_CHANNEL) -> None:\n \"\"\"Log channel delete event to mod log.\"\"\"\n if channel.guild.id != GuildConstant.id:\n return\n\n if isinstance(channel, discord.CategoryChannel):\n title = \"Category deleted\"\n elif isinstance(channel, discord.VoiceChannel):\n title = \"Voice channel deleted\"\n else:\n title = \"Text channel deleted\"\n\n if channel.category and not isinstance(channel, discord.CategoryChannel):\n message = f\"{channel.category}/{channel.name} (`{channel.id}`)\"\n else:\n message = f\"{channel.name} (`{channel.id}`)\"\n\n await self.send_log_message(\n Icons.hash_red, Colours.soft_red,\n title, message\n )\n\n @Cog.listener()\n async def on_guild_channel_update(self, before: GUILD_CHANNEL, after: GuildChannel) -> None:\n \"\"\"Log channel update event to mod log.\"\"\"\n if before.guild.id != GuildConstant.id:\n return\n\n if before.id in self._ignored[Event.guild_channel_update]:\n self._ignored[Event.guild_channel_update].remove(before.id)\n return\n\n diff = DeepDiff(before, after)\n changes = []\n done = []\n\n diff_values = diff.get(\"values_changed\", {})\n diff_values.update(diff.get(\"type_changes\", {}))\n\n for key, value in diff_values.items():\n if not key: # Not sure why, but it happens\n continue\n\n key = key[5:] # Remove \"root.\" prefix\n\n if \"[\" in key:\n key = key.split(\"[\", 1)[0]\n\n if \".\" in key:\n key = key.split(\".\", 1)[0]\n\n if key in done or key in CHANNEL_CHANGES_SUPPRESSED:\n continue\n\n if key in CHANNEL_CHANGES_UNSUPPORTED:\n changes.append(f\"**{key.title()}** updated\")\n else:\n new = value[\"new_value\"]\n old = value[\"old_value\"]\n\n changes.append(f\"**{key.title()}:** `{old}` **→** `{new}`\")\n\n done.append(key)\n\n if not changes:\n return\n\n message = \"\"\n\n for item in sorted(changes):\n message += f\"{Emojis.bullet} {item}\\n\"\n\n if after.category:\n message = f\"**{after.category}/#{after.name} (`{after.id}`)**\\n{message}\"\n else:\n message = f\"**#{after.name}** (`{after.id}`)\\n{message}\"\n\n await self.send_log_message(\n Icons.hash_blurple, Colour.blurple(),\n \"Channel updated\", message\n )\n\n @Cog.listener()\n async def on_guild_role_create(self, role: discord.Role) -> None:\n \"\"\"Log role create event to mod log.\"\"\"\n if role.guild.id != GuildConstant.id:\n return\n\n await self.send_log_message(\n 
Icons.crown_green, Colours.soft_green,\n \"Role created\", f\"`{role.id}`\"\n )\n\n @Cog.listener()\n async def on_guild_role_delete(self, role: discord.Role) -> None:\n \"\"\"Log role delete event to mod log.\"\"\"\n if role.guild.id != GuildConstant.id:\n return\n\n await self.send_log_message(\n Icons.crown_red, Colours.soft_red,\n \"Role removed\", f\"{role.name} (`{role.id}`)\"\n )\n\n @Cog.listener()\n async def on_guild_role_update(self, before: discord.Role, after: discord.Role) -> None:\n \"\"\"Log role update event to mod log.\"\"\"\n if before.guild.id != GuildConstant.id:\n return\n\n diff = DeepDiff(before, after)\n changes = []\n done = []\n\n diff_values = diff.get(\"values_changed\", {})\n diff_values.update(diff.get(\"type_changes\", {}))\n\n for key, value in diff_values.items():\n if not key: # Not sure why, but it happens\n continue\n\n key = key[5:] # Remove \"root.\" prefix\n\n if \"[\" in key:\n key = key.split(\"[\", 1)[0]\n\n if \".\" in key:\n key = key.split(\".\", 1)[0]\n\n if key in done or key == \"color\":\n continue\n\n if key in ROLE_CHANGES_UNSUPPORTED:\n changes.append(f\"**{key.title()}** updated\")\n else:\n new = value[\"new_value\"]\n old = value[\"old_value\"]\n\n changes.append(f\"**{key.title()}:** `{old}` **→** `{new}`\")\n\n done.append(key)\n\n if not changes:\n return\n\n message = \"\"\n\n for item in sorted(changes):\n message += f\"{Emojis.bullet} {item}\\n\"\n\n message = f\"**{after.name}** (`{after.id}`)\\n{message}\"\n\n await self.send_log_message(\n Icons.crown_blurple, Colour.blurple(),\n \"Role updated\", message\n )\n\n @Cog.listener()\n async def on_guild_update(self, before: discord.Guild, after: discord.Guild) -> None:\n \"\"\"Log guild update event to mod log.\"\"\"\n if before.id != GuildConstant.id:\n return\n\n diff = DeepDiff(before, after)\n changes = []\n done = []\n\n diff_values = diff.get(\"values_changed\", {})\n diff_values.update(diff.get(\"type_changes\", {}))\n\n for key, value in diff_values.items():\n if not key: # Not sure why, but it happens\n continue\n\n key = key[5:] # Remove \"root.\" prefix\n\n if \"[\" in key:\n key = key.split(\"[\", 1)[0]\n\n if \".\" in key:\n key = key.split(\".\", 1)[0]\n\n if key in done:\n continue\n\n new = value[\"new_value\"]\n old = value[\"old_value\"]\n\n changes.append(f\"**{key.title()}:** `{old}` **→** `{new}`\")\n\n done.append(key)\n\n if not changes:\n return\n\n message = \"\"\n\n for item in sorted(changes):\n message += f\"{Emojis.bullet} {item}\\n\"\n\n message = f\"**{after.name}** (`{after.id}`)\\n{message}\"\n\n await self.send_log_message(\n Icons.guild_update, Colour.blurple(),\n \"Guild updated\", message,\n thumbnail=after.icon_url_as(format=\"png\")\n )\n\n @Cog.listener()\n async def on_member_ban(self, guild: discord.Guild, member: discord.Member) -> None:\n \"\"\"Log ban event to user log.\"\"\"\n if guild.id != GuildConstant.id:\n return\n\n if member.id in self._ignored[Event.member_ban]:\n self._ignored[Event.member_ban].remove(member.id)\n return\n\n await self.send_log_message(\n Icons.user_ban, Colours.soft_red,\n \"User banned\", f\"{member} (`{member.id}`)\",\n thumbnail=member.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.user_log\n )\n\n @Cog.listener()\n async def on_member_join(self, member: discord.Member) -> None:\n \"\"\"Log member join event to user log.\"\"\"\n if member.guild.id != GuildConstant.id:\n return\n\n message = f\"{member} (`{member.id}`)\"\n now = datetime.utcnow()\n difference = abs(relativedelta(now, 
member.created_at))\n\n message += \"\\n\\n**Account age:** \" + humanize_delta(difference)\n\n if difference.days < 1 and difference.months < 1 and difference.years < 1: # New user account!\n message = f\"{Emojis.new} {message}\"\n\n await self.send_log_message(\n Icons.sign_in, Colours.soft_green,\n \"User joined\", message,\n thumbnail=member.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.user_log\n )\n\n @Cog.listener()\n async def on_member_remove(self, member: discord.Member) -> None:\n \"\"\"Log member leave event to user log.\"\"\"\n if member.guild.id != GuildConstant.id:\n return\n\n if member.id in self._ignored[Event.member_remove]:\n self._ignored[Event.member_remove].remove(member.id)\n return\n\n await self.send_log_message(\n Icons.sign_out, Colours.soft_red,\n \"User left\", f\"{member} (`{member.id}`)\",\n thumbnail=member.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.user_log\n )\n\n @Cog.listener()\n async def on_member_unban(self, guild: discord.Guild, member: discord.User) -> None:\n \"\"\"Log member unban event to mod log.\"\"\"\n if guild.id != GuildConstant.id:\n return\n\n if member.id in self._ignored[Event.member_unban]:\n self._ignored[Event.member_unban].remove(member.id)\n return\n\n await self.send_log_message(\n Icons.user_unban, Colour.blurple(),\n \"User unbanned\", f\"{member} (`{member.id}`)\",\n thumbnail=member.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.mod_log\n )\n\n @Cog.listener()\n async def on_member_update(self, before: discord.Member, after: discord.Member) -> None:\n \"\"\"Log member update event to user log.\"\"\"\n if before.guild.id != GuildConstant.id:\n return\n\n if before.id in self._ignored[Event.member_update]:\n self._ignored[Event.member_update].remove(before.id)\n return\n\n diff = DeepDiff(before, after)\n changes = []\n done = []\n\n diff_values = {}\n\n diff_values.update(diff.get(\"values_changed\", {}))\n diff_values.update(diff.get(\"type_changes\", {}))\n diff_values.update(diff.get(\"iterable_item_removed\", {}))\n diff_values.update(diff.get(\"iterable_item_added\", {}))\n\n diff_user = DeepDiff(before._user, after._user)\n\n diff_values.update(diff_user.get(\"values_changed\", {}))\n diff_values.update(diff_user.get(\"type_changes\", {}))\n diff_values.update(diff_user.get(\"iterable_item_removed\", {}))\n diff_values.update(diff_user.get(\"iterable_item_added\", {}))\n\n for key, value in diff_values.items():\n if not key: # Not sure why, but it happens\n continue\n\n key = key[5:] # Remove \"root.\" prefix\n\n if \"[\" in key:\n key = key.split(\"[\", 1)[0]\n\n if \".\" in key:\n key = key.split(\".\", 1)[0]\n\n if key in done or key in MEMBER_CHANGES_SUPPRESSED:\n continue\n\n if key == \"_roles\":\n new_roles = after.roles\n old_roles = before.roles\n\n for role in old_roles:\n if role not in new_roles:\n changes.append(f\"**Role removed:** {role.name} (`{role.id}`)\")\n\n for role in new_roles:\n if role not in old_roles:\n changes.append(f\"**Role added:** {role.name} (`{role.id}`)\")\n\n else:\n new = value.get(\"new_value\")\n old = value.get(\"old_value\")\n\n if new and old:\n changes.append(f\"**{key.title()}:** `{old}` **→** `{new}`\")\n\n done.append(key)\n\n if before.name != after.name:\n changes.append(\n f\"**Username:** `{before.name}` **→** `{after.name}`\"\n )\n\n if before.discriminator != after.discriminator:\n changes.append(\n f\"**Discriminator:** `{before.discriminator}` **→** `{after.discriminator}`\"\n )\n\n if before.display_name != 
after.display_name:\n changes.append(\n f\"**Display name:** `{before.display_name}` **→** `{after.display_name}`\"\n )\n\n if not changes:\n return\n\n message = \"\"\n\n for item in sorted(changes):\n message += f\"{Emojis.bullet} {item}\\n\"\n\n message = f\"**{after}** (`{after.id}`)\\n{message}\"\n\n await self.send_log_message(\n Icons.user_update, Colour.blurple(),\n \"Member updated\", message,\n thumbnail=after.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.user_log\n )\n\n @Cog.listener()\n async def on_message_delete(self, message: discord.Message) -> None:\n \"\"\"Log message delete event to message change log.\"\"\"\n channel = message.channel\n author = message.author\n\n if message.guild.id != GuildConstant.id or channel.id in GuildConstant.modlog_blacklist:\n return\n\n self._cached_deletes.append(message.id)\n\n if message.id in self._ignored[Event.message_delete]:\n self._ignored[Event.message_delete].remove(message.id)\n return\n\n if author.bot:\n return\n\n if channel.category:\n response = (\n f\"**Author:** {author} (`{author.id}`)\\n\"\n f\"**Channel:** {channel.category}/#{channel.name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{message.id}`\\n\"\n \"\\n\"\n )\n else:\n response = (\n f\"**Author:** {author} (`{author.id}`)\\n\"\n f\"**Channel:** #{channel.name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{message.id}`\\n\"\n \"\\n\"\n )\n\n if message.attachments:\n # Prepend the message metadata with the number of attachments\n response = f\"**Attachments:** {len(message.attachments)}\\n\" + response\n\n # Shorten the message content if necessary\n content = message.clean_content\n remaining_chars = 2040 - len(response)\n\n if len(content) > remaining_chars:\n botlog_url = await self.upload_log(messages=[message], actor_id=message.author.id)\n ending = f\"\\n\\nMessage truncated, [full message here]({botlog_url}).\"\n truncation_point = remaining_chars - len(ending)\n content = f\"{content[:truncation_point]}...{ending}\"\n\n response += f\"{content}\"\n\n await self.send_log_message(\n Icons.message_delete, Colours.soft_red,\n \"Message deleted\",\n response,\n channel_id=Channels.message_log\n )\n\n @Cog.listener()\n async def on_raw_message_delete(self, event: discord.RawMessageDeleteEvent) -> None:\n \"\"\"Log raw message delete event to message change log.\"\"\"\n if event.guild_id != GuildConstant.id or event.channel_id in GuildConstant.modlog_blacklist:\n return\n\n await asyncio.sleep(1) # Wait here in case the normal event was fired\n\n if event.message_id in self._cached_deletes:\n # It was in the cache and the normal event was fired, so we can just ignore it\n self._cached_deletes.remove(event.message_id)\n return\n\n if event.message_id in self._ignored[Event.message_delete]:\n self._ignored[Event.message_delete].remove(event.message_id)\n return\n\n channel = self.bot.get_channel(event.channel_id)\n\n if channel.category:\n response = (\n f\"**Channel:** {channel.category}/#{channel.name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{event.message_id}`\\n\"\n \"\\n\"\n \"This message was not cached, so the message content cannot be displayed.\"\n )\n else:\n response = (\n f\"**Channel:** #{channel.name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{event.message_id}`\\n\"\n \"\\n\"\n \"This message was not cached, so the message content cannot be displayed.\"\n )\n\n await self.send_log_message(\n Icons.message_delete, Colours.soft_red,\n \"Message deleted\",\n response,\n channel_id=Channels.message_log\n )\n\n @Cog.listener()\n async 
def on_message_edit(self, msg_before: discord.Message, msg_after: discord.Message) -> None:\n \"\"\"Log message edit event to message change log.\"\"\"\n if (\n not msg_before.guild\n or msg_before.guild.id != GuildConstant.id\n or msg_before.channel.id in GuildConstant.modlog_blacklist\n or msg_before.author.bot\n ):\n return\n\n self._cached_edits.append(msg_before.id)\n\n if msg_before.content == msg_after.content:\n return\n\n author = msg_before.author\n channel = msg_before.channel\n channel_name = f\"{channel.category}/#{channel.name}\" if channel.category else f\"#{channel.name}\"\n\n # Getting the difference per words and group them by type - add, remove, same\n # Note that this is intended grouping without sorting\n diff = difflib.ndiff(msg_before.clean_content.split(), msg_after.clean_content.split())\n diff_groups = tuple(\n (diff_type, tuple(s[2:] for s in diff_words))\n for diff_type, diff_words in itertools.groupby(diff, key=lambda s: s[0])\n )\n\n content_before: t.List[str] = []\n content_after: t.List[str] = []\n\n for index, (diff_type, words) in enumerate(diff_groups):\n sub = ' '.join(words)\n if diff_type == '-':\n content_before.append(f\"[{sub}](http://o.hi)\")\n elif diff_type == '+':\n content_after.append(f\"[{sub}](http://o.hi)\")\n elif diff_type == ' ':\n if len(words) > 2:\n sub = (\n f\"{words[0] if index > 0 else ''}\"\n \" ... \"\n f\"{words[-1] if index < len(diff_groups) - 1 else ''}\"\n )\n content_before.append(sub)\n content_after.append(sub)\n\n response = (\n f\"**Author:** {author} (`{author.id}`)\\n\"\n f\"**Channel:** {channel_name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{msg_before.id}`\\n\"\n \"\\n\"\n f\"**Before**:\\n{' '.join(content_before)}\\n\"\n f\"**After**:\\n{' '.join(content_after)}\\n\"\n \"\\n\"\n f\"[Jump to message]({msg_after.jump_url})\"\n )\n\n if msg_before.edited_at:\n # Message was previously edited, to assist with self-bot detection, use the edited_at\n # datetime as the baseline and create a human-readable delta between this edit event\n # and the last time the message was edited\n timestamp = msg_before.edited_at\n delta = humanize_delta(relativedelta(msg_after.edited_at, msg_before.edited_at))\n footer = f\"Last edited {delta} ago\"\n else:\n # Message was not previously edited, use the created_at datetime as the baseline, no\n # delta calculation needed\n timestamp = msg_before.created_at\n footer = None\n\n await self.send_log_message(\n Icons.message_edit, Colour.blurple(), \"Message edited\", response,\n channel_id=Channels.message_log, timestamp_override=timestamp, footer=footer\n )\n\n @Cog.listener()\n async def on_raw_message_edit(self, event: discord.RawMessageUpdateEvent) -> None:\n \"\"\"Log raw message edit event to message change log.\"\"\"\n try:\n channel = self.bot.get_channel(int(event.data[\"channel_id\"]))\n message = await channel.fetch_message(event.message_id)\n except discord.NotFound: # Was deleted before we got the event\n return\n\n if (\n not message.guild\n or message.guild.id != GuildConstant.id\n or message.channel.id in GuildConstant.modlog_blacklist\n or message.author.bot\n ):\n return\n\n await asyncio.sleep(1) # Wait here in case the normal event was fired\n\n if event.message_id in self._cached_edits:\n # It was in the cache and the normal event was fired, so we can just ignore it\n self._cached_edits.remove(event.message_id)\n return\n\n author = message.author\n channel = message.channel\n channel_name = f\"{channel.category}/#{channel.name}\" if channel.category else 
f\"#{channel.name}\"\n\n before_response = (\n f\"**Author:** {author} (`{author.id}`)\\n\"\n f\"**Channel:** {channel_name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{message.id}`\\n\"\n \"\\n\"\n \"This message was not cached, so the message content cannot be displayed.\"\n )\n\n after_response = (\n f\"**Author:** {author} (`{author.id}`)\\n\"\n f\"**Channel:** {channel_name} (`{channel.id}`)\\n\"\n f\"**Message ID:** `{message.id}`\\n\"\n \"\\n\"\n f\"{message.clean_content}\"\n )\n\n await self.send_log_message(\n Icons.message_edit, Colour.blurple(), \"Message edited (Before)\",\n before_response, channel_id=Channels.message_log\n )\n\n await self.send_log_message(\n Icons.message_edit, Colour.blurple(), \"Message edited (After)\",\n after_response, channel_id=Channels.message_log\n )\n\n @Cog.listener()\n async def on_voice_state_update(\n self,\n member: discord.Member,\n before: discord.VoiceState,\n after: discord.VoiceState\n ) -> None:\n \"\"\"Log member voice state changes to the voice log channel.\"\"\"\n if (\n member.guild.id != GuildConstant.id\n or (before.channel and before.channel.id in GuildConstant.modlog_blacklist)\n ):\n return\n\n if member.id in self._ignored[Event.voice_state_update]:\n self._ignored[Event.voice_state_update].remove(member.id)\n return\n\n # Exclude all channel attributes except the name.\n diff = DeepDiff(\n before,\n after,\n exclude_paths=(\"root.session_id\", \"root.afk\"),\n exclude_regex_paths=r\"root\\.channel\\.(?!name)\",\n )\n\n # A type change seems to always take precedent over a value change. Furthermore, it will\n # include the value change along with the type change anyway. Therefore, it's OK to\n # \"overwrite\" values_changed; in practice there will never even be anything to overwrite.\n diff_values = {**diff.get(\"values_changed\", {}), **diff.get(\"type_changes\", {})}\n\n icon = Icons.voice_state_blue\n colour = Colour.blurple()\n changes = []\n\n for attr, values in diff_values.items():\n if not attr: # Not sure why, but it happens.\n continue\n\n old = values[\"old_value\"]\n new = values[\"new_value\"]\n\n attr = attr[5:] # Remove \"root.\" prefix.\n attr = VOICE_STATE_ATTRIBUTES.get(attr, attr.replace(\"_\", \" \").capitalize())\n\n changes.append(f\"**{attr}:** `{old}` **→** `{new}`\")\n\n # Set the embed icon and colour depending on which attribute changed.\n if any(name in attr for name in (\"Channel\", \"deaf\", \"mute\")):\n if new is None or new is True:\n # Left a channel or was muted/deafened.\n icon = Icons.voice_state_red\n colour = Colours.soft_red\n elif old is None or old is True:\n # Joined a channel or was unmuted/undeafened.\n icon = Icons.voice_state_green\n colour = Colours.soft_green\n\n if not changes:\n return\n\n message = \"\\n\".join(f\"{Emojis.bullet} {item}\" for item in sorted(changes))\n message = f\"**{member}** (`{member.id}`)\\n{message}\"\n\n await self.send_log_message(\n icon_url=icon,\n colour=colour,\n title=\"Voice state updated\",\n text=message,\n thumbnail=member.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.voice_log\n )\n", "path": "bot/cogs/moderation/modlog.py" } ]
diff --git a/bot/cogs/moderation/modlog.py b/bot/cogs/moderation/modlog.py
index 59ae6b587f..81d95298db 100644
--- a/bot/cogs/moderation/modlog.py
+++ b/bot/cogs/moderation/modlog.py
@@ -67,7 +67,7 @@ async def upload_log(
                         'embeds': [embed.to_dict() for embed in message.embeds],
                         'attachments': attachment,
                     }
-                    for message, attachment in zip_longest(messages, attachments)
+                    for message, attachment in zip_longest(messages, attachments, fillvalue=[])
                 ]
             }
         )
!clean command returns malformed API request

Since we added attachments to the deleted message logs, the clean command was not updated to store them. When running a clean command the following error is returned:

```
web_1 | Bad Request: /bot/deleted-messages
web_1 | "POST /bot/deleted-messages HTTP/1.1" 400 792
bot_1 | 2020-02-25 19:32:41,081 | bot.cogs.error_handler | DEBUG | API responded with 400 for command clean all: {'deletedmessage_set': [{'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}, {'attachments': ['This field may not be null.']}]}.
```

This is not critical since the clean command still operates; it just does not store deleted messages as intended.
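To illustrate why the one-line diff above resolves this issue: the clean command uploads deleted messages without collecting attachment lists, so `attachments` ends up shorter than `messages` and `zip_longest` pads the missing entries with `None`, which the `deletedmessage_set` serializer rejects. Below is a minimal standalone sketch (placeholder values, not taken from the bot's code) showing the effect of `fillvalue=[]`:

```python
from itertools import zip_longest

# Hypothetical inputs: three deleted messages, but attachment lists were
# only recorded for the first one (the clean command records none at all).
messages = ["message-1", "message-2", "message-3"]
attachments = [["cat.png"]]

# Default behaviour pads the shorter iterable with None, which the API
# rejects with "This field may not be null."
print(list(zip_longest(messages, attachments)))
# [('message-1', ['cat.png']), ('message-2', None), ('message-3', None)]

# With fillvalue=[], missing entries become empty attachment lists instead,
# which the API accepts.
print(list(zip_longest(messages, attachments, fillvalue=[])))
# [('message-1', ['cat.png']), ('message-2', []), ('message-3', [])]
```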
beetbox__beets-3905
[ { "content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Fetches, embeds, and displays lyrics.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport difflib\nimport errno\nimport itertools\nimport json\nimport struct\nimport os.path\nimport re\nimport requests\nimport unicodedata\nfrom unidecode import unidecode\nimport warnings\nimport six\nfrom six.moves import urllib\n\ntry:\n from bs4 import SoupStrainer, BeautifulSoup\n HAS_BEAUTIFUL_SOUP = True\nexcept ImportError:\n HAS_BEAUTIFUL_SOUP = False\n\ntry:\n import langdetect\n HAS_LANGDETECT = True\nexcept ImportError:\n HAS_LANGDETECT = False\n\ntry:\n # PY3: HTMLParseError was removed in 3.5 as strict mode\n # was deprecated in 3.3.\n # https://docs.python.org/3.3/library/html.parser.html\n from six.moves.html_parser import HTMLParseError\nexcept ImportError:\n class HTMLParseError(Exception):\n pass\n\nfrom beets import plugins\nfrom beets import ui\nimport beets\n\nDIV_RE = re.compile(r'<(/?)div>?', re.I)\nCOMMENT_RE = re.compile(r'<!--.*-->', re.S)\nTAG_RE = re.compile(r'<[^>]*>')\nBREAK_RE = re.compile(r'\\n?\\s*<br([\\s|/][^>]*)*>\\s*\\n?', re.I)\nURL_CHARACTERS = {\n u'\\u2018': u\"'\",\n u'\\u2019': u\"'\",\n u'\\u201c': u'\"',\n u'\\u201d': u'\"',\n u'\\u2010': u'-',\n u'\\u2011': u'-',\n u'\\u2012': u'-',\n u'\\u2013': u'-',\n u'\\u2014': u'-',\n u'\\u2015': u'-',\n u'\\u2016': u'-',\n u'\\u2026': u'...',\n}\nUSER_AGENT = 'beets/{}'.format(beets.__version__)\n\n# The content for the base index.rst generated in ReST mode.\nREST_INDEX_TEMPLATE = u'''Lyrics\n======\n\n* :ref:`Song index <genindex>`\n* :ref:`search`\n\nArtist index:\n\n.. 
toctree::\n :maxdepth: 1\n :glob:\n\n artists/*\n'''\n\n# The content for the base conf.py generated.\nREST_CONF_TEMPLATE = u'''# -*- coding: utf-8 -*-\nmaster_doc = 'index'\nproject = u'Lyrics'\ncopyright = u'none'\nauthor = u'Various Authors'\nlatex_documents = [\n (master_doc, 'Lyrics.tex', project,\n author, 'manual'),\n]\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\nepub_exclude_files = ['search.html']\nepub_tocdepth = 1\nepub_tocdup = False\n'''\n\n\n# Utilities.\n\ndef unichar(i):\n try:\n return six.unichr(i)\n except ValueError:\n return struct.pack('i', i).decode('utf-32')\n\n\ndef unescape(text):\n \"\"\"Resolve &#xxx; HTML entities (and some others).\"\"\"\n if isinstance(text, bytes):\n text = text.decode('utf-8', 'ignore')\n out = text.replace(u'&nbsp;', u' ')\n\n def replchar(m):\n num = m.group(1)\n return unichar(int(num))\n out = re.sub(u\"&#(\\\\d+);\", replchar, out)\n return out\n\n\ndef extract_text_between(html, start_marker, end_marker):\n try:\n _, html = html.split(start_marker, 1)\n html, _ = html.split(end_marker, 1)\n except ValueError:\n return u''\n return html\n\n\ndef search_pairs(item):\n \"\"\"Yield a pairs of artists and titles to search for.\n\n The first item in the pair is the name of the artist, the second\n item is a list of song names.\n\n In addition to the artist and title obtained from the `item` the\n method tries to strip extra information like paranthesized suffixes\n and featured artists from the strings and add them as candidates.\n The artist sort name is added as a fallback candidate to help in\n cases where artist name includes special characters or is in a\n non-latin script.\n The method also tries to split multiple titles separated with `/`.\n \"\"\"\n def generate_alternatives(string, patterns):\n \"\"\"Generate string alternatives by extracting first matching group for\n each given pattern.\n \"\"\"\n alternatives = [string]\n for pattern in patterns:\n match = re.search(pattern, string, re.IGNORECASE)\n if match:\n alternatives.append(match.group(1))\n return alternatives\n\n title, artist, artist_sort = item.title, item.artist, item.artist_sort\n\n patterns = [\n # Remove any featuring artists from the artists name\n r\"(.*?) {0}\".format(plugins.feat_tokens())]\n artists = generate_alternatives(artist, patterns)\n # Use the artist_sort as fallback only if it differs from artist to avoid\n # repeated remote requests with the same search terms\n if artist != artist_sort:\n artists.append(artist_sort)\n\n patterns = [\n # Remove a parenthesized suffix from a title string. Common\n # examples include (live), (remix), and (acoustic).\n r\"(.+?)\\s+[(].*[)]$\",\n # Remove any featuring artists from the title\n r\"(.*?) {0}\".format(plugins.feat_tokens(for_artist=False)),\n # Remove part of title after colon ':' for songs with subtitles\n r\"(.+?)\\s*:.*\"]\n titles = generate_alternatives(title, patterns)\n\n # Check for a dual song (e.g. Pink Floyd - Speak to Me / Breathe)\n # and each of them.\n multi_titles = []\n for title in titles:\n multi_titles.append([title])\n if '/' in title:\n multi_titles.append([x.strip() for x in title.split('/')])\n\n return itertools.product(artists, multi_titles)\n\n\ndef slug(text):\n \"\"\"Make a URL-safe, human-readable version of the given text\n\n This will do the following:\n\n 1. decode unicode characters into ASCII\n 2. shift everything to lowercase\n 3. strip whitespace\n 4. replace other non-word characters with dashes\n 5. 
strip extra dashes\n\n This somewhat duplicates the :func:`Google.slugify` function but\n slugify is not as generic as this one, which can be reused\n elsewhere.\n \"\"\"\n return re.sub(r'\\W+', '-', unidecode(text).lower().strip()).strip('-')\n\n\nclass Backend(object):\n def __init__(self, config, log):\n self._log = log\n\n @staticmethod\n def _encode(s):\n \"\"\"Encode the string for inclusion in a URL\"\"\"\n if isinstance(s, six.text_type):\n for char, repl in URL_CHARACTERS.items():\n s = s.replace(char, repl)\n s = s.encode('utf-8', 'ignore')\n return urllib.parse.quote(s)\n\n def build_url(self, artist, title):\n return self.URL_PATTERN % (self._encode(artist.title()),\n self._encode(title.title()))\n\n def fetch_url(self, url):\n \"\"\"Retrieve the content at a given URL, or return None if the source\n is unreachable.\n \"\"\"\n try:\n # Disable the InsecureRequestWarning that comes from using\n # `verify=false`.\n # https://github.com/kennethreitz/requests/issues/2214\n # We're not overly worried about the NSA MITMing our lyrics scraper\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n r = requests.get(url, verify=False, headers={\n 'User-Agent': USER_AGENT,\n })\n except requests.RequestException as exc:\n self._log.debug(u'lyrics request failed: {0}', exc)\n return\n if r.status_code == requests.codes.ok:\n return r.text\n else:\n self._log.debug(u'failed to fetch: {0} ({1})', url, r.status_code)\n\n def fetch(self, artist, title):\n raise NotImplementedError()\n\n\nclass MusiXmatch(Backend):\n REPLACEMENTS = {\n r'\\s+': '-',\n '<': 'Less_Than',\n '>': 'Greater_Than',\n '#': 'Number_',\n r'[\\[\\{]': '(',\n r'[\\]\\}]': ')',\n }\n\n URL_PATTERN = 'https://www.musixmatch.com/lyrics/%s/%s'\n\n @classmethod\n def _encode(cls, s):\n for old, new in cls.REPLACEMENTS.items():\n s = re.sub(old, new, s)\n\n return super(MusiXmatch, cls)._encode(s)\n\n def fetch(self, artist, title):\n url = self.build_url(artist, title)\n\n html = self.fetch_url(url)\n if not html:\n return\n if \"We detected that your IP is blocked\" in html:\n self._log.warning(u'we are blocked at MusixMatch: url %s failed'\n % url)\n return\n html_parts = html.split('<p class=\"mxm-lyrics__content')\n # Sometimes lyrics come in 2 or more parts\n lyrics_parts = []\n for html_part in html_parts:\n lyrics_parts.append(extract_text_between(html_part, '>', '</p>'))\n lyrics = '\\n'.join(lyrics_parts)\n lyrics = lyrics.strip(',\"').replace('\\\\n', '\\n')\n # another odd case: sometimes only that string remains, for\n # missing songs. 
this seems to happen after being blocked\n # above, when filling in the CAPTCHA.\n if \"Instant lyrics for all your music.\" in lyrics:\n return\n # sometimes there are non-existent lyrics with some content\n if 'Lyrics | Musixmatch' in lyrics:\n return\n return lyrics\n\n\nclass Genius(Backend):\n \"\"\"Fetch lyrics from Genius via genius-api.\n\n Simply adapted from\n bigishdata.com/2016/09/27/getting-song-lyrics-from-geniuss-api-scraping/\n \"\"\"\n\n base_url = \"https://api.genius.com\"\n\n def __init__(self, config, log):\n super(Genius, self).__init__(config, log)\n self.api_key = config['genius_api_key'].as_str()\n self.headers = {\n 'Authorization': \"Bearer %s\" % self.api_key,\n 'User-Agent': USER_AGENT,\n }\n\n def fetch(self, artist, title):\n \"\"\"Fetch lyrics from genius.com\n\n Because genius doesn't allow accesssing lyrics via the api,\n we first query the api for a url matching our artist & title,\n then attempt to scrape that url for the lyrics.\n \"\"\"\n json = self._search(artist, title)\n if not json:\n self._log.debug(u'Genius API request returned invalid JSON')\n return None\n\n # find a matching artist in the json\n for hit in json[\"response\"][\"hits\"]:\n hit_artist = hit[\"result\"][\"primary_artist\"][\"name\"]\n\n if slug(hit_artist) == slug(artist):\n return self._scrape_lyrics_from_html(\n self.fetch_url(hit[\"result\"][\"url\"]))\n\n self._log.debug(u'Genius failed to find a matching artist for \\'{0}\\'',\n artist)\n\n def _search(self, artist, title):\n \"\"\"Searches the genius api for a given artist and title\n\n https://docs.genius.com/#search-h2\n\n :returns: json response\n \"\"\"\n search_url = self.base_url + \"/search\"\n data = {'q': title + \" \" + artist.lower()}\n try:\n response = requests.get(\n search_url, data=data, headers=self.headers)\n except requests.RequestException as exc:\n self._log.debug(u'Genius API request failed: {0}', exc)\n return None\n\n try:\n return response.json()\n except ValueError:\n return None\n\n def _scrape_lyrics_from_html(self, html):\n \"\"\"Scrape lyrics from a given genius.com html\"\"\"\n\n html = BeautifulSoup(html, \"html.parser\")\n\n # Remove script tags that they put in the middle of the lyrics.\n [h.extract() for h in html('script')]\n\n # Most of the time, the page contains a div with class=\"lyrics\" where\n # all of the lyrics can be found already correctly formatted\n # Sometimes, though, it packages the lyrics into separate divs, most\n # likely for easier ad placement\n lyrics_div = html.find(\"div\", class_=\"lyrics\")\n if not lyrics_div:\n self._log.debug(u'Received unusual song page html')\n verse_div = html.find(\"div\",\n class_=re.compile(\"Lyrics__Container\"))\n if not verse_div:\n if html.find(\"div\",\n class_=re.compile(\"LyricsPlaceholder__Message\"),\n string=\"This song is an instrumental\"):\n self._log.debug('Detected instrumental')\n return \"[Instrumental]\"\n else:\n self._log.debug(\"Couldn't scrape page using known layouts\")\n return None\n\n lyrics_div = verse_div.parent\n for br in lyrics_div.find_all(\"br\"):\n br.replace_with(\"\\n\")\n ads = lyrics_div.find_all(\"div\",\n class_=re.compile(\"InreadAd__Container\"))\n for ad in ads:\n ad.replace_with(\"\\n\")\n\n return lyrics_div.get_text()\n\n\nclass Tekstowo(Backend):\n # Fetch lyrics from Tekstowo.pl.\n\n BASE_URL = 'http://www.tekstowo.pl'\n URL_PATTERN = BASE_URL + '/wyszukaj.html?search-title=%s&search-artist=%s'\n\n def fetch(self, artist, title):\n url = self.build_url(title, artist)\n search_results = 
self.fetch_url(url)\n song_page_url = self.parse_search_results(search_results)\n\n if not song_page_url:\n return None\n\n song_page_html = self.fetch_url(song_page_url)\n return self.extract_lyrics(song_page_html)\n\n def parse_search_results(self, html):\n if not HAS_BEAUTIFUL_SOUP:\n return None\n\n html = _scrape_strip_cruft(html)\n html = _scrape_merge_paragraphs(html)\n\n try:\n html = BeautifulSoup(html, \"html.parser\")\n except HTMLParseError:\n return None\n\n song_row = html.find(\"div\", class_=\"content\"). \\\n find_all(\"div\", class_=\"box-przeboje\")[0]\n\n if not song_row:\n return None\n\n href = song_row.find('a').get('href')\n return self.BASE_URL + href\n\n def extract_lyrics(self, html):\n html = _scrape_strip_cruft(html)\n html = _scrape_merge_paragraphs(html)\n\n try:\n html = BeautifulSoup(html, \"html.parser\")\n except HTMLParseError:\n return None\n\n return html.find(\"div\", class_=\"song-text\").get_text()\n\n\ndef remove_credits(text):\n \"\"\"Remove first/last line of text if it contains the word 'lyrics'\n eg 'Lyrics by songsdatabase.com'\n \"\"\"\n textlines = text.split('\\n')\n credits = None\n for i in (0, -1):\n if textlines and 'lyrics' in textlines[i].lower():\n credits = textlines.pop(i)\n if credits:\n text = '\\n'.join(textlines)\n return text\n\n\ndef _scrape_strip_cruft(html, plain_text_out=False):\n \"\"\"Clean up HTML\n \"\"\"\n html = unescape(html)\n\n html = html.replace('\\r', '\\n') # Normalize EOL.\n html = re.sub(r' +', ' ', html) # Whitespaces collapse.\n html = BREAK_RE.sub('\\n', html) # <br> eats up surrounding '\\n'.\n html = re.sub(r'(?s)<(script).*?</\\1>', '', html) # Strip script tags.\n html = re.sub(u'\\u2005', \" \", html) # replace unicode with regular space\n\n if plain_text_out: # Strip remaining HTML tags\n html = COMMENT_RE.sub('', html)\n html = TAG_RE.sub('', html)\n\n html = '\\n'.join([x.strip() for x in html.strip().split('\\n')])\n html = re.sub(r'\\n{3,}', r'\\n\\n', html)\n return html\n\n\ndef _scrape_merge_paragraphs(html):\n html = re.sub(r'</p>\\s*<p(\\s*[^>]*)>', '\\n', html)\n return re.sub(r'<div .*>\\s*</div>', '\\n', html)\n\n\ndef scrape_lyrics_from_html(html):\n \"\"\"Scrape lyrics from a URL. 
If no lyrics can be found, return None\n instead.\n \"\"\"\n if not HAS_BEAUTIFUL_SOUP:\n return None\n\n if not html:\n return None\n\n def is_text_notcode(text):\n length = len(text)\n return (length > 20 and\n text.count(' ') > length / 25 and\n (text.find('{') == -1 or text.find(';') == -1))\n html = _scrape_strip_cruft(html)\n html = _scrape_merge_paragraphs(html)\n\n # extract all long text blocks that are not code\n try:\n soup = BeautifulSoup(html, \"html.parser\",\n parse_only=SoupStrainer(text=is_text_notcode))\n except HTMLParseError:\n return None\n\n # Get the longest text element (if any).\n strings = sorted(soup.stripped_strings, key=len, reverse=True)\n if strings:\n return strings[0]\n else:\n return None\n\n\nclass Google(Backend):\n \"\"\"Fetch lyrics from Google search results.\"\"\"\n\n def __init__(self, config, log):\n super(Google, self).__init__(config, log)\n self.api_key = config['google_API_key'].as_str()\n self.engine_id = config['google_engine_ID'].as_str()\n\n def is_lyrics(self, text, artist=None):\n \"\"\"Determine whether the text seems to be valid lyrics.\n \"\"\"\n if not text:\n return False\n bad_triggers_occ = []\n nb_lines = text.count('\\n')\n if nb_lines <= 1:\n self._log.debug(u\"Ignoring too short lyrics '{0}'\", text)\n return False\n elif nb_lines < 5:\n bad_triggers_occ.append('too_short')\n else:\n # Lyrics look legit, remove credits to avoid being penalized\n # further down\n text = remove_credits(text)\n\n bad_triggers = ['lyrics', 'copyright', 'property', 'links']\n if artist:\n bad_triggers += [artist]\n\n for item in bad_triggers:\n bad_triggers_occ += [item] * len(re.findall(r'\\W%s\\W' % item,\n text, re.I))\n\n if bad_triggers_occ:\n self._log.debug(u'Bad triggers detected: {0}', bad_triggers_occ)\n return len(bad_triggers_occ) < 2\n\n def slugify(self, text):\n \"\"\"Normalize a string and remove non-alphanumeric characters.\n \"\"\"\n text = re.sub(r\"[-'_\\s]\", '_', text)\n text = re.sub(r\"_+\", '_', text).strip('_')\n pat = r\"([^,\\(]*)\\((.*?)\\)\" # Remove content within parentheses\n text = re.sub(pat, r'\\g<1>', text).strip()\n try:\n text = unicodedata.normalize('NFKD', text).encode('ascii',\n 'ignore')\n text = six.text_type(re.sub(r'[-\\s]+', ' ', text.decode('utf-8')))\n except UnicodeDecodeError:\n self._log.exception(u\"Failing to normalize '{0}'\", text)\n return text\n\n BY_TRANS = ['by', 'par', 'de', 'von']\n LYRICS_TRANS = ['lyrics', 'paroles', 'letras', 'liedtexte']\n\n def is_page_candidate(self, url_link, url_title, title, artist):\n \"\"\"Return True if the URL title makes it a good candidate to be a\n page that contains lyrics of title by artist.\n \"\"\"\n title = self.slugify(title.lower())\n artist = self.slugify(artist.lower())\n sitename = re.search(u\"//([^/]+)/.*\",\n self.slugify(url_link.lower())).group(1)\n url_title = self.slugify(url_title.lower())\n\n # Check if URL title contains song title (exact match)\n if url_title.find(title) != -1:\n return True\n\n # or try extracting song title from URL title and check if\n # they are close enough\n tokens = [by + '_' + artist for by in self.BY_TRANS] + \\\n [artist, sitename, sitename.replace('www.', '')] + \\\n self.LYRICS_TRANS\n tokens = [re.escape(t) for t in tokens]\n song_title = re.sub(u'(%s)' % u'|'.join(tokens), u'', url_title)\n\n song_title = song_title.strip('_|')\n typo_ratio = .9\n ratio = difflib.SequenceMatcher(None, song_title, title).ratio()\n return ratio >= typo_ratio\n\n def fetch(self, artist, title):\n query = u\"%s %s\" % 
(artist, title)\n url = u'https://www.googleapis.com/customsearch/v1?key=%s&cx=%s&q=%s' \\\n % (self.api_key, self.engine_id,\n urllib.parse.quote(query.encode('utf-8')))\n\n data = self.fetch_url(url)\n if not data:\n self._log.debug(u'google backend returned no data')\n return None\n try:\n data = json.loads(data)\n except ValueError as exc:\n self._log.debug(u'google backend returned malformed JSON: {}', exc)\n if 'error' in data:\n reason = data['error']['errors'][0]['reason']\n self._log.debug(u'google backend error: {0}', reason)\n return None\n\n if 'items' in data.keys():\n for item in data['items']:\n url_link = item['link']\n url_title = item.get('title', u'')\n if not self.is_page_candidate(url_link, url_title,\n title, artist):\n continue\n html = self.fetch_url(url_link)\n lyrics = scrape_lyrics_from_html(html)\n if not lyrics:\n continue\n\n if self.is_lyrics(lyrics, artist):\n self._log.debug(u'got lyrics from {0}',\n item['displayLink'])\n return lyrics\n\n\nclass LyricsPlugin(plugins.BeetsPlugin):\n SOURCES = ['google', 'musixmatch', 'genius', 'tekstowo']\n BS_SOURCES = ['google', 'genius', 'tekstowo']\n SOURCE_BACKENDS = {\n 'google': Google,\n 'musixmatch': MusiXmatch,\n 'genius': Genius,\n 'tekstowo': Tekstowo,\n }\n\n def __init__(self):\n super(LyricsPlugin, self).__init__()\n self.import_stages = [self.imported]\n self.config.add({\n 'auto': True,\n 'bing_client_secret': None,\n 'bing_lang_from': [],\n 'bing_lang_to': None,\n 'google_API_key': None,\n 'google_engine_ID': u'009217259823014548361:lndtuqkycfu',\n 'genius_api_key':\n \"Ryq93pUGm8bM6eUWwD_M3NOFFDAtp2yEE7W\"\n \"76V-uFL5jks5dNvcGCdarqFjDhP9c\",\n 'fallback': None,\n 'force': False,\n 'local': False,\n 'sources': self.SOURCES,\n })\n self.config['bing_client_secret'].redact = True\n self.config['google_API_key'].redact = True\n self.config['google_engine_ID'].redact = True\n self.config['genius_api_key'].redact = True\n\n # State information for the ReST writer.\n # First, the current artist we're writing.\n self.artist = u'Unknown artist'\n # The current album: False means no album yet.\n self.album = False\n # The current rest file content. None means the file is not\n # open yet.\n self.rest = None\n\n available_sources = list(self.SOURCES)\n sources = plugins.sanitize_choices(\n self.config['sources'].as_str_seq(), available_sources)\n\n if not HAS_BEAUTIFUL_SOUP:\n sources = self.sanitize_bs_sources(sources)\n\n if 'google' in sources:\n if not self.config['google_API_key'].get():\n # We log a *debug* message here because the default\n # configuration includes `google`. This way, the source\n # is silent by default but can be enabled just by\n # setting an API key.\n self._log.debug(u'Disabling google source: '\n u'no API key configured.')\n sources.remove('google')\n\n self.config['bing_lang_from'] = [\n x.lower() for x in self.config['bing_lang_from'].as_str_seq()]\n self.bing_auth_token = None\n\n if not HAS_LANGDETECT and self.config['bing_client_secret'].get():\n self._log.warning(u'To use bing translations, you need to '\n u'install the langdetect module. See the '\n u'documentation for further details.')\n\n self.backends = [self.SOURCE_BACKENDS[source](self.config, self._log)\n for source in sources]\n\n def sanitize_bs_sources(self, sources):\n for source in self.BS_SOURCES:\n if source in sources:\n self._log.debug(u'To use the %s lyrics source, you must '\n u'install the beautifulsoup4 module. 
See '\n u'the documentation for further details.'\n % source)\n sources.remove(source)\n\n return sources\n\n def get_bing_access_token(self):\n params = {\n 'client_id': 'beets',\n 'client_secret': self.config['bing_client_secret'],\n 'scope': \"https://api.microsofttranslator.com\",\n 'grant_type': 'client_credentials',\n }\n\n oauth_url = 'https://datamarket.accesscontrol.windows.net/v2/OAuth2-13'\n oauth_token = json.loads(requests.post(\n oauth_url,\n data=urllib.parse.urlencode(params)).content)\n if 'access_token' in oauth_token:\n return \"Bearer \" + oauth_token['access_token']\n else:\n self._log.warning(u'Could not get Bing Translate API access token.'\n u' Check your \"bing_client_secret\" password')\n\n def commands(self):\n cmd = ui.Subcommand('lyrics', help='fetch song lyrics')\n cmd.parser.add_option(\n u'-p', u'--print', dest='printlyr',\n action='store_true', default=False,\n help=u'print lyrics to console',\n )\n cmd.parser.add_option(\n u'-r', u'--write-rest', dest='writerest',\n action='store', default=None, metavar='dir',\n help=u'write lyrics to given directory as ReST files',\n )\n cmd.parser.add_option(\n u'-f', u'--force', dest='force_refetch',\n action='store_true', default=False,\n help=u'always re-download lyrics',\n )\n cmd.parser.add_option(\n u'-l', u'--local', dest='local_only',\n action='store_true', default=False,\n help=u'do not fetch missing lyrics',\n )\n\n def func(lib, opts, args):\n # The \"write to files\" option corresponds to the\n # import_write config value.\n write = ui.should_write()\n if opts.writerest:\n self.writerest_indexes(opts.writerest)\n items = lib.items(ui.decargs(args))\n for item in items:\n if not opts.local_only and not self.config['local']:\n self.fetch_item_lyrics(\n lib, item, write,\n opts.force_refetch or self.config['force'],\n )\n if item.lyrics:\n if opts.printlyr:\n ui.print_(item.lyrics)\n if opts.writerest:\n self.appendrest(opts.writerest, item)\n if opts.writerest and items:\n # flush last artist & write to ReST\n self.writerest(opts.writerest)\n ui.print_(u'ReST files generated. to build, use one of:')\n ui.print_(u' sphinx-build -b html %s _build/html'\n % opts.writerest)\n ui.print_(u' sphinx-build -b epub %s _build/epub'\n % opts.writerest)\n ui.print_((u' sphinx-build -b latex %s _build/latex '\n u'&& make -C _build/latex all-pdf')\n % opts.writerest)\n cmd.func = func\n return [cmd]\n\n def appendrest(self, directory, item):\n \"\"\"Append the item to an ReST file\n\n This will keep state (in the `rest` variable) in order to avoid\n writing continuously to the same files.\n \"\"\"\n\n if slug(self.artist) != slug(item.albumartist):\n # Write current file and start a new one ~ item.albumartist\n self.writerest(directory)\n self.artist = item.albumartist.strip()\n self.rest = u\"%s\\n%s\\n\\n.. 
contents::\\n :local:\\n\\n\" \\\n % (self.artist,\n u'=' * len(self.artist))\n\n if self.album != item.album:\n tmpalbum = self.album = item.album.strip()\n if self.album == '':\n tmpalbum = u'Unknown album'\n self.rest += u\"%s\\n%s\\n\\n\" % (tmpalbum, u'-' * len(tmpalbum))\n title_str = u\":index:`%s`\" % item.title.strip()\n block = u'| ' + item.lyrics.replace(u'\\n', u'\\n| ')\n self.rest += u\"%s\\n%s\\n\\n%s\\n\\n\" % (title_str,\n u'~' * len(title_str),\n block)\n\n def writerest(self, directory):\n \"\"\"Write self.rest to a ReST file\n \"\"\"\n if self.rest is not None and self.artist is not None:\n path = os.path.join(directory, 'artists',\n slug(self.artist) + u'.rst')\n with open(path, 'wb') as output:\n output.write(self.rest.encode('utf-8'))\n\n def writerest_indexes(self, directory):\n \"\"\"Write conf.py and index.rst files necessary for Sphinx\n\n We write minimal configurations that are necessary for Sphinx\n to operate. We do not overwrite existing files so that\n customizations are respected.\"\"\"\n try:\n os.makedirs(os.path.join(directory, 'artists'))\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n indexfile = os.path.join(directory, 'index.rst')\n if not os.path.exists(indexfile):\n with open(indexfile, 'w') as output:\n output.write(REST_INDEX_TEMPLATE)\n conffile = os.path.join(directory, 'conf.py')\n if not os.path.exists(conffile):\n with open(conffile, 'w') as output:\n output.write(REST_CONF_TEMPLATE)\n\n def imported(self, session, task):\n \"\"\"Import hook for fetching lyrics automatically.\n \"\"\"\n if self.config['auto']:\n for item in task.imported_items():\n self.fetch_item_lyrics(session.lib, item,\n False, self.config['force'])\n\n def fetch_item_lyrics(self, lib, item, write, force):\n \"\"\"Fetch and store lyrics for a single item. If ``write``, then the\n lyrics will also be written to the file itself.\n \"\"\"\n # Skip if the item already has lyrics.\n if not force and item.lyrics:\n self._log.info(u'lyrics already present: {0}', item)\n return\n\n lyrics = None\n for artist, titles in search_pairs(item):\n lyrics = [self.get_lyrics(artist, title) for title in titles]\n if any(lyrics):\n break\n\n lyrics = u\"\\n\\n---\\n\\n\".join([l for l in lyrics if l])\n\n if lyrics:\n self._log.info(u'fetched lyrics: {0}', item)\n if HAS_LANGDETECT and self.config['bing_client_secret'].get():\n lang_from = langdetect.detect(lyrics)\n if self.config['bing_lang_to'].get() != lang_from and (\n not self.config['bing_lang_from'] or (\n lang_from in self.config[\n 'bing_lang_from'].as_str_seq())):\n lyrics = self.append_translation(\n lyrics, self.config['bing_lang_to'])\n else:\n self._log.info(u'lyrics not found: {0}', item)\n fallback = self.config['fallback'].get()\n if fallback:\n lyrics = fallback\n else:\n return\n item.lyrics = lyrics\n if write:\n item.try_write()\n item.store()\n\n def get_lyrics(self, artist, title):\n \"\"\"Fetch lyrics, trying each source in turn. 
Return a string or\n None if no lyrics were found.\n \"\"\"\n for backend in self.backends:\n lyrics = backend.fetch(artist, title)\n if lyrics:\n self._log.debug(u'got lyrics from backend: {0}',\n backend.__class__.__name__)\n return _scrape_strip_cruft(lyrics, True)\n\n def append_translation(self, text, to_lang):\n from xml.etree import ElementTree\n\n if not self.bing_auth_token:\n self.bing_auth_token = self.get_bing_access_token()\n if self.bing_auth_token:\n # Extract unique lines to limit API request size per song\n text_lines = set(text.split('\\n'))\n url = ('https://api.microsofttranslator.com/v2/Http.svc/'\n 'Translate?text=%s&to=%s' % ('|'.join(text_lines), to_lang))\n r = requests.get(url,\n headers={\"Authorization \": self.bing_auth_token})\n if r.status_code != 200:\n self._log.debug('translation API error {}: {}', r.status_code,\n r.text)\n if 'token has expired' in r.text:\n self.bing_auth_token = None\n return self.append_translation(text, to_lang)\n return text\n lines_translated = ElementTree.fromstring(\n r.text.encode('utf-8')).text\n # Use a translation mapping dict to build resulting lyrics\n translations = dict(zip(text_lines, lines_translated.split('|')))\n result = ''\n for line in text.split('\\n'):\n result += '%s / %s\\n' % (line, translations[line])\n return result\n", "path": "beetsplug/lyrics.py" } ]
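Before the patched copy of the file that follows, a brief aside on the design the plugin uses: every lyric source implements the same `fetch(artist, title)` interface, and `get_lyrics` simply walks the configured backends in order, keeping the first non-empty answer. The sketch below is a minimal, self-contained restatement of that iteration pattern; the `DummyGenius`/`DummyMusiXmatch` classes are invented stand-ins for illustration, not beets code.

```python
# Minimal sketch of the source-iteration pattern behind LyricsPlugin.get_lyrics:
# try each configured backend in order and keep the first non-empty result.
# DummyGenius/DummyMusiXmatch are invented stand-ins, not real beets classes.

class DummyGenius(object):
    def fetch(self, artist, title):
        return None  # pretend this source found nothing


class DummyMusiXmatch(object):
    def fetch(self, artist, title):
        return u"First verse...\nSecond verse..."


def get_lyrics(backends, artist, title):
    for backend in backends:
        lyrics = backend.fetch(artist, title)
        if lyrics:
            # first hit wins; later sources are never queried
            return lyrics
    return None  # every source came up empty


print(get_lyrics([DummyGenius(), DummyMusiXmatch()], u"Pink Floyd", u"Breathe"))
```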
[ { "content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Fetches, embeds, and displays lyrics.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport difflib\nimport errno\nimport itertools\nimport json\nimport struct\nimport os.path\nimport re\nimport requests\nimport unicodedata\nfrom unidecode import unidecode\nimport warnings\nimport six\nfrom six.moves import urllib\n\ntry:\n from bs4 import SoupStrainer, BeautifulSoup\n HAS_BEAUTIFUL_SOUP = True\nexcept ImportError:\n HAS_BEAUTIFUL_SOUP = False\n\ntry:\n import langdetect\n HAS_LANGDETECT = True\nexcept ImportError:\n HAS_LANGDETECT = False\n\ntry:\n # PY3: HTMLParseError was removed in 3.5 as strict mode\n # was deprecated in 3.3.\n # https://docs.python.org/3.3/library/html.parser.html\n from six.moves.html_parser import HTMLParseError\nexcept ImportError:\n class HTMLParseError(Exception):\n pass\n\nfrom beets import plugins\nfrom beets import ui\nimport beets\n\nDIV_RE = re.compile(r'<(/?)div>?', re.I)\nCOMMENT_RE = re.compile(r'<!--.*-->', re.S)\nTAG_RE = re.compile(r'<[^>]*>')\nBREAK_RE = re.compile(r'\\n?\\s*<br([\\s|/][^>]*)*>\\s*\\n?', re.I)\nURL_CHARACTERS = {\n u'\\u2018': u\"'\",\n u'\\u2019': u\"'\",\n u'\\u201c': u'\"',\n u'\\u201d': u'\"',\n u'\\u2010': u'-',\n u'\\u2011': u'-',\n u'\\u2012': u'-',\n u'\\u2013': u'-',\n u'\\u2014': u'-',\n u'\\u2015': u'-',\n u'\\u2016': u'-',\n u'\\u2026': u'...',\n}\nUSER_AGENT = 'beets/{}'.format(beets.__version__)\n\n# The content for the base index.rst generated in ReST mode.\nREST_INDEX_TEMPLATE = u'''Lyrics\n======\n\n* :ref:`Song index <genindex>`\n* :ref:`search`\n\nArtist index:\n\n.. 
toctree::\n :maxdepth: 1\n :glob:\n\n artists/*\n'''\n\n# The content for the base conf.py generated.\nREST_CONF_TEMPLATE = u'''# -*- coding: utf-8 -*-\nmaster_doc = 'index'\nproject = u'Lyrics'\ncopyright = u'none'\nauthor = u'Various Authors'\nlatex_documents = [\n (master_doc, 'Lyrics.tex', project,\n author, 'manual'),\n]\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\nepub_exclude_files = ['search.html']\nepub_tocdepth = 1\nepub_tocdup = False\n'''\n\n\n# Utilities.\n\ndef unichar(i):\n try:\n return six.unichr(i)\n except ValueError:\n return struct.pack('i', i).decode('utf-32')\n\n\ndef unescape(text):\n \"\"\"Resolve &#xxx; HTML entities (and some others).\"\"\"\n if isinstance(text, bytes):\n text = text.decode('utf-8', 'ignore')\n out = text.replace(u'&nbsp;', u' ')\n\n def replchar(m):\n num = m.group(1)\n return unichar(int(num))\n out = re.sub(u\"&#(\\\\d+);\", replchar, out)\n return out\n\n\ndef extract_text_between(html, start_marker, end_marker):\n try:\n _, html = html.split(start_marker, 1)\n html, _ = html.split(end_marker, 1)\n except ValueError:\n return u''\n return html\n\n\ndef search_pairs(item):\n \"\"\"Yield a pairs of artists and titles to search for.\n\n The first item in the pair is the name of the artist, the second\n item is a list of song names.\n\n In addition to the artist and title obtained from the `item` the\n method tries to strip extra information like paranthesized suffixes\n and featured artists from the strings and add them as candidates.\n The artist sort name is added as a fallback candidate to help in\n cases where artist name includes special characters or is in a\n non-latin script.\n The method also tries to split multiple titles separated with `/`.\n \"\"\"\n def generate_alternatives(string, patterns):\n \"\"\"Generate string alternatives by extracting first matching group for\n each given pattern.\n \"\"\"\n alternatives = [string]\n for pattern in patterns:\n match = re.search(pattern, string, re.IGNORECASE)\n if match:\n alternatives.append(match.group(1))\n return alternatives\n\n title, artist, artist_sort = item.title, item.artist, item.artist_sort\n\n patterns = [\n # Remove any featuring artists from the artists name\n r\"(.*?) {0}\".format(plugins.feat_tokens())]\n artists = generate_alternatives(artist, patterns)\n # Use the artist_sort as fallback only if it differs from artist to avoid\n # repeated remote requests with the same search terms\n if artist != artist_sort:\n artists.append(artist_sort)\n\n patterns = [\n # Remove a parenthesized suffix from a title string. Common\n # examples include (live), (remix), and (acoustic).\n r\"(.+?)\\s+[(].*[)]$\",\n # Remove any featuring artists from the title\n r\"(.*?) {0}\".format(plugins.feat_tokens(for_artist=False)),\n # Remove part of title after colon ':' for songs with subtitles\n r\"(.+?)\\s*:.*\"]\n titles = generate_alternatives(title, patterns)\n\n # Check for a dual song (e.g. Pink Floyd - Speak to Me / Breathe)\n # and each of them.\n multi_titles = []\n for title in titles:\n multi_titles.append([title])\n if '/' in title:\n multi_titles.append([x.strip() for x in title.split('/')])\n\n return itertools.product(artists, multi_titles)\n\n\ndef slug(text):\n \"\"\"Make a URL-safe, human-readable version of the given text\n\n This will do the following:\n\n 1. decode unicode characters into ASCII\n 2. shift everything to lowercase\n 3. strip whitespace\n 4. replace other non-word characters with dashes\n 5. 
strip extra dashes\n\n This somewhat duplicates the :func:`Google.slugify` function but\n slugify is not as generic as this one, which can be reused\n elsewhere.\n \"\"\"\n return re.sub(r'\\W+', '-', unidecode(text).lower().strip()).strip('-')\n\n\nclass Backend(object):\n def __init__(self, config, log):\n self._log = log\n\n @staticmethod\n def _encode(s):\n \"\"\"Encode the string for inclusion in a URL\"\"\"\n if isinstance(s, six.text_type):\n for char, repl in URL_CHARACTERS.items():\n s = s.replace(char, repl)\n s = s.encode('utf-8', 'ignore')\n return urllib.parse.quote(s)\n\n def build_url(self, artist, title):\n return self.URL_PATTERN % (self._encode(artist.title()),\n self._encode(title.title()))\n\n def fetch_url(self, url):\n \"\"\"Retrieve the content at a given URL, or return None if the source\n is unreachable.\n \"\"\"\n try:\n # Disable the InsecureRequestWarning that comes from using\n # `verify=false`.\n # https://github.com/kennethreitz/requests/issues/2214\n # We're not overly worried about the NSA MITMing our lyrics scraper\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n r = requests.get(url, verify=False, headers={\n 'User-Agent': USER_AGENT,\n })\n except requests.RequestException as exc:\n self._log.debug(u'lyrics request failed: {0}', exc)\n return\n if r.status_code == requests.codes.ok:\n return r.text\n else:\n self._log.debug(u'failed to fetch: {0} ({1})', url, r.status_code)\n\n def fetch(self, artist, title):\n raise NotImplementedError()\n\n\nclass MusiXmatch(Backend):\n REPLACEMENTS = {\n r'\\s+': '-',\n '<': 'Less_Than',\n '>': 'Greater_Than',\n '#': 'Number_',\n r'[\\[\\{]': '(',\n r'[\\]\\}]': ')',\n }\n\n URL_PATTERN = 'https://www.musixmatch.com/lyrics/%s/%s'\n\n @classmethod\n def _encode(cls, s):\n for old, new in cls.REPLACEMENTS.items():\n s = re.sub(old, new, s)\n\n return super(MusiXmatch, cls)._encode(s)\n\n def fetch(self, artist, title):\n url = self.build_url(artist, title)\n\n html = self.fetch_url(url)\n if not html:\n return\n if \"We detected that your IP is blocked\" in html:\n self._log.warning(u'we are blocked at MusixMatch: url %s failed'\n % url)\n return\n html_parts = html.split('<p class=\"mxm-lyrics__content')\n # Sometimes lyrics come in 2 or more parts\n lyrics_parts = []\n for html_part in html_parts:\n lyrics_parts.append(extract_text_between(html_part, '>', '</p>'))\n lyrics = '\\n'.join(lyrics_parts)\n lyrics = lyrics.strip(',\"').replace('\\\\n', '\\n')\n # another odd case: sometimes only that string remains, for\n # missing songs. 
this seems to happen after being blocked\n # above, when filling in the CAPTCHA.\n if \"Instant lyrics for all your music.\" in lyrics:\n return\n # sometimes there are non-existent lyrics with some content\n if 'Lyrics | Musixmatch' in lyrics:\n return\n return lyrics\n\n\nclass Genius(Backend):\n \"\"\"Fetch lyrics from Genius via genius-api.\n\n Simply adapted from\n bigishdata.com/2016/09/27/getting-song-lyrics-from-geniuss-api-scraping/\n \"\"\"\n\n base_url = \"https://api.genius.com\"\n\n def __init__(self, config, log):\n super(Genius, self).__init__(config, log)\n self.api_key = config['genius_api_key'].as_str()\n self.headers = {\n 'Authorization': \"Bearer %s\" % self.api_key,\n 'User-Agent': USER_AGENT,\n }\n\n def fetch(self, artist, title):\n \"\"\"Fetch lyrics from genius.com\n\n Because genius doesn't allow accesssing lyrics via the api,\n we first query the api for a url matching our artist & title,\n then attempt to scrape that url for the lyrics.\n \"\"\"\n json = self._search(artist, title)\n if not json:\n self._log.debug(u'Genius API request returned invalid JSON')\n return None\n\n # find a matching artist in the json\n for hit in json[\"response\"][\"hits\"]:\n hit_artist = hit[\"result\"][\"primary_artist\"][\"name\"]\n\n if slug(hit_artist) == slug(artist):\n return self._scrape_lyrics_from_html(\n self.fetch_url(hit[\"result\"][\"url\"]))\n\n self._log.debug(u'Genius failed to find a matching artist for \\'{0}\\'',\n artist)\n\n def _search(self, artist, title):\n \"\"\"Searches the genius api for a given artist and title\n\n https://docs.genius.com/#search-h2\n\n :returns: json response\n \"\"\"\n search_url = self.base_url + \"/search\"\n data = {'q': title + \" \" + artist.lower()}\n try:\n response = requests.get(\n search_url, data=data, headers=self.headers)\n except requests.RequestException as exc:\n self._log.debug(u'Genius API request failed: {0}', exc)\n return None\n\n try:\n return response.json()\n except ValueError:\n return None\n\n def _scrape_lyrics_from_html(self, html):\n \"\"\"Scrape lyrics from a given genius.com html\"\"\"\n\n html = BeautifulSoup(html, \"html.parser\")\n\n # Remove script tags that they put in the middle of the lyrics.\n [h.extract() for h in html('script')]\n\n # Most of the time, the page contains a div with class=\"lyrics\" where\n # all of the lyrics can be found already correctly formatted\n # Sometimes, though, it packages the lyrics into separate divs, most\n # likely for easier ad placement\n lyrics_div = html.find(\"div\", class_=\"lyrics\")\n if not lyrics_div:\n self._log.debug(u'Received unusual song page html')\n verse_div = html.find(\"div\",\n class_=re.compile(\"Lyrics__Container\"))\n if not verse_div:\n if html.find(\"div\",\n class_=re.compile(\"LyricsPlaceholder__Message\"),\n string=\"This song is an instrumental\"):\n self._log.debug('Detected instrumental')\n return \"[Instrumental]\"\n else:\n self._log.debug(\"Couldn't scrape page using known layouts\")\n return None\n\n lyrics_div = verse_div.parent\n for br in lyrics_div.find_all(\"br\"):\n br.replace_with(\"\\n\")\n ads = lyrics_div.find_all(\"div\",\n class_=re.compile(\"InreadAd__Container\"))\n for ad in ads:\n ad.replace_with(\"\\n\")\n\n return lyrics_div.get_text()\n\n\nclass Tekstowo(Backend):\n # Fetch lyrics from Tekstowo.pl.\n\n BASE_URL = 'http://www.tekstowo.pl'\n URL_PATTERN = BASE_URL + '/wyszukaj.html?search-title=%s&search-artist=%s'\n\n def fetch(self, artist, title):\n url = self.build_url(title, artist)\n search_results = 
self.fetch_url(url)\n song_page_url = self.parse_search_results(search_results)\n\n if not song_page_url:\n return None\n\n song_page_html = self.fetch_url(song_page_url)\n return self.extract_lyrics(song_page_html)\n\n def parse_search_results(self, html):\n if not HAS_BEAUTIFUL_SOUP:\n return None\n\n html = _scrape_strip_cruft(html)\n html = _scrape_merge_paragraphs(html)\n\n try:\n html = BeautifulSoup(html, \"html.parser\")\n except HTMLParseError:\n return None\n\n song_rows = html.find(\"div\", class_=\"content\"). \\\n find(\"div\", class_=\"card\"). \\\n find_all(\"div\", class_=\"box-przeboje\")\n\n if not song_rows:\n return None\n\n song_row = song_rows[0]\n\n if not song_row:\n return None\n\n href = song_row.find('a').get('href')\n return self.BASE_URL + href\n\n def extract_lyrics(self, html):\n html = _scrape_strip_cruft(html)\n html = _scrape_merge_paragraphs(html)\n\n try:\n html = BeautifulSoup(html, \"html.parser\")\n except HTMLParseError:\n return None\n\n return html.find(\"div\", class_=\"song-text\").get_text()\n\n\ndef remove_credits(text):\n \"\"\"Remove first/last line of text if it contains the word 'lyrics'\n eg 'Lyrics by songsdatabase.com'\n \"\"\"\n textlines = text.split('\\n')\n credits = None\n for i in (0, -1):\n if textlines and 'lyrics' in textlines[i].lower():\n credits = textlines.pop(i)\n if credits:\n text = '\\n'.join(textlines)\n return text\n\n\ndef _scrape_strip_cruft(html, plain_text_out=False):\n \"\"\"Clean up HTML\n \"\"\"\n html = unescape(html)\n\n html = html.replace('\\r', '\\n') # Normalize EOL.\n html = re.sub(r' +', ' ', html) # Whitespaces collapse.\n html = BREAK_RE.sub('\\n', html) # <br> eats up surrounding '\\n'.\n html = re.sub(r'(?s)<(script).*?</\\1>', '', html) # Strip script tags.\n html = re.sub(u'\\u2005', \" \", html) # replace unicode with regular space\n\n if plain_text_out: # Strip remaining HTML tags\n html = COMMENT_RE.sub('', html)\n html = TAG_RE.sub('', html)\n\n html = '\\n'.join([x.strip() for x in html.strip().split('\\n')])\n html = re.sub(r'\\n{3,}', r'\\n\\n', html)\n return html\n\n\ndef _scrape_merge_paragraphs(html):\n html = re.sub(r'</p>\\s*<p(\\s*[^>]*)>', '\\n', html)\n return re.sub(r'<div .*>\\s*</div>', '\\n', html)\n\n\ndef scrape_lyrics_from_html(html):\n \"\"\"Scrape lyrics from a URL. 
If no lyrics can be found, return None\n instead.\n \"\"\"\n if not HAS_BEAUTIFUL_SOUP:\n return None\n\n if not html:\n return None\n\n def is_text_notcode(text):\n length = len(text)\n return (length > 20 and\n text.count(' ') > length / 25 and\n (text.find('{') == -1 or text.find(';') == -1))\n html = _scrape_strip_cruft(html)\n html = _scrape_merge_paragraphs(html)\n\n # extract all long text blocks that are not code\n try:\n soup = BeautifulSoup(html, \"html.parser\",\n parse_only=SoupStrainer(text=is_text_notcode))\n except HTMLParseError:\n return None\n\n # Get the longest text element (if any).\n strings = sorted(soup.stripped_strings, key=len, reverse=True)\n if strings:\n return strings[0]\n else:\n return None\n\n\nclass Google(Backend):\n \"\"\"Fetch lyrics from Google search results.\"\"\"\n\n def __init__(self, config, log):\n super(Google, self).__init__(config, log)\n self.api_key = config['google_API_key'].as_str()\n self.engine_id = config['google_engine_ID'].as_str()\n\n def is_lyrics(self, text, artist=None):\n \"\"\"Determine whether the text seems to be valid lyrics.\n \"\"\"\n if not text:\n return False\n bad_triggers_occ = []\n nb_lines = text.count('\\n')\n if nb_lines <= 1:\n self._log.debug(u\"Ignoring too short lyrics '{0}'\", text)\n return False\n elif nb_lines < 5:\n bad_triggers_occ.append('too_short')\n else:\n # Lyrics look legit, remove credits to avoid being penalized\n # further down\n text = remove_credits(text)\n\n bad_triggers = ['lyrics', 'copyright', 'property', 'links']\n if artist:\n bad_triggers += [artist]\n\n for item in bad_triggers:\n bad_triggers_occ += [item] * len(re.findall(r'\\W%s\\W' % item,\n text, re.I))\n\n if bad_triggers_occ:\n self._log.debug(u'Bad triggers detected: {0}', bad_triggers_occ)\n return len(bad_triggers_occ) < 2\n\n def slugify(self, text):\n \"\"\"Normalize a string and remove non-alphanumeric characters.\n \"\"\"\n text = re.sub(r\"[-'_\\s]\", '_', text)\n text = re.sub(r\"_+\", '_', text).strip('_')\n pat = r\"([^,\\(]*)\\((.*?)\\)\" # Remove content within parentheses\n text = re.sub(pat, r'\\g<1>', text).strip()\n try:\n text = unicodedata.normalize('NFKD', text).encode('ascii',\n 'ignore')\n text = six.text_type(re.sub(r'[-\\s]+', ' ', text.decode('utf-8')))\n except UnicodeDecodeError:\n self._log.exception(u\"Failing to normalize '{0}'\", text)\n return text\n\n BY_TRANS = ['by', 'par', 'de', 'von']\n LYRICS_TRANS = ['lyrics', 'paroles', 'letras', 'liedtexte']\n\n def is_page_candidate(self, url_link, url_title, title, artist):\n \"\"\"Return True if the URL title makes it a good candidate to be a\n page that contains lyrics of title by artist.\n \"\"\"\n title = self.slugify(title.lower())\n artist = self.slugify(artist.lower())\n sitename = re.search(u\"//([^/]+)/.*\",\n self.slugify(url_link.lower())).group(1)\n url_title = self.slugify(url_title.lower())\n\n # Check if URL title contains song title (exact match)\n if url_title.find(title) != -1:\n return True\n\n # or try extracting song title from URL title and check if\n # they are close enough\n tokens = [by + '_' + artist for by in self.BY_TRANS] + \\\n [artist, sitename, sitename.replace('www.', '')] + \\\n self.LYRICS_TRANS\n tokens = [re.escape(t) for t in tokens]\n song_title = re.sub(u'(%s)' % u'|'.join(tokens), u'', url_title)\n\n song_title = song_title.strip('_|')\n typo_ratio = .9\n ratio = difflib.SequenceMatcher(None, song_title, title).ratio()\n return ratio >= typo_ratio\n\n def fetch(self, artist, title):\n query = u\"%s %s\" % 
(artist, title)\n url = u'https://www.googleapis.com/customsearch/v1?key=%s&cx=%s&q=%s' \\\n % (self.api_key, self.engine_id,\n urllib.parse.quote(query.encode('utf-8')))\n\n data = self.fetch_url(url)\n if not data:\n self._log.debug(u'google backend returned no data')\n return None\n try:\n data = json.loads(data)\n except ValueError as exc:\n self._log.debug(u'google backend returned malformed JSON: {}', exc)\n if 'error' in data:\n reason = data['error']['errors'][0]['reason']\n self._log.debug(u'google backend error: {0}', reason)\n return None\n\n if 'items' in data.keys():\n for item in data['items']:\n url_link = item['link']\n url_title = item.get('title', u'')\n if not self.is_page_candidate(url_link, url_title,\n title, artist):\n continue\n html = self.fetch_url(url_link)\n lyrics = scrape_lyrics_from_html(html)\n if not lyrics:\n continue\n\n if self.is_lyrics(lyrics, artist):\n self._log.debug(u'got lyrics from {0}',\n item['displayLink'])\n return lyrics\n\n\nclass LyricsPlugin(plugins.BeetsPlugin):\n SOURCES = ['google', 'musixmatch', 'genius', 'tekstowo']\n BS_SOURCES = ['google', 'genius', 'tekstowo']\n SOURCE_BACKENDS = {\n 'google': Google,\n 'musixmatch': MusiXmatch,\n 'genius': Genius,\n 'tekstowo': Tekstowo,\n }\n\n def __init__(self):\n super(LyricsPlugin, self).__init__()\n self.import_stages = [self.imported]\n self.config.add({\n 'auto': True,\n 'bing_client_secret': None,\n 'bing_lang_from': [],\n 'bing_lang_to': None,\n 'google_API_key': None,\n 'google_engine_ID': u'009217259823014548361:lndtuqkycfu',\n 'genius_api_key':\n \"Ryq93pUGm8bM6eUWwD_M3NOFFDAtp2yEE7W\"\n \"76V-uFL5jks5dNvcGCdarqFjDhP9c\",\n 'fallback': None,\n 'force': False,\n 'local': False,\n 'sources': self.SOURCES,\n })\n self.config['bing_client_secret'].redact = True\n self.config['google_API_key'].redact = True\n self.config['google_engine_ID'].redact = True\n self.config['genius_api_key'].redact = True\n\n # State information for the ReST writer.\n # First, the current artist we're writing.\n self.artist = u'Unknown artist'\n # The current album: False means no album yet.\n self.album = False\n # The current rest file content. None means the file is not\n # open yet.\n self.rest = None\n\n available_sources = list(self.SOURCES)\n sources = plugins.sanitize_choices(\n self.config['sources'].as_str_seq(), available_sources)\n\n if not HAS_BEAUTIFUL_SOUP:\n sources = self.sanitize_bs_sources(sources)\n\n if 'google' in sources:\n if not self.config['google_API_key'].get():\n # We log a *debug* message here because the default\n # configuration includes `google`. This way, the source\n # is silent by default but can be enabled just by\n # setting an API key.\n self._log.debug(u'Disabling google source: '\n u'no API key configured.')\n sources.remove('google')\n\n self.config['bing_lang_from'] = [\n x.lower() for x in self.config['bing_lang_from'].as_str_seq()]\n self.bing_auth_token = None\n\n if not HAS_LANGDETECT and self.config['bing_client_secret'].get():\n self._log.warning(u'To use bing translations, you need to '\n u'install the langdetect module. See the '\n u'documentation for further details.')\n\n self.backends = [self.SOURCE_BACKENDS[source](self.config, self._log)\n for source in sources]\n\n def sanitize_bs_sources(self, sources):\n for source in self.BS_SOURCES:\n if source in sources:\n self._log.debug(u'To use the %s lyrics source, you must '\n u'install the beautifulsoup4 module. 
See '\n u'the documentation for further details.'\n % source)\n sources.remove(source)\n\n return sources\n\n def get_bing_access_token(self):\n params = {\n 'client_id': 'beets',\n 'client_secret': self.config['bing_client_secret'],\n 'scope': \"https://api.microsofttranslator.com\",\n 'grant_type': 'client_credentials',\n }\n\n oauth_url = 'https://datamarket.accesscontrol.windows.net/v2/OAuth2-13'\n oauth_token = json.loads(requests.post(\n oauth_url,\n data=urllib.parse.urlencode(params)).content)\n if 'access_token' in oauth_token:\n return \"Bearer \" + oauth_token['access_token']\n else:\n self._log.warning(u'Could not get Bing Translate API access token.'\n u' Check your \"bing_client_secret\" password')\n\n def commands(self):\n cmd = ui.Subcommand('lyrics', help='fetch song lyrics')\n cmd.parser.add_option(\n u'-p', u'--print', dest='printlyr',\n action='store_true', default=False,\n help=u'print lyrics to console',\n )\n cmd.parser.add_option(\n u'-r', u'--write-rest', dest='writerest',\n action='store', default=None, metavar='dir',\n help=u'write lyrics to given directory as ReST files',\n )\n cmd.parser.add_option(\n u'-f', u'--force', dest='force_refetch',\n action='store_true', default=False,\n help=u'always re-download lyrics',\n )\n cmd.parser.add_option(\n u'-l', u'--local', dest='local_only',\n action='store_true', default=False,\n help=u'do not fetch missing lyrics',\n )\n\n def func(lib, opts, args):\n # The \"write to files\" option corresponds to the\n # import_write config value.\n write = ui.should_write()\n if opts.writerest:\n self.writerest_indexes(opts.writerest)\n items = lib.items(ui.decargs(args))\n for item in items:\n if not opts.local_only and not self.config['local']:\n self.fetch_item_lyrics(\n lib, item, write,\n opts.force_refetch or self.config['force'],\n )\n if item.lyrics:\n if opts.printlyr:\n ui.print_(item.lyrics)\n if opts.writerest:\n self.appendrest(opts.writerest, item)\n if opts.writerest and items:\n # flush last artist & write to ReST\n self.writerest(opts.writerest)\n ui.print_(u'ReST files generated. to build, use one of:')\n ui.print_(u' sphinx-build -b html %s _build/html'\n % opts.writerest)\n ui.print_(u' sphinx-build -b epub %s _build/epub'\n % opts.writerest)\n ui.print_((u' sphinx-build -b latex %s _build/latex '\n u'&& make -C _build/latex all-pdf')\n % opts.writerest)\n cmd.func = func\n return [cmd]\n\n def appendrest(self, directory, item):\n \"\"\"Append the item to an ReST file\n\n This will keep state (in the `rest` variable) in order to avoid\n writing continuously to the same files.\n \"\"\"\n\n if slug(self.artist) != slug(item.albumartist):\n # Write current file and start a new one ~ item.albumartist\n self.writerest(directory)\n self.artist = item.albumartist.strip()\n self.rest = u\"%s\\n%s\\n\\n.. 
contents::\\n :local:\\n\\n\" \\\n % (self.artist,\n u'=' * len(self.artist))\n\n if self.album != item.album:\n tmpalbum = self.album = item.album.strip()\n if self.album == '':\n tmpalbum = u'Unknown album'\n self.rest += u\"%s\\n%s\\n\\n\" % (tmpalbum, u'-' * len(tmpalbum))\n title_str = u\":index:`%s`\" % item.title.strip()\n block = u'| ' + item.lyrics.replace(u'\\n', u'\\n| ')\n self.rest += u\"%s\\n%s\\n\\n%s\\n\\n\" % (title_str,\n u'~' * len(title_str),\n block)\n\n def writerest(self, directory):\n \"\"\"Write self.rest to a ReST file\n \"\"\"\n if self.rest is not None and self.artist is not None:\n path = os.path.join(directory, 'artists',\n slug(self.artist) + u'.rst')\n with open(path, 'wb') as output:\n output.write(self.rest.encode('utf-8'))\n\n def writerest_indexes(self, directory):\n \"\"\"Write conf.py and index.rst files necessary for Sphinx\n\n We write minimal configurations that are necessary for Sphinx\n to operate. We do not overwrite existing files so that\n customizations are respected.\"\"\"\n try:\n os.makedirs(os.path.join(directory, 'artists'))\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n indexfile = os.path.join(directory, 'index.rst')\n if not os.path.exists(indexfile):\n with open(indexfile, 'w') as output:\n output.write(REST_INDEX_TEMPLATE)\n conffile = os.path.join(directory, 'conf.py')\n if not os.path.exists(conffile):\n with open(conffile, 'w') as output:\n output.write(REST_CONF_TEMPLATE)\n\n def imported(self, session, task):\n \"\"\"Import hook for fetching lyrics automatically.\n \"\"\"\n if self.config['auto']:\n for item in task.imported_items():\n self.fetch_item_lyrics(session.lib, item,\n False, self.config['force'])\n\n def fetch_item_lyrics(self, lib, item, write, force):\n \"\"\"Fetch and store lyrics for a single item. If ``write``, then the\n lyrics will also be written to the file itself.\n \"\"\"\n # Skip if the item already has lyrics.\n if not force and item.lyrics:\n self._log.info(u'lyrics already present: {0}', item)\n return\n\n lyrics = None\n for artist, titles in search_pairs(item):\n lyrics = [self.get_lyrics(artist, title) for title in titles]\n if any(lyrics):\n break\n\n lyrics = u\"\\n\\n---\\n\\n\".join([l for l in lyrics if l])\n\n if lyrics:\n self._log.info(u'fetched lyrics: {0}', item)\n if HAS_LANGDETECT and self.config['bing_client_secret'].get():\n lang_from = langdetect.detect(lyrics)\n if self.config['bing_lang_to'].get() != lang_from and (\n not self.config['bing_lang_from'] or (\n lang_from in self.config[\n 'bing_lang_from'].as_str_seq())):\n lyrics = self.append_translation(\n lyrics, self.config['bing_lang_to'])\n else:\n self._log.info(u'lyrics not found: {0}', item)\n fallback = self.config['fallback'].get()\n if fallback:\n lyrics = fallback\n else:\n return\n item.lyrics = lyrics\n if write:\n item.try_write()\n item.store()\n\n def get_lyrics(self, artist, title):\n \"\"\"Fetch lyrics, trying each source in turn. 
Return a string or\n None if no lyrics were found.\n \"\"\"\n for backend in self.backends:\n lyrics = backend.fetch(artist, title)\n if lyrics:\n self._log.debug(u'got lyrics from backend: {0}',\n backend.__class__.__name__)\n return _scrape_strip_cruft(lyrics, True)\n\n def append_translation(self, text, to_lang):\n from xml.etree import ElementTree\n\n if not self.bing_auth_token:\n self.bing_auth_token = self.get_bing_access_token()\n if self.bing_auth_token:\n # Extract unique lines to limit API request size per song\n text_lines = set(text.split('\\n'))\n url = ('https://api.microsofttranslator.com/v2/Http.svc/'\n 'Translate?text=%s&to=%s' % ('|'.join(text_lines), to_lang))\n r = requests.get(url,\n headers={\"Authorization \": self.bing_auth_token})\n if r.status_code != 200:\n self._log.debug('translation API error {}: {}', r.status_code,\n r.text)\n if 'token has expired' in r.text:\n self.bing_auth_token = None\n return self.append_translation(text, to_lang)\n return text\n lines_translated = ElementTree.fromstring(\n r.text.encode('utf-8')).text\n # Use a translation mapping dict to build resulting lyrics\n translations = dict(zip(text_lines, lines_translated.split('|')))\n result = ''\n for line in text.split('\\n'):\n result += '%s / %s\\n' % (line, translations[line])\n return result\n", "path": "beetsplug/lyrics.py" } ]
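The patched `Tekstowo.parse_search_results` above narrows the lookup to the results card and returns `None` when no candidate rows are found, instead of indexing blindly into the first `find_all` hit. A rough way to exercise it against hand-made markup is sketched below; the HTML is invented and only loosely mirrors tekstowo.pl, and the snippet assumes beets and beautifulsoup4 are importable in the current environment.

```python
# Rough exercise of the patched Tekstowo.parse_search_results against hand-made
# markup. The HTML below is invented and only loosely mirrors tekstowo.pl;
# beets and beautifulsoup4 are assumed to be importable.
import logging

from beetsplug.lyrics import Tekstowo

backend = Tekstowo(config=None, log=logging.getLogger('lyrics-test'))

with_hit = """
<div class="content">
<div class="card">
<div class="box-przeboje">
<a href="/piosenka,artist,title.html">Artist - Title</a>
</div>
</div>
</div>
"""

no_hits = """
<div class="content">
<div class="card">
no matching songs here
</div>
</div>
"""

# A usable result row yields an absolute song-page URL.
print(backend.parse_search_results(with_hit))
# A result page without any 'box-przeboje' rows now yields None instead of
# raising IndexError from find_all(...)[0], as the unpatched code did.
print(backend.parse_search_results(no_hits))
```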
diff --git a/beetsplug/lyrics.py b/beetsplug/lyrics.py index 125d1330d5..f0290f74a8 100644 --- a/beetsplug/lyrics.py +++ b/beetsplug/lyrics.py @@ -437,8 +437,14 @@ def parse_search_results(self, html): except HTMLParseError: return None - song_row = html.find("div", class_="content"). \ - find_all("div", class_="box-przeboje")[0] + song_rows = html.find("div", class_="content"). \ + find("div", class_="card"). \ + find_all("div", class_="box-przeboje") + + if not song_rows: + return None + + song_row = song_rows[0] if not song_row: return None diff --git a/docs/changelog.rst b/docs/changelog.rst index 6aca3ac8e3..88338f7a79 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -353,6 +353,8 @@ Fixes: :bug:`3870` * Allow equals within ``--set`` value when importing. :bug:`2984` +* :doc:`/plugins/lyrics`: Fix crashes for Tekstowo false positives + :bug:`3904` For plugin developers:
Lyrics plugin crashing on tekstowo.pl
I just found it crashing while searching tekstowo.pl (I'm on https://github.com/beetbox/beets/commit/afc072801c4a254e6b4114d89133d6bebc3e34b9). These are the relevant lines of the error:

```
    return html.find("div", class_="song-text").get_text()
AttributeError: 'NoneType' object has no attribute 'get_text'
```

I also printed the HTML to the console, but I am unsure why the error is appearing. The div with the class song-text seems to exist, and I've never worked on beets before. Could it have something to do with there being two divs with the class?

I uploaded the HTML, but had to do it with a txt extension due to GitHub limitations.
[temp.txt](https://github.com/beetbox/beets/files/6292601/temp.txt)

_Originally posted by @njelich in https://github.com/beetbox/beets/issues/3903#issuecomment-817359091_
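The traceback quoted in the report comes from `Tekstowo.extract_lyrics`: when a fetched page lacks the expected `song-text` div, `find("div", class_="song-text")` returns `None` and the chained `.get_text()` raises exactly the quoted `AttributeError`. A minimal reproduction of that failure mode, together with the obvious guard, might look like the sketch below; the HTML strings are invented stand-ins for tekstowo.pl pages and beautifulsoup4 is assumed to be installed.

```python
# Minimal reproduction of the AttributeError quoted above, plus the obvious
# guard. The HTML strings are invented stand-ins for tekstowo.pl pages and
# beautifulsoup4 is assumed to be installed.
from bs4 import BeautifulSoup

lyric_page = u'<div class="song-text">Verse one\nVerse two</div>'
other_page = u'<div class="plain-page">no lyrics on this page</div>'


def extract_lyrics_unguarded(html):
    # Mirrors the plugin's extract_lyrics chain: crashes when the div is missing.
    return BeautifulSoup(html, "html.parser") \
        .find("div", class_="song-text").get_text()


def extract_lyrics_guarded(html):
    div = BeautifulSoup(html, "html.parser").find("div", class_="song-text")
    return div.get_text() if div is not None else None


print(extract_lyrics_guarded(lyric_page))   # -> the lyrics text
print(extract_lyrics_guarded(other_page))   # -> None
try:
    extract_lyrics_unguarded(other_page)
except AttributeError as exc:
    print(exc)  # 'NoneType' object has no attribute 'get_text'
```

Note that the merged patch hardens the earlier search step (`parse_search_results`) rather than adding this particular guard to `extract_lyrics`.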
getredash__redash-3323
[ { "content": "import cStringIO\nimport csv\nimport datetime\nimport calendar\nimport functools\nimport hashlib\nimport itertools\nimport logging\nimport time\nimport pytz\nfrom functools import reduce\n\nimport xlsxwriter\nfrom six import python_2_unicode_compatible, text_type\nfrom sqlalchemy import distinct, or_, and_, UniqueConstraint\nfrom sqlalchemy.dialects import postgresql\nfrom sqlalchemy.event import listens_for\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy.orm import backref, contains_eager, joinedload, subqueryload, load_only\nfrom sqlalchemy.orm.exc import NoResultFound # noqa: F401\nfrom sqlalchemy import func\nfrom sqlalchemy_utils import generic_relationship\nfrom sqlalchemy_utils.types import TSVectorType\nfrom sqlalchemy_utils.models import generic_repr\n\nfrom redash import redis_connection, utils\nfrom redash.destinations import (get_configuration_schema_for_destination_type,\n get_destination)\nfrom redash.metrics import database # noqa: F401\nfrom redash.query_runner import (get_configuration_schema_for_query_runner_type,\n get_query_runner)\nfrom redash.utils import generate_token, json_dumps, json_loads\nfrom redash.utils.configuration import ConfigurationContainer\n\nfrom .base import db, gfk_type, Column, GFKBase, SearchBaseQuery\nfrom .changes import ChangeTrackingMixin, Change # noqa\nfrom .mixins import BelongsToOrgMixin, TimestampMixin\nfrom .organizations import Organization\nfrom .types import Configuration, MutableDict, MutableList, PseudoJSON\nfrom .users import (AccessPermission, AnonymousUser, ApiUser, Group, User) # noqa\n\nlogger = logging.getLogger(__name__)\n\n\nclass ScheduledQueriesExecutions(object):\n KEY_NAME = 'sq:executed_at'\n\n def __init__(self):\n self.executions = {}\n\n def refresh(self):\n self.executions = redis_connection.hgetall(self.KEY_NAME)\n\n def update(self, query_id):\n redis_connection.hmset(self.KEY_NAME, {\n query_id: time.time()\n })\n\n def get(self, query_id):\n timestamp = self.executions.get(str(query_id))\n if timestamp:\n timestamp = utils.dt_from_timestamp(timestamp)\n\n return timestamp\n\n\nscheduled_queries_executions = ScheduledQueriesExecutions()\n\n\n@python_2_unicode_compatible\n@generic_repr('id', 'name', 'type', 'org_id', 'created_at')\nclass DataSource(BelongsToOrgMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey('organizations.id'))\n org = db.relationship(Organization, backref=\"data_sources\")\n\n name = Column(db.String(255))\n type = Column(db.String(255))\n options = Column(ConfigurationContainer.as_mutable(Configuration))\n queue_name = Column(db.String(255), default=\"queries\")\n scheduled_queue_name = Column(db.String(255), default=\"scheduled_queries\")\n created_at = Column(db.DateTime(True), default=db.func.now())\n\n data_source_groups = db.relationship(\"DataSourceGroup\", back_populates=\"data_source\",\n cascade=\"all\")\n __tablename__ = 'data_sources'\n __table_args__ = (db.Index('data_sources_org_id_name', 'org_id', 'name'),)\n\n def __eq__(self, other):\n return self.id == other.id\n\n def to_dict(self, all=False, with_permissions_for=None):\n d = {\n 'id': self.id,\n 'name': self.name,\n 'type': self.type,\n 'syntax': self.query_runner.syntax,\n 'paused': self.paused,\n 'pause_reason': self.pause_reason\n }\n\n if all:\n schema = get_configuration_schema_for_query_runner_type(self.type)\n self.options.set_schema(schema)\n d['options'] = self.options.to_dict(mask_secrets=True)\n d['queue_name'] = 
self.queue_name\n d['scheduled_queue_name'] = self.scheduled_queue_name\n d['groups'] = self.groups\n\n if with_permissions_for is not None:\n d['view_only'] = db.session.query(DataSourceGroup.view_only).filter(\n DataSourceGroup.group == with_permissions_for,\n DataSourceGroup.data_source == self).one()[0]\n\n return d\n\n def __str__(self):\n return text_type(self.name)\n\n @classmethod\n def create_with_group(cls, *args, **kwargs):\n data_source = cls(*args, **kwargs)\n data_source_group = DataSourceGroup(\n data_source=data_source,\n group=data_source.org.default_group)\n db.session.add_all([data_source, data_source_group])\n return data_source\n\n @classmethod\n def all(cls, org, group_ids=None):\n data_sources = cls.query.filter(cls.org == org).order_by(cls.id.asc())\n\n if group_ids:\n data_sources = data_sources.join(DataSourceGroup).filter(\n DataSourceGroup.group_id.in_(group_ids))\n\n return data_sources.distinct()\n\n @classmethod\n def get_by_id(cls, _id):\n return cls.query.filter(cls.id == _id).one()\n\n def delete(self):\n Query.query.filter(Query.data_source == self).update(dict(data_source_id=None, latest_query_data_id=None))\n QueryResult.query.filter(QueryResult.data_source == self).delete()\n res = db.session.delete(self)\n db.session.commit()\n return res\n\n def get_schema(self, refresh=False):\n key = \"data_source:schema:{}\".format(self.id)\n\n cache = None\n if not refresh:\n cache = redis_connection.get(key)\n\n if cache is None:\n query_runner = self.query_runner\n schema = sorted(query_runner.get_schema(get_stats=refresh), key=lambda t: t['name'])\n\n redis_connection.set(key, json_dumps(schema))\n else:\n schema = json_loads(cache)\n\n return schema\n\n def _pause_key(self):\n return 'ds:{}:pause'.format(self.id)\n\n @property\n def paused(self):\n return redis_connection.exists(self._pause_key())\n\n @property\n def pause_reason(self):\n return redis_connection.get(self._pause_key())\n\n def pause(self, reason=None):\n redis_connection.set(self._pause_key(), reason or '')\n\n def resume(self):\n redis_connection.delete(self._pause_key())\n\n def add_group(self, group, view_only=False):\n dsg = DataSourceGroup(group=group, data_source=self, view_only=view_only)\n db.session.add(dsg)\n return dsg\n\n def remove_group(self, group):\n DataSourceGroup.query.filter(\n DataSourceGroup.group == group,\n DataSourceGroup.data_source == self\n ).delete()\n db.session.commit()\n\n def update_group_permission(self, group, view_only):\n dsg = DataSourceGroup.query.filter(\n DataSourceGroup.group == group,\n DataSourceGroup.data_source == self).one()\n dsg.view_only = view_only\n db.session.add(dsg)\n return dsg\n\n @property\n def query_runner(self):\n return get_query_runner(self.type, self.options)\n\n @classmethod\n def get_by_name(cls, name):\n return cls.query.filter(cls.name == name).one()\n\n # XXX examine call sites to see if a regular SQLA collection would work better\n @property\n def groups(self):\n groups = DataSourceGroup.query.filter(\n DataSourceGroup.data_source == self\n )\n return dict(map(lambda g: (g.group_id, g.view_only), groups))\n\n\n@generic_repr('id', 'data_source_id', 'group_id', 'view_only')\nclass DataSourceGroup(db.Model):\n # XXX drop id, use datasource/group as PK\n id = Column(db.Integer, primary_key=True)\n data_source_id = Column(db.Integer, db.ForeignKey(\"data_sources.id\"))\n data_source = db.relationship(DataSource, back_populates=\"data_source_groups\")\n group_id = Column(db.Integer, db.ForeignKey(\"groups.id\"))\n group = 
db.relationship(Group, back_populates=\"data_sources\")\n view_only = Column(db.Boolean, default=False)\n\n __tablename__ = \"data_source_groups\"\n\n\n@python_2_unicode_compatible\n@generic_repr('id', 'org_id', 'data_source_id', 'query_hash', 'runtime', 'retrieved_at')\nclass QueryResult(db.Model, BelongsToOrgMixin):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey('organizations.id'))\n org = db.relationship(Organization)\n data_source_id = Column(db.Integer, db.ForeignKey(\"data_sources.id\"))\n data_source = db.relationship(DataSource, backref=backref('query_results'))\n query_hash = Column(db.String(32), index=True)\n query_text = Column('query', db.Text)\n data = Column(db.Text)\n runtime = Column(postgresql.DOUBLE_PRECISION)\n retrieved_at = Column(db.DateTime(True))\n\n __tablename__ = 'query_results'\n\n def __str__(self):\n return u\"%d | %s | %s\" % (self.id, self.query_hash, self.retrieved_at)\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'query_hash': self.query_hash,\n 'query': self.query_text,\n 'data': json_loads(self.data),\n 'data_source_id': self.data_source_id,\n 'runtime': self.runtime,\n 'retrieved_at': self.retrieved_at\n }\n\n @classmethod\n def unused(cls, days=7):\n age_threshold = datetime.datetime.now() - datetime.timedelta(days=days)\n return (\n cls.query.filter(\n Query.id.is_(None),\n cls.retrieved_at < age_threshold\n )\n .outerjoin(Query)\n ).options(load_only('id'))\n\n @classmethod\n def get_latest(cls, data_source, query, max_age=0):\n query_hash = utils.gen_query_hash(query)\n\n if max_age == -1:\n query = cls.query.filter(\n cls.query_hash == query_hash,\n cls.data_source == data_source\n )\n else:\n query = cls.query.filter(\n cls.query_hash == query_hash,\n cls.data_source == data_source,\n (\n db.func.timezone('utc', cls.retrieved_at) +\n datetime.timedelta(seconds=max_age) >=\n db.func.timezone('utc', db.func.now())\n )\n )\n\n return query.order_by(cls.retrieved_at.desc()).first()\n\n @classmethod\n def store_result(cls, org, data_source, query_hash, query, data, run_time, retrieved_at):\n query_result = cls(org_id=org,\n query_hash=query_hash,\n query_text=query,\n runtime=run_time,\n data_source=data_source,\n retrieved_at=retrieved_at,\n data=data)\n db.session.add(query_result)\n logging.info(\"Inserted query (%s) data; id=%s\", query_hash, query_result.id)\n # TODO: Investigate how big an impact this select-before-update makes.\n queries = Query.query.filter(\n Query.query_hash == query_hash,\n Query.data_source == data_source\n )\n for q in queries:\n q.latest_query_data = query_result\n # don't auto-update the updated_at timestamp\n q.skip_updated_at = True\n db.session.add(q)\n query_ids = [q.id for q in queries]\n logging.info(\"Updated %s queries with result (%s).\", len(query_ids), query_hash)\n\n return query_result, query_ids\n\n @property\n def groups(self):\n return self.data_source.groups\n\n def make_csv_content(self):\n s = cStringIO.StringIO()\n\n query_data = json_loads(self.data)\n writer = csv.DictWriter(s, extrasaction=\"ignore\", fieldnames=[col['name'] for col in query_data['columns']])\n writer.writer = utils.UnicodeWriter(s)\n writer.writeheader()\n for row in query_data['rows']:\n writer.writerow(row)\n\n return s.getvalue()\n\n def make_excel_content(self):\n s = cStringIO.StringIO()\n\n query_data = json_loads(self.data)\n book = xlsxwriter.Workbook(s, {'constant_memory': True})\n sheet = book.add_worksheet(\"result\")\n\n column_names = []\n for (c, col) in 
enumerate(query_data['columns']):\n sheet.write(0, c, col['name'])\n column_names.append(col['name'])\n\n for (r, row) in enumerate(query_data['rows']):\n for (c, name) in enumerate(column_names):\n v = row.get(name)\n if isinstance(v, list):\n v = str(v).encode('utf-8')\n sheet.write(r + 1, c, v)\n\n book.close()\n\n return s.getvalue()\n\n\ndef should_schedule_next(previous_iteration, now, interval, time=None, day_of_week=None, failures=0):\n # if time exists then interval > 23 hours (82800s)\n # if day_of_week exists then interval > 6 days (518400s)\n if (time is None):\n ttl = int(interval)\n next_iteration = previous_iteration + datetime.timedelta(seconds=ttl)\n else:\n hour, minute = time.split(':')\n hour, minute = int(hour), int(minute)\n\n # The following logic is needed for cases like the following:\n # - The query scheduled to run at 23:59.\n # - The scheduler wakes up at 00:01.\n # - Using naive implementation of comparing timestamps, it will skip the execution.\n normalized_previous_iteration = previous_iteration.replace(hour=hour, minute=minute)\n\n if normalized_previous_iteration > previous_iteration:\n previous_iteration = normalized_previous_iteration - datetime.timedelta(days=1)\n\n days_delay = int(interval) / 60 / 60 / 24\n\n days_to_add = 0\n if (day_of_week is not None):\n days_to_add = list(calendar.day_name).index(day_of_week) - normalized_previous_iteration.weekday()\n\n next_iteration = (previous_iteration + datetime.timedelta(days=days_delay) +\n datetime.timedelta(days=days_to_add)).replace(hour=hour, minute=minute)\n if failures:\n next_iteration += datetime.timedelta(minutes=2**failures)\n return now > next_iteration\n\n\n@python_2_unicode_compatible\n@gfk_type\n@generic_repr('id', 'name', 'query_hash', 'version', 'user_id', 'org_id',\n 'data_source_id', 'query_hash', 'last_modified_by_id',\n 'is_archived', 'is_draft', 'schedule', 'schedule_failures')\nclass Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n version = Column(db.Integer, default=1)\n org_id = Column(db.Integer, db.ForeignKey('organizations.id'))\n org = db.relationship(Organization, backref=\"queries\")\n data_source_id = Column(db.Integer, db.ForeignKey(\"data_sources.id\"), nullable=True)\n data_source = db.relationship(DataSource, backref='queries')\n latest_query_data_id = Column(db.Integer, db.ForeignKey(\"query_results.id\"), nullable=True)\n latest_query_data = db.relationship(QueryResult)\n name = Column(db.String(255))\n description = Column(db.String(4096), nullable=True)\n query_text = Column(\"query\", db.Text)\n query_hash = Column(db.String(32))\n api_key = Column(db.String(40), default=lambda: generate_token(40))\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User, foreign_keys=[user_id])\n last_modified_by_id = Column(db.Integer, db.ForeignKey('users.id'), nullable=True)\n last_modified_by = db.relationship(User, backref=\"modified_queries\",\n foreign_keys=[last_modified_by_id])\n is_archived = Column(db.Boolean, default=False, index=True)\n is_draft = Column(db.Boolean, default=True, index=True)\n schedule = Column(MutableDict.as_mutable(PseudoJSON), nullable=True)\n schedule_failures = Column(db.Integer, default=0)\n visualizations = db.relationship(\"Visualization\", cascade=\"all, delete-orphan\")\n options = Column(MutableDict.as_mutable(PseudoJSON), default={})\n search_vector = Column(TSVectorType('id', 'name', 'description', 'query',\n weights={'name': 'A',\n 'id': 
'B',\n 'description': 'C',\n 'query': 'D'}),\n nullable=True)\n tags = Column('tags', MutableList.as_mutable(postgresql.ARRAY(db.Unicode)), nullable=True)\n\n query_class = SearchBaseQuery\n __tablename__ = 'queries'\n __mapper_args__ = {\n \"version_id_col\": version,\n 'version_id_generator': False\n }\n\n def __str__(self):\n return text_type(self.id)\n\n def archive(self, user=None):\n db.session.add(self)\n self.is_archived = True\n self.schedule = None\n\n for vis in self.visualizations:\n for w in vis.widgets:\n db.session.delete(w)\n\n for a in self.alerts:\n db.session.delete(a)\n\n if user:\n self.record_changes(user)\n\n @classmethod\n def create(cls, **kwargs):\n query = cls(**kwargs)\n db.session.add(Visualization(query_rel=query,\n name=\"Table\",\n description='',\n type=\"TABLE\",\n options=\"{}\"))\n return query\n\n @classmethod\n def all_queries(cls, group_ids, user_id=None, drafts=False):\n query_ids = (\n db.session\n .query(distinct(cls.id))\n .join(\n DataSourceGroup,\n Query.data_source_id == DataSourceGroup.data_source_id\n )\n .filter(Query.is_archived == False)\n .filter(DataSourceGroup.group_id.in_(group_ids))\n )\n q = (\n cls\n .query\n .options(\n joinedload(Query.user),\n joinedload(\n Query.latest_query_data\n ).load_only(\n 'runtime',\n 'retrieved_at',\n )\n )\n .filter(cls.id.in_(query_ids))\n # Adding outer joins to be able to order by relationship\n .outerjoin(User, User.id == Query.user_id)\n .outerjoin(\n QueryResult,\n QueryResult.id == Query.latest_query_data_id\n )\n .options(\n contains_eager(Query.user),\n contains_eager(Query.latest_query_data),\n )\n .order_by(Query.created_at.desc())\n )\n\n if not drafts:\n q = q.filter(\n or_(\n Query.is_draft == False,\n Query.user_id == user_id\n )\n )\n return q\n\n @classmethod\n def favorites(cls, user, base_query=None):\n if base_query is None:\n base_query = cls.all_queries(user.group_ids, user.id, drafts=True)\n return base_query.join((\n Favorite,\n and_(\n Favorite.object_type == u'Query',\n Favorite.object_id == Query.id\n )\n )).filter(Favorite.user_id == user.id)\n\n @classmethod\n def all_tags(cls, user, include_drafts=False):\n queries = cls.all_queries(\n group_ids=user.group_ids,\n user_id=user.id,\n drafts=include_drafts,\n )\n\n tag_column = func.unnest(cls.tags).label('tag')\n usage_count = func.count(1).label('usage_count')\n\n query = (\n db.session\n .query(tag_column, usage_count)\n .group_by(tag_column)\n .filter(Query.id.in_(queries.options(load_only('id'))))\n .order_by(usage_count.desc())\n )\n return query\n\n @classmethod\n def by_user(cls, user):\n return cls.all_queries(user.group_ids, user.id).filter(Query.user == user)\n\n @classmethod\n def outdated_queries(cls):\n queries = (Query.query\n .options(joinedload(Query.latest_query_data).load_only('retrieved_at'))\n .filter(Query.schedule.isnot(None))\n .order_by(Query.id))\n \n now = utils.utcnow()\n outdated_queries = {}\n scheduled_queries_executions.refresh()\n\n for query in queries:\n schedule_until = pytz.utc.localize(datetime.datetime.strptime(\n query.schedule['until'], '%Y-%m-%d')) if query.schedule['until'] else None\n if (query.schedule['interval'] == None or (\n schedule_until != None and (\n schedule_until <= now))):\n continue\n\n if query.latest_query_data:\n retrieved_at = query.latest_query_data.retrieved_at\n else:\n retrieved_at = now\n\n retrieved_at = scheduled_queries_executions.get(query.id) or retrieved_at\n\n if should_schedule_next(retrieved_at, now, query.schedule['interval'], 
query.schedule['time'],\n query.schedule['day_of_week'], query.schedule_failures):\n key = \"{}:{}\".format(query.query_hash, query.data_source_id)\n outdated_queries[key] = query\n\n return outdated_queries.values()\n\n @classmethod\n def search(cls, term, group_ids, user_id=None, include_drafts=False, limit=None):\n all_queries = cls.all_queries(group_ids, user_id=user_id, drafts=include_drafts)\n # sort the result using the weight as defined in the search vector column\n return all_queries.search(term, sort=True).limit(limit)\n\n @classmethod\n def search_by_user(cls, term, user, limit=None):\n return cls.by_user(user).search(term, sort=True).limit(limit)\n\n @classmethod\n def recent(cls, group_ids, user_id=None, limit=20):\n query = (cls.query\n .filter(Event.created_at > (db.func.current_date() - 7))\n .join(Event, Query.id == Event.object_id.cast(db.Integer))\n .join(DataSourceGroup, Query.data_source_id == DataSourceGroup.data_source_id)\n .filter(\n Event.action.in_(['edit', 'execute', 'edit_name',\n 'edit_description', 'view_source']),\n Event.object_id != None,\n Event.object_type == 'query',\n DataSourceGroup.group_id.in_(group_ids),\n or_(Query.is_draft == False, Query.user_id == user_id),\n Query.is_archived == False)\n .group_by(Event.object_id, Query.id)\n .order_by(db.desc(db.func.count(0))))\n\n if user_id:\n query = query.filter(Event.user_id == user_id)\n\n query = query.limit(limit)\n\n return query\n\n @classmethod\n def get_by_id(cls, _id):\n return cls.query.filter(cls.id == _id).one()\n\n def fork(self, user):\n forked_list = ['org', 'data_source', 'latest_query_data', 'description',\n 'query_text', 'query_hash', 'options']\n kwargs = {a: getattr(self, a) for a in forked_list}\n forked_query = Query.create(name=u'Copy of (#{}) {}'.format(self.id, self.name),\n user=user, **kwargs)\n\n for v in self.visualizations:\n if v.type == 'TABLE':\n continue\n forked_v = v.copy()\n forked_v['query_rel'] = forked_query\n forked_query.visualizations.append(Visualization(**forked_v))\n db.session.add(forked_query)\n return forked_query\n\n @property\n def runtime(self):\n return self.latest_query_data.runtime\n\n @property\n def retrieved_at(self):\n return self.latest_query_data.retrieved_at\n\n @property\n def groups(self):\n if self.data_source is None:\n return {}\n\n return self.data_source.groups\n\n @hybrid_property\n def lowercase_name(self):\n \"Optional property useful for sorting purposes.\"\n return self.name.lower()\n\n @lowercase_name.expression\n def lowercase_name(cls):\n \"The SQLAlchemy expression for the property above.\"\n return func.lower(cls.name)\n\n\n@listens_for(Query.query_text, 'set')\ndef gen_query_hash(target, val, oldval, initiator):\n target.query_hash = utils.gen_query_hash(val)\n target.schedule_failures = 0\n\n\n@listens_for(Query.user_id, 'set')\ndef query_last_modified_by(target, val, oldval, initiator):\n target.last_modified_by_id = val\n\n\n@generic_repr('id', 'object_type', 'object_id', 'user_id', 'org_id')\nclass Favorite(TimestampMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey(\"organizations.id\"))\n\n object_type = Column(db.Unicode(255))\n object_id = Column(db.Integer)\n object = generic_relationship(object_type, object_id)\n\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User, backref='favorites')\n\n __tablename__ = \"favorites\"\n __table_args__ = (\n UniqueConstraint(\"object_type\", \"object_id\", \"user_id\", 
name=\"unique_favorite\"),\n )\n\n @classmethod\n def is_favorite(cls, user, object):\n return cls.query.filter(cls.object == object, cls.user_id == user).count() > 0\n\n @classmethod\n def are_favorites(cls, user, objects):\n objects = list(objects)\n if not objects:\n return []\n\n object_type = text_type(objects[0].__class__.__name__)\n return map(lambda fav: fav.object_id, cls.query.filter(cls.object_id.in_(map(lambda o: o.id, objects)), cls.object_type == object_type, cls.user_id == user))\n\n\n@generic_repr('id', 'name', 'query_id', 'user_id', 'state', 'last_triggered_at', 'rearm')\nclass Alert(TimestampMixin, BelongsToOrgMixin, db.Model):\n UNKNOWN_STATE = 'unknown'\n OK_STATE = 'ok'\n TRIGGERED_STATE = 'triggered'\n\n id = Column(db.Integer, primary_key=True)\n name = Column(db.String(255))\n query_id = Column(db.Integer, db.ForeignKey(\"queries.id\"))\n query_rel = db.relationship(Query, backref=backref('alerts', cascade=\"all\"))\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User, backref='alerts')\n options = Column(MutableDict.as_mutable(PseudoJSON))\n state = Column(db.String(255), default=UNKNOWN_STATE)\n subscriptions = db.relationship(\"AlertSubscription\", cascade=\"all, delete-orphan\")\n last_triggered_at = Column(db.DateTime(True), nullable=True)\n rearm = Column(db.Integer, nullable=True)\n\n __tablename__ = 'alerts'\n\n @classmethod\n def all(cls, group_ids):\n return (\n cls.query\n .options(\n joinedload(Alert.user),\n joinedload(Alert.query_rel),\n )\n .join(Query)\n .join(\n DataSourceGroup,\n DataSourceGroup.data_source_id == Query.data_source_id\n )\n .filter(DataSourceGroup.group_id.in_(group_ids))\n )\n\n @classmethod\n def get_by_id_and_org(cls, object_id, org):\n return super(Alert, cls).get_by_id_and_org(object_id, org, Query)\n\n def evaluate(self):\n data = json_loads(self.query_rel.latest_query_data.data)\n\n if data['rows'] and self.options['column'] in data['rows'][0]:\n value = data['rows'][0][self.options['column']]\n op = self.options['op']\n\n if op == 'greater than' and value > self.options['value']:\n new_state = self.TRIGGERED_STATE\n elif op == 'less than' and value < self.options['value']:\n new_state = self.TRIGGERED_STATE\n elif op == 'equals' and value == self.options['value']:\n new_state = self.TRIGGERED_STATE\n else:\n new_state = self.OK_STATE\n else:\n new_state = self.UNKNOWN_STATE\n\n return new_state\n\n def subscribers(self):\n return User.query.join(AlertSubscription).filter(AlertSubscription.alert == self)\n\n @property\n def groups(self):\n return self.query_rel.groups\n\n\ndef generate_slug(ctx):\n slug = utils.slugify(ctx.current_parameters['name'])\n tries = 1\n while Dashboard.query.filter(Dashboard.slug == slug).first() is not None:\n slug = utils.slugify(ctx.current_parameters['name']) + \"_\" + str(tries)\n tries += 1\n return slug\n\n\n@python_2_unicode_compatible\n@gfk_type\n@generic_repr('id', 'name', 'slug', 'user_id', 'org_id', 'version', 'is_archived', 'is_draft')\nclass Dashboard(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n version = Column(db.Integer)\n org_id = Column(db.Integer, db.ForeignKey(\"organizations.id\"))\n org = db.relationship(Organization, backref=\"dashboards\")\n slug = Column(db.String(140), index=True, default=generate_slug)\n name = Column(db.String(100))\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User)\n # layout is no longer used, but kept so we 
know how to render old dashboards.\n layout = Column(db.Text)\n dashboard_filters_enabled = Column(db.Boolean, default=False)\n is_archived = Column(db.Boolean, default=False, index=True)\n is_draft = Column(db.Boolean, default=True, index=True)\n widgets = db.relationship('Widget', backref='dashboard', lazy='dynamic')\n tags = Column('tags', MutableList.as_mutable(postgresql.ARRAY(db.Unicode)), nullable=True)\n\n __tablename__ = 'dashboards'\n __mapper_args__ = {\n \"version_id_col\": version\n }\n\n def __str__(self):\n return u\"%s=%s\" % (self.id, self.name)\n\n @classmethod\n def all(cls, org, group_ids, user_id):\n query = (\n Dashboard.query\n .options(\n subqueryload(Dashboard.user).load_only('_profile_image_url', 'name'),\n )\n .outerjoin(Widget)\n .outerjoin(Visualization)\n .outerjoin(Query)\n .outerjoin(DataSourceGroup, Query.data_source_id == DataSourceGroup.data_source_id)\n .filter(\n Dashboard.is_archived == False,\n (DataSourceGroup.group_id.in_(group_ids) |\n (Dashboard.user_id == user_id) |\n ((Widget.dashboard != None) & (Widget.visualization == None))),\n Dashboard.org == org)\n .distinct())\n\n query = query.filter(or_(Dashboard.user_id == user_id, Dashboard.is_draft == False))\n\n return query\n\n @classmethod\n def search(cls, org, groups_ids, user_id, search_term):\n # TODO: switch to FTS\n return cls.all(org, groups_ids, user_id).filter(cls.name.ilike(u'%{}%'.format(search_term)))\n\n @classmethod\n def all_tags(cls, org, user):\n dashboards = cls.all(org, user.group_ids, user.id)\n\n tag_column = func.unnest(cls.tags).label('tag')\n usage_count = func.count(1).label('usage_count')\n\n query = (\n db.session\n .query(tag_column, usage_count)\n .group_by(tag_column)\n .filter(Dashboard.id.in_(dashboards.options(load_only('id'))))\n .order_by(usage_count.desc())\n )\n return query\n\n @classmethod\n def favorites(cls, user, base_query=None):\n if base_query is None:\n base_query = cls.all(user.org, user.group_ids, user.id)\n return base_query.join(\n (\n Favorite,\n and_(\n Favorite.object_type == u'Dashboard',\n Favorite.object_id == Dashboard.id\n )\n )\n ).filter(Favorite.user_id == user.id)\n\n @classmethod\n def get_by_slug_and_org(cls, slug, org):\n return cls.query.filter(cls.slug == slug, cls.org == org).one()\n\n @hybrid_property\n def lowercase_name(self):\n \"Optional property useful for sorting purposes.\"\n return self.name.lower()\n\n @lowercase_name.expression\n def lowercase_name(cls):\n \"The SQLAlchemy expression for the property above.\"\n return func.lower(cls.name)\n\n\n@python_2_unicode_compatible\n@generic_repr('id', 'name', 'type', 'query_id')\nclass Visualization(TimestampMixin, BelongsToOrgMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n type = Column(db.String(100))\n query_id = Column(db.Integer, db.ForeignKey(\"queries.id\"))\n # query_rel and not query, because db.Model already has query defined.\n query_rel = db.relationship(Query, back_populates='visualizations')\n name = Column(db.String(255))\n description = Column(db.String(4096), nullable=True)\n options = Column(db.Text)\n\n __tablename__ = 'visualizations'\n\n def __str__(self):\n return u\"%s %s\" % (self.id, self.type)\n\n @classmethod\n def get_by_id_and_org(cls, object_id, org):\n return super(Visualization, cls).get_by_id_and_org(object_id, org, Query)\n\n def copy(self):\n return {\n 'type': self.type,\n 'name': self.name,\n 'description': self.description,\n 'options': self.options\n }\n\n\n@python_2_unicode_compatible\n@generic_repr('id', 
'visualization_id', 'dashboard_id')\nclass Widget(TimestampMixin, BelongsToOrgMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n visualization_id = Column(db.Integer, db.ForeignKey('visualizations.id'), nullable=True)\n visualization = db.relationship(Visualization, backref='widgets')\n text = Column(db.Text, nullable=True)\n width = Column(db.Integer)\n options = Column(db.Text)\n dashboard_id = Column(db.Integer, db.ForeignKey(\"dashboards.id\"), index=True)\n\n __tablename__ = 'widgets'\n\n def __str__(self):\n return u\"%s\" % self.id\n\n @classmethod\n def get_by_id_and_org(cls, object_id, org):\n return super(Widget, cls).get_by_id_and_org(object_id, org, Dashboard)\n\n\n@python_2_unicode_compatible\n@generic_repr('id', 'object_type', 'object_id', 'action', 'user_id', 'org_id', 'created_at')\nclass Event(db.Model):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey(\"organizations.id\"))\n org = db.relationship(Organization, back_populates=\"events\")\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"), nullable=True)\n user = db.relationship(User, backref=\"events\")\n action = Column(db.String(255))\n object_type = Column(db.String(255))\n object_id = Column(db.String(255), nullable=True)\n additional_properties = Column(MutableDict.as_mutable(PseudoJSON), nullable=True, default={})\n created_at = Column(db.DateTime(True), default=db.func.now())\n\n __tablename__ = 'events'\n\n def __str__(self):\n return u\"%s,%s,%s,%s\" % (self.user_id, self.action, self.object_type, self.object_id)\n\n def to_dict(self):\n return {\n 'org_id': self.org_id,\n 'user_id': self.user_id,\n 'action': self.action,\n 'object_type': self.object_type,\n 'object_id': self.object_id,\n 'additional_properties': self.additional_properties,\n 'created_at': self.created_at.isoformat()\n }\n\n @classmethod\n def record(cls, event):\n org_id = event.pop('org_id')\n user_id = event.pop('user_id', None)\n action = event.pop('action')\n object_type = event.pop('object_type')\n object_id = event.pop('object_id', None)\n\n created_at = datetime.datetime.utcfromtimestamp(event.pop('timestamp'))\n\n event = cls(org_id=org_id, user_id=user_id, action=action,\n object_type=object_type, object_id=object_id,\n additional_properties=event,\n created_at=created_at)\n db.session.add(event)\n return event\n\n\n@generic_repr('id', 'created_by_id', 'org_id', 'active')\nclass ApiKey(TimestampMixin, GFKBase, db.Model):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey(\"organizations.id\"))\n org = db.relationship(Organization)\n api_key = Column(db.String(255), index=True, default=lambda: generate_token(40))\n active = Column(db.Boolean, default=True)\n # 'object' provided by GFKBase\n created_by_id = Column(db.Integer, db.ForeignKey(\"users.id\"), nullable=True)\n created_by = db.relationship(User)\n\n __tablename__ = 'api_keys'\n __table_args__ = (\n db.Index('api_keys_object_type_object_id', 'object_type', 'object_id'),\n )\n\n @classmethod\n def get_by_api_key(cls, api_key):\n return cls.query.filter(cls.api_key == api_key, cls.active == True).one()\n\n @classmethod\n def get_by_object(cls, object):\n return cls.query.filter(\n cls.object_type == object.__class__.__tablename__,\n cls.object_id == object.id,\n cls.active == True\n ).first()\n\n @classmethod\n def create_for_object(cls, object, user):\n k = cls(org=user.org, object=object, created_by=user)\n db.session.add(k)\n return 
k\n\n\n@python_2_unicode_compatible\n@generic_repr('id', 'name', 'type', 'user_id', 'org_id', 'created_at')\nclass NotificationDestination(BelongsToOrgMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey(\"organizations.id\"))\n org = db.relationship(Organization, backref=\"notification_destinations\")\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User, backref=\"notification_destinations\")\n name = Column(db.String(255))\n type = Column(db.String(255))\n options = Column(ConfigurationContainer.as_mutable(Configuration))\n created_at = Column(db.DateTime(True), default=db.func.now())\n\n __tablename__ = 'notification_destinations'\n __table_args__ = (\n db.Index(\n 'notification_destinations_org_id_name', 'org_id', 'name', unique=True\n ),\n )\n\n def __str__(self):\n return text_type(self.name)\n\n def to_dict(self, all=False):\n d = {\n 'id': self.id,\n 'name': self.name,\n 'type': self.type,\n 'icon': self.destination.icon()\n }\n\n if all:\n schema = get_configuration_schema_for_destination_type(self.type)\n self.options.set_schema(schema)\n d['options'] = self.options.to_dict(mask_secrets=True)\n\n return d\n\n @property\n def destination(self):\n return get_destination(self.type, self.options)\n\n @classmethod\n def all(cls, org):\n notification_destinations = cls.query.filter(cls.org == org).order_by(cls.id.asc())\n\n return notification_destinations\n\n def notify(self, alert, query, user, new_state, app, host):\n schema = get_configuration_schema_for_destination_type(self.type)\n self.options.set_schema(schema)\n return self.destination.notify(alert, query, user, new_state,\n app, host, self.options)\n\n\n@generic_repr('id', 'user_id', 'destination_id', 'alert_id')\nclass AlertSubscription(TimestampMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User)\n destination_id = Column(db.Integer,\n db.ForeignKey(\"notification_destinations.id\"),\n nullable=True)\n destination = db.relationship(NotificationDestination)\n alert_id = Column(db.Integer, db.ForeignKey(\"alerts.id\"))\n alert = db.relationship(Alert, back_populates=\"subscriptions\")\n\n __tablename__ = 'alert_subscriptions'\n __table_args__ = (\n db.Index(\n 'alert_subscriptions_destination_id_alert_id',\n 'destination_id', 'alert_id', unique=True\n ),\n )\n\n def to_dict(self):\n d = {\n 'id': self.id,\n 'user': self.user.to_dict(),\n 'alert_id': self.alert_id\n }\n\n if self.destination:\n d['destination'] = self.destination.to_dict()\n\n return d\n\n @classmethod\n def all(cls, alert_id):\n return AlertSubscription.query.join(User).filter(AlertSubscription.alert_id == alert_id)\n\n def notify(self, alert, query, user, new_state, app, host):\n if self.destination:\n return self.destination.notify(alert, query, user, new_state,\n app, host)\n else:\n # User email subscription, so create an email destination object\n config = {'addresses': self.user.email}\n schema = get_configuration_schema_for_destination_type('email')\n options = ConfigurationContainer(config, schema)\n destination = get_destination('email', options)\n return destination.notify(alert, query, user, new_state, app, host, options)\n\n\n@generic_repr('id', 'trigger', 'user_id', 'org_id')\nclass QuerySnippet(TimestampMixin, db.Model, BelongsToOrgMixin):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey(\"organizations.id\"))\n org = 
db.relationship(Organization, backref=\"query_snippets\")\n trigger = Column(db.String(255), unique=True)\n description = Column(db.Text)\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User, backref=\"query_snippets\")\n snippet = Column(db.Text)\n\n __tablename__ = 'query_snippets'\n\n @classmethod\n def all(cls, org):\n return cls.query.filter(cls.org == org)\n\n def to_dict(self):\n d = {\n 'id': self.id,\n 'trigger': self.trigger,\n 'description': self.description,\n 'snippet': self.snippet,\n 'user': self.user.to_dict(),\n 'updated_at': self.updated_at,\n 'created_at': self.created_at\n }\n\n return d\n\n\ndef init_db():\n default_org = Organization(name=\"Default\", slug='default', settings={})\n admin_group = Group(name='admin', permissions=['admin', 'super_admin'], org=default_org, type=Group.BUILTIN_GROUP)\n default_group = Group(name='default', permissions=Group.DEFAULT_PERMISSIONS, org=default_org, type=Group.BUILTIN_GROUP)\n\n db.session.add_all([default_org, admin_group, default_group])\n # XXX remove after fixing User.group_ids\n db.session.commit()\n return default_org, admin_group, default_group\n", "path": "redash/models/__init__.py" } ]
[ { "content": "import cStringIO\nimport csv\nimport datetime\nimport calendar\nimport functools\nimport hashlib\nimport itertools\nimport logging\nimport time\nimport pytz\nfrom functools import reduce\n\nimport xlsxwriter\nfrom six import python_2_unicode_compatible, text_type\nfrom sqlalchemy import distinct, or_, and_, UniqueConstraint\nfrom sqlalchemy.dialects import postgresql\nfrom sqlalchemy.event import listens_for\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy.orm import backref, contains_eager, joinedload, subqueryload, load_only\nfrom sqlalchemy.orm.exc import NoResultFound # noqa: F401\nfrom sqlalchemy import func\nfrom sqlalchemy_utils import generic_relationship\nfrom sqlalchemy_utils.types import TSVectorType\nfrom sqlalchemy_utils.models import generic_repr\n\nfrom redash import redis_connection, utils\nfrom redash.destinations import (get_configuration_schema_for_destination_type,\n get_destination)\nfrom redash.metrics import database # noqa: F401\nfrom redash.query_runner import (get_configuration_schema_for_query_runner_type,\n get_query_runner)\nfrom redash.utils import generate_token, json_dumps, json_loads\nfrom redash.utils.configuration import ConfigurationContainer\n\nfrom .base import db, gfk_type, Column, GFKBase, SearchBaseQuery\nfrom .changes import ChangeTrackingMixin, Change # noqa\nfrom .mixins import BelongsToOrgMixin, TimestampMixin\nfrom .organizations import Organization\nfrom .types import Configuration, MutableDict, MutableList, PseudoJSON\nfrom .users import (AccessPermission, AnonymousUser, ApiUser, Group, User) # noqa\n\nlogger = logging.getLogger(__name__)\n\n\nclass ScheduledQueriesExecutions(object):\n KEY_NAME = 'sq:executed_at'\n\n def __init__(self):\n self.executions = {}\n\n def refresh(self):\n self.executions = redis_connection.hgetall(self.KEY_NAME)\n\n def update(self, query_id):\n redis_connection.hmset(self.KEY_NAME, {\n query_id: time.time()\n })\n\n def get(self, query_id):\n timestamp = self.executions.get(str(query_id))\n if timestamp:\n timestamp = utils.dt_from_timestamp(timestamp)\n\n return timestamp\n\n\nscheduled_queries_executions = ScheduledQueriesExecutions()\n\n\n@python_2_unicode_compatible\n@generic_repr('id', 'name', 'type', 'org_id', 'created_at')\nclass DataSource(BelongsToOrgMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey('organizations.id'))\n org = db.relationship(Organization, backref=\"data_sources\")\n\n name = Column(db.String(255))\n type = Column(db.String(255))\n options = Column(ConfigurationContainer.as_mutable(Configuration))\n queue_name = Column(db.String(255), default=\"queries\")\n scheduled_queue_name = Column(db.String(255), default=\"scheduled_queries\")\n created_at = Column(db.DateTime(True), default=db.func.now())\n\n data_source_groups = db.relationship(\"DataSourceGroup\", back_populates=\"data_source\",\n cascade=\"all\")\n __tablename__ = 'data_sources'\n __table_args__ = (db.Index('data_sources_org_id_name', 'org_id', 'name'),)\n\n def __eq__(self, other):\n return self.id == other.id\n\n def to_dict(self, all=False, with_permissions_for=None):\n d = {\n 'id': self.id,\n 'name': self.name,\n 'type': self.type,\n 'syntax': self.query_runner.syntax,\n 'paused': self.paused,\n 'pause_reason': self.pause_reason\n }\n\n if all:\n schema = get_configuration_schema_for_query_runner_type(self.type)\n self.options.set_schema(schema)\n d['options'] = self.options.to_dict(mask_secrets=True)\n d['queue_name'] = 
self.queue_name\n d['scheduled_queue_name'] = self.scheduled_queue_name\n d['groups'] = self.groups\n\n if with_permissions_for is not None:\n d['view_only'] = db.session.query(DataSourceGroup.view_only).filter(\n DataSourceGroup.group == with_permissions_for,\n DataSourceGroup.data_source == self).one()[0]\n\n return d\n\n def __str__(self):\n return text_type(self.name)\n\n @classmethod\n def create_with_group(cls, *args, **kwargs):\n data_source = cls(*args, **kwargs)\n data_source_group = DataSourceGroup(\n data_source=data_source,\n group=data_source.org.default_group)\n db.session.add_all([data_source, data_source_group])\n return data_source\n\n @classmethod\n def all(cls, org, group_ids=None):\n data_sources = cls.query.filter(cls.org == org).order_by(cls.id.asc())\n\n if group_ids:\n data_sources = data_sources.join(DataSourceGroup).filter(\n DataSourceGroup.group_id.in_(group_ids))\n\n return data_sources.distinct()\n\n @classmethod\n def get_by_id(cls, _id):\n return cls.query.filter(cls.id == _id).one()\n\n def delete(self):\n Query.query.filter(Query.data_source == self).update(dict(data_source_id=None, latest_query_data_id=None))\n QueryResult.query.filter(QueryResult.data_source == self).delete()\n res = db.session.delete(self)\n db.session.commit()\n return res\n\n def get_schema(self, refresh=False):\n key = \"data_source:schema:{}\".format(self.id)\n\n cache = None\n if not refresh:\n cache = redis_connection.get(key)\n\n if cache is None:\n query_runner = self.query_runner\n schema = sorted(query_runner.get_schema(get_stats=refresh), key=lambda t: t['name'])\n\n redis_connection.set(key, json_dumps(schema))\n else:\n schema = json_loads(cache)\n\n return schema\n\n def _pause_key(self):\n return 'ds:{}:pause'.format(self.id)\n\n @property\n def paused(self):\n return redis_connection.exists(self._pause_key())\n\n @property\n def pause_reason(self):\n return redis_connection.get(self._pause_key())\n\n def pause(self, reason=None):\n redis_connection.set(self._pause_key(), reason or '')\n\n def resume(self):\n redis_connection.delete(self._pause_key())\n\n def add_group(self, group, view_only=False):\n dsg = DataSourceGroup(group=group, data_source=self, view_only=view_only)\n db.session.add(dsg)\n return dsg\n\n def remove_group(self, group):\n DataSourceGroup.query.filter(\n DataSourceGroup.group == group,\n DataSourceGroup.data_source == self\n ).delete()\n db.session.commit()\n\n def update_group_permission(self, group, view_only):\n dsg = DataSourceGroup.query.filter(\n DataSourceGroup.group == group,\n DataSourceGroup.data_source == self).one()\n dsg.view_only = view_only\n db.session.add(dsg)\n return dsg\n\n @property\n def query_runner(self):\n return get_query_runner(self.type, self.options)\n\n @classmethod\n def get_by_name(cls, name):\n return cls.query.filter(cls.name == name).one()\n\n # XXX examine call sites to see if a regular SQLA collection would work better\n @property\n def groups(self):\n groups = DataSourceGroup.query.filter(\n DataSourceGroup.data_source == self\n )\n return dict(map(lambda g: (g.group_id, g.view_only), groups))\n\n\n@generic_repr('id', 'data_source_id', 'group_id', 'view_only')\nclass DataSourceGroup(db.Model):\n # XXX drop id, use datasource/group as PK\n id = Column(db.Integer, primary_key=True)\n data_source_id = Column(db.Integer, db.ForeignKey(\"data_sources.id\"))\n data_source = db.relationship(DataSource, back_populates=\"data_source_groups\")\n group_id = Column(db.Integer, db.ForeignKey(\"groups.id\"))\n group = 
db.relationship(Group, back_populates=\"data_sources\")\n view_only = Column(db.Boolean, default=False)\n\n __tablename__ = \"data_source_groups\"\n\n\n@python_2_unicode_compatible\n@generic_repr('id', 'org_id', 'data_source_id', 'query_hash', 'runtime', 'retrieved_at')\nclass QueryResult(db.Model, BelongsToOrgMixin):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey('organizations.id'))\n org = db.relationship(Organization)\n data_source_id = Column(db.Integer, db.ForeignKey(\"data_sources.id\"))\n data_source = db.relationship(DataSource, backref=backref('query_results'))\n query_hash = Column(db.String(32), index=True)\n query_text = Column('query', db.Text)\n data = Column(db.Text)\n runtime = Column(postgresql.DOUBLE_PRECISION)\n retrieved_at = Column(db.DateTime(True))\n\n __tablename__ = 'query_results'\n\n def __str__(self):\n return u\"%d | %s | %s\" % (self.id, self.query_hash, self.retrieved_at)\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'query_hash': self.query_hash,\n 'query': self.query_text,\n 'data': json_loads(self.data),\n 'data_source_id': self.data_source_id,\n 'runtime': self.runtime,\n 'retrieved_at': self.retrieved_at\n }\n\n @classmethod\n def unused(cls, days=7):\n age_threshold = datetime.datetime.now() - datetime.timedelta(days=days)\n return (\n cls.query.filter(\n Query.id.is_(None),\n cls.retrieved_at < age_threshold\n )\n .outerjoin(Query)\n ).options(load_only('id'))\n\n @classmethod\n def get_latest(cls, data_source, query, max_age=0):\n query_hash = utils.gen_query_hash(query)\n\n if max_age == -1:\n query = cls.query.filter(\n cls.query_hash == query_hash,\n cls.data_source == data_source\n )\n else:\n query = cls.query.filter(\n cls.query_hash == query_hash,\n cls.data_source == data_source,\n (\n db.func.timezone('utc', cls.retrieved_at) +\n datetime.timedelta(seconds=max_age) >=\n db.func.timezone('utc', db.func.now())\n )\n )\n\n return query.order_by(cls.retrieved_at.desc()).first()\n\n @classmethod\n def store_result(cls, org, data_source, query_hash, query, data, run_time, retrieved_at):\n query_result = cls(org_id=org,\n query_hash=query_hash,\n query_text=query,\n runtime=run_time,\n data_source=data_source,\n retrieved_at=retrieved_at,\n data=data)\n db.session.add(query_result)\n logging.info(\"Inserted query (%s) data; id=%s\", query_hash, query_result.id)\n # TODO: Investigate how big an impact this select-before-update makes.\n queries = Query.query.filter(\n Query.query_hash == query_hash,\n Query.data_source == data_source\n )\n for q in queries:\n q.latest_query_data = query_result\n # don't auto-update the updated_at timestamp\n q.skip_updated_at = True\n db.session.add(q)\n query_ids = [q.id for q in queries]\n logging.info(\"Updated %s queries with result (%s).\", len(query_ids), query_hash)\n\n return query_result, query_ids\n\n @property\n def groups(self):\n return self.data_source.groups\n\n def make_csv_content(self):\n s = cStringIO.StringIO()\n\n query_data = json_loads(self.data)\n writer = csv.DictWriter(s, extrasaction=\"ignore\", fieldnames=[col['name'] for col in query_data['columns']])\n writer.writer = utils.UnicodeWriter(s)\n writer.writeheader()\n for row in query_data['rows']:\n writer.writerow(row)\n\n return s.getvalue()\n\n def make_excel_content(self):\n s = cStringIO.StringIO()\n\n query_data = json_loads(self.data)\n book = xlsxwriter.Workbook(s, {'constant_memory': True})\n sheet = book.add_worksheet(\"result\")\n\n column_names = []\n for (c, col) in 
enumerate(query_data['columns']):\n sheet.write(0, c, col['name'])\n column_names.append(col['name'])\n\n for (r, row) in enumerate(query_data['rows']):\n for (c, name) in enumerate(column_names):\n v = row.get(name)\n if isinstance(v, list) or isinstance(v, dict):\n v = str(v).encode('utf-8')\n sheet.write(r + 1, c, v)\n\n book.close()\n\n return s.getvalue()\n\n\ndef should_schedule_next(previous_iteration, now, interval, time=None, day_of_week=None, failures=0):\n # if time exists then interval > 23 hours (82800s)\n # if day_of_week exists then interval > 6 days (518400s)\n if (time is None):\n ttl = int(interval)\n next_iteration = previous_iteration + datetime.timedelta(seconds=ttl)\n else:\n hour, minute = time.split(':')\n hour, minute = int(hour), int(minute)\n\n # The following logic is needed for cases like the following:\n # - The query scheduled to run at 23:59.\n # - The scheduler wakes up at 00:01.\n # - Using naive implementation of comparing timestamps, it will skip the execution.\n normalized_previous_iteration = previous_iteration.replace(hour=hour, minute=minute)\n\n if normalized_previous_iteration > previous_iteration:\n previous_iteration = normalized_previous_iteration - datetime.timedelta(days=1)\n\n days_delay = int(interval) / 60 / 60 / 24\n\n days_to_add = 0\n if (day_of_week is not None):\n days_to_add = list(calendar.day_name).index(day_of_week) - normalized_previous_iteration.weekday()\n\n next_iteration = (previous_iteration + datetime.timedelta(days=days_delay) +\n datetime.timedelta(days=days_to_add)).replace(hour=hour, minute=minute)\n if failures:\n next_iteration += datetime.timedelta(minutes=2**failures)\n return now > next_iteration\n\n\n@python_2_unicode_compatible\n@gfk_type\n@generic_repr('id', 'name', 'query_hash', 'version', 'user_id', 'org_id',\n 'data_source_id', 'query_hash', 'last_modified_by_id',\n 'is_archived', 'is_draft', 'schedule', 'schedule_failures')\nclass Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n version = Column(db.Integer, default=1)\n org_id = Column(db.Integer, db.ForeignKey('organizations.id'))\n org = db.relationship(Organization, backref=\"queries\")\n data_source_id = Column(db.Integer, db.ForeignKey(\"data_sources.id\"), nullable=True)\n data_source = db.relationship(DataSource, backref='queries')\n latest_query_data_id = Column(db.Integer, db.ForeignKey(\"query_results.id\"), nullable=True)\n latest_query_data = db.relationship(QueryResult)\n name = Column(db.String(255))\n description = Column(db.String(4096), nullable=True)\n query_text = Column(\"query\", db.Text)\n query_hash = Column(db.String(32))\n api_key = Column(db.String(40), default=lambda: generate_token(40))\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User, foreign_keys=[user_id])\n last_modified_by_id = Column(db.Integer, db.ForeignKey('users.id'), nullable=True)\n last_modified_by = db.relationship(User, backref=\"modified_queries\",\n foreign_keys=[last_modified_by_id])\n is_archived = Column(db.Boolean, default=False, index=True)\n is_draft = Column(db.Boolean, default=True, index=True)\n schedule = Column(MutableDict.as_mutable(PseudoJSON), nullable=True)\n schedule_failures = Column(db.Integer, default=0)\n visualizations = db.relationship(\"Visualization\", cascade=\"all, delete-orphan\")\n options = Column(MutableDict.as_mutable(PseudoJSON), default={})\n search_vector = Column(TSVectorType('id', 'name', 'description', 'query',\n 
weights={'name': 'A',\n 'id': 'B',\n 'description': 'C',\n 'query': 'D'}),\n nullable=True)\n tags = Column('tags', MutableList.as_mutable(postgresql.ARRAY(db.Unicode)), nullable=True)\n\n query_class = SearchBaseQuery\n __tablename__ = 'queries'\n __mapper_args__ = {\n \"version_id_col\": version,\n 'version_id_generator': False\n }\n\n def __str__(self):\n return text_type(self.id)\n\n def archive(self, user=None):\n db.session.add(self)\n self.is_archived = True\n self.schedule = None\n\n for vis in self.visualizations:\n for w in vis.widgets:\n db.session.delete(w)\n\n for a in self.alerts:\n db.session.delete(a)\n\n if user:\n self.record_changes(user)\n\n @classmethod\n def create(cls, **kwargs):\n query = cls(**kwargs)\n db.session.add(Visualization(query_rel=query,\n name=\"Table\",\n description='',\n type=\"TABLE\",\n options=\"{}\"))\n return query\n\n @classmethod\n def all_queries(cls, group_ids, user_id=None, drafts=False):\n query_ids = (\n db.session\n .query(distinct(cls.id))\n .join(\n DataSourceGroup,\n Query.data_source_id == DataSourceGroup.data_source_id\n )\n .filter(Query.is_archived == False)\n .filter(DataSourceGroup.group_id.in_(group_ids))\n )\n q = (\n cls\n .query\n .options(\n joinedload(Query.user),\n joinedload(\n Query.latest_query_data\n ).load_only(\n 'runtime',\n 'retrieved_at',\n )\n )\n .filter(cls.id.in_(query_ids))\n # Adding outer joins to be able to order by relationship\n .outerjoin(User, User.id == Query.user_id)\n .outerjoin(\n QueryResult,\n QueryResult.id == Query.latest_query_data_id\n )\n .options(\n contains_eager(Query.user),\n contains_eager(Query.latest_query_data),\n )\n .order_by(Query.created_at.desc())\n )\n\n if not drafts:\n q = q.filter(\n or_(\n Query.is_draft == False,\n Query.user_id == user_id\n )\n )\n return q\n\n @classmethod\n def favorites(cls, user, base_query=None):\n if base_query is None:\n base_query = cls.all_queries(user.group_ids, user.id, drafts=True)\n return base_query.join((\n Favorite,\n and_(\n Favorite.object_type == u'Query',\n Favorite.object_id == Query.id\n )\n )).filter(Favorite.user_id == user.id)\n\n @classmethod\n def all_tags(cls, user, include_drafts=False):\n queries = cls.all_queries(\n group_ids=user.group_ids,\n user_id=user.id,\n drafts=include_drafts,\n )\n\n tag_column = func.unnest(cls.tags).label('tag')\n usage_count = func.count(1).label('usage_count')\n\n query = (\n db.session\n .query(tag_column, usage_count)\n .group_by(tag_column)\n .filter(Query.id.in_(queries.options(load_only('id'))))\n .order_by(usage_count.desc())\n )\n return query\n\n @classmethod\n def by_user(cls, user):\n return cls.all_queries(user.group_ids, user.id).filter(Query.user == user)\n\n @classmethod\n def outdated_queries(cls):\n queries = (Query.query\n .options(joinedload(Query.latest_query_data).load_only('retrieved_at'))\n .filter(Query.schedule.isnot(None))\n .order_by(Query.id))\n \n now = utils.utcnow()\n outdated_queries = {}\n scheduled_queries_executions.refresh()\n\n for query in queries:\n schedule_until = pytz.utc.localize(datetime.datetime.strptime(\n query.schedule['until'], '%Y-%m-%d')) if query.schedule['until'] else None\n if (query.schedule['interval'] == None or (\n schedule_until != None and (\n schedule_until <= now))):\n continue\n\n if query.latest_query_data:\n retrieved_at = query.latest_query_data.retrieved_at\n else:\n retrieved_at = now\n\n retrieved_at = scheduled_queries_executions.get(query.id) or retrieved_at\n\n if should_schedule_next(retrieved_at, now, 
query.schedule['interval'], query.schedule['time'],\n query.schedule['day_of_week'], query.schedule_failures):\n key = \"{}:{}\".format(query.query_hash, query.data_source_id)\n outdated_queries[key] = query\n\n return outdated_queries.values()\n\n @classmethod\n def search(cls, term, group_ids, user_id=None, include_drafts=False, limit=None):\n all_queries = cls.all_queries(group_ids, user_id=user_id, drafts=include_drafts)\n # sort the result using the weight as defined in the search vector column\n return all_queries.search(term, sort=True).limit(limit)\n\n @classmethod\n def search_by_user(cls, term, user, limit=None):\n return cls.by_user(user).search(term, sort=True).limit(limit)\n\n @classmethod\n def recent(cls, group_ids, user_id=None, limit=20):\n query = (cls.query\n .filter(Event.created_at > (db.func.current_date() - 7))\n .join(Event, Query.id == Event.object_id.cast(db.Integer))\n .join(DataSourceGroup, Query.data_source_id == DataSourceGroup.data_source_id)\n .filter(\n Event.action.in_(['edit', 'execute', 'edit_name',\n 'edit_description', 'view_source']),\n Event.object_id != None,\n Event.object_type == 'query',\n DataSourceGroup.group_id.in_(group_ids),\n or_(Query.is_draft == False, Query.user_id == user_id),\n Query.is_archived == False)\n .group_by(Event.object_id, Query.id)\n .order_by(db.desc(db.func.count(0))))\n\n if user_id:\n query = query.filter(Event.user_id == user_id)\n\n query = query.limit(limit)\n\n return query\n\n @classmethod\n def get_by_id(cls, _id):\n return cls.query.filter(cls.id == _id).one()\n\n def fork(self, user):\n forked_list = ['org', 'data_source', 'latest_query_data', 'description',\n 'query_text', 'query_hash', 'options']\n kwargs = {a: getattr(self, a) for a in forked_list}\n forked_query = Query.create(name=u'Copy of (#{}) {}'.format(self.id, self.name),\n user=user, **kwargs)\n\n for v in self.visualizations:\n if v.type == 'TABLE':\n continue\n forked_v = v.copy()\n forked_v['query_rel'] = forked_query\n forked_query.visualizations.append(Visualization(**forked_v))\n db.session.add(forked_query)\n return forked_query\n\n @property\n def runtime(self):\n return self.latest_query_data.runtime\n\n @property\n def retrieved_at(self):\n return self.latest_query_data.retrieved_at\n\n @property\n def groups(self):\n if self.data_source is None:\n return {}\n\n return self.data_source.groups\n\n @hybrid_property\n def lowercase_name(self):\n \"Optional property useful for sorting purposes.\"\n return self.name.lower()\n\n @lowercase_name.expression\n def lowercase_name(cls):\n \"The SQLAlchemy expression for the property above.\"\n return func.lower(cls.name)\n\n\n@listens_for(Query.query_text, 'set')\ndef gen_query_hash(target, val, oldval, initiator):\n target.query_hash = utils.gen_query_hash(val)\n target.schedule_failures = 0\n\n\n@listens_for(Query.user_id, 'set')\ndef query_last_modified_by(target, val, oldval, initiator):\n target.last_modified_by_id = val\n\n\n@generic_repr('id', 'object_type', 'object_id', 'user_id', 'org_id')\nclass Favorite(TimestampMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey(\"organizations.id\"))\n\n object_type = Column(db.Unicode(255))\n object_id = Column(db.Integer)\n object = generic_relationship(object_type, object_id)\n\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User, backref='favorites')\n\n __tablename__ = \"favorites\"\n __table_args__ = (\n UniqueConstraint(\"object_type\", \"object_id\", 
\"user_id\", name=\"unique_favorite\"),\n )\n\n @classmethod\n def is_favorite(cls, user, object):\n return cls.query.filter(cls.object == object, cls.user_id == user).count() > 0\n\n @classmethod\n def are_favorites(cls, user, objects):\n objects = list(objects)\n if not objects:\n return []\n\n object_type = text_type(objects[0].__class__.__name__)\n return map(lambda fav: fav.object_id, cls.query.filter(cls.object_id.in_(map(lambda o: o.id, objects)), cls.object_type == object_type, cls.user_id == user))\n\n\n@generic_repr('id', 'name', 'query_id', 'user_id', 'state', 'last_triggered_at', 'rearm')\nclass Alert(TimestampMixin, BelongsToOrgMixin, db.Model):\n UNKNOWN_STATE = 'unknown'\n OK_STATE = 'ok'\n TRIGGERED_STATE = 'triggered'\n\n id = Column(db.Integer, primary_key=True)\n name = Column(db.String(255))\n query_id = Column(db.Integer, db.ForeignKey(\"queries.id\"))\n query_rel = db.relationship(Query, backref=backref('alerts', cascade=\"all\"))\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User, backref='alerts')\n options = Column(MutableDict.as_mutable(PseudoJSON))\n state = Column(db.String(255), default=UNKNOWN_STATE)\n subscriptions = db.relationship(\"AlertSubscription\", cascade=\"all, delete-orphan\")\n last_triggered_at = Column(db.DateTime(True), nullable=True)\n rearm = Column(db.Integer, nullable=True)\n\n __tablename__ = 'alerts'\n\n @classmethod\n def all(cls, group_ids):\n return (\n cls.query\n .options(\n joinedload(Alert.user),\n joinedload(Alert.query_rel),\n )\n .join(Query)\n .join(\n DataSourceGroup,\n DataSourceGroup.data_source_id == Query.data_source_id\n )\n .filter(DataSourceGroup.group_id.in_(group_ids))\n )\n\n @classmethod\n def get_by_id_and_org(cls, object_id, org):\n return super(Alert, cls).get_by_id_and_org(object_id, org, Query)\n\n def evaluate(self):\n data = json_loads(self.query_rel.latest_query_data.data)\n\n if data['rows'] and self.options['column'] in data['rows'][0]:\n value = data['rows'][0][self.options['column']]\n op = self.options['op']\n\n if op == 'greater than' and value > self.options['value']:\n new_state = self.TRIGGERED_STATE\n elif op == 'less than' and value < self.options['value']:\n new_state = self.TRIGGERED_STATE\n elif op == 'equals' and value == self.options['value']:\n new_state = self.TRIGGERED_STATE\n else:\n new_state = self.OK_STATE\n else:\n new_state = self.UNKNOWN_STATE\n\n return new_state\n\n def subscribers(self):\n return User.query.join(AlertSubscription).filter(AlertSubscription.alert == self)\n\n @property\n def groups(self):\n return self.query_rel.groups\n\n\ndef generate_slug(ctx):\n slug = utils.slugify(ctx.current_parameters['name'])\n tries = 1\n while Dashboard.query.filter(Dashboard.slug == slug).first() is not None:\n slug = utils.slugify(ctx.current_parameters['name']) + \"_\" + str(tries)\n tries += 1\n return slug\n\n\n@python_2_unicode_compatible\n@gfk_type\n@generic_repr('id', 'name', 'slug', 'user_id', 'org_id', 'version', 'is_archived', 'is_draft')\nclass Dashboard(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n version = Column(db.Integer)\n org_id = Column(db.Integer, db.ForeignKey(\"organizations.id\"))\n org = db.relationship(Organization, backref=\"dashboards\")\n slug = Column(db.String(140), index=True, default=generate_slug)\n name = Column(db.String(100))\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User)\n # layout is no longer used, 
but kept so we know how to render old dashboards.\n layout = Column(db.Text)\n dashboard_filters_enabled = Column(db.Boolean, default=False)\n is_archived = Column(db.Boolean, default=False, index=True)\n is_draft = Column(db.Boolean, default=True, index=True)\n widgets = db.relationship('Widget', backref='dashboard', lazy='dynamic')\n tags = Column('tags', MutableList.as_mutable(postgresql.ARRAY(db.Unicode)), nullable=True)\n\n __tablename__ = 'dashboards'\n __mapper_args__ = {\n \"version_id_col\": version\n }\n\n def __str__(self):\n return u\"%s=%s\" % (self.id, self.name)\n\n @classmethod\n def all(cls, org, group_ids, user_id):\n query = (\n Dashboard.query\n .options(\n subqueryload(Dashboard.user).load_only('_profile_image_url', 'name'),\n )\n .outerjoin(Widget)\n .outerjoin(Visualization)\n .outerjoin(Query)\n .outerjoin(DataSourceGroup, Query.data_source_id == DataSourceGroup.data_source_id)\n .filter(\n Dashboard.is_archived == False,\n (DataSourceGroup.group_id.in_(group_ids) |\n (Dashboard.user_id == user_id) |\n ((Widget.dashboard != None) & (Widget.visualization == None))),\n Dashboard.org == org)\n .distinct())\n\n query = query.filter(or_(Dashboard.user_id == user_id, Dashboard.is_draft == False))\n\n return query\n\n @classmethod\n def search(cls, org, groups_ids, user_id, search_term):\n # TODO: switch to FTS\n return cls.all(org, groups_ids, user_id).filter(cls.name.ilike(u'%{}%'.format(search_term)))\n\n @classmethod\n def all_tags(cls, org, user):\n dashboards = cls.all(org, user.group_ids, user.id)\n\n tag_column = func.unnest(cls.tags).label('tag')\n usage_count = func.count(1).label('usage_count')\n\n query = (\n db.session\n .query(tag_column, usage_count)\n .group_by(tag_column)\n .filter(Dashboard.id.in_(dashboards.options(load_only('id'))))\n .order_by(usage_count.desc())\n )\n return query\n\n @classmethod\n def favorites(cls, user, base_query=None):\n if base_query is None:\n base_query = cls.all(user.org, user.group_ids, user.id)\n return base_query.join(\n (\n Favorite,\n and_(\n Favorite.object_type == u'Dashboard',\n Favorite.object_id == Dashboard.id\n )\n )\n ).filter(Favorite.user_id == user.id)\n\n @classmethod\n def get_by_slug_and_org(cls, slug, org):\n return cls.query.filter(cls.slug == slug, cls.org == org).one()\n\n @hybrid_property\n def lowercase_name(self):\n \"Optional property useful for sorting purposes.\"\n return self.name.lower()\n\n @lowercase_name.expression\n def lowercase_name(cls):\n \"The SQLAlchemy expression for the property above.\"\n return func.lower(cls.name)\n\n\n@python_2_unicode_compatible\n@generic_repr('id', 'name', 'type', 'query_id')\nclass Visualization(TimestampMixin, BelongsToOrgMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n type = Column(db.String(100))\n query_id = Column(db.Integer, db.ForeignKey(\"queries.id\"))\n # query_rel and not query, because db.Model already has query defined.\n query_rel = db.relationship(Query, back_populates='visualizations')\n name = Column(db.String(255))\n description = Column(db.String(4096), nullable=True)\n options = Column(db.Text)\n\n __tablename__ = 'visualizations'\n\n def __str__(self):\n return u\"%s %s\" % (self.id, self.type)\n\n @classmethod\n def get_by_id_and_org(cls, object_id, org):\n return super(Visualization, cls).get_by_id_and_org(object_id, org, Query)\n\n def copy(self):\n return {\n 'type': self.type,\n 'name': self.name,\n 'description': self.description,\n 'options': self.options\n }\n\n\n@python_2_unicode_compatible\n@generic_repr('id', 
'visualization_id', 'dashboard_id')\nclass Widget(TimestampMixin, BelongsToOrgMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n visualization_id = Column(db.Integer, db.ForeignKey('visualizations.id'), nullable=True)\n visualization = db.relationship(Visualization, backref='widgets')\n text = Column(db.Text, nullable=True)\n width = Column(db.Integer)\n options = Column(db.Text)\n dashboard_id = Column(db.Integer, db.ForeignKey(\"dashboards.id\"), index=True)\n\n __tablename__ = 'widgets'\n\n def __str__(self):\n return u\"%s\" % self.id\n\n @classmethod\n def get_by_id_and_org(cls, object_id, org):\n return super(Widget, cls).get_by_id_and_org(object_id, org, Dashboard)\n\n\n@python_2_unicode_compatible\n@generic_repr('id', 'object_type', 'object_id', 'action', 'user_id', 'org_id', 'created_at')\nclass Event(db.Model):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey(\"organizations.id\"))\n org = db.relationship(Organization, back_populates=\"events\")\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"), nullable=True)\n user = db.relationship(User, backref=\"events\")\n action = Column(db.String(255))\n object_type = Column(db.String(255))\n object_id = Column(db.String(255), nullable=True)\n additional_properties = Column(MutableDict.as_mutable(PseudoJSON), nullable=True, default={})\n created_at = Column(db.DateTime(True), default=db.func.now())\n\n __tablename__ = 'events'\n\n def __str__(self):\n return u\"%s,%s,%s,%s\" % (self.user_id, self.action, self.object_type, self.object_id)\n\n def to_dict(self):\n return {\n 'org_id': self.org_id,\n 'user_id': self.user_id,\n 'action': self.action,\n 'object_type': self.object_type,\n 'object_id': self.object_id,\n 'additional_properties': self.additional_properties,\n 'created_at': self.created_at.isoformat()\n }\n\n @classmethod\n def record(cls, event):\n org_id = event.pop('org_id')\n user_id = event.pop('user_id', None)\n action = event.pop('action')\n object_type = event.pop('object_type')\n object_id = event.pop('object_id', None)\n\n created_at = datetime.datetime.utcfromtimestamp(event.pop('timestamp'))\n\n event = cls(org_id=org_id, user_id=user_id, action=action,\n object_type=object_type, object_id=object_id,\n additional_properties=event,\n created_at=created_at)\n db.session.add(event)\n return event\n\n\n@generic_repr('id', 'created_by_id', 'org_id', 'active')\nclass ApiKey(TimestampMixin, GFKBase, db.Model):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey(\"organizations.id\"))\n org = db.relationship(Organization)\n api_key = Column(db.String(255), index=True, default=lambda: generate_token(40))\n active = Column(db.Boolean, default=True)\n # 'object' provided by GFKBase\n created_by_id = Column(db.Integer, db.ForeignKey(\"users.id\"), nullable=True)\n created_by = db.relationship(User)\n\n __tablename__ = 'api_keys'\n __table_args__ = (\n db.Index('api_keys_object_type_object_id', 'object_type', 'object_id'),\n )\n\n @classmethod\n def get_by_api_key(cls, api_key):\n return cls.query.filter(cls.api_key == api_key, cls.active == True).one()\n\n @classmethod\n def get_by_object(cls, object):\n return cls.query.filter(\n cls.object_type == object.__class__.__tablename__,\n cls.object_id == object.id,\n cls.active == True\n ).first()\n\n @classmethod\n def create_for_object(cls, object, user):\n k = cls(org=user.org, object=object, created_by=user)\n db.session.add(k)\n return 
k\n\n\n@python_2_unicode_compatible\n@generic_repr('id', 'name', 'type', 'user_id', 'org_id', 'created_at')\nclass NotificationDestination(BelongsToOrgMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey(\"organizations.id\"))\n org = db.relationship(Organization, backref=\"notification_destinations\")\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User, backref=\"notification_destinations\")\n name = Column(db.String(255))\n type = Column(db.String(255))\n options = Column(ConfigurationContainer.as_mutable(Configuration))\n created_at = Column(db.DateTime(True), default=db.func.now())\n\n __tablename__ = 'notification_destinations'\n __table_args__ = (\n db.Index(\n 'notification_destinations_org_id_name', 'org_id', 'name', unique=True\n ),\n )\n\n def __str__(self):\n return text_type(self.name)\n\n def to_dict(self, all=False):\n d = {\n 'id': self.id,\n 'name': self.name,\n 'type': self.type,\n 'icon': self.destination.icon()\n }\n\n if all:\n schema = get_configuration_schema_for_destination_type(self.type)\n self.options.set_schema(schema)\n d['options'] = self.options.to_dict(mask_secrets=True)\n\n return d\n\n @property\n def destination(self):\n return get_destination(self.type, self.options)\n\n @classmethod\n def all(cls, org):\n notification_destinations = cls.query.filter(cls.org == org).order_by(cls.id.asc())\n\n return notification_destinations\n\n def notify(self, alert, query, user, new_state, app, host):\n schema = get_configuration_schema_for_destination_type(self.type)\n self.options.set_schema(schema)\n return self.destination.notify(alert, query, user, new_state,\n app, host, self.options)\n\n\n@generic_repr('id', 'user_id', 'destination_id', 'alert_id')\nclass AlertSubscription(TimestampMixin, db.Model):\n id = Column(db.Integer, primary_key=True)\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User)\n destination_id = Column(db.Integer,\n db.ForeignKey(\"notification_destinations.id\"),\n nullable=True)\n destination = db.relationship(NotificationDestination)\n alert_id = Column(db.Integer, db.ForeignKey(\"alerts.id\"))\n alert = db.relationship(Alert, back_populates=\"subscriptions\")\n\n __tablename__ = 'alert_subscriptions'\n __table_args__ = (\n db.Index(\n 'alert_subscriptions_destination_id_alert_id',\n 'destination_id', 'alert_id', unique=True\n ),\n )\n\n def to_dict(self):\n d = {\n 'id': self.id,\n 'user': self.user.to_dict(),\n 'alert_id': self.alert_id\n }\n\n if self.destination:\n d['destination'] = self.destination.to_dict()\n\n return d\n\n @classmethod\n def all(cls, alert_id):\n return AlertSubscription.query.join(User).filter(AlertSubscription.alert_id == alert_id)\n\n def notify(self, alert, query, user, new_state, app, host):\n if self.destination:\n return self.destination.notify(alert, query, user, new_state,\n app, host)\n else:\n # User email subscription, so create an email destination object\n config = {'addresses': self.user.email}\n schema = get_configuration_schema_for_destination_type('email')\n options = ConfigurationContainer(config, schema)\n destination = get_destination('email', options)\n return destination.notify(alert, query, user, new_state, app, host, options)\n\n\n@generic_repr('id', 'trigger', 'user_id', 'org_id')\nclass QuerySnippet(TimestampMixin, db.Model, BelongsToOrgMixin):\n id = Column(db.Integer, primary_key=True)\n org_id = Column(db.Integer, db.ForeignKey(\"organizations.id\"))\n org = 
db.relationship(Organization, backref=\"query_snippets\")\n trigger = Column(db.String(255), unique=True)\n description = Column(db.Text)\n user_id = Column(db.Integer, db.ForeignKey(\"users.id\"))\n user = db.relationship(User, backref=\"query_snippets\")\n snippet = Column(db.Text)\n\n __tablename__ = 'query_snippets'\n\n @classmethod\n def all(cls, org):\n return cls.query.filter(cls.org == org)\n\n def to_dict(self):\n d = {\n 'id': self.id,\n 'trigger': self.trigger,\n 'description': self.description,\n 'snippet': self.snippet,\n 'user': self.user.to_dict(),\n 'updated_at': self.updated_at,\n 'created_at': self.created_at\n }\n\n return d\n\n\ndef init_db():\n default_org = Organization(name=\"Default\", slug='default', settings={})\n admin_group = Group(name='admin', permissions=['admin', 'super_admin'], org=default_org, type=Group.BUILTIN_GROUP)\n default_group = Group(name='default', permissions=Group.DEFAULT_PERMISSIONS, org=default_org, type=Group.BUILTIN_GROUP)\n\n db.session.add_all([default_org, admin_group, default_group])\n # XXX remove after fixing User.group_ids\n db.session.commit()\n return default_org, admin_group, default_group\n", "path": "redash/models/__init__.py" } ]
diff --git a/redash/models/__init__.py b/redash/models/__init__.py index a59bb25f28..73ff622bd3 100644 --- a/redash/models/__init__.py +++ b/redash/models/__init__.py @@ -351,7 +351,7 @@ def make_excel_content(self): for (r, row) in enumerate(query_data['rows']): for (c, name) in enumerate(column_names): v = row.get(name) - if isinstance(v, list): + if isinstance(v, list) or isinstance(v, dict): v = str(v).encode('utf-8') sheet.write(r + 1, c, v)
Exporting to Excel file fails when one of the columns is a dictionary

### Issue Summary
I get an error when exporting query results to an Excel file:

![image](https://user-images.githubusercontent.com/33534430/51517880-30217d80-1e57-11e9-9cbf-0ef7ad187218.png)

Environment: EC2 on ECS

### Steps to Reproduce
1. Create a new query.
2. Execute the query, save it, then download it as an Excel file.

### Technical details:
* Redash Version: 6.0.0+b8537
* Browser/OS: Chrome
* How did you install Redash: Redash running on AWS ECS (EC2)
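For context on the fix in the pr_diff above: the export loop writes each cell value straight to the spreadsheet writer, which only accepts scalar types, so a row containing a dict (e.g. a JSON column) fails. The sketch below is a minimal illustration of the guard the patch adds, not Redash's actual code; the helper name `excel_safe_value` is made up for illustration.

```python
# Minimal sketch of the guard added in the diff above; `excel_safe_value`
# is a hypothetical helper name, not part of Redash.
def excel_safe_value(value):
    # Spreadsheet cell writers accept scalars (str, numbers, dates, None)
    # but not containers. Lists were already stringified before writing;
    # the fix extends the same fallback to dicts from JSON-typed columns.
    if isinstance(value, (list, dict)):
        return str(value)
    return value


# Example: a query result row with a dict column no longer breaks the export.
row = {"id": 1, "payload": {"country": "US", "clicks": 42}}
print({name: excel_safe_value(v) for name, v in row.items()})
```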
jschneier__django-storages-762
[ { "content": "# Dropbox storage class for Django pluggable storage system.\n# Author: Anthony Monthe <[email protected]>\n# License: BSD\n#\n# Usage:\n#\n# Add below to settings.py:\n# DROPBOX_OAUTH2_TOKEN = 'YourOauthToken'\n# DROPBOX_ROOT_PATH = '/dir/'\n\nfrom __future__ import absolute_import\n\nfrom io import BytesIO\nfrom shutil import copyfileobj\nfrom tempfile import SpooledTemporaryFile\n\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.core.files.base import File\nfrom django.core.files.storage import Storage\nfrom django.utils._os import safe_join\nfrom django.utils.deconstruct import deconstructible\nfrom dropbox import Dropbox\nfrom dropbox.exceptions import ApiError\nfrom dropbox.files import CommitInfo, FolderMetadata, UploadSessionCursor\n\nfrom storages.utils import setting\n\n_DEFAULT_TIMEOUT = 100\n\n\nclass DropBoxStorageException(Exception):\n pass\n\n\nclass DropBoxFile(File):\n def __init__(self, name, storage):\n self.name = name\n self._storage = storage\n self._file = None\n\n def _get_file(self):\n if self._file is None:\n self._file = SpooledTemporaryFile()\n # As dropbox==9.3.0, the client returns a tuple\n # (dropbox.files.FileMetadata, requests.models.Response)\n file_metadata, response = \\\n self._storage.client.files_download(self.name)\n if response.status_code == 200:\n with BytesIO(response.content) as file_content:\n copyfileobj(file_content, self._file)\n else:\n # JIC the exception isn't catched by the dropbox client\n raise DropBoxStorageException(\n \"Dropbox server returned a {} response when accessing {}\"\n .format(response.status_code, self.name)\n )\n self._file.seek(0)\n return self._file\n\n def _set_file(self, value):\n self._file = value\n\n file = property(_get_file, _set_file)\n\n\n@deconstructible\nclass DropBoxStorage(Storage):\n \"\"\"DropBox Storage class for Django pluggable storage system.\"\"\"\n\n CHUNK_SIZE = 4 * 1024 * 1024\n\n def __init__(self, oauth2_access_token=None, root_path=None, timeout=None):\n oauth2_access_token = oauth2_access_token or setting('DROPBOX_OAUTH2_TOKEN')\n if oauth2_access_token is None:\n raise ImproperlyConfigured(\"You must configure an auth token at\"\n \"'settings.DROPBOX_OAUTH2_TOKEN'.\")\n\n self.root_path = root_path or setting('DROPBOX_ROOT_PATH', '/')\n timeout = timeout or setting('DROPBOX_TIMEOUT', _DEFAULT_TIMEOUT)\n self.client = Dropbox(oauth2_access_token, timeout=timeout)\n\n def _full_path(self, name):\n if name == '/':\n name = ''\n return safe_join(self.root_path, name).replace('\\\\', '/')\n\n def delete(self, name):\n self.client.files_delete(self._full_path(name))\n\n def exists(self, name):\n try:\n return bool(self.client.files_get_metadata(self._full_path(name)))\n except ApiError:\n return False\n\n def listdir(self, path):\n directories, files = [], []\n full_path = self._full_path(path)\n metadata = self.client.files_list_folder(full_path)\n for entry in metadata.entries:\n if isinstance(entry, FolderMetadata):\n directories.append(entry.name)\n else:\n files.append(entry.name)\n return directories, files\n\n def size(self, name):\n metadata = self.client.files_get_metadata(self._full_path(name))\n return metadata.size\n\n def modified_time(self, name):\n metadata = self.client.files_get_metadata(self._full_path(name))\n return metadata.server_modified\n\n def accessed_time(self, name):\n metadata = self.client.files_get_metadata(self._full_path(name))\n return metadata.client_modified\n\n def url(self, name):\n media = 
self.client.files_get_temporary_link(self._full_path(name))\n return media.link\n\n def _open(self, name, mode='rb'):\n remote_file = DropBoxFile(self._full_path(name), self)\n return remote_file\n\n def _save(self, name, content):\n content.open()\n if content.size <= self.CHUNK_SIZE:\n self.client.files_upload(content.read(), self._full_path(name))\n else:\n self._chunked_upload(content, self._full_path(name))\n content.close()\n return name\n\n def _chunked_upload(self, content, dest_path):\n upload_session = self.client.files_upload_session_start(\n content.read(self.CHUNK_SIZE)\n )\n cursor = UploadSessionCursor(\n session_id=upload_session.session_id,\n offset=content.tell()\n )\n commit = CommitInfo(path=dest_path)\n\n while content.tell() < content.size:\n if (content.size - content.tell()) <= self.CHUNK_SIZE:\n self.client.files_upload_session_finish(\n content.read(self.CHUNK_SIZE), cursor, commit\n )\n else:\n self.client.files_upload_session_append_v2(\n content.read(self.CHUNK_SIZE), cursor\n )\n cursor.offset = content.tell()\n", "path": "storages/backends/dropbox.py" } ]
[ { "content": "# Dropbox storage class for Django pluggable storage system.\n# Author: Anthony Monthe <[email protected]>\n# License: BSD\n#\n# Usage:\n#\n# Add below to settings.py:\n# DROPBOX_OAUTH2_TOKEN = 'YourOauthToken'\n# DROPBOX_ROOT_PATH = '/dir/'\n\nfrom __future__ import absolute_import\n\nfrom io import BytesIO\nfrom shutil import copyfileobj\nfrom tempfile import SpooledTemporaryFile\n\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.core.files.base import File\nfrom django.core.files.storage import Storage\nfrom django.utils._os import safe_join\nfrom django.utils.deconstruct import deconstructible\nfrom dropbox import Dropbox\nfrom dropbox.exceptions import ApiError\nfrom dropbox.files import CommitInfo, FolderMetadata, UploadSessionCursor\n\nfrom storages.utils import setting\n\n_DEFAULT_TIMEOUT = 100\n\n\nclass DropBoxStorageException(Exception):\n pass\n\n\nclass DropBoxFile(File):\n def __init__(self, name, storage):\n self.name = name\n self._storage = storage\n self._file = None\n\n def _get_file(self):\n if self._file is None:\n self._file = SpooledTemporaryFile()\n # As dropbox==9.3.0, the client returns a tuple\n # (dropbox.files.FileMetadata, requests.models.Response)\n file_metadata, response = \\\n self._storage.client.files_download(self.name)\n if response.status_code == 200:\n with BytesIO(response.content) as file_content:\n copyfileobj(file_content, self._file)\n else:\n # JIC the exception isn't catched by the dropbox client\n raise DropBoxStorageException(\n \"Dropbox server returned a {} response when accessing {}\"\n .format(response.status_code, self.name)\n )\n self._file.seek(0)\n return self._file\n\n def _set_file(self, value):\n self._file = value\n\n file = property(_get_file, _set_file)\n\n\n@deconstructible\nclass DropBoxStorage(Storage):\n \"\"\"DropBox Storage class for Django pluggable storage system.\"\"\"\n\n CHUNK_SIZE = 4 * 1024 * 1024\n\n def __init__(self, oauth2_access_token=None, root_path=None, timeout=None):\n oauth2_access_token = oauth2_access_token or setting('DROPBOX_OAUTH2_TOKEN')\n if oauth2_access_token is None:\n raise ImproperlyConfigured(\"You must configure an auth token at\"\n \"'settings.DROPBOX_OAUTH2_TOKEN'.\")\n\n self.root_path = root_path or setting('DROPBOX_ROOT_PATH', '/')\n timeout = timeout or setting('DROPBOX_TIMEOUT', _DEFAULT_TIMEOUT)\n self.client = Dropbox(oauth2_access_token, timeout=timeout)\n\n def _full_path(self, name):\n if name == '/':\n name = ''\n return safe_join(self.root_path, name).replace('\\\\', '/')\n\n def delete(self, name):\n self.client.files_delete(self._full_path(name))\n\n def exists(self, name):\n try:\n return bool(self.client.files_get_metadata(self._full_path(name)))\n except ApiError:\n return False\n\n def listdir(self, path):\n directories, files = [], []\n full_path = self._full_path(path)\n\n if full_path == '/':\n full_path = ''\n\n metadata = self.client.files_list_folder(full_path)\n for entry in metadata.entries:\n if isinstance(entry, FolderMetadata):\n directories.append(entry.name)\n else:\n files.append(entry.name)\n return directories, files\n\n def size(self, name):\n metadata = self.client.files_get_metadata(self._full_path(name))\n return metadata.size\n\n def modified_time(self, name):\n metadata = self.client.files_get_metadata(self._full_path(name))\n return metadata.server_modified\n\n def accessed_time(self, name):\n metadata = self.client.files_get_metadata(self._full_path(name))\n return metadata.client_modified\n\n def url(self, 
name):\n media = self.client.files_get_temporary_link(self._full_path(name))\n return media.link\n\n def _open(self, name, mode='rb'):\n remote_file = DropBoxFile(self._full_path(name), self)\n return remote_file\n\n def _save(self, name, content):\n content.open()\n if content.size <= self.CHUNK_SIZE:\n self.client.files_upload(content.read(), self._full_path(name))\n else:\n self._chunked_upload(content, self._full_path(name))\n content.close()\n return name\n\n def _chunked_upload(self, content, dest_path):\n upload_session = self.client.files_upload_session_start(\n content.read(self.CHUNK_SIZE)\n )\n cursor = UploadSessionCursor(\n session_id=upload_session.session_id,\n offset=content.tell()\n )\n commit = CommitInfo(path=dest_path)\n\n while content.tell() < content.size:\n if (content.size - content.tell()) <= self.CHUNK_SIZE:\n self.client.files_upload_session_finish(\n content.read(self.CHUNK_SIZE), cursor, commit\n )\n else:\n self.client.files_upload_session_append_v2(\n content.read(self.CHUNK_SIZE), cursor\n )\n cursor.offset = content.tell()\n", "path": "storages/backends/dropbox.py" } ]
diff --git a/storages/backends/dropbox.py b/storages/backends/dropbox.py index 0640d1cf3..39be1ab9a 100644 --- a/storages/backends/dropbox.py +++ b/storages/backends/dropbox.py @@ -96,6 +96,10 @@ def exists(self, name): def listdir(self, path): directories, files = [], [] full_path = self._full_path(path) + + if full_path == '/': + full_path = '' + metadata = self.client.files_list_folder(full_path) for entry in metadata.entries: if isinstance(entry, FolderMetadata): diff --git a/tests/test_dropbox.py b/tests/test_dropbox.py index 5d163d0ec..d237a7cb4 100644 --- a/tests/test_dropbox.py +++ b/tests/test_dropbox.py @@ -80,6 +80,10 @@ def test_not_exists(self, *args): return_value=FILES_MOCK) def test_listdir(self, *args): dirs, files = self.storage.listdir('/') + dirs2, files2 = self.storage.listdir('') + self.assertEqual(dirs, dirs2) + self.assertEqual(files2, files2) + self.assertGreater(len(dirs), 0) self.assertGreater(len(files), 0) self.assertEqual(dirs[0], 'bar')
Dropbox base path / should be an empty string
Using `django-storages` `1.7.2`, when a base path of `'/'` is provided, the Dropbox API rejects it:

```
  File "/usr/local/lib/python3.7/site-packages/dbbackup/storage.py", line 78, in list_directory
    return self.storage.listdir(path)[1]
  File "/usr/local/lib/python3.7/site-packages/storages/backends/dropbox.py", line 99, in listdir
    metadata = self.client.files_list_folder(full_path)
  File "/usr/local/lib/python3.7/site-packages/dropbox/base.py", line 1744, in files_list_folder
    None,
  File "/usr/local/lib/python3.7/site-packages/dropbox/dropbox.py", line 274, in request
    timeout=timeout)
  File "/usr/local/lib/python3.7/site-packages/dropbox/dropbox.py", line 365, in request_json_string_with_retry
    timeout=timeout)
  File "/usr/local/lib/python3.7/site-packages/dropbox/dropbox.py", line 456, in request_json_string
    raise BadInputError(request_id, r.text)
dropbox.exceptions.BadInputError: BadInputError('XXXXXXXXXXXXXXXXXXXX', 'Error in call to API function "files/list_folder": request body: path: Specify the root folder as an empty string rather than as "/".')
```

As the error says, the root folder should be passed as an empty string rather than as '/'.
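The error quoted above comes from the Dropbox v2 `files/list_folder` endpoint, which addresses the root folder as an empty string while every other folder keeps its leading slash. Below is a minimal sketch of the normalization the patch applies before calling the client; `normalize_listdir_path` is a standalone illustrative helper, not the backend's actual code.

```python
# Minimal sketch of the path normalization in the diff above; the Dropbox v2
# API expects '' for the root folder and '/dir'-style paths for everything else.
def normalize_listdir_path(full_path: str) -> str:
    # Only the bare root needs rewriting; nested paths are passed through.
    return '' if full_path == '/' else full_path


assert normalize_listdir_path('/') == ''
assert normalize_listdir_path('/dir/sub') == '/dir/sub'
```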
getnikola__nikola-2593
[ { "content": "# -*- coding: utf-8 -*-\n\n# Copyright © 2012-2016 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Classify the posts in archives.\"\"\"\n\nimport os\nimport nikola.utils\nimport datetime\nfrom nikola.plugin_categories import Taxonomy\n\n\nclass Archive(Taxonomy):\n \"\"\"Classify the post archives.\"\"\"\n\n name = \"classify_archive\"\n\n classification_name = \"archive\"\n overview_page_variable_name = \"archive\"\n more_than_one_classifications_per_post = False\n has_hierarchy = True\n include_posts_from_subhierarchies = True\n include_posts_into_hierarchy_root = True\n subcategories_list_template = \"list.tmpl\"\n generate_atom_feeds_for_post_lists = False\n template_for_classification_overview = None\n always_disable_rss = True\n apply_to_posts = True\n apply_to_pages = False\n minimum_post_count_per_classification_in_overview = 1\n omit_empty_classifications = False\n also_create_classifications_from_other_languages = False\n\n def set_site(self, site):\n \"\"\"Set Nikola site.\"\"\"\n # Sanity checks\n if (site.config['CREATE_MONTHLY_ARCHIVE'] and site.config['CREATE_SINGLE_ARCHIVE']) and not site.config['CREATE_FULL_ARCHIVES']:\n raise Exception('Cannot create monthly and single archives at the same time.')\n # Finish setup\n self.show_list_as_subcategories_list = not site.config['CREATE_FULL_ARCHIVES']\n self.show_list_as_index = site.config['ARCHIVES_ARE_INDEXES']\n self.template_for_single_list = \"archiveindex.tmpl\" if site.config['ARCHIVES_ARE_INDEXES'] else \"list_post.tmpl\"\n # Determine maximum hierarchy height\n if site.config['CREATE_DAILY_ARCHIVE'] or site.config['CREATE_FULL_ARCHIVES']:\n self.max_levels = 3\n elif site.config['CREATE_MONTHLY_ARCHIVE']:\n self.max_levels = 2\n elif site.config['CREATE_SINGLE_ARCHIVE']:\n self.max_levels = 0\n else:\n self.max_levels = 1\n return super(Archive, self).set_site(site)\n\n def get_implicit_classifications(self, lang):\n \"\"\"Return a list of classification strings which should always appear in posts_per_classification.\"\"\"\n return ['']\n\n def classify(self, post, lang):\n \"\"\"Classify the given post for the given language.\"\"\"\n levels = [str(post.date.year).zfill(4), str(post.date.month).zfill(2), str(post.date.day).zfill(2)]\n return ['/'.join(levels[:self.max_levels])]\n\n def sort_classifications(self, classifications, lang, level=None):\n \"\"\"Sort the given list of classification strings.\"\"\"\n if level in (0, 1):\n # Years or 
months: sort descending\n classifications.sort()\n classifications.reverse()\n\n def get_classification_friendly_name(self, classification, lang, only_last_component=False):\n \"\"\"Extract a friendly name from the classification.\"\"\"\n classification = self.extract_hierarchy(classification)\n if len(classification) == 0:\n return \"\"\n elif len(classification) == 1:\n return classification[0]\n elif len(classification) == 2:\n nikola.utils.LocaleBorg().get_month_name(int(classification[1]), lang)\n else:\n # Fallback\n return '/'.join(classification)\n\n def get_path(self, classification, lang, dest_type='page'):\n \"\"\"A path handler for the given classification.\"\"\"\n components = [self.site.config['ARCHIVE_PATH']]\n if classification:\n components.extend(classification)\n add_index = 'always'\n else:\n components.append(os.path.splitext(self.site.config['ARCHIVE_FILENAME'])[0])\n add_index = 'never'\n return [_f for _f in components if _f], add_index\n\n def extract_hierarchy(self, classification):\n \"\"\"Given a classification, return a list of parts in the hierarchy.\"\"\"\n return classification.split('/') if classification else []\n\n def recombine_classification_from_hierarchy(self, hierarchy):\n \"\"\"Given a list of parts in the hierarchy, return the classification string.\"\"\"\n return '/'.join(hierarchy)\n\n def provide_context_and_uptodate(self, classification, lang, node=None):\n \"\"\"Provide data for the context and the uptodate list for the list of the given classifiation.\"\"\"\n hierarchy = self.extract_hierarchy(classification)\n kw = {\n \"messages\": self.site.MESSAGES,\n }\n page_kind = \"list\"\n if self.show_list_as_index:\n if not self.show_list_as_subcategories_list or len(hierarchy) == self.max_levels:\n page_kind = \"index\"\n if len(hierarchy) == 0:\n title = kw[\"messages\"][lang][\"Archive\"]\n kw[\"is_feed_stale\"] = False\n elif len(hierarchy) == 1:\n title = kw[\"messages\"][lang][\"Posts for year %s\"] % hierarchy[0]\n kw[\"is_feed_stale\"] = (datetime.datetime.utcnow().strftime(\"%Y\") != hierarchy[0])\n elif len(hierarchy) == 2:\n title = kw[\"messages\"][lang][\"Posts for {month} {year}\"].format(\n year=hierarchy[0],\n month=nikola.utils.LocaleBorg().get_month_name(int(hierarchy[1]), lang))\n kw[\"is_feed_stale\"] = (datetime.datetime.utcnow().strftime(\"%Y/%m\") != classification)\n elif len(hierarchy) == 3:\n title = kw[\"messages\"][lang][\"Posts for {month} {day}, {year}\"].format(\n year=hierarchy[0],\n month=nikola.utils.LocaleBorg().get_month_name(int(hierarchy[1]), lang),\n day=int(hierarchy[2]))\n kw[\"is_feed_stale\"] = (datetime.datetime.utcnow().strftime(\"%Y/%m/%d\") != classification)\n else:\n raise Exception(\"Cannot interpret classification {}!\".format(repr(classification)))\n context = {\n \"title\": title,\n \"pagekind\": [page_kind, \"archive_page\"],\n }\n if page_kind == 'index':\n context[\"archive_name\"] = classification if classification else None\n context[\"is_feed_stale\"] = kw[\"is_feed_stale\"]\n kw.update(context)\n return context, kw\n\n def should_generate_classification_page(self, classification, post_list, lang):\n \"\"\"Only generates list of posts for classification if this function returns True.\"\"\"\n return len(classification.split('/')) < 3 or len(post_list) > 0\n", "path": "nikola/plugins/task/archive.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\n# Copyright © 2012-2016 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Classify the posts in archives.\"\"\"\n\nimport os\nimport nikola.utils\nimport datetime\nfrom nikola.plugin_categories import Taxonomy\n\n\nclass Archive(Taxonomy):\n \"\"\"Classify the post archives.\"\"\"\n\n name = \"classify_archive\"\n\n classification_name = \"archive\"\n overview_page_variable_name = \"archive\"\n more_than_one_classifications_per_post = False\n has_hierarchy = True\n include_posts_from_subhierarchies = True\n include_posts_into_hierarchy_root = True\n subcategories_list_template = \"list.tmpl\"\n generate_atom_feeds_for_post_lists = False\n template_for_classification_overview = None\n always_disable_rss = True\n apply_to_posts = True\n apply_to_pages = False\n minimum_post_count_per_classification_in_overview = 1\n omit_empty_classifications = False\n also_create_classifications_from_other_languages = False\n\n def set_site(self, site):\n \"\"\"Set Nikola site.\"\"\"\n # Sanity checks\n if (site.config['CREATE_MONTHLY_ARCHIVE'] and site.config['CREATE_SINGLE_ARCHIVE']) and not site.config['CREATE_FULL_ARCHIVES']:\n raise Exception('Cannot create monthly and single archives at the same time.')\n # Finish setup\n self.show_list_as_subcategories_list = not site.config['CREATE_FULL_ARCHIVES']\n self.show_list_as_index = site.config['ARCHIVES_ARE_INDEXES']\n self.template_for_single_list = \"archiveindex.tmpl\" if site.config['ARCHIVES_ARE_INDEXES'] else \"list_post.tmpl\"\n # Determine maximum hierarchy height\n if site.config['CREATE_DAILY_ARCHIVE'] or site.config['CREATE_FULL_ARCHIVES']:\n self.max_levels = 3\n elif site.config['CREATE_MONTHLY_ARCHIVE']:\n self.max_levels = 2\n elif site.config['CREATE_SINGLE_ARCHIVE']:\n self.max_levels = 0\n else:\n self.max_levels = 1\n return super(Archive, self).set_site(site)\n\n def get_implicit_classifications(self, lang):\n \"\"\"Return a list of classification strings which should always appear in posts_per_classification.\"\"\"\n return ['']\n\n def classify(self, post, lang):\n \"\"\"Classify the given post for the given language.\"\"\"\n levels = [str(post.date.year).zfill(4), str(post.date.month).zfill(2), str(post.date.day).zfill(2)]\n return ['/'.join(levels[:self.max_levels])]\n\n def sort_classifications(self, classifications, lang, level=None):\n \"\"\"Sort the given list of classification strings.\"\"\"\n if level in (0, 1):\n # Years or 
months: sort descending\n classifications.sort()\n classifications.reverse()\n\n def get_classification_friendly_name(self, classification, lang, only_last_component=False):\n \"\"\"Extract a friendly name from the classification.\"\"\"\n classification = self.extract_hierarchy(classification)\n if len(classification) == 0:\n return \"\"\n elif len(classification) == 1:\n return classification[0]\n elif len(classification) == 2:\n return nikola.utils.LocaleBorg().get_month_name(int(classification[1]), lang)\n else:\n # Fallback\n return '/'.join(classification)\n\n def get_path(self, classification, lang, dest_type='page'):\n \"\"\"A path handler for the given classification.\"\"\"\n components = [self.site.config['ARCHIVE_PATH']]\n if classification:\n components.extend(classification)\n add_index = 'always'\n else:\n components.append(os.path.splitext(self.site.config['ARCHIVE_FILENAME'])[0])\n add_index = 'never'\n return [_f for _f in components if _f], add_index\n\n def extract_hierarchy(self, classification):\n \"\"\"Given a classification, return a list of parts in the hierarchy.\"\"\"\n return classification.split('/') if classification else []\n\n def recombine_classification_from_hierarchy(self, hierarchy):\n \"\"\"Given a list of parts in the hierarchy, return the classification string.\"\"\"\n return '/'.join(hierarchy)\n\n def provide_context_and_uptodate(self, classification, lang, node=None):\n \"\"\"Provide data for the context and the uptodate list for the list of the given classifiation.\"\"\"\n hierarchy = self.extract_hierarchy(classification)\n kw = {\n \"messages\": self.site.MESSAGES,\n }\n page_kind = \"list\"\n if self.show_list_as_index:\n if not self.show_list_as_subcategories_list or len(hierarchy) == self.max_levels:\n page_kind = \"index\"\n if len(hierarchy) == 0:\n title = kw[\"messages\"][lang][\"Archive\"]\n kw[\"is_feed_stale\"] = False\n elif len(hierarchy) == 1:\n title = kw[\"messages\"][lang][\"Posts for year %s\"] % hierarchy[0]\n kw[\"is_feed_stale\"] = (datetime.datetime.utcnow().strftime(\"%Y\") != hierarchy[0])\n elif len(hierarchy) == 2:\n title = kw[\"messages\"][lang][\"Posts for {month} {year}\"].format(\n year=hierarchy[0],\n month=nikola.utils.LocaleBorg().get_month_name(int(hierarchy[1]), lang))\n kw[\"is_feed_stale\"] = (datetime.datetime.utcnow().strftime(\"%Y/%m\") != classification)\n elif len(hierarchy) == 3:\n title = kw[\"messages\"][lang][\"Posts for {month} {day}, {year}\"].format(\n year=hierarchy[0],\n month=nikola.utils.LocaleBorg().get_month_name(int(hierarchy[1]), lang),\n day=int(hierarchy[2]))\n kw[\"is_feed_stale\"] = (datetime.datetime.utcnow().strftime(\"%Y/%m/%d\") != classification)\n else:\n raise Exception(\"Cannot interpret classification {}!\".format(repr(classification)))\n context = {\n \"title\": title,\n \"pagekind\": [page_kind, \"archive_page\"],\n }\n if page_kind == 'index':\n context[\"archive_name\"] = classification if classification else None\n context[\"is_feed_stale\"] = kw[\"is_feed_stale\"]\n kw.update(context)\n return context, kw\n\n def should_generate_classification_page(self, classification, post_list, lang):\n \"\"\"Only generates list of posts for classification if this function returns True.\"\"\"\n return len(classification.split('/')) < 3 or len(post_list) > 0\n", "path": "nikola/plugins/task/archive.py" } ]
diff --git a/nikola/plugins/task/archive.py b/nikola/plugins/task/archive.py index 74c28c2451..40baf9e168 100644 --- a/nikola/plugins/task/archive.py +++ b/nikola/plugins/task/archive.py @@ -97,7 +97,7 @@ def get_classification_friendly_name(self, classification, lang, only_last_compo elif len(classification) == 1: return classification[0] elif len(classification) == 2: - nikola.utils.LocaleBorg().get_month_name(int(classification[1]), lang) + return nikola.utils.LocaleBorg().get_month_name(int(classification[1]), lang) else: # Fallback return '/'.join(classification)
Archives (CREATE_MONTHLY_ARCHIVE = True) think all months are "None"
https://irclogs.getnikola.com/2015/
To reproduce locally: get https://github.com/getnikola/irclogs-site and create any random files in `posts/*.rst`.
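The root cause in the pr_diff above is a one-line Python pitfall: `get_classification_friendly_name` computed the month name but never returned it, and a function without an explicit `return` yields `None`, which the archive templates then rendered literally. A minimal standalone illustration of the mechanism (toy functions, not Nikola code):

```python
# Toy illustration of the bug fixed in the diff above: forgetting `return`
# makes the function yield None, which is what the archive pages displayed.
def month_name_buggy(month: int) -> None:
    ["January", "February", "March"][month - 1]  # value computed, then discarded


def month_name_fixed(month: int) -> str:
    return ["January", "February", "March"][month - 1]


assert month_name_buggy(1) is None       # what the templates saw: "None"
assert month_name_fixed(1) == "January"
```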
microsoft__DeepSpeed-5134
[ { "content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nimport torch\n\nfrom .abstract_accelerator import DeepSpeedAccelerator\n\n# During setup stage torch may not be installed, pass on no torch will\n# allow op builder related API to be executed.\ntry:\n import torch.mps\nexcept ImportError:\n pass\n\n\nclass MPS_Accelerator(DeepSpeedAccelerator):\n\n def __init__(self):\n self._name = \"mps\"\n self._communication_backend_name = None\n\n def is_synchronized_device(self):\n return False\n\n # Device APIs\n def device_name(self, device_index=None):\n if device_index is None:\n return \"mps\"\n return \"mps:{}\".format(device_index)\n\n def device(self, device_index):\n return torch.device(\"mps\", index=0)\n\n def set_device(self, device_index):\n return\n\n def current_device(self):\n return torch.device(\"mps\", index=0)\n\n def current_device_name(self):\n return \"mps:0\"\n\n def device_count(self):\n return 1\n\n def synchronize(self, device_index=None):\n return torch.mps.synchronize()\n\n # RNG APIs\n def random(self):\n return torch.random\n\n def set_rng_state(self, new_state, device_index=None):\n return torch.mps.set_rng_state(new_state)\n\n def get_rng_state(self, device_index=None):\n return torch.mps.get_rng_state()\n\n def manual_seed(self, seed):\n return torch.mps.manual_seed(seed)\n\n def manual_seed_all(self, seed):\n return torch.mps.manual_seed(seed)\n\n def seed(self):\n return torch.mps.seed()\n\n def initial_seed(self, seed):\n return\n\n def default_generator(self, device_index):\n return\n\n # Streams/Events\n @property\n def Stream(self):\n return None\n\n def stream(self, stream):\n return None\n\n def current_stream(self, device_index=None):\n return None\n\n def default_stream(self, device_index=None):\n return None\n\n @property\n def Event(self):\n return None\n\n # Memory management\n def empty_cache(self):\n return torch.mps.empty_cache()\n\n def memory_allocated(self, device_index=None):\n return torch.mps.current_allocated_memory()\n\n def max_memory_allocated(self, device_index=None):\n return torch.mps.driver_allocated_memory()\n\n def set_per_process_memory_fraction(self, fraction):\n return torch.mps.set_per_process_memory_fraction(fraction)\n\n def reset_max_memory_allocated(self, device_index=None):\n return\n\n def memory_cached(self, device_index=None):\n return\n\n def max_memory_cached(self, device_index=None):\n return\n\n def reset_max_memory_cached(self, device_index=None):\n return\n\n def memory_stats(self, device_index=None):\n return\n\n def reset_peak_memory_stats(self, device_index=None):\n return\n\n def memory_reserved(self, device_index=None):\n return\n\n def max_memory_reserved(self, device_index=None):\n return\n\n def total_memory(self, device_index=None):\n return\n\n def available_memory(self, device_index=None):\n return\n\n # Data types\n def is_bf16_supported(self):\n return False\n\n def is_fp16_supported(self):\n return False\n\n def supported_dtypes(self):\n return [torch.float]\n\n # Misc\n def amp(self):\n return\n\n def is_available(self):\n return hasattr(torch.backends, \"mps\") and torch.backends.mps.is_available()\n\n def range_push(self, msg):\n return\n\n def range_pop(self):\n return\n\n def lazy_call(self, callback):\n return\n\n def communication_backend_name(self):\n return self._communication_backend_name\n\n def is_triton_supported(self):\n return False\n\n # Graph operations\n def create_graph(self):\n return None\n\n def capture_to_graph(self, 
graph, pool=None, stream=None):\n from deepspeed.runtime.utils import noop_context\n return noop_context()\n\n def replay_graph(self, graph):\n return\n\n # Tensor operations\n @property\n def BFloat16Tensor(self):\n return\n\n @property\n def ByteTensor(self):\n return\n\n @property\n def DoubleTensor(self):\n return\n\n @property\n def FloatTensor(self):\n return\n\n @property\n def HalfTensor(self):\n return\n\n @property\n def IntTensor(self):\n return\n\n @property\n def LongTensor(self):\n return\n\n def pin_memory(self, tensor, align_bytes=1):\n return tensor.pin_memory()\n\n def is_pinned(self, tensor):\n return tensor.is_pinned()\n\n def on_accelerator(self, tensor):\n device_str = str(tensor.device)\n if device_str.startswith(\"mps\"):\n return True\n else:\n return False\n\n def op_builder_dir(self):\n try:\n # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed\n # if successful this also means we're doing a local install and not JIT compile path\n from op_builder import __deepspeed__ # noqa: F401 # type: ignore\n\n return \"op_builder\"\n except ImportError:\n return \"deepspeed.ops.op_builder\"\n\n # create an instance of op builder, specified by class_name\n def create_op_builder(self, op_name):\n builder_class = self.get_op_builder(op_name)\n if builder_class is not None:\n return builder_class()\n return None\n\n # return an op builder class, specified by class_name\n def get_op_builder(self, class_name):\n from deepspeed.ops.op_builder.cpu import NotImplementedBuilder\n\n return NotImplementedBuilder\n\n def build_extension(self):\n from torch.utils.cpp_extension import BuildExtension\n\n return BuildExtension\n\n def export_envs(self):\n return []\n", "path": "accelerator/mps_accelerator.py" } ]
[ { "content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nimport torch\n\nfrom .abstract_accelerator import DeepSpeedAccelerator\n\n# During setup stage torch may not be installed, pass on no torch will\n# allow op builder related API to be executed.\ntry:\n import torch.mps\nexcept ImportError:\n pass\n\n\nclass MPS_Accelerator(DeepSpeedAccelerator):\n\n def __init__(self):\n self._name = \"mps\"\n self._communication_backend_name = None\n\n def is_synchronized_device(self):\n return False\n\n def use_host_timers(self):\n return self.is_synchronized_device()\n\n def resolves_data_dependency(self):\n return self.is_synchronized_device()\n\n def handles_memory_backpressure(self):\n return self.is_synchronized_device()\n\n # Device APIs\n def device_name(self, device_index=None):\n if device_index is None:\n return \"mps\"\n return \"mps:{}\".format(device_index)\n\n def device(self, device_index):\n return torch.device(\"mps\", index=0)\n\n def set_device(self, device_index):\n return\n\n def current_device(self):\n return torch.device(\"mps\", index=0)\n\n def current_device_name(self):\n return \"mps:0\"\n\n def device_count(self):\n return 1\n\n def synchronize(self, device_index=None):\n return torch.mps.synchronize()\n\n # RNG APIs\n def random(self):\n return torch.random\n\n def set_rng_state(self, new_state, device_index=None):\n return torch.mps.set_rng_state(new_state)\n\n def get_rng_state(self, device_index=None):\n return torch.mps.get_rng_state()\n\n def manual_seed(self, seed):\n return torch.mps.manual_seed(seed)\n\n def manual_seed_all(self, seed):\n return torch.mps.manual_seed(seed)\n\n def seed(self):\n return torch.mps.seed()\n\n def initial_seed(self, seed):\n return\n\n def default_generator(self, device_index):\n return\n\n # Streams/Events\n @property\n def Stream(self):\n return None\n\n def stream(self, stream):\n return None\n\n def current_stream(self, device_index=None):\n return None\n\n def default_stream(self, device_index=None):\n return None\n\n @property\n def Event(self):\n return None\n\n # Memory management\n def empty_cache(self):\n return torch.mps.empty_cache()\n\n def memory_allocated(self, device_index=None):\n return torch.mps.current_allocated_memory()\n\n def max_memory_allocated(self, device_index=None):\n return torch.mps.driver_allocated_memory()\n\n def set_per_process_memory_fraction(self, fraction):\n return torch.mps.set_per_process_memory_fraction(fraction)\n\n def reset_max_memory_allocated(self, device_index=None):\n return\n\n def memory_cached(self, device_index=None):\n return\n\n def max_memory_cached(self, device_index=None):\n return\n\n def reset_max_memory_cached(self, device_index=None):\n return\n\n def memory_stats(self, device_index=None):\n return\n\n def reset_peak_memory_stats(self, device_index=None):\n return\n\n def memory_reserved(self, device_index=None):\n return\n\n def max_memory_reserved(self, device_index=None):\n return\n\n def total_memory(self, device_index=None):\n return\n\n def available_memory(self, device_index=None):\n return\n\n # Data types\n def is_bf16_supported(self):\n return False\n\n def is_fp16_supported(self):\n return False\n\n def supported_dtypes(self):\n return [torch.float]\n\n # Misc\n def amp(self):\n return\n\n def is_available(self):\n return hasattr(torch.backends, \"mps\") and torch.backends.mps.is_available()\n\n def range_push(self, msg):\n return\n\n def range_pop(self):\n return\n\n def lazy_call(self, callback):\n 
return\n\n def communication_backend_name(self):\n return self._communication_backend_name\n\n def is_triton_supported(self):\n return False\n\n # Graph operations\n def create_graph(self):\n return None\n\n def capture_to_graph(self, graph, pool=None, stream=None):\n from deepspeed.runtime.utils import noop_context\n return noop_context()\n\n def replay_graph(self, graph):\n return\n\n # Tensor operations\n @property\n def BFloat16Tensor(self):\n return\n\n @property\n def ByteTensor(self):\n return\n\n @property\n def DoubleTensor(self):\n return\n\n @property\n def FloatTensor(self):\n return\n\n @property\n def HalfTensor(self):\n return\n\n @property\n def IntTensor(self):\n return\n\n @property\n def LongTensor(self):\n return\n\n def pin_memory(self, tensor, align_bytes=1):\n return tensor.pin_memory()\n\n def is_pinned(self, tensor):\n return tensor.is_pinned()\n\n def on_accelerator(self, tensor):\n device_str = str(tensor.device)\n if device_str.startswith(\"mps\"):\n return True\n else:\n return False\n\n def op_builder_dir(self):\n try:\n # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed\n # if successful this also means we're doing a local install and not JIT compile path\n from op_builder import __deepspeed__ # noqa: F401 # type: ignore\n\n return \"op_builder\"\n except ImportError:\n return \"deepspeed.ops.op_builder\"\n\n # create an instance of op builder, specified by class_name\n def create_op_builder(self, op_name):\n builder_class = self.get_op_builder(op_name)\n if builder_class is not None:\n return builder_class()\n return None\n\n # return an op builder class, specified by class_name\n def get_op_builder(self, class_name):\n from deepspeed.ops.op_builder.cpu import NotImplementedBuilder\n\n return NotImplementedBuilder\n\n def build_extension(self):\n from torch.utils.cpp_extension import BuildExtension\n\n return BuildExtension\n\n def export_envs(self):\n return []\n", "path": "accelerator/mps_accelerator.py" } ]
diff --git a/accelerator/mps_accelerator.py b/accelerator/mps_accelerator.py index f6303cf9890f..972b33caece1 100644 --- a/accelerator/mps_accelerator.py +++ b/accelerator/mps_accelerator.py @@ -24,6 +24,15 @@ def __init__(self): def is_synchronized_device(self): return False + def use_host_timers(self): + return self.is_synchronized_device() + + def resolves_data_dependency(self): + return self.is_synchronized_device() + + def handles_memory_backpressure(self): + return self.is_synchronized_device() + # Device APIs def device_name(self, device_index=None): if device_index is None: diff --git a/tests/unit/accelerator/test_accelerator.py b/tests/unit/accelerator/test_accelerator.py new file mode 100644 index 000000000000..964cf2b24f4e --- /dev/null +++ b/tests/unit/accelerator/test_accelerator.py @@ -0,0 +1,59 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import pytest + +import os +import sys +import importlib +import re + +import deepspeed + +DS_ACCEL_PATH = "deepspeed.accelerator" +IGNORE_FILES = ["abstract_accelerator.py", "real_accelerator.py"] + + [email protected] +def accel_class_name(module_name): + class_list = [] + mocked_modules = [] + + # Get the accelerator class name for a given module + while True: + try: + module = importlib.import_module(module_name) + break + except ModuleNotFoundError as e: + # If the environment is missing a module, mock it so we can still + # test importing the accelerator class + missing_module = re.search(r"\'(.*)\'", e.msg).group().strip("'") + sys.modules[missing_module] = lambda x: None + mocked_modules.append(missing_module) + for name in dir(module): + if name.endswith("_Accelerator"): + class_list.append(name) + + assert len(class_list) == 1, f"Multiple accelerator classes found in {module_name}" + + yield class_list[0] + + # Clean up mocked modules so as to not impact other tests + for module in mocked_modules: + del sys.modules[module] + + [email protected]( + "module_name", + [ + DS_ACCEL_PATH + "." + f.rstrip(".py") for f in os.listdir(deepspeed.accelerator.__path__[0]) + if f.endswith("_accelerator.py") and f not in IGNORE_FILES + ], +) +def test_abstract_methods_defined(module_name, accel_class_name): + module = importlib.import_module(module_name) + accel_class = getattr(module, accel_class_name) + accel_class.__init__ = lambda self: None + _ = accel_class()
[BUG] TypeError: Can't instantiate abstract class MPS_Accelerator with abstract methods **Describe the bug** TypeError: Can't instantiate abstract class MPS_Accelerator with abstract methods handles_memory_backpressure, resolves_data_dependency, use_host_timers **To Reproduce** Steps to reproduce the behavior: 1. git clone https://github.com/OpenBMB/MiniCPM.git 2. follow setup step. 3. run `!bash lora_finetune.sh` via `lora_finetune.ipynb` **Expected behavior** runnable **ds_report output** Please run `ds_report` to give us details about your setup. **Screenshots** ``` class CudaEventTimer(object): File "/Users/katopz/git/MiniCPM/finetune/.venv/lib/python3.11/site-packages/deepspeed/utils/timer.py", line 33, in CudaEventTimer def __init__(self, start_event: get_accelerator().Event, end_event: get_accelerator().Event): ^^^^^^^^^^^^^^^^^ File "/Users/katopz/git/MiniCPM/finetune/.venv/lib/python3.11/site-packages/deepspeed/accelerator/real_accelerator.py", line 184, in get_accelerator ds_accelerator = MPS_Accelerator() ^^^^^^^^^^^^^^^^^ TypeError: Can't instantiate abstract class MPS_Accelerator with abstract methods handles_memory_backpressure, resolves_data_dependency, use_host_timers ``` **System info (please complete the following information):** - OS: macOS 14.2.1 (23C71) - metal - Python 3.11.7 **Launcher context** deepspeed **Docker context** no **Additional context**
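The traceback above is standard `abc` behaviour: the accelerator base class declares `use_host_timers`, `resolves_data_dependency`, and `handles_memory_backpressure` as abstract, so a subclass that leaves them undefined cannot be instantiated, which is why `MPS_Accelerator()` fails until the pr_diff adds the three methods. A minimal standalone reproduction of the mechanism (toy classes, not DeepSpeed code):

```python
# Toy reproduction of the failure mode fixed in the diff above.
from abc import ABC, abstractmethod


class BaseAccelerator(ABC):
    @abstractmethod
    def use_host_timers(self): ...


class IncompleteAccelerator(BaseAccelerator):
    pass  # abstract method left undefined -> class cannot be instantiated


class CompleteAccelerator(BaseAccelerator):
    def use_host_timers(self):
        return False  # the real fix delegates to is_synchronized_device()


try:
    IncompleteAccelerator()
except TypeError as err:
    print(err)  # "Can't instantiate abstract class IncompleteAccelerator ..."

CompleteAccelerator()  # fine once every abstract method is implemented
```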
kovidgoyal__kitty-5932
[ { "content": "#!/usr/bin/env python3\n# License: GPL v3 Copyright: 2018, Kovid Goyal <kovid at kovidgoyal.net>\n\nimport fnmatch\nimport glob\nimport io\nimport json\nimport os\nimport re\nimport secrets\nimport shlex\nimport shutil\nimport stat\nimport subprocess\nimport sys\nimport tarfile\nimport tempfile\nimport termios\nimport time\nimport traceback\nfrom base64 import standard_b64decode, standard_b64encode\nfrom contextlib import contextmanager, suppress\nfrom getpass import getuser\nfrom select import select\nfrom typing import Any, Callable, Dict, Iterator, List, NoReturn, Optional, Sequence, Set, Tuple, Union, cast\n\nfrom kitty.constants import cache_dir, runtime_dir, shell_integration_dir, ssh_control_master_template, str_version, terminfo_dir\nfrom kitty.shell_integration import as_str_literal\nfrom kitty.shm import SharedMemory\nfrom kitty.types import run_once\nfrom kitty.utils import SSHConnectionData, expandvars, resolve_abs_or_config_path\nfrom kitty.utils import set_echo as turn_off_echo\n\nfrom ..tui.operations import RESTORE_PRIVATE_MODE_VALUES, SAVE_PRIVATE_MODE_VALUES, Mode, restore_colors, save_colors, set_mode\nfrom ..tui.utils import kitty_opts, running_in_tmux\nfrom .config import init_config\nfrom .copy import CopyInstruction\nfrom .options.types import Options as SSHOptions\nfrom .options.utils import DELETE_ENV_VAR\nfrom .utils import create_shared_memory, ssh_options\n\n\n@run_once\ndef ssh_exe() -> str:\n return shutil.which('ssh') or 'ssh'\n\n\ndef read_data_from_shared_memory(shm_name: str) -> Any:\n with SharedMemory(shm_name, readonly=True) as shm:\n shm.unlink()\n if shm.stats.st_uid != os.geteuid() or shm.stats.st_gid != os.getegid():\n raise ValueError('Incorrect owner on pwfile')\n mode = stat.S_IMODE(shm.stats.st_mode)\n if mode != stat.S_IREAD:\n raise ValueError('Incorrect permissions on pwfile')\n return json.loads(shm.read_data_with_size())\n\n\n# See https://www.gnu.org/software/bash/manual/html_node/Double-Quotes.html\nquote_pat = re.compile('([\\\\`\"])')\n\n\ndef quote_env_val(x: str, literal_quote: bool = False) -> str:\n if literal_quote:\n return as_str_literal(x)\n x = quote_pat.sub(r'\\\\\\1', x)\n x = x.replace('$(', r'\\$(') # prevent execution with $()\n return f'\"{x}\"'\n\n\ndef serialize_env(literal_env: Dict[str, str], env: Dict[str, str], base_env: Dict[str, str], for_python: bool = False) -> bytes:\n lines = []\n literal_quote = True\n\n if for_python:\n def a(k: str, val: str = '', prefix: str = 'export') -> None:\n if val:\n lines.append(f'{prefix} {json.dumps((k, val, literal_quote))}')\n else:\n lines.append(f'{prefix} {json.dumps((k,))}')\n else:\n def a(k: str, val: str = '', prefix: str = 'export') -> None:\n if val:\n lines.append(f'{prefix} {shlex.quote(k)}={quote_env_val(val, literal_quote)}')\n else:\n lines.append(f'{prefix} {shlex.quote(k)}')\n\n for k, v in literal_env.items():\n a(k, v)\n\n literal_quote = False\n for k in sorted(env):\n v = env[k]\n if v == DELETE_ENV_VAR:\n a(k, prefix='unset')\n elif v == '_kitty_copy_env_var_':\n q = base_env.get(k)\n if q is not None:\n a(k, q)\n else:\n a(k, v)\n return '\\n'.join(lines).encode('utf-8')\n\n\ndef make_tarfile(ssh_opts: SSHOptions, base_env: Dict[str, str], compression: str = 'gz', literal_env: Dict[str, str] = {}) -> bytes:\n\n def normalize_tarinfo(tarinfo: tarfile.TarInfo) -> tarfile.TarInfo:\n tarinfo.uname = tarinfo.gname = ''\n tarinfo.uid = tarinfo.gid = 0\n # some distro's like nix mess with installed file permissions so ensure\n # files are at least 
readable and writable by owning user\n tarinfo.mode |= stat.S_IWUSR | stat.S_IRUSR\n return tarinfo\n\n def add_data_as_file(tf: tarfile.TarFile, arcname: str, data: Union[str, bytes]) -> tarfile.TarInfo:\n ans = tarfile.TarInfo(arcname)\n ans.mtime = 0\n ans.type = tarfile.REGTYPE\n if isinstance(data, str):\n data = data.encode('utf-8')\n ans.size = len(data)\n normalize_tarinfo(ans)\n tf.addfile(ans, io.BytesIO(data))\n return ans\n\n def filter_from_globs(*pats: str) -> Callable[[tarfile.TarInfo], Optional[tarfile.TarInfo]]:\n def filter(tarinfo: tarfile.TarInfo) -> Optional[tarfile.TarInfo]:\n for junk_dir in ('.DS_Store', '__pycache__'):\n for pat in (f'*/{junk_dir}', f'*/{junk_dir}/*'):\n if fnmatch.fnmatch(tarinfo.name, pat):\n return None\n for pat in pats:\n if fnmatch.fnmatch(tarinfo.name, pat):\n return None\n return normalize_tarinfo(tarinfo)\n return filter\n\n from kitty.shell_integration import get_effective_ksi_env_var\n if ssh_opts.shell_integration == 'inherited':\n ksi = get_effective_ksi_env_var(kitty_opts())\n else:\n from kitty.options.types import Options\n from kitty.options.utils import shell_integration\n ksi = get_effective_ksi_env_var(Options({'shell_integration': shell_integration(ssh_opts.shell_integration)}))\n\n env = {\n 'TERM': os.environ.get('TERM') or kitty_opts().term,\n 'COLORTERM': 'truecolor',\n }\n env.update(ssh_opts.env)\n for q in ('KITTY_WINDOW_ID', 'WINDOWID'):\n val = os.environ.get(q)\n if val is not None:\n env[q] = val\n env['KITTY_SHELL_INTEGRATION'] = ksi or DELETE_ENV_VAR\n env['KITTY_SSH_KITTEN_DATA_DIR'] = ssh_opts.remote_dir\n if ssh_opts.login_shell:\n env['KITTY_LOGIN_SHELL'] = ssh_opts.login_shell\n if ssh_opts.cwd:\n env['KITTY_LOGIN_CWD'] = ssh_opts.cwd\n if ssh_opts.remote_kitty != 'no':\n env['KITTY_REMOTE'] = ssh_opts.remote_kitty\n if os.environ.get('KITTY_PUBLIC_KEY'):\n env.pop('KITTY_PUBLIC_KEY', None)\n literal_env['KITTY_PUBLIC_KEY'] = os.environ['KITTY_PUBLIC_KEY']\n env_script = serialize_env(literal_env, env, base_env, for_python=compression != 'gz')\n buf = io.BytesIO()\n with tarfile.open(mode=f'w:{compression}', fileobj=buf, encoding='utf-8') as tf:\n rd = ssh_opts.remote_dir.rstrip('/')\n for ci in ssh_opts.copy.values():\n tf.add(ci.local_path, arcname=ci.arcname, filter=filter_from_globs(*ci.exclude_patterns))\n add_data_as_file(tf, 'data.sh', env_script)\n if compression == 'gz':\n tf.add(f'{shell_integration_dir}/ssh/bootstrap-utils.sh', arcname='bootstrap-utils.sh', filter=normalize_tarinfo)\n if ksi:\n arcname = 'home/' + rd + '/shell-integration'\n tf.add(shell_integration_dir, arcname=arcname, filter=filter_from_globs(\n f'{arcname}/ssh/*', # bootstrap files are sent as command line args\n f'{arcname}/zsh/kitty.zsh', # present for legacy compat not needed by ssh kitten\n ))\n if ssh_opts.remote_kitty != 'no':\n arcname = 'home/' + rd + '/kitty'\n add_data_as_file(tf, arcname + '/version', str_version.encode('ascii'))\n tf.add(shell_integration_dir + '/ssh/kitty', arcname=arcname + '/bin/kitty', filter=normalize_tarinfo)\n tf.add(shell_integration_dir + '/ssh/kitten', arcname=arcname + '/bin/kitten', filter=normalize_tarinfo)\n tf.add(f'{terminfo_dir}/kitty.terminfo', arcname='home/.terminfo/kitty.terminfo', filter=normalize_tarinfo)\n tf.add(glob.glob(f'{terminfo_dir}/*/xterm-kitty')[0], arcname='home/.terminfo/x/xterm-kitty', filter=normalize_tarinfo)\n return buf.getvalue()\n\n\ndef get_ssh_data(msg: str, request_id: str) -> Iterator[bytes]:\n yield b'\\nKITTY_DATA_START\\n' # to discard leading data\n 
try:\n msg = standard_b64decode(msg).decode('utf-8')\n md = dict(x.split('=', 1) for x in msg.split(':'))\n pw = md['pw']\n pwfilename = md['pwfile']\n rq_id = md['id']\n except Exception:\n traceback.print_exc()\n yield b'invalid ssh data request message\\n'\n else:\n try:\n env_data = read_data_from_shared_memory(pwfilename)\n if pw != env_data['pw']:\n raise ValueError('Incorrect password')\n if rq_id != request_id:\n raise ValueError(f'Incorrect request id: {rq_id!r} expecting the KITTY_PID-KITTY_WINDOW_ID for the current kitty window')\n except Exception as e:\n traceback.print_exc()\n yield f'{e}\\n'.encode('utf-8')\n else:\n yield b'OK\\n'\n ssh_opts = SSHOptions(env_data['opts'])\n ssh_opts.copy = {k: CopyInstruction(*v) for k, v in ssh_opts.copy.items()}\n encoded_data = memoryview(env_data['tarfile'].encode('ascii'))\n # macOS has a 255 byte limit on its input queue as per man stty.\n # Not clear if that applies to canonical mode input as well, but\n # better to be safe.\n line_sz = 254\n while encoded_data:\n yield encoded_data[:line_sz]\n yield b'\\n'\n encoded_data = encoded_data[line_sz:]\n yield b'KITTY_DATA_END\\n'\n\n\ndef safe_remove(x: str) -> None:\n with suppress(OSError):\n os.remove(x)\n\n\ndef prepare_script(ans: str, replacements: Dict[str, str], script_type: str) -> str:\n for k in ('EXEC_CMD', 'EXPORT_HOME_CMD'):\n replacements[k] = replacements.get(k, '')\n\n def sub(m: 're.Match[str]') -> str:\n return replacements[m.group()]\n\n return re.sub('|'.join(fr'\\b{k}\\b' for k in replacements), sub, ans)\n\n\ndef prepare_exec_cmd(remote_args: Sequence[str], is_python: bool) -> str:\n # ssh simply concatenates multiple commands using a space see\n # line 1129 of ssh.c and on the remote side sshd.c runs the\n # concatenated command as shell -c cmd\n if is_python:\n return standard_b64encode(' '.join(remote_args).encode('utf-8')).decode('ascii')\n args = ' '.join(c.replace(\"'\", \"\"\"'\"'\"'\"\"\") for c in remote_args)\n return f\"\"\"unset KITTY_SHELL_INTEGRATION; exec \"$login_shell\" -c '{args}'\"\"\"\n\n\ndef prepare_export_home_cmd(ssh_opts: SSHOptions, is_python: bool) -> str:\n home = ssh_opts.env.get('HOME')\n if home == '_kitty_copy_env_var_':\n home = os.environ.get('HOME')\n if home:\n if is_python:\n return standard_b64encode(home.encode('utf-8')).decode('ascii')\n else:\n return f'export HOME={quote_env_val(home)}; cd \"$HOME\"'\n return ''\n\n\ndef bootstrap_script(\n ssh_opts: SSHOptions, script_type: str = 'sh', remote_args: Sequence[str] = (),\n test_script: str = '', request_id: Optional[str] = None, cli_hostname: str = '', cli_uname: str = '',\n request_data: bool = False, echo_on: bool = True, literal_env: Dict[str, str] = {}\n) -> Tuple[str, Dict[str, str], str]:\n if request_id is None:\n request_id = os.environ['KITTY_PID'] + '-' + os.environ['KITTY_WINDOW_ID']\n is_python = script_type == 'py'\n export_home_cmd = prepare_export_home_cmd(ssh_opts, is_python) if 'HOME' in ssh_opts.env else ''\n exec_cmd = prepare_exec_cmd(remote_args, is_python) if remote_args else ''\n with open(os.path.join(shell_integration_dir, 'ssh', f'bootstrap.{script_type}')) as f:\n ans = f.read()\n pw = secrets.token_hex()\n tfd = standard_b64encode(make_tarfile(ssh_opts, dict(os.environ), 'gz' if script_type == 'sh' else 'bz2', literal_env=literal_env)).decode('ascii')\n data = {'pw': pw, 'opts': ssh_opts._asdict(), 'hostname': cli_hostname, 'uname': cli_uname, 'tarfile': tfd}\n shm_name = create_shared_memory(data, prefix=f'kssh-{os.getpid()}-')\n sensitive_data = 
{'REQUEST_ID': request_id, 'DATA_PASSWORD': pw, 'PASSWORD_FILENAME': shm_name}\n replacements = {\n 'EXPORT_HOME_CMD': export_home_cmd,\n 'EXEC_CMD': exec_cmd, 'TEST_SCRIPT': test_script,\n 'REQUEST_DATA': '1' if request_data else '0', 'ECHO_ON': '1' if echo_on else '0',\n }\n sd = replacements.copy()\n if request_data:\n sd.update(sensitive_data)\n replacements.update(sensitive_data)\n return prepare_script(ans, sd, script_type), replacements, shm_name\n\n\ndef get_ssh_cli() -> Tuple[Set[str], Set[str]]:\n other_ssh_args: Set[str] = set()\n boolean_ssh_args: Set[str] = set()\n for k, v in ssh_options().items():\n k = f'-{k}'\n if v:\n other_ssh_args.add(k)\n else:\n boolean_ssh_args.add(k)\n return boolean_ssh_args, other_ssh_args\n\n\ndef is_extra_arg(arg: str, extra_args: Tuple[str, ...]) -> str:\n for x in extra_args:\n if arg == x or arg.startswith(f'{x}='):\n return x\n return ''\n\n\ndef get_connection_data(args: List[str], cwd: str = '', extra_args: Tuple[str, ...] = ()) -> Optional[SSHConnectionData]:\n boolean_ssh_args, other_ssh_args = get_ssh_cli()\n port: Optional[int] = None\n expecting_port = expecting_identity = False\n expecting_option_val = False\n expecting_hostname = False\n expecting_extra_val = ''\n host_name = identity_file = found_ssh = ''\n found_extra_args: List[Tuple[str, str]] = []\n\n for i, arg in enumerate(args):\n if not found_ssh:\n if os.path.basename(arg).lower() in ('ssh', 'ssh.exe'):\n found_ssh = arg\n continue\n if expecting_hostname:\n host_name = arg\n continue\n if arg.startswith('-') and not expecting_option_val:\n if arg in boolean_ssh_args:\n continue\n if arg == '--':\n expecting_hostname = True\n if arg.startswith('-p'):\n if arg[2:].isdigit():\n with suppress(Exception):\n port = int(arg[2:])\n continue\n elif arg == '-p':\n expecting_port = True\n elif arg.startswith('-i'):\n if arg == '-i':\n expecting_identity = True\n else:\n identity_file = arg[2:]\n continue\n if arg.startswith('--') and extra_args:\n matching_ex = is_extra_arg(arg, extra_args)\n if matching_ex:\n if '=' in arg:\n exval = arg.partition('=')[-1]\n found_extra_args.append((matching_ex, exval))\n continue\n expecting_extra_val = matching_ex\n\n expecting_option_val = True\n continue\n\n if expecting_option_val:\n if expecting_port:\n with suppress(Exception):\n port = int(arg)\n expecting_port = False\n elif expecting_identity:\n identity_file = arg\n elif expecting_extra_val:\n found_extra_args.append((expecting_extra_val, arg))\n expecting_extra_val = ''\n expecting_option_val = False\n continue\n\n if not host_name:\n host_name = arg\n if not host_name:\n return None\n if host_name.startswith('ssh://'):\n from urllib.parse import urlparse\n purl = urlparse(host_name)\n if purl.hostname:\n host_name = purl.hostname\n if purl.username:\n host_name = f'{purl.username}@{host_name}'\n if port is None and purl.port:\n port = purl.port\n if identity_file:\n if not os.path.isabs(identity_file):\n identity_file = os.path.expanduser(identity_file)\n if not os.path.isabs(identity_file):\n identity_file = os.path.normpath(os.path.join(cwd or os.getcwd(), identity_file))\n\n return SSHConnectionData(found_ssh, host_name, port, identity_file, tuple(found_extra_args))\n\n\nclass InvalidSSHArgs(ValueError):\n\n def __init__(self, msg: str = ''):\n super().__init__(msg)\n self.err_msg = msg\n\n def system_exit(self) -> None:\n if self.err_msg:\n print(self.err_msg, file=sys.stderr)\n os.execlp(ssh_exe(), 'ssh')\n\n\npassthrough_args = {f'-{x}' for x in 'NnfGT'}\n\n\ndef 
parse_ssh_args(args: List[str], extra_args: Tuple[str, ...] = ()) -> Tuple[List[str], List[str], bool, Tuple[str, ...]]:\n boolean_ssh_args, other_ssh_args = get_ssh_cli()\n ssh_args = []\n server_args: List[str] = []\n expecting_option_val = False\n passthrough = False\n stop_option_processing = False\n found_extra_args: List[str] = []\n expecting_extra_val = ''\n for argument in args:\n if len(server_args) > 1 or stop_option_processing:\n server_args.append(argument)\n continue\n if argument.startswith('-') and not expecting_option_val:\n if argument == '--':\n stop_option_processing = True\n continue\n if extra_args:\n matching_ex = is_extra_arg(argument, extra_args)\n if matching_ex:\n if '=' in argument:\n exval = argument.partition('=')[-1]\n found_extra_args.extend((matching_ex, exval))\n else:\n expecting_extra_val = matching_ex\n expecting_option_val = True\n continue\n # could be a multi-character option\n all_args = argument[1:]\n for i, arg in enumerate(all_args):\n arg = f'-{arg}'\n if arg in passthrough_args:\n passthrough = True\n if arg in boolean_ssh_args:\n ssh_args.append(arg)\n continue\n if arg in other_ssh_args:\n ssh_args.append(arg)\n rest = all_args[i+1:]\n if rest:\n ssh_args.append(rest)\n else:\n expecting_option_val = True\n break\n raise InvalidSSHArgs(f'unknown option -- {arg[1:]}')\n continue\n if expecting_option_val:\n if expecting_extra_val:\n found_extra_args.extend((expecting_extra_val, argument))\n expecting_extra_val = ''\n else:\n ssh_args.append(argument)\n expecting_option_val = False\n continue\n server_args.append(argument)\n if not server_args:\n raise InvalidSSHArgs()\n return ssh_args, server_args, passthrough, tuple(found_extra_args)\n\n\ndef wrap_bootstrap_script(sh_script: str, interpreter: str) -> List[str]:\n # sshd will execute the command we pass it by join all command line\n # arguments with a space and passing it as a single argument to the users\n # login shell with -c. If the user has a non POSIX login shell it might\n # have different escaping semantics and syntax, so the command it should\n # execute has to be as simple as possible, basically of the form\n # interpreter -c unwrap_script escaped_bootstrap_script\n # The unwrap_script is responsible for unescaping the bootstrap script and\n # executing it.\n q = os.path.basename(interpreter).lower()\n is_python = 'python' in q\n if is_python:\n es = standard_b64encode(sh_script.encode('utf-8')).decode('ascii')\n unwrap_script = '''\"import base64, sys; eval(compile(base64.standard_b64decode(sys.argv[-1]), 'bootstrap.py', 'exec'))\"'''\n else:\n # We cant rely on base64 being available on the remote system, so instead\n # we quote the bootstrap script by replacing ' and \\ with \\v and \\f\n # also replacing \\n and ! 
with \\r and \\b for tcsh\n # finally surrounding with '\n es = \"'\" + sh_script.replace(\"'\", '\\v').replace('\\\\', '\\f').replace('\\n', '\\r').replace('!', '\\b') + \"'\"\n unwrap_script = r\"\"\"'eval \"$(echo \"$0\" | tr \\\\\\v\\\\\\f\\\\\\r\\\\\\b \\\\\\047\\\\\\134\\\\\\n\\\\\\041)\"' \"\"\"\n # exec is supported by all sh like shells, and fish and csh\n return ['exec', interpreter, '-c', unwrap_script, es]\n\n\ndef get_remote_command(\n remote_args: List[str], ssh_opts: SSHOptions, cli_hostname: str = '', cli_uname: str = '',\n echo_on: bool = True, request_data: bool = False, literal_env: Dict[str, str] = {}\n) -> Tuple[List[str], Dict[str, str], str]:\n interpreter = ssh_opts.interpreter\n q = os.path.basename(interpreter).lower()\n is_python = 'python' in q\n sh_script, replacements, shm_name = bootstrap_script(\n ssh_opts, script_type='py' if is_python else 'sh', remote_args=remote_args, literal_env=literal_env,\n cli_hostname=cli_hostname, cli_uname=cli_uname, echo_on=echo_on, request_data=request_data)\n return wrap_bootstrap_script(sh_script, interpreter), replacements, shm_name\n\n\ndef connection_sharing_args(kitty_pid: int) -> List[str]:\n rd = runtime_dir()\n # Bloody OpenSSH generates a 40 char hash and in creating the socket\n # appends a 27 char temp suffix to it. Socket max path length is approx\n # ~104 chars. macOS has no system runtime dir so we use a cache dir in\n # /Users/WHY_DOES_ANYONE_USE_MACOS/Library/Caches/APPLE_ARE_IDIOTIC\n if len(rd) > 35 and os.path.isdir('/tmp'):\n idiotic_design = f'/tmp/kssh-rdir-{os.getuid()}'\n try:\n os.symlink(rd, idiotic_design)\n except FileExistsError:\n try:\n dest = os.readlink(idiotic_design)\n except OSError as e:\n raise ValueError(f'The {idiotic_design} symlink could not be created as something with that name exists already') from e\n else:\n if dest != rd:\n with tempfile.TemporaryDirectory(dir='/tmp') as tdir:\n tlink = os.path.join(tdir, 'sigh')\n os.symlink(rd, tlink)\n os.rename(tlink, idiotic_design)\n rd = idiotic_design\n\n cp = os.path.join(rd, ssh_control_master_template.format(kitty_pid=kitty_pid, ssh_placeholder='%C'))\n ans: List[str] = [\n '-o', 'ControlMaster=auto',\n '-o', f'ControlPath={cp}',\n '-o', 'ControlPersist=yes',\n '-o', 'ServerAliveInterval=60',\n '-o', 'ServerAliveCountMax=5',\n '-o', 'TCPKeepAlive=no',\n ]\n return ans\n\n\n@contextmanager\ndef restore_terminal_state() -> Iterator[bool]:\n with open(os.ctermid()) as f:\n val = termios.tcgetattr(f.fileno())\n print(end=SAVE_PRIVATE_MODE_VALUES)\n print(end=set_mode(Mode.HANDLE_TERMIOS_SIGNALS), flush=True)\n try:\n yield bool(val[3] & termios.ECHO)\n finally:\n termios.tcsetattr(f.fileno(), termios.TCSAFLUSH, val)\n print(end=RESTORE_PRIVATE_MODE_VALUES, flush=True)\n\n\ndef dcs_to_kitty(payload: Union[bytes, str], type: str = 'ssh') -> bytes:\n if isinstance(payload, str):\n payload = payload.encode('utf-8')\n payload = standard_b64encode(payload)\n ans = b'\\033P@kitty-' + type.encode('ascii') + b'|' + payload\n tmux = running_in_tmux()\n if tmux:\n cp = subprocess.run([tmux, 'set', '-p', 'allow-passthrough', 'on'])\n if cp.returncode != 0:\n raise SystemExit(cp.returncode)\n ans = b'\\033Ptmux;\\033' + ans + b'\\033\\033\\\\\\033\\\\'\n else:\n ans += b'\\033\\\\'\n return ans\n\n\n@run_once\ndef ssh_version() -> Tuple[int, int]:\n o = subprocess.check_output([ssh_exe(), '-V'], stderr=subprocess.STDOUT).decode()\n m = re.match(r'OpenSSH_(\\d+).(\\d+)', o)\n if m is None:\n raise ValueError(f'Invalid version string for OpenSSH: {o}')\n 
return int(m.group(1)), int(m.group(2))\n\n\n@contextmanager\ndef drain_potential_tty_garbage(p: 'subprocess.Popen[bytes]', data_request: str) -> Iterator[None]:\n with open(os.open(os.ctermid(), os.O_CLOEXEC | os.O_RDWR | os.O_NOCTTY), 'wb') as tty:\n if data_request:\n turn_off_echo(tty.fileno())\n tty.write(dcs_to_kitty(data_request))\n tty.flush()\n try:\n yield\n finally:\n # discard queued input data on tty in case data transmission was\n # interrupted due to SSH failure, avoids spewing garbage to screen\n from uuid import uuid4\n canary = uuid4().hex.encode('ascii')\n turn_off_echo(tty.fileno())\n tty.write(dcs_to_kitty(canary + b'\\n\\r', type='echo'))\n tty.flush()\n data = b''\n give_up_at = time.monotonic() + 2\n tty_fd = tty.fileno()\n while time.monotonic() < give_up_at and canary not in data:\n with suppress(KeyboardInterrupt):\n rd, wr, err = select([tty_fd], [], [tty_fd], max(0, give_up_at - time.monotonic()))\n if err or not rd:\n break\n q = os.read(tty_fd, io.DEFAULT_BUFFER_SIZE)\n if not q:\n break\n data += q\n\n\ndef change_colors(color_scheme: str) -> bool:\n if not color_scheme:\n return False\n from kittens.themes.collection import NoCacheFound, load_themes, text_as_opts\n from kittens.themes.main import colors_as_escape_codes\n if color_scheme.endswith('.conf'):\n conf_file = resolve_abs_or_config_path(color_scheme)\n try:\n with open(conf_file) as f:\n opts = text_as_opts(f.read())\n except FileNotFoundError:\n raise SystemExit(f'Failed to find the color conf file: {expandvars(conf_file)}')\n else:\n try:\n themes = load_themes(-1)\n except NoCacheFound:\n themes = load_themes()\n cs = expandvars(color_scheme)\n try:\n theme = themes[cs]\n except KeyError:\n raise SystemExit(f'Failed to find the color theme: {cs}')\n opts = theme.kitty_opts\n raw = colors_as_escape_codes(opts)\n print(save_colors(), sep='', end=raw, flush=True)\n return True\n\n\ndef add_cloned_env(shm_name: str) -> Dict[str, str]:\n try:\n return cast(Dict[str, str], read_data_from_shared_memory(shm_name))\n except FileNotFoundError:\n pass\n return {}\n\n\ndef run_ssh(ssh_args: List[str], server_args: List[str], found_extra_args: Tuple[str, ...]) -> NoReturn:\n cmd = [ssh_exe()] + ssh_args\n hostname, remote_args = server_args[0], server_args[1:]\n if not remote_args:\n cmd.append('-t')\n insertion_point = len(cmd)\n cmd.append('--')\n cmd.append(hostname)\n uname = getuser()\n if hostname.startswith('ssh://'):\n from urllib.parse import urlparse\n purl = urlparse(hostname)\n hostname_for_match = purl.hostname or hostname[6:].split('/', 1)[0]\n uname = purl.username or uname\n elif '@' in hostname and hostname[0] != '@':\n uname, hostname_for_match = hostname.split('@', 1)\n else:\n hostname_for_match = hostname\n hostname_for_match = hostname_for_match.split('@', 1)[-1].split(':', 1)[0]\n overrides: List[str] = []\n literal_env: Dict[str, str] = {}\n pat = re.compile(r'^([a-zA-Z0-9_]+)[ \\t]*=')\n for i, a in enumerate(found_extra_args):\n if i % 2 == 1:\n aq = pat.sub(r'\\1 ', a.lstrip())\n key = aq.split(maxsplit=1)[0]\n if key == 'clone_env':\n literal_env = add_cloned_env(aq.split(maxsplit=1)[1])\n elif key != 'hostname':\n overrides.append(aq)\n if overrides:\n overrides.insert(0, f'hostname {uname}@{hostname_for_match}')\n host_opts = init_config(hostname_for_match, uname, overrides)\n if host_opts.share_connections:\n cmd[insertion_point:insertion_point] = connection_sharing_args(int(os.environ['KITTY_PID']))\n use_kitty_askpass = host_opts.askpass == 'native' or (host_opts.askpass == 
'unless-set' and 'SSH_ASKPASS' not in os.environ)\n need_to_request_data = True\n if use_kitty_askpass:\n sentinel = os.path.join(cache_dir(), 'openssh-is-new-enough-for-askpass')\n sentinel_exists = os.path.exists(sentinel)\n if sentinel_exists or ssh_version() >= (8, 4):\n if not sentinel_exists:\n open(sentinel, 'w').close()\n # SSH_ASKPASS_REQUIRE was introduced in 8.4 release on 2020-09-27\n need_to_request_data = False\n os.environ['SSH_ASKPASS_REQUIRE'] = 'force'\n os.environ['SSH_ASKPASS'] = os.path.join(shell_integration_dir, 'ssh', 'askpass.py')\n if need_to_request_data and host_opts.share_connections:\n cp = subprocess.run(cmd[:1] + ['-O', 'check'] + cmd[1:], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n if cp.returncode == 0:\n # we will use the master connection so SSH does not need to use the tty\n need_to_request_data = False\n with restore_terminal_state() as echo_on:\n rcmd, replacements, shm_name = get_remote_command(\n remote_args, host_opts, hostname_for_match, uname, echo_on, request_data=need_to_request_data, literal_env=literal_env)\n cmd += rcmd\n colors_changed = change_colors(host_opts.color_scheme)\n try:\n p = subprocess.Popen(cmd)\n except FileNotFoundError:\n raise SystemExit('Could not find the ssh executable, is it in your PATH?')\n else:\n rq = '' if need_to_request_data else 'id={REQUEST_ID}:pwfile={PASSWORD_FILENAME}:pw={DATA_PASSWORD}'.format(**replacements)\n with drain_potential_tty_garbage(p, rq):\n raise SystemExit(p.wait())\n finally:\n if colors_changed:\n print(end=restore_colors(), flush=True)\n\n\ndef main(args: List[str]) -> None:\n args = args[1:]\n if args and args[0] == 'use-python':\n args = args[1:] # backwards compat from when we had a python implementation\n try:\n ssh_args, server_args, passthrough, found_extra_args = parse_ssh_args(args, extra_args=('--kitten',))\n except InvalidSSHArgs as e:\n e.system_exit()\n if passthrough:\n if found_extra_args:\n raise SystemExit(f'The SSH kitten cannot work with the options: {\", \".join(passthrough_args)}')\n os.execlp(ssh_exe(), 'ssh', *args)\n\n if not os.environ.get('KITTY_WINDOW_ID') or not os.environ.get('KITTY_PID'):\n raise SystemExit('The SSH kitten is meant to run inside a kitty window')\n if not sys.stdin.isatty():\n raise SystemExit('The SSH kitten is meant for interactive use only, STDIN must be a terminal')\n try:\n run_ssh(ssh_args, server_args, found_extra_args)\n except KeyboardInterrupt:\n sys.excepthook = lambda *a: None\n raise\n\n\nif __name__ == '__main__':\n main(sys.argv)\nelif __name__ == '__wrapper_of__':\n cd = sys.cli_docs # type: ignore\n cd['wrapper_of'] = 'ssh'\nelif __name__ == '__conf__':\n from .options.definition import definition\n sys.options_definition = definition # type: ignore\n", "path": "kittens/ssh/main.py" } ]
[ { "content": "#!/usr/bin/env python3\n# License: GPL v3 Copyright: 2018, Kovid Goyal <kovid at kovidgoyal.net>\n\nimport fnmatch\nimport glob\nimport io\nimport json\nimport os\nimport re\nimport secrets\nimport shlex\nimport shutil\nimport stat\nimport subprocess\nimport sys\nimport tarfile\nimport tempfile\nimport termios\nimport time\nimport traceback\nfrom base64 import standard_b64decode, standard_b64encode\nfrom contextlib import contextmanager, suppress\nfrom getpass import getuser\nfrom select import select\nfrom typing import Any, Callable, Dict, Iterator, List, NoReturn, Optional, Sequence, Set, Tuple, Union, cast\n\nfrom kitty.constants import cache_dir, runtime_dir, shell_integration_dir, ssh_control_master_template, str_version, terminfo_dir\nfrom kitty.shell_integration import as_str_literal\nfrom kitty.shm import SharedMemory\nfrom kitty.types import run_once\nfrom kitty.utils import SSHConnectionData, expandvars, resolve_abs_or_config_path\nfrom kitty.utils import set_echo as turn_off_echo\n\nfrom ..tui.operations import RESTORE_PRIVATE_MODE_VALUES, SAVE_PRIVATE_MODE_VALUES, Mode, restore_colors, save_colors, set_mode\nfrom ..tui.utils import kitty_opts, running_in_tmux\nfrom .config import init_config\nfrom .copy import CopyInstruction\nfrom .options.types import Options as SSHOptions\nfrom .options.utils import DELETE_ENV_VAR\nfrom .utils import create_shared_memory, ssh_options\n\n\n@run_once\ndef ssh_exe() -> str:\n return shutil.which('ssh') or 'ssh'\n\n\ndef read_data_from_shared_memory(shm_name: str) -> Any:\n with SharedMemory(shm_name, readonly=True) as shm:\n shm.unlink()\n if shm.stats.st_uid != os.geteuid() or shm.stats.st_gid != os.getegid():\n raise ValueError('Incorrect owner on pwfile')\n return json.loads(shm.read_data_with_size())\n\n\n# See https://www.gnu.org/software/bash/manual/html_node/Double-Quotes.html\nquote_pat = re.compile('([\\\\`\"])')\n\n\ndef quote_env_val(x: str, literal_quote: bool = False) -> str:\n if literal_quote:\n return as_str_literal(x)\n x = quote_pat.sub(r'\\\\\\1', x)\n x = x.replace('$(', r'\\$(') # prevent execution with $()\n return f'\"{x}\"'\n\n\ndef serialize_env(literal_env: Dict[str, str], env: Dict[str, str], base_env: Dict[str, str], for_python: bool = False) -> bytes:\n lines = []\n literal_quote = True\n\n if for_python:\n def a(k: str, val: str = '', prefix: str = 'export') -> None:\n if val:\n lines.append(f'{prefix} {json.dumps((k, val, literal_quote))}')\n else:\n lines.append(f'{prefix} {json.dumps((k,))}')\n else:\n def a(k: str, val: str = '', prefix: str = 'export') -> None:\n if val:\n lines.append(f'{prefix} {shlex.quote(k)}={quote_env_val(val, literal_quote)}')\n else:\n lines.append(f'{prefix} {shlex.quote(k)}')\n\n for k, v in literal_env.items():\n a(k, v)\n\n literal_quote = False\n for k in sorted(env):\n v = env[k]\n if v == DELETE_ENV_VAR:\n a(k, prefix='unset')\n elif v == '_kitty_copy_env_var_':\n q = base_env.get(k)\n if q is not None:\n a(k, q)\n else:\n a(k, v)\n return '\\n'.join(lines).encode('utf-8')\n\n\ndef make_tarfile(ssh_opts: SSHOptions, base_env: Dict[str, str], compression: str = 'gz', literal_env: Dict[str, str] = {}) -> bytes:\n\n def normalize_tarinfo(tarinfo: tarfile.TarInfo) -> tarfile.TarInfo:\n tarinfo.uname = tarinfo.gname = ''\n tarinfo.uid = tarinfo.gid = 0\n # some distro's like nix mess with installed file permissions so ensure\n # files are at least readable and writable by owning user\n tarinfo.mode |= stat.S_IWUSR | stat.S_IRUSR\n return tarinfo\n\n def 
add_data_as_file(tf: tarfile.TarFile, arcname: str, data: Union[str, bytes]) -> tarfile.TarInfo:\n ans = tarfile.TarInfo(arcname)\n ans.mtime = 0\n ans.type = tarfile.REGTYPE\n if isinstance(data, str):\n data = data.encode('utf-8')\n ans.size = len(data)\n normalize_tarinfo(ans)\n tf.addfile(ans, io.BytesIO(data))\n return ans\n\n def filter_from_globs(*pats: str) -> Callable[[tarfile.TarInfo], Optional[tarfile.TarInfo]]:\n def filter(tarinfo: tarfile.TarInfo) -> Optional[tarfile.TarInfo]:\n for junk_dir in ('.DS_Store', '__pycache__'):\n for pat in (f'*/{junk_dir}', f'*/{junk_dir}/*'):\n if fnmatch.fnmatch(tarinfo.name, pat):\n return None\n for pat in pats:\n if fnmatch.fnmatch(tarinfo.name, pat):\n return None\n return normalize_tarinfo(tarinfo)\n return filter\n\n from kitty.shell_integration import get_effective_ksi_env_var\n if ssh_opts.shell_integration == 'inherited':\n ksi = get_effective_ksi_env_var(kitty_opts())\n else:\n from kitty.options.types import Options\n from kitty.options.utils import shell_integration\n ksi = get_effective_ksi_env_var(Options({'shell_integration': shell_integration(ssh_opts.shell_integration)}))\n\n env = {\n 'TERM': os.environ.get('TERM') or kitty_opts().term,\n 'COLORTERM': 'truecolor',\n }\n env.update(ssh_opts.env)\n for q in ('KITTY_WINDOW_ID', 'WINDOWID'):\n val = os.environ.get(q)\n if val is not None:\n env[q] = val\n env['KITTY_SHELL_INTEGRATION'] = ksi or DELETE_ENV_VAR\n env['KITTY_SSH_KITTEN_DATA_DIR'] = ssh_opts.remote_dir\n if ssh_opts.login_shell:\n env['KITTY_LOGIN_SHELL'] = ssh_opts.login_shell\n if ssh_opts.cwd:\n env['KITTY_LOGIN_CWD'] = ssh_opts.cwd\n if ssh_opts.remote_kitty != 'no':\n env['KITTY_REMOTE'] = ssh_opts.remote_kitty\n if os.environ.get('KITTY_PUBLIC_KEY'):\n env.pop('KITTY_PUBLIC_KEY', None)\n literal_env['KITTY_PUBLIC_KEY'] = os.environ['KITTY_PUBLIC_KEY']\n env_script = serialize_env(literal_env, env, base_env, for_python=compression != 'gz')\n buf = io.BytesIO()\n with tarfile.open(mode=f'w:{compression}', fileobj=buf, encoding='utf-8') as tf:\n rd = ssh_opts.remote_dir.rstrip('/')\n for ci in ssh_opts.copy.values():\n tf.add(ci.local_path, arcname=ci.arcname, filter=filter_from_globs(*ci.exclude_patterns))\n add_data_as_file(tf, 'data.sh', env_script)\n if compression == 'gz':\n tf.add(f'{shell_integration_dir}/ssh/bootstrap-utils.sh', arcname='bootstrap-utils.sh', filter=normalize_tarinfo)\n if ksi:\n arcname = 'home/' + rd + '/shell-integration'\n tf.add(shell_integration_dir, arcname=arcname, filter=filter_from_globs(\n f'{arcname}/ssh/*', # bootstrap files are sent as command line args\n f'{arcname}/zsh/kitty.zsh', # present for legacy compat not needed by ssh kitten\n ))\n if ssh_opts.remote_kitty != 'no':\n arcname = 'home/' + rd + '/kitty'\n add_data_as_file(tf, arcname + '/version', str_version.encode('ascii'))\n tf.add(shell_integration_dir + '/ssh/kitty', arcname=arcname + '/bin/kitty', filter=normalize_tarinfo)\n tf.add(shell_integration_dir + '/ssh/kitten', arcname=arcname + '/bin/kitten', filter=normalize_tarinfo)\n tf.add(f'{terminfo_dir}/kitty.terminfo', arcname='home/.terminfo/kitty.terminfo', filter=normalize_tarinfo)\n tf.add(glob.glob(f'{terminfo_dir}/*/xterm-kitty')[0], arcname='home/.terminfo/x/xterm-kitty', filter=normalize_tarinfo)\n return buf.getvalue()\n\n\ndef get_ssh_data(msg: str, request_id: str) -> Iterator[bytes]:\n yield b'\\nKITTY_DATA_START\\n' # to discard leading data\n try:\n msg = standard_b64decode(msg).decode('utf-8')\n md = dict(x.split('=', 1) for x in msg.split(':'))\n 
pw = md['pw']\n pwfilename = md['pwfile']\n rq_id = md['id']\n except Exception:\n traceback.print_exc()\n yield b'invalid ssh data request message\\n'\n else:\n try:\n env_data = read_data_from_shared_memory(pwfilename)\n if pw != env_data['pw']:\n raise ValueError('Incorrect password')\n if rq_id != request_id:\n raise ValueError(f'Incorrect request id: {rq_id!r} expecting the KITTY_PID-KITTY_WINDOW_ID for the current kitty window')\n except Exception as e:\n traceback.print_exc()\n yield f'{e}\\n'.encode('utf-8')\n else:\n yield b'OK\\n'\n ssh_opts = SSHOptions(env_data['opts'])\n ssh_opts.copy = {k: CopyInstruction(*v) for k, v in ssh_opts.copy.items()}\n encoded_data = memoryview(env_data['tarfile'].encode('ascii'))\n # macOS has a 255 byte limit on its input queue as per man stty.\n # Not clear if that applies to canonical mode input as well, but\n # better to be safe.\n line_sz = 254\n while encoded_data:\n yield encoded_data[:line_sz]\n yield b'\\n'\n encoded_data = encoded_data[line_sz:]\n yield b'KITTY_DATA_END\\n'\n\n\ndef safe_remove(x: str) -> None:\n with suppress(OSError):\n os.remove(x)\n\n\ndef prepare_script(ans: str, replacements: Dict[str, str], script_type: str) -> str:\n for k in ('EXEC_CMD', 'EXPORT_HOME_CMD'):\n replacements[k] = replacements.get(k, '')\n\n def sub(m: 're.Match[str]') -> str:\n return replacements[m.group()]\n\n return re.sub('|'.join(fr'\\b{k}\\b' for k in replacements), sub, ans)\n\n\ndef prepare_exec_cmd(remote_args: Sequence[str], is_python: bool) -> str:\n # ssh simply concatenates multiple commands using a space see\n # line 1129 of ssh.c and on the remote side sshd.c runs the\n # concatenated command as shell -c cmd\n if is_python:\n return standard_b64encode(' '.join(remote_args).encode('utf-8')).decode('ascii')\n args = ' '.join(c.replace(\"'\", \"\"\"'\"'\"'\"\"\") for c in remote_args)\n return f\"\"\"unset KITTY_SHELL_INTEGRATION; exec \"$login_shell\" -c '{args}'\"\"\"\n\n\ndef prepare_export_home_cmd(ssh_opts: SSHOptions, is_python: bool) -> str:\n home = ssh_opts.env.get('HOME')\n if home == '_kitty_copy_env_var_':\n home = os.environ.get('HOME')\n if home:\n if is_python:\n return standard_b64encode(home.encode('utf-8')).decode('ascii')\n else:\n return f'export HOME={quote_env_val(home)}; cd \"$HOME\"'\n return ''\n\n\ndef bootstrap_script(\n ssh_opts: SSHOptions, script_type: str = 'sh', remote_args: Sequence[str] = (),\n test_script: str = '', request_id: Optional[str] = None, cli_hostname: str = '', cli_uname: str = '',\n request_data: bool = False, echo_on: bool = True, literal_env: Dict[str, str] = {}\n) -> Tuple[str, Dict[str, str], str]:\n if request_id is None:\n request_id = os.environ['KITTY_PID'] + '-' + os.environ['KITTY_WINDOW_ID']\n is_python = script_type == 'py'\n export_home_cmd = prepare_export_home_cmd(ssh_opts, is_python) if 'HOME' in ssh_opts.env else ''\n exec_cmd = prepare_exec_cmd(remote_args, is_python) if remote_args else ''\n with open(os.path.join(shell_integration_dir, 'ssh', f'bootstrap.{script_type}')) as f:\n ans = f.read()\n pw = secrets.token_hex()\n tfd = standard_b64encode(make_tarfile(ssh_opts, dict(os.environ), 'gz' if script_type == 'sh' else 'bz2', literal_env=literal_env)).decode('ascii')\n data = {'pw': pw, 'opts': ssh_opts._asdict(), 'hostname': cli_hostname, 'uname': cli_uname, 'tarfile': tfd}\n shm_name = create_shared_memory(data, prefix=f'kssh-{os.getpid()}-')\n sensitive_data = {'REQUEST_ID': request_id, 'DATA_PASSWORD': pw, 'PASSWORD_FILENAME': shm_name}\n replacements = {\n 
'EXPORT_HOME_CMD': export_home_cmd,\n 'EXEC_CMD': exec_cmd, 'TEST_SCRIPT': test_script,\n 'REQUEST_DATA': '1' if request_data else '0', 'ECHO_ON': '1' if echo_on else '0',\n }\n sd = replacements.copy()\n if request_data:\n sd.update(sensitive_data)\n replacements.update(sensitive_data)\n return prepare_script(ans, sd, script_type), replacements, shm_name\n\n\ndef get_ssh_cli() -> Tuple[Set[str], Set[str]]:\n other_ssh_args: Set[str] = set()\n boolean_ssh_args: Set[str] = set()\n for k, v in ssh_options().items():\n k = f'-{k}'\n if v:\n other_ssh_args.add(k)\n else:\n boolean_ssh_args.add(k)\n return boolean_ssh_args, other_ssh_args\n\n\ndef is_extra_arg(arg: str, extra_args: Tuple[str, ...]) -> str:\n for x in extra_args:\n if arg == x or arg.startswith(f'{x}='):\n return x\n return ''\n\n\ndef get_connection_data(args: List[str], cwd: str = '', extra_args: Tuple[str, ...] = ()) -> Optional[SSHConnectionData]:\n boolean_ssh_args, other_ssh_args = get_ssh_cli()\n port: Optional[int] = None\n expecting_port = expecting_identity = False\n expecting_option_val = False\n expecting_hostname = False\n expecting_extra_val = ''\n host_name = identity_file = found_ssh = ''\n found_extra_args: List[Tuple[str, str]] = []\n\n for i, arg in enumerate(args):\n if not found_ssh:\n if os.path.basename(arg).lower() in ('ssh', 'ssh.exe'):\n found_ssh = arg\n continue\n if expecting_hostname:\n host_name = arg\n continue\n if arg.startswith('-') and not expecting_option_val:\n if arg in boolean_ssh_args:\n continue\n if arg == '--':\n expecting_hostname = True\n if arg.startswith('-p'):\n if arg[2:].isdigit():\n with suppress(Exception):\n port = int(arg[2:])\n continue\n elif arg == '-p':\n expecting_port = True\n elif arg.startswith('-i'):\n if arg == '-i':\n expecting_identity = True\n else:\n identity_file = arg[2:]\n continue\n if arg.startswith('--') and extra_args:\n matching_ex = is_extra_arg(arg, extra_args)\n if matching_ex:\n if '=' in arg:\n exval = arg.partition('=')[-1]\n found_extra_args.append((matching_ex, exval))\n continue\n expecting_extra_val = matching_ex\n\n expecting_option_val = True\n continue\n\n if expecting_option_val:\n if expecting_port:\n with suppress(Exception):\n port = int(arg)\n expecting_port = False\n elif expecting_identity:\n identity_file = arg\n elif expecting_extra_val:\n found_extra_args.append((expecting_extra_val, arg))\n expecting_extra_val = ''\n expecting_option_val = False\n continue\n\n if not host_name:\n host_name = arg\n if not host_name:\n return None\n if host_name.startswith('ssh://'):\n from urllib.parse import urlparse\n purl = urlparse(host_name)\n if purl.hostname:\n host_name = purl.hostname\n if purl.username:\n host_name = f'{purl.username}@{host_name}'\n if port is None and purl.port:\n port = purl.port\n if identity_file:\n if not os.path.isabs(identity_file):\n identity_file = os.path.expanduser(identity_file)\n if not os.path.isabs(identity_file):\n identity_file = os.path.normpath(os.path.join(cwd or os.getcwd(), identity_file))\n\n return SSHConnectionData(found_ssh, host_name, port, identity_file, tuple(found_extra_args))\n\n\nclass InvalidSSHArgs(ValueError):\n\n def __init__(self, msg: str = ''):\n super().__init__(msg)\n self.err_msg = msg\n\n def system_exit(self) -> None:\n if self.err_msg:\n print(self.err_msg, file=sys.stderr)\n os.execlp(ssh_exe(), 'ssh')\n\n\npassthrough_args = {f'-{x}' for x in 'NnfGT'}\n\n\ndef parse_ssh_args(args: List[str], extra_args: Tuple[str, ...] 
= ()) -> Tuple[List[str], List[str], bool, Tuple[str, ...]]:\n boolean_ssh_args, other_ssh_args = get_ssh_cli()\n ssh_args = []\n server_args: List[str] = []\n expecting_option_val = False\n passthrough = False\n stop_option_processing = False\n found_extra_args: List[str] = []\n expecting_extra_val = ''\n for argument in args:\n if len(server_args) > 1 or stop_option_processing:\n server_args.append(argument)\n continue\n if argument.startswith('-') and not expecting_option_val:\n if argument == '--':\n stop_option_processing = True\n continue\n if extra_args:\n matching_ex = is_extra_arg(argument, extra_args)\n if matching_ex:\n if '=' in argument:\n exval = argument.partition('=')[-1]\n found_extra_args.extend((matching_ex, exval))\n else:\n expecting_extra_val = matching_ex\n expecting_option_val = True\n continue\n # could be a multi-character option\n all_args = argument[1:]\n for i, arg in enumerate(all_args):\n arg = f'-{arg}'\n if arg in passthrough_args:\n passthrough = True\n if arg in boolean_ssh_args:\n ssh_args.append(arg)\n continue\n if arg in other_ssh_args:\n ssh_args.append(arg)\n rest = all_args[i+1:]\n if rest:\n ssh_args.append(rest)\n else:\n expecting_option_val = True\n break\n raise InvalidSSHArgs(f'unknown option -- {arg[1:]}')\n continue\n if expecting_option_val:\n if expecting_extra_val:\n found_extra_args.extend((expecting_extra_val, argument))\n expecting_extra_val = ''\n else:\n ssh_args.append(argument)\n expecting_option_val = False\n continue\n server_args.append(argument)\n if not server_args:\n raise InvalidSSHArgs()\n return ssh_args, server_args, passthrough, tuple(found_extra_args)\n\n\ndef wrap_bootstrap_script(sh_script: str, interpreter: str) -> List[str]:\n # sshd will execute the command we pass it by join all command line\n # arguments with a space and passing it as a single argument to the users\n # login shell with -c. If the user has a non POSIX login shell it might\n # have different escaping semantics and syntax, so the command it should\n # execute has to be as simple as possible, basically of the form\n # interpreter -c unwrap_script escaped_bootstrap_script\n # The unwrap_script is responsible for unescaping the bootstrap script and\n # executing it.\n q = os.path.basename(interpreter).lower()\n is_python = 'python' in q\n if is_python:\n es = standard_b64encode(sh_script.encode('utf-8')).decode('ascii')\n unwrap_script = '''\"import base64, sys; eval(compile(base64.standard_b64decode(sys.argv[-1]), 'bootstrap.py', 'exec'))\"'''\n else:\n # We cant rely on base64 being available on the remote system, so instead\n # we quote the bootstrap script by replacing ' and \\ with \\v and \\f\n # also replacing \\n and ! 
with \\r and \\b for tcsh\n # finally surrounding with '\n es = \"'\" + sh_script.replace(\"'\", '\\v').replace('\\\\', '\\f').replace('\\n', '\\r').replace('!', '\\b') + \"'\"\n unwrap_script = r\"\"\"'eval \"$(echo \"$0\" | tr \\\\\\v\\\\\\f\\\\\\r\\\\\\b \\\\\\047\\\\\\134\\\\\\n\\\\\\041)\"' \"\"\"\n # exec is supported by all sh like shells, and fish and csh\n return ['exec', interpreter, '-c', unwrap_script, es]\n\n\ndef get_remote_command(\n remote_args: List[str], ssh_opts: SSHOptions, cli_hostname: str = '', cli_uname: str = '',\n echo_on: bool = True, request_data: bool = False, literal_env: Dict[str, str] = {}\n) -> Tuple[List[str], Dict[str, str], str]:\n interpreter = ssh_opts.interpreter\n q = os.path.basename(interpreter).lower()\n is_python = 'python' in q\n sh_script, replacements, shm_name = bootstrap_script(\n ssh_opts, script_type='py' if is_python else 'sh', remote_args=remote_args, literal_env=literal_env,\n cli_hostname=cli_hostname, cli_uname=cli_uname, echo_on=echo_on, request_data=request_data)\n return wrap_bootstrap_script(sh_script, interpreter), replacements, shm_name\n\n\ndef connection_sharing_args(kitty_pid: int) -> List[str]:\n rd = runtime_dir()\n # Bloody OpenSSH generates a 40 char hash and in creating the socket\n # appends a 27 char temp suffix to it. Socket max path length is approx\n # ~104 chars. macOS has no system runtime dir so we use a cache dir in\n # /Users/WHY_DOES_ANYONE_USE_MACOS/Library/Caches/APPLE_ARE_IDIOTIC\n if len(rd) > 35 and os.path.isdir('/tmp'):\n idiotic_design = f'/tmp/kssh-rdir-{os.getuid()}'\n try:\n os.symlink(rd, idiotic_design)\n except FileExistsError:\n try:\n dest = os.readlink(idiotic_design)\n except OSError as e:\n raise ValueError(f'The {idiotic_design} symlink could not be created as something with that name exists already') from e\n else:\n if dest != rd:\n with tempfile.TemporaryDirectory(dir='/tmp') as tdir:\n tlink = os.path.join(tdir, 'sigh')\n os.symlink(rd, tlink)\n os.rename(tlink, idiotic_design)\n rd = idiotic_design\n\n cp = os.path.join(rd, ssh_control_master_template.format(kitty_pid=kitty_pid, ssh_placeholder='%C'))\n ans: List[str] = [\n '-o', 'ControlMaster=auto',\n '-o', f'ControlPath={cp}',\n '-o', 'ControlPersist=yes',\n '-o', 'ServerAliveInterval=60',\n '-o', 'ServerAliveCountMax=5',\n '-o', 'TCPKeepAlive=no',\n ]\n return ans\n\n\n@contextmanager\ndef restore_terminal_state() -> Iterator[bool]:\n with open(os.ctermid()) as f:\n val = termios.tcgetattr(f.fileno())\n print(end=SAVE_PRIVATE_MODE_VALUES)\n print(end=set_mode(Mode.HANDLE_TERMIOS_SIGNALS), flush=True)\n try:\n yield bool(val[3] & termios.ECHO)\n finally:\n termios.tcsetattr(f.fileno(), termios.TCSAFLUSH, val)\n print(end=RESTORE_PRIVATE_MODE_VALUES, flush=True)\n\n\ndef dcs_to_kitty(payload: Union[bytes, str], type: str = 'ssh') -> bytes:\n if isinstance(payload, str):\n payload = payload.encode('utf-8')\n payload = standard_b64encode(payload)\n ans = b'\\033P@kitty-' + type.encode('ascii') + b'|' + payload\n tmux = running_in_tmux()\n if tmux:\n cp = subprocess.run([tmux, 'set', '-p', 'allow-passthrough', 'on'])\n if cp.returncode != 0:\n raise SystemExit(cp.returncode)\n ans = b'\\033Ptmux;\\033' + ans + b'\\033\\033\\\\\\033\\\\'\n else:\n ans += b'\\033\\\\'\n return ans\n\n\n@run_once\ndef ssh_version() -> Tuple[int, int]:\n o = subprocess.check_output([ssh_exe(), '-V'], stderr=subprocess.STDOUT).decode()\n m = re.match(r'OpenSSH_(\\d+).(\\d+)', o)\n if m is None:\n raise ValueError(f'Invalid version string for OpenSSH: {o}')\n 
return int(m.group(1)), int(m.group(2))\n\n\n@contextmanager\ndef drain_potential_tty_garbage(p: 'subprocess.Popen[bytes]', data_request: str) -> Iterator[None]:\n with open(os.open(os.ctermid(), os.O_CLOEXEC | os.O_RDWR | os.O_NOCTTY), 'wb') as tty:\n if data_request:\n turn_off_echo(tty.fileno())\n tty.write(dcs_to_kitty(data_request))\n tty.flush()\n try:\n yield\n finally:\n # discard queued input data on tty in case data transmission was\n # interrupted due to SSH failure, avoids spewing garbage to screen\n from uuid import uuid4\n canary = uuid4().hex.encode('ascii')\n turn_off_echo(tty.fileno())\n tty.write(dcs_to_kitty(canary + b'\\n\\r', type='echo'))\n tty.flush()\n data = b''\n give_up_at = time.monotonic() + 2\n tty_fd = tty.fileno()\n while time.monotonic() < give_up_at and canary not in data:\n with suppress(KeyboardInterrupt):\n rd, wr, err = select([tty_fd], [], [tty_fd], max(0, give_up_at - time.monotonic()))\n if err or not rd:\n break\n q = os.read(tty_fd, io.DEFAULT_BUFFER_SIZE)\n if not q:\n break\n data += q\n\n\ndef change_colors(color_scheme: str) -> bool:\n if not color_scheme:\n return False\n from kittens.themes.collection import NoCacheFound, load_themes, text_as_opts\n from kittens.themes.main import colors_as_escape_codes\n if color_scheme.endswith('.conf'):\n conf_file = resolve_abs_or_config_path(color_scheme)\n try:\n with open(conf_file) as f:\n opts = text_as_opts(f.read())\n except FileNotFoundError:\n raise SystemExit(f'Failed to find the color conf file: {expandvars(conf_file)}')\n else:\n try:\n themes = load_themes(-1)\n except NoCacheFound:\n themes = load_themes()\n cs = expandvars(color_scheme)\n try:\n theme = themes[cs]\n except KeyError:\n raise SystemExit(f'Failed to find the color theme: {cs}')\n opts = theme.kitty_opts\n raw = colors_as_escape_codes(opts)\n print(save_colors(), sep='', end=raw, flush=True)\n return True\n\n\ndef add_cloned_env(shm_name: str) -> Dict[str, str]:\n try:\n return cast(Dict[str, str], read_data_from_shared_memory(shm_name))\n except FileNotFoundError:\n pass\n return {}\n\n\ndef run_ssh(ssh_args: List[str], server_args: List[str], found_extra_args: Tuple[str, ...]) -> NoReturn:\n cmd = [ssh_exe()] + ssh_args\n hostname, remote_args = server_args[0], server_args[1:]\n if not remote_args:\n cmd.append('-t')\n insertion_point = len(cmd)\n cmd.append('--')\n cmd.append(hostname)\n uname = getuser()\n if hostname.startswith('ssh://'):\n from urllib.parse import urlparse\n purl = urlparse(hostname)\n hostname_for_match = purl.hostname or hostname[6:].split('/', 1)[0]\n uname = purl.username or uname\n elif '@' in hostname and hostname[0] != '@':\n uname, hostname_for_match = hostname.split('@', 1)\n else:\n hostname_for_match = hostname\n hostname_for_match = hostname_for_match.split('@', 1)[-1].split(':', 1)[0]\n overrides: List[str] = []\n literal_env: Dict[str, str] = {}\n pat = re.compile(r'^([a-zA-Z0-9_]+)[ \\t]*=')\n for i, a in enumerate(found_extra_args):\n if i % 2 == 1:\n aq = pat.sub(r'\\1 ', a.lstrip())\n key = aq.split(maxsplit=1)[0]\n if key == 'clone_env':\n literal_env = add_cloned_env(aq.split(maxsplit=1)[1])\n elif key != 'hostname':\n overrides.append(aq)\n if overrides:\n overrides.insert(0, f'hostname {uname}@{hostname_for_match}')\n host_opts = init_config(hostname_for_match, uname, overrides)\n if host_opts.share_connections:\n cmd[insertion_point:insertion_point] = connection_sharing_args(int(os.environ['KITTY_PID']))\n use_kitty_askpass = host_opts.askpass == 'native' or (host_opts.askpass == 
'unless-set' and 'SSH_ASKPASS' not in os.environ)\n need_to_request_data = True\n if use_kitty_askpass:\n sentinel = os.path.join(cache_dir(), 'openssh-is-new-enough-for-askpass')\n sentinel_exists = os.path.exists(sentinel)\n if sentinel_exists or ssh_version() >= (8, 4):\n if not sentinel_exists:\n open(sentinel, 'w').close()\n # SSH_ASKPASS_REQUIRE was introduced in 8.4 release on 2020-09-27\n need_to_request_data = False\n os.environ['SSH_ASKPASS_REQUIRE'] = 'force'\n os.environ['SSH_ASKPASS'] = os.path.join(shell_integration_dir, 'ssh', 'askpass.py')\n if need_to_request_data and host_opts.share_connections:\n cp = subprocess.run(cmd[:1] + ['-O', 'check'] + cmd[1:], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n if cp.returncode == 0:\n # we will use the master connection so SSH does not need to use the tty\n need_to_request_data = False\n with restore_terminal_state() as echo_on:\n rcmd, replacements, shm_name = get_remote_command(\n remote_args, host_opts, hostname_for_match, uname, echo_on, request_data=need_to_request_data, literal_env=literal_env)\n cmd += rcmd\n colors_changed = change_colors(host_opts.color_scheme)\n try:\n p = subprocess.Popen(cmd)\n except FileNotFoundError:\n raise SystemExit('Could not find the ssh executable, is it in your PATH?')\n else:\n rq = '' if need_to_request_data else 'id={REQUEST_ID}:pwfile={PASSWORD_FILENAME}:pw={DATA_PASSWORD}'.format(**replacements)\n with drain_potential_tty_garbage(p, rq):\n raise SystemExit(p.wait())\n finally:\n if colors_changed:\n print(end=restore_colors(), flush=True)\n\n\ndef main(args: List[str]) -> None:\n args = args[1:]\n if args and args[0] == 'use-python':\n args = args[1:] # backwards compat from when we had a python implementation\n try:\n ssh_args, server_args, passthrough, found_extra_args = parse_ssh_args(args, extra_args=('--kitten',))\n except InvalidSSHArgs as e:\n e.system_exit()\n if passthrough:\n if found_extra_args:\n raise SystemExit(f'The SSH kitten cannot work with the options: {\", \".join(passthrough_args)}')\n os.execlp(ssh_exe(), 'ssh', *args)\n\n if not os.environ.get('KITTY_WINDOW_ID') or not os.environ.get('KITTY_PID'):\n raise SystemExit('The SSH kitten is meant to run inside a kitty window')\n if not sys.stdin.isatty():\n raise SystemExit('The SSH kitten is meant for interactive use only, STDIN must be a terminal')\n try:\n run_ssh(ssh_args, server_args, found_extra_args)\n except KeyboardInterrupt:\n sys.excepthook = lambda *a: None\n raise\n\n\nif __name__ == '__main__':\n main(sys.argv)\nelif __name__ == '__wrapper_of__':\n cd = sys.cli_docs # type: ignore\n cd['wrapper_of'] = 'ssh'\nelif __name__ == '__conf__':\n from .options.definition import definition\n sys.options_definition = definition # type: ignore\n", "path": "kittens/ssh/main.py" } ]
diff --git a/kittens/ssh/main.py b/kittens/ssh/main.py index 7140379b4ac..cacc60ff0b0 100644 --- a/kittens/ssh/main.py +++ b/kittens/ssh/main.py @@ -50,9 +50,6 @@ def read_data_from_shared_memory(shm_name: str) -> Any: shm.unlink() if shm.stats.st_uid != os.geteuid() or shm.stats.st_gid != os.getegid(): raise ValueError('Incorrect owner on pwfile') - mode = stat.S_IMODE(shm.stats.st_mode) - if mode != stat.S_IREAD: - raise ValueError('Incorrect permissions on pwfile') return json.loads(shm.read_data_with_size())
Permission Error when using SSH kitten on FreeBSD

Good morning. I was hoping you or someone could point me in the right direction, or is this possibly a bug? When attempting to use the SSH kitten on FreeBSD I am getting this error:

```bash
$ kitty +kitten ssh test-host
[Errno 13] Permission denied: '/kssh-37040-ece2c2d9cdda503806bc6a18097a0bb79508d4e39266667b9aa16a06830dd0af'
Shared connection to [redacted] closed.
Exception ignored in atexit callback: <bound method SharedMemory.unlink of SharedMemory('/kssh-37040-ece2c2d9cdda503806bc6a18097a0bb79508d4e39266667b9aa16a06830dd0af', size=32718)>
Traceback (most recent call last):
  File "/usr/home/user/kitty/launcher/../../kitty/shm.py", line 180, in unlink
    shm_unlink(self._name)
PermissionError: [Errno 13] Permission denied: '/kssh-37040-ece2c2d9cdda503806bc6a18097a0bb79508d4e39266667b9aa16a06830dd0af'
```

I am not sure which resource the kitty process is lacking permission for, which I would need to know in order to rectify this. This is on kitty v0.26.5. Happens in every shell.
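For context, a minimal standalone sketch of the validation that remains after the diff above — not kitty's actual code; `shm_stats` here is a hypothetical stand-in for the `shm.stats` object used by `read_data_from_shared_memory` in the files shown earlier. Only ownership of the shared-memory segment is enforced; the exact-mode comparison removed by the patch is shown commented out.

```python
# Illustrative sketch only: the ownership check kept by the patch above.
import os


def validate_pwfile(shm_stats: os.stat_result) -> None:
    # The segment must belong to the current effective user and group.
    if shm_stats.st_uid != os.geteuid() or shm_stats.st_gid != os.getegid():
        raise ValueError('Incorrect owner on pwfile')
    # The patch drops the additional requirement that the mode be exactly
    # stat.S_IREAD (0o400), so only ownership is enforced:
    # if stat.S_IMODE(shm_stats.st_mode) != stat.S_IREAD:
    #     raise ValueError('Incorrect permissions on pwfile')
```

The effect of the diff is simply to stop insisting on one specific permission pattern on the shared-memory object; ownership alone decides whether the pwfile is trusted.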
django-oscar__django-oscar-1766
[ { "content": "from django.utils.translation import ugettext_lazy as _\n\n\nclass Base(object):\n \"\"\"\n Base availability policy.\n \"\"\"\n\n #: Availability code. This is used for HTML classes\n code = ''\n\n #: A description of the availability of a product. This is shown on the\n #: product detail page. Eg \"In stock\", \"Out of stock\" etc\n message = ''\n\n #: When this item should be dispatched\n dispatch_date = None\n\n @property\n def short_message(self):\n \"\"\"\n A shorter version of the availability message, suitable for showing on\n browsing pages.\n \"\"\"\n return self.message\n\n @property\n def is_available_to_buy(self):\n \"\"\"\n Test if this product is available to be bought. This is used for\n validation when a product is added to a user's basket.\n \"\"\"\n # We test a purchase of a single item\n return self.is_purchase_permitted(1)[0]\n\n def is_purchase_permitted(self, quantity):\n \"\"\"\n Test whether a proposed purchase is allowed\n\n Should return a boolean and a reason\n \"\"\"\n return False, _(\"unavailable\")\n\n\n# Common availability policies\n\n\nclass Unavailable(Base):\n \"\"\"\n Policy for when a product is unavailable\n \"\"\"\n code = 'unavailable'\n message = _(\"Unavailable\")\n\n\nclass Available(Base):\n \"\"\"\n For when a product is always available, irrespective of stock level.\n\n This might be appropriate for digital products where stock doesn't need to\n be tracked and the product is always available to buy.\n \"\"\"\n code = 'available'\n message = _(\"Available\")\n\n def is_purchase_permitted(self, quantity):\n return True, \"\"\n\n\nclass StockRequired(Base):\n \"\"\"\n Allow a product to be bought while there is stock. This policy is\n instantiated with a stock number (``num_available``). It ensures that the\n product is only available to buy while there is stock available.\n\n This is suitable for physical products where back orders (eg allowing\n purchases when there isn't stock available) are not permitted.\n \"\"\"\n CODE_IN_STOCK = 'instock'\n CODE_OUT_OF_STOCK = 'outofstock'\n\n def __init__(self, num_available):\n self.num_available = num_available\n\n def is_purchase_permitted(self, quantity):\n if self.num_available == 0:\n return False, _(\"no stock available\")\n if quantity > self.num_available:\n msg = _(\"a maximum of %(max)d can be bought\") % {\n 'max': self.num_available}\n return False, msg\n return True, \"\"\n\n @property\n def code(self):\n if self.num_available > 0:\n return self.CODE_IN_STOCK\n return self.CODE_OUT_OF_STOCK\n\n @property\n def short_message(self):\n if self.num_available > 0:\n return _(\"In stock\")\n return _(\"Unavailable\")\n\n @property\n def message(self):\n if self.num_available > 0:\n return _(\"In stock (%d available)\") % self.num_available\n return _(\"Unavailable\")\n", "path": "src/oscar/apps/partner/availability.py" } ]
[ { "content": "from django.utils.translation import ugettext_lazy as _\n\n\nclass Base(object):\n \"\"\"\n Base availability policy.\n \"\"\"\n\n #: Availability code. This is used for HTML classes\n code = ''\n\n #: A description of the availability of a product. This is shown on the\n #: product detail page. Eg \"In stock\", \"Out of stock\" etc\n message = ''\n\n #: When this item should be dispatched\n dispatch_date = None\n\n @property\n def short_message(self):\n \"\"\"\n A shorter version of the availability message, suitable for showing on\n browsing pages.\n \"\"\"\n return self.message\n\n @property\n def is_available_to_buy(self):\n \"\"\"\n Test if this product is available to be bought. This is used for\n validation when a product is added to a user's basket.\n \"\"\"\n # We test a purchase of a single item\n return self.is_purchase_permitted(1)[0]\n\n def is_purchase_permitted(self, quantity):\n \"\"\"\n Test whether a proposed purchase is allowed\n\n Should return a boolean and a reason\n \"\"\"\n return False, _(\"unavailable\")\n\n\n# Common availability policies\n\n\nclass Unavailable(Base):\n \"\"\"\n Policy for when a product is unavailable\n \"\"\"\n code = 'unavailable'\n message = _(\"Unavailable\")\n\n\nclass Available(Base):\n \"\"\"\n For when a product is always available, irrespective of stock level.\n\n This might be appropriate for digital products where stock doesn't need to\n be tracked and the product is always available to buy.\n \"\"\"\n code = 'available'\n message = _(\"Available\")\n\n def is_purchase_permitted(self, quantity):\n return True, \"\"\n\n\nclass StockRequired(Base):\n \"\"\"\n Allow a product to be bought while there is stock. This policy is\n instantiated with a stock number (``num_available``). It ensures that the\n product is only available to buy while there is stock available.\n\n This is suitable for physical products where back orders (eg allowing\n purchases when there isn't stock available) are not permitted.\n \"\"\"\n CODE_IN_STOCK = 'instock'\n CODE_OUT_OF_STOCK = 'outofstock'\n\n def __init__(self, num_available):\n self.num_available = num_available\n\n def is_purchase_permitted(self, quantity):\n if self.num_available <= 0:\n return False, _(\"no stock available\")\n if quantity > self.num_available:\n msg = _(\"a maximum of %(max)d can be bought\") % {\n 'max': self.num_available}\n return False, msg\n return True, \"\"\n\n @property\n def code(self):\n if self.num_available > 0:\n return self.CODE_IN_STOCK\n return self.CODE_OUT_OF_STOCK\n\n @property\n def short_message(self):\n if self.num_available > 0:\n return _(\"In stock\")\n return _(\"Unavailable\")\n\n @property\n def message(self):\n if self.num_available > 0:\n return _(\"In stock (%d available)\") % self.num_available\n return _(\"Unavailable\")\n", "path": "src/oscar/apps/partner/availability.py" } ]
diff --git a/src/oscar/apps/partner/availability.py b/src/oscar/apps/partner/availability.py index a999f2dce96..eaa3b6e9a17 100644 --- a/src/oscar/apps/partner/availability.py +++ b/src/oscar/apps/partner/availability.py @@ -83,7 +83,7 @@ def __init__(self, num_available): self.num_available = num_available def is_purchase_permitted(self, quantity): - if self.num_available == 0: + if self.num_available <= 0: return False, _("no stock available") if quantity > self.num_available: msg = _("a maximum of %(max)d can be bought") % { diff --git a/tests/unit/partner/availability_tests.py b/tests/unit/partner/availability_tests.py index 9629b0d3247..93966b577c6 100644 --- a/tests/unit/partner/availability_tests.py +++ b/tests/unit/partner/availability_tests.py @@ -51,8 +51,9 @@ def test_permits_purchases_up_to_stock_level(self): self.assertTrue(is_permitted) def test_forbids_purchases_over_stock_level(self): - is_permitted, __ = self.availability.is_purchase_permitted(7) + is_permitted, msg = self.availability.is_purchase_permitted(7) self.assertFalse(is_permitted) + self.assertEqual(msg, "a maximum of 5 can be bought") def test_returns_correct_code(self): self.assertEqual('instock', self.availability.code) @@ -60,6 +61,13 @@ def test_returns_correct_code(self): def test_returns_correct_message(self): self.assertEqual('In stock (5 available)', self.availability.message) + def test_returns_correct_message_when_allocation_higher_than_stock(self): + # this is the value passed when stock lower than allocation by 1 + self.availability.num_available = -1 + is_permitted, msg = self.availability.is_purchase_permitted(1) + self.assertFalse(is_permitted) + self.assertEqual(msg, "no stock available") + class TestStockRequiredWrapperForRecordWithoutStock(TestCase):
Incorrect message appears in cases where the stock is lower than the number of allocated products

To reproduce:

1. Add 2 products to the basket.
2. On one of them set the number of allocated products to 1 and the stock to 0.
3. On the basket page, try to do an operation on the one that is still in stock.

The message that appears is "a maximum of -1 can be bought" instead of the "no stock available" message.
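To make the one-character guard change in the diff above concrete, here is a small self-contained sketch — not Oscar's actual `StockRequired` class, just the same guard logic with hypothetical values — showing how a negative `num_available` (stock 0 with 1 unit allocated) behaves with `== 0` versus `<= 0`.

```python
# Standalone illustration of the guard change; values are hypothetical.
def is_purchase_permitted(num_available, quantity, fixed=True):
    # Before the patch the out-of-stock branch only caught exactly zero.
    out_of_stock = num_available <= 0 if fixed else num_available == 0
    if out_of_stock:
        return False, "no stock available"
    if quantity > num_available:
        return False, "a maximum of %d can be bought" % num_available
    return True, ""


# Stock 0 with 1 unit allocated gives num_available == -1:
print(is_purchase_permitted(-1, 1, fixed=False))  # (False, 'a maximum of -1 can be bought')
print(is_purchase_permitted(-1, 1, fixed=True))   # (False, 'no stock available')
```

With the `<= 0` guard the out-of-stock branch also catches the oversold case, which is what the test added in the diff asserts.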
readthedocs__readthedocs.org-3693
[ { "content": "\"\"\"Git-related utilities.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport csv\nimport logging\nimport os\nimport re\n\nfrom builtins import str\nfrom six import StringIO\n\nfrom readthedocs.projects.exceptions import RepositoryError\nfrom readthedocs.vcs_support.base import BaseVCS, VCSVersion\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Backend(BaseVCS):\n\n \"\"\"Git VCS backend.\"\"\"\n\n supports_tags = True\n supports_branches = True\n fallback_branch = 'master' # default branch\n\n def __init__(self, *args, **kwargs):\n super(Backend, self).__init__(*args, **kwargs)\n self.token = kwargs.get('token', None)\n self.repo_url = self._get_clone_url()\n\n def _get_clone_url(self):\n if '://' in self.repo_url:\n hacked_url = self.repo_url.split('://')[1]\n hacked_url = re.sub('.git$', '', hacked_url)\n clone_url = 'https://%s' % hacked_url\n if self.token:\n clone_url = 'https://%s@%s' % (self.token, hacked_url)\n return clone_url\n # Don't edit URL because all hosts aren't the same\n\n # else:\n # clone_url = 'git://%s' % (hacked_url)\n return self.repo_url\n\n def set_remote_url(self, url):\n return self.run('git', 'remote', 'set-url', 'origin', url)\n\n def update(self):\n # Use checkout() to update repo\n self.checkout()\n\n def repo_exists(self):\n code, _, _ = self.run('git', 'status', record=False)\n return code == 0\n\n def submodules_exists(self):\n code, out, _ = self.run('git', 'submodule', 'status', record=False)\n return code == 0 and bool(out)\n\n def fetch(self):\n code, _, _ = self.run('git', 'fetch', '--tags', '--prune')\n if code != 0:\n raise RepositoryError\n\n def checkout_revision(self, revision=None):\n if not revision:\n branch = self.default_branch or self.fallback_branch\n revision = 'origin/%s' % branch\n\n code, out, err = self.run(\n 'git', 'checkout', '--force', revision)\n if code != 0:\n log.warning(\"Failed to checkout revision '%s': %s\",\n revision, code)\n return [code, out, err]\n\n def clone(self):\n code, _, _ = self.run(\n 'git', 'clone', '--recursive', self.repo_url, '.')\n if code != 0:\n raise RepositoryError\n\n @property\n def tags(self):\n retcode, stdout, _ = self.run('git', 'show-ref', '--tags', record_as_success=True)\n # error (or no tags found)\n if retcode != 0:\n return []\n return self.parse_tags(stdout)\n\n def parse_tags(self, data):\n \"\"\"\n Parses output of show-ref --tags, eg:\n\n 3b32886c8d3cb815df3793b3937b2e91d0fb00f1 refs/tags/2.0.0\n bd533a768ff661991a689d3758fcfe72f455435d refs/tags/2.0.1\n c0288a17899b2c6818f74e3a90b77e2a1779f96a refs/tags/2.0.2\n a63a2de628a3ce89034b7d1a5ca5e8159534eef0 refs/tags/2.1.0.beta2\n c7fc3d16ed9dc0b19f0d27583ca661a64562d21e refs/tags/2.1.0.rc1\n edc0a2d02a0cc8eae8b67a3a275f65cd126c05b1 refs/tags/2.1.0.rc2\n\n Into VCSTag objects with the tag name as verbose_name and the commit\n hash as identifier.\n \"\"\"\n # parse the lines into a list of tuples (commit-hash, tag ref name)\n # StringIO below is expecting Unicode data, so ensure that it gets it.\n if not isinstance(data, str):\n data = str(data)\n raw_tags = csv.reader(StringIO(data), delimiter=' ')\n vcs_tags = []\n for row in raw_tags:\n row = [f for f in row if f != '']\n if row == []:\n continue\n commit_hash, name = row\n clean_name = name.replace('refs/tags/', '')\n vcs_tags.append(VCSVersion(self, commit_hash, clean_name))\n return vcs_tags\n\n @property\n def branches(self):\n # Only show remote branches\n retcode, stdout, _ = self.run('git', 'branch', '-r')\n # error (or no tags found)\n if retcode != 
0:\n return []\n return self.parse_branches(stdout)\n\n def parse_branches(self, data):\n \"\"\"\n Parse output of git branch -r\n\n e.g.:\n\n origin/2.0.X\n origin/HEAD -> origin/master\n origin/develop\n origin/master\n origin/release/2.0.0\n origin/release/2.1.0\n \"\"\"\n clean_branches = []\n # StringIO below is expecting Unicode data, so ensure that it gets it.\n if not isinstance(data, str):\n data = str(data)\n raw_branches = csv.reader(StringIO(data), delimiter=' ')\n for branch in raw_branches:\n branch = [f for f in branch if f != '' and f != '*']\n # Handle empty branches\n if branch:\n branch = branch[0]\n if branch.startswith('origin/'):\n verbose_name = branch.replace('origin/', '')\n if verbose_name in ['HEAD']:\n continue\n clean_branches.append(VCSVersion(self, branch, verbose_name))\n else:\n clean_branches.append(VCSVersion(self, branch, branch))\n return clean_branches\n\n @property\n def commit(self):\n _, stdout, _ = self.run('git', 'rev-parse', 'HEAD')\n return stdout.strip()\n\n def checkout(self, identifier=None):\n self.check_working_dir()\n\n # Clone or update repository\n if self.repo_exists():\n self.set_remote_url(self.repo_url)\n self.fetch()\n else:\n self.make_clean_working_dir()\n self.clone()\n\n # Find proper identifier\n if not identifier:\n identifier = self.default_branch or self.fallback_branch\n\n identifier = self.find_ref(identifier)\n\n # Checkout the correct identifier for this branch.\n code, out, err = self.checkout_revision(identifier)\n if code != 0:\n return code, out, err\n\n # Clean any remains of previous checkouts\n self.run('git', 'clean', '-d', '-f', '-f')\n\n # Update submodules\n if self.submodules_exists():\n self.run('git', 'submodule', 'sync')\n self.run('git', 'submodule', 'update',\n '--init', '--recursive', '--force')\n\n return code, out, err\n\n def find_ref(self, ref):\n # Check if ref starts with 'origin/'\n if ref.startswith('origin/'):\n return ref\n\n # Check if ref is a branch of the origin remote\n if self.ref_exists('remotes/origin/' + ref):\n return 'origin/' + ref\n\n return ref\n\n def ref_exists(self, ref):\n code, _, _ = self.run('git', 'show-ref', ref, record_as_success=True)\n return code == 0\n\n @property\n def env(self):\n env = super(Backend, self).env\n env['GIT_DIR'] = os.path.join(self.working_dir, '.git')\n # Don't prompt for username, this requires Git 2.3+\n env['GIT_TERMINAL_PROMPT'] = '0'\n return env\n", "path": "readthedocs/vcs_support/backends/git.py" } ]
[ { "content": "\"\"\"Git-related utilities.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport csv\nimport logging\nimport os\nimport re\n\nfrom builtins import str\nfrom six import StringIO\n\nfrom readthedocs.projects.exceptions import RepositoryError\nfrom readthedocs.vcs_support.base import BaseVCS, VCSVersion\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Backend(BaseVCS):\n\n \"\"\"Git VCS backend.\"\"\"\n\n supports_tags = True\n supports_branches = True\n fallback_branch = 'master' # default branch\n\n def __init__(self, *args, **kwargs):\n super(Backend, self).__init__(*args, **kwargs)\n self.token = kwargs.get('token', None)\n self.repo_url = self._get_clone_url()\n\n def _get_clone_url(self):\n if '://' in self.repo_url:\n hacked_url = self.repo_url.split('://')[1]\n hacked_url = re.sub('.git$', '', hacked_url)\n clone_url = 'https://%s' % hacked_url\n if self.token:\n clone_url = 'https://%s@%s' % (self.token, hacked_url)\n return clone_url\n # Don't edit URL because all hosts aren't the same\n\n # else:\n # clone_url = 'git://%s' % (hacked_url)\n return self.repo_url\n\n def set_remote_url(self, url):\n return self.run('git', 'remote', 'set-url', 'origin', url)\n\n def update(self):\n # Use checkout() to update repo\n self.checkout()\n\n def repo_exists(self):\n code, _, _ = self.run('git', 'status', record=False)\n return code == 0\n\n def submodules_exists(self):\n code, out, _ = self.run('git', 'submodule', 'status', record=False)\n return code == 0 and bool(out)\n\n def fetch(self):\n code, _, _ = self.run('git', 'fetch', '--tags', '--prune')\n if code != 0:\n raise RepositoryError\n\n def checkout_revision(self, revision=None):\n if not revision:\n branch = self.default_branch or self.fallback_branch\n revision = 'origin/%s' % branch\n\n code, out, err = self.run(\n 'git', 'checkout', '--force', revision)\n if code != 0:\n log.warning(\"Failed to checkout revision '%s': %s\",\n revision, code)\n return [code, out, err]\n\n def clone(self):\n code, _, _ = self.run(\n 'git', 'clone', '--recursive', self.repo_url, '.')\n if code != 0:\n raise RepositoryError\n\n @property\n def tags(self):\n retcode, stdout, _ = self.run('git', 'show-ref', '--tags', record_as_success=True)\n # error (or no tags found)\n if retcode != 0:\n return []\n return self.parse_tags(stdout)\n\n def parse_tags(self, data):\n \"\"\"\n Parses output of show-ref --tags, eg:\n\n 3b32886c8d3cb815df3793b3937b2e91d0fb00f1 refs/tags/2.0.0\n bd533a768ff661991a689d3758fcfe72f455435d refs/tags/2.0.1\n c0288a17899b2c6818f74e3a90b77e2a1779f96a refs/tags/2.0.2\n a63a2de628a3ce89034b7d1a5ca5e8159534eef0 refs/tags/2.1.0.beta2\n c7fc3d16ed9dc0b19f0d27583ca661a64562d21e refs/tags/2.1.0.rc1\n edc0a2d02a0cc8eae8b67a3a275f65cd126c05b1 refs/tags/2.1.0.rc2\n\n Into VCSTag objects with the tag name as verbose_name and the commit\n hash as identifier.\n \"\"\"\n # parse the lines into a list of tuples (commit-hash, tag ref name)\n # StringIO below is expecting Unicode data, so ensure that it gets it.\n if not isinstance(data, str):\n data = str(data)\n raw_tags = csv.reader(StringIO(data), delimiter=' ')\n vcs_tags = []\n for row in raw_tags:\n row = [f for f in row if f != '']\n if row == []:\n continue\n commit_hash, name = row\n clean_name = name.replace('refs/tags/', '')\n vcs_tags.append(VCSVersion(self, commit_hash, clean_name))\n return vcs_tags\n\n @property\n def branches(self):\n # Only show remote branches\n retcode, stdout, _ = self.run('git', 'branch', '-r', record_as_success=True)\n # error (or no 
branches found)\n if retcode != 0:\n return []\n return self.parse_branches(stdout)\n\n def parse_branches(self, data):\n \"\"\"\n Parse output of git branch -r\n\n e.g.:\n\n origin/2.0.X\n origin/HEAD -> origin/master\n origin/develop\n origin/master\n origin/release/2.0.0\n origin/release/2.1.0\n \"\"\"\n clean_branches = []\n # StringIO below is expecting Unicode data, so ensure that it gets it.\n if not isinstance(data, str):\n data = str(data)\n raw_branches = csv.reader(StringIO(data), delimiter=' ')\n for branch in raw_branches:\n branch = [f for f in branch if f != '' and f != '*']\n # Handle empty branches\n if branch:\n branch = branch[0]\n if branch.startswith('origin/'):\n verbose_name = branch.replace('origin/', '')\n if verbose_name in ['HEAD']:\n continue\n clean_branches.append(VCSVersion(self, branch, verbose_name))\n else:\n clean_branches.append(VCSVersion(self, branch, branch))\n return clean_branches\n\n @property\n def commit(self):\n _, stdout, _ = self.run('git', 'rev-parse', 'HEAD')\n return stdout.strip()\n\n def checkout(self, identifier=None):\n self.check_working_dir()\n\n # Clone or update repository\n if self.repo_exists():\n self.set_remote_url(self.repo_url)\n self.fetch()\n else:\n self.make_clean_working_dir()\n self.clone()\n\n # Find proper identifier\n if not identifier:\n identifier = self.default_branch or self.fallback_branch\n\n identifier = self.find_ref(identifier)\n\n # Checkout the correct identifier for this branch.\n code, out, err = self.checkout_revision(identifier)\n if code != 0:\n return code, out, err\n\n # Clean any remains of previous checkouts\n self.run('git', 'clean', '-d', '-f', '-f')\n\n # Update submodules\n if self.submodules_exists():\n self.run('git', 'submodule', 'sync')\n self.run('git', 'submodule', 'update',\n '--init', '--recursive', '--force')\n\n return code, out, err\n\n def find_ref(self, ref):\n # Check if ref starts with 'origin/'\n if ref.startswith('origin/'):\n return ref\n\n # Check if ref is a branch of the origin remote\n if self.ref_exists('remotes/origin/' + ref):\n return 'origin/' + ref\n\n return ref\n\n def ref_exists(self, ref):\n code, _, _ = self.run('git', 'show-ref', ref, record_as_success=True)\n return code == 0\n\n @property\n def env(self):\n env = super(Backend, self).env\n env['GIT_DIR'] = os.path.join(self.working_dir, '.git')\n # Don't prompt for username, this requires Git 2.3+\n env['GIT_TERMINAL_PROMPT'] = '0'\n return env\n", "path": "readthedocs/vcs_support/backends/git.py" } ]
diff --git a/readthedocs/vcs_support/backends/git.py b/readthedocs/vcs_support/backends/git.py
index 77673907ec5..afb1d7ed6e3 100644
--- a/readthedocs/vcs_support/backends/git.py
+++ b/readthedocs/vcs_support/backends/git.py
@@ -122,8 +122,8 @@ def parse_tags(self, data):
     @property
     def branches(self):
         # Only show remote branches
-        retcode, stdout, _ = self.run('git', 'branch', '-r')
-        # error (or no tags found)
+        retcode, stdout, _ = self.run('git', 'branch', '-r', record_as_success=True)
+        # error (or no branches found)
         if retcode != 0:
             return []
         return self.parse_branches(stdout)
After build is triggered, state is shown as failed

To reproduce, fire off a new build, make sure to catch the build list page while VCS operations are happening. Build will be in a failure state.

This is a regression where we are setting the state of the build to failed without checking that the build has completed. This might be a byproduct of using multiple environments during the build process.
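Note on the fix above: the diff adds `record_as_success=True` when listing remote branches, so an expected non-zero exit (for example, a repository that momentarily has nothing to list while VCS operations run) is not recorded as a failed command that flips the build state. A minimal sketch of that pattern, with a hypothetical `CommandLog`/`run_and_record` pair standing in for Read the Docs' real command-recording machinery:

```python
import subprocess


class CommandLog:
    """Hypothetical stand-in for a build's recorded command list."""

    def __init__(self):
        self.entries = []

    def add(self, cmd, exit_code):
        self.entries.append({"command": cmd, "exit_code": exit_code})

    def has_failures(self):
        return any(entry["exit_code"] != 0 for entry in self.entries)


def run_and_record(log, *cmd, record_as_success=False):
    """Run a command and record it; optionally record an expected failure as success."""
    proc = subprocess.run(cmd, capture_output=True, text=True)
    log.add(" ".join(cmd), 0 if record_as_success else proc.returncode)
    return proc.returncode, proc.stdout, proc.stderr


log = CommandLog()
# Listing branches/tags in a repo that has none exits non-zero, but that
# should not flip the whole build into a failed state.
run_and_record(log, "git", "branch", "-r", record_as_success=True)
print(log.has_failures())  # False, even when git exited non-zero
```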
canonical__microk8s-3573
[ { "content": "import getpass\nimport json\nimport os\nimport platform\nimport subprocess\nimport sys\nimport time\nfrom pathlib import Path\nimport logging\n\nimport click\nimport yaml\n\nLOG = logging.getLogger(__name__)\n\nKUBECTL = os.path.expandvars(\"$SNAP/microk8s-kubectl.wrapper\")\n\n\ndef get_group():\n return \"snap_microk8s\" if is_strict() else \"microk8s\"\n\n\ndef is_strict():\n snap_yaml = snap() / \"meta/snap.yaml\"\n with open(snap_yaml) as f:\n snap_meta = yaml.safe_load(f)\n return snap_meta[\"confinement\"] == \"strict\"\n\n\ndef get_current_arch():\n # architecture mapping\n arch_mapping = {\n \"aarch64\": \"arm64\",\n \"armv7l\": \"armhf\",\n \"x86_64\": \"amd64\",\n \"s390x\": \"s390x\",\n \"ppc64le\": \"ppc64le\",\n \"ppc64el\": \"ppc64le\",\n }\n\n return arch_mapping[platform.machine()]\n\n\ndef snap() -> Path:\n try:\n return Path(os.environ[\"SNAP\"])\n except KeyError:\n return Path(\"/snap/microk8s/current\")\n\n\ndef snap_data() -> Path:\n try:\n return Path(os.environ[\"SNAP_DATA\"])\n except KeyError:\n return Path(\"/var/snap/microk8s/current\")\n\n\ndef snap_common() -> Path:\n try:\n return Path(os.environ[\"SNAP_COMMON\"])\n except KeyError:\n return Path(\"/var/snap/microk8s/common\")\n\n\ndef run(*args, die=True):\n # Add wrappers to $PATH\n env = os.environ.copy()\n env[\"PATH\"] += \":%s\" % os.environ[\"SNAP\"]\n result = subprocess.run(\n args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env\n )\n\n try:\n result.check_returncode()\n except subprocess.CalledProcessError as err:\n if die:\n if result.stderr:\n print(result.stderr.decode(\"utf-8\"))\n print(err)\n sys.exit(1)\n else:\n raise\n\n return result.stdout.decode(\"utf-8\")\n\n\ndef is_cluster_ready():\n try:\n service_output = kubectl_get(\"all\")\n node_output = kubectl_get(\"nodes\")\n # Make sure to compare with the word \" Ready \" with spaces.\n if \" Ready \" in node_output and \"service/kubernetes\" in service_output:\n return True\n else:\n return False\n except Exception:\n return False\n\n\ndef is_ha_enabled():\n ha_lock = os.path.expandvars(\"${SNAP_DATA}/var/lock/ha-cluster\")\n return os.path.isfile(ha_lock)\n\n\ndef get_dqlite_info():\n cluster_dir = os.path.expandvars(\"${SNAP_DATA}/var/kubernetes/backend\")\n snap_path = os.environ.get(\"SNAP\")\n\n info = []\n\n if not is_ha_enabled():\n return info\n\n waits = 10\n while waits > 0:\n try:\n with open(\"{}/info.yaml\".format(cluster_dir), mode=\"r\") as f:\n data = yaml.safe_load(f)\n out = subprocess.check_output(\n \"{snappath}/bin/dqlite -s file://{dbdir}/cluster.yaml -c {dbdir}/cluster.crt \"\n \"-k {dbdir}/cluster.key -f json k8s .cluster\".format(\n snappath=snap_path, dbdir=cluster_dir\n ).split(),\n timeout=4,\n stderr=subprocess.DEVNULL,\n )\n if data[\"Address\"] in out.decode():\n break\n else:\n time.sleep(5)\n waits -= 1\n except (subprocess.CalledProcessError, subprocess.TimeoutExpired):\n time.sleep(2)\n waits -= 1\n\n if waits == 0:\n return info\n\n nodes = json.loads(out.decode())\n for n in nodes:\n if n[\"Role\"] == 0:\n info.append((n[\"Address\"], \"voter\"))\n if n[\"Role\"] == 1:\n info.append((n[\"Address\"], \"standby\"))\n if n[\"Role\"] == 2:\n info.append((n[\"Address\"], \"spare\"))\n return info\n\n\ndef is_cluster_locked():\n if (snap_data() / \"var/lock/clustered.lock\").exists():\n click.echo(\"This MicroK8s deployment is acting as a node in a cluster.\")\n click.echo(\"Please use the master node.\")\n sys.exit(1)\n\n\ndef wait_for_ready(timeout):\n 
start_time = time.time()\n\n while True:\n if is_cluster_ready():\n return True\n elif timeout and time.time() > start_time + timeout:\n return False\n else:\n time.sleep(2)\n\n\ndef exit_if_no_root():\n \"\"\"\n Exit if the user is not root\n \"\"\"\n if not os.geteuid() == 0:\n click.echo(\n \"Elevated permissions is needed for this operation. Please run this command with sudo.\"\n )\n exit(50)\n\n\ndef exit_if_stopped():\n stoppedLockFile = os.path.expandvars(\"${SNAP_DATA}/var/lock/stopped.lock\")\n if os.path.isfile(stoppedLockFile):\n print(\"microk8s is not running, try microk8s start\")\n exit(0)\n\n\ndef exit_if_no_permission():\n user = getpass.getuser()\n # test if we can access the default kubeconfig\n clientConfigFile = os.path.expandvars(\"${SNAP_DATA}/credentials/client.config\")\n if not os.access(clientConfigFile, os.R_OK):\n print(\"Insufficient permissions to access MicroK8s.\")\n print(\n \"You can either try again with sudo or add the user {} to the 'microk8s' group:\".format(\n user\n )\n )\n print(\"\")\n print(\" sudo usermod -a -G microk8s {}\".format(user))\n print(\" sudo chown -f -R $USER ~/.kube\")\n print(\"\")\n print(\n \"After this, reload the user groups either via a reboot or by running 'newgrp microk8s'.\"\n )\n exit(1)\n\n\ndef ensure_started():\n if (snap_data() / \"var/lock/stopped.lock\").exists():\n click.echo(\"microk8s is not running, try microk8s start\", err=True)\n sys.exit(1)\n\n\ndef kubectl_get(cmd, namespace=\"--all-namespaces\"):\n if namespace == \"--all-namespaces\":\n return run(KUBECTL, \"get\", cmd, \"--all-namespaces\", die=False)\n else:\n return run(KUBECTL, \"get\", cmd, \"-n\", namespace, die=False)\n\n\ndef kubectl_get_clusterroles():\n return run(\n KUBECTL,\n \"get\",\n \"clusterroles\",\n \"--show-kind\",\n \"--no-headers\",\n die=False,\n )\n\n\ndef is_community_addon(arch, addon_name):\n \"\"\"\n Check if an addon is part of the community repo.\n\n :param arch: architecture of the addon we are looking for\n :param addon_name: name of the addon we are looking for\n :return: True if the addon is in the community repo\n \"\"\"\n try:\n addons_yaml = f\"{os.environ['SNAP']}/addons/community/addons.yaml\"\n with open(addons_yaml, \"r\") as fin:\n addons = yaml.safe_load(fin)\n\n for addon in addons[\"microk8s-addons\"][\"addons\"]:\n if arch in addon[\"supported_architectures\"]:\n if addon_name == addon[\"name\"]:\n return True\n except Exception:\n LOG.exception(\"could not load addons from %s\", addons_yaml)\n\n return False\n\n\ndef get_available_addons(arch):\n available = []\n strict = is_strict()\n for dir in os.listdir(snap_common() / \"addons\"):\n try:\n addons_yaml = snap_common() / \"addons\" / dir / \"addons.yaml\"\n with open(addons_yaml, \"r\") as fin:\n addons = yaml.safe_load(fin)\n\n for addon in addons[\"microk8s-addons\"][\"addons\"]:\n if arch not in addon[\"supported_architectures\"]:\n continue\n\n if \"confinement\" in addon:\n if strict and \"strict\" not in addon[\"confinement\"]:\n continue\n if not strict and \"classic\" not in addon[\"confinement\"]:\n continue\n\n available.append({**addon, \"repository\": dir})\n\n except Exception:\n LOG.exception(\"could not load addons from %s\", addons_yaml)\n\n available = sorted(available, key=lambda k: (k[\"repository\"], k[\"name\"]))\n return available\n\n\ndef get_addon_by_name(addons, name):\n filtered_addon = []\n\n parts = name.split(\"/\")\n if len(parts) == 1:\n repo_name, addon_name = None, parts[0]\n elif len(parts) == 2:\n repo_name, addon_name = 
parts[0], parts[1]\n else:\n # just fallback to the addon name\n repo_name, addon_name = None, name\n\n for addon in addons:\n if addon_name == addon[\"name\"] and (repo_name == addon[\"repository\"] or not repo_name):\n filtered_addon.append(addon)\n\n return filtered_addon\n\n\ndef is_service_expected_to_start(service):\n \"\"\"\n Check if a service is supposed to start\n :param service: the service name\n :return: True if the service is meant to start\n \"\"\"\n lock_path = os.path.expandvars(\"${SNAP_DATA}/var/lock\")\n lock = \"{}/{}\".format(lock_path, service)\n return os.path.exists(lock_path) and not os.path.isfile(lock)\n\n\ndef set_service_expected_to_start(service, start=True):\n \"\"\"\n Check if a service is not expected to start.\n :param service: the service name\n :param start: should the service start or not\n \"\"\"\n lock_path = os.path.expandvars(\"${SNAP_DATA}/var/lock\")\n lock = \"{}/{}\".format(lock_path, service)\n if start:\n os.remove(lock)\n else:\n fd = os.open(lock, os.O_CREAT, mode=0o700)\n os.close(fd)\n\n\ndef check_help_flag(addons: list) -> bool:\n \"\"\"Checks to see if a help message needs to be printed for an addon.\n\n Not all addons check for help flags themselves. Until they do, intercept\n calls to print help text and print out a generic message to that effect.\n \"\"\"\n addon = addons[0]\n if any(help_arg in addons for help_arg in (\"-h\", \"--help\")):\n print(\"Addon %s does not yet have a help message.\" % addon)\n print(\"For more information about it, visit https://microk8s.io/docs/addons\")\n return True\n return False\n\n\ndef parse_xable_addon_args(addon_args: list, available_addons: list):\n \"\"\"\n Parse the list of addons passed into the microk8s enable or disable commands.\n Further, it will infer the repository name for addons when possible.\n If any errors are encountered, we print them to stderr and exit.\n\n :param addon_args: The parameters passed to the microk8s enable command\n :param available_addons: List of available addons as (repo_name, addon_name) tuples\n\n Handles the following cases:\n - microk8s enable foo bar:--baz # enable many addons, inline arguments\n - microk8s enable bar --baz # enable one addon, unix style command line arguments\n\n :return: a list of (repo_name, addon_name, args) tuples\n \"\"\"\n\n # Backwards compatibility with enabling multiple addons at once, e.g.\n # `microk8s.enable foo bar:\"baz\"`\n available_addon_names = [addon_name for (_, addon_name) in available_addons]\n available_addon_names += [\n \"/\".join([repo_name, addon_name]) for (repo_name, addon_name) in available_addons\n ]\n addon_names = [arg.split(\":\")[0] for arg in addon_args]\n if set(addon_names) < set(available_addon_names):\n return [parse_xable_single_arg(addon_arg, available_addons) for addon_arg in addon_args]\n\n # The new way of xabling addons, that allows for unix-style argument passing,\n # such as `microk8s.enable foo --bar`.\n repo_name, addon_name, args = parse_xable_single_arg(addon_args[0], available_addons)\n if args and addon_args[1:]:\n click.echo(\n \"Can't pass string arguments and flag arguments simultaneously!\\n\"\n \"Enable or disable addons with only one argument style at a time:\\n\"\n \"\\n\"\n \" microk8s enable foo:'bar'\\n\"\n \"or\\n\"\n \" microk8s enable foo --bar\\n\"\n )\n sys.exit(1)\n\n return [(repo_name, addon_name, addon_args[1:])]\n\n\ndef parse_xable_single_arg(addon_arg: str, available_addons: list):\n \"\"\"\n Parse an addon arg of the following form: 
`(repo_name/)addon_name(:args)`\n It will automatically infer the repository name if not specified. If multiple repositories\n are found for the addon, we print an error and exit.\n\n :param addon_arg: A parameter passed to the microk8s enable command\n :param available_addons: List of available addons as (repo_name, addon_name) tuples\n\n :return: a (repo_name, addon_name, args) tuple\n \"\"\"\n addon_name, *args = addon_arg.split(\":\")\n parts = addon_name.split(\"/\")\n if len(parts) == 2:\n return (parts[0], parts[1], args)\n elif len(parts) == 1:\n matching_repos = [repo for (repo, addon) in available_addons if addon == addon_name]\n if len(matching_repos) == 0:\n click.echo(\"Addon {} was not found in any repository\".format(addon_name), err=True)\n if is_community_addon(get_current_arch(), addon_name):\n click.echo(\n \"To use the community maintained flavor enable the respective repository:\"\n )\n click.echo(\"\")\n click.echo(\" microk8s enable community\")\n click.echo(\"\")\n\n sys.exit(1)\n elif len(matching_repos) == 1:\n click.echo(\n \"Infer repository {} for addon {}\".format(matching_repos[0], addon_name), err=True\n )\n return (matching_repos[0], addon_name, args)\n else:\n click.echo(\n \"Addon {} exists in more than repository. Please explicitly specify\\n\"\n \"the repository using any of:\\n\".format(addon_name),\n err=True,\n )\n for repo in matching_repos:\n click.echo(\" {}/{}\".format(repo, addon_name), err=True)\n click.echo(\"\", err=True)\n sys.exit(1)\n\n else:\n click.echo(\"Invalid addon name {}\".format(addon_name))\n sys.exit(1)\n\n\ndef xable(action: str, addon_args: list):\n \"\"\"Enables or disables the given addons.\n\n Collated into a single function since the logic is identical other than\n the script names.\n\n :param action: \"enable\" or \"disable\"\n :param addons: List of addons to enable. Each addon may be prefixed with `repository/`\n to specify which addon repository it will be sourced from.\n \"\"\"\n available_addons_info = get_available_addons(get_current_arch())\n enabled_addons_info, disabled_addons_info = get_status(available_addons_info, True)\n if action == \"enable\":\n xabled_addons_info = enabled_addons_info\n elif action == \"disable\":\n xabled_addons_info = disabled_addons_info\n else:\n click.echo(\"Invalid action {}. 
Only enable and disable are supported\".format(action))\n sys.exit(1)\n\n # available_addons is a list of (repo_name, addon_name) tuples for all available addons\n available_addons = [(addon[\"repository\"], addon[\"name\"]) for addon in available_addons_info]\n # xabled_addons is a list (repo_name, addon_name) tuples of already xabled addons\n xabled_addons = [(addon[\"repository\"], addon[\"name\"]) for addon in xabled_addons_info]\n\n addons = parse_xable_addon_args(addon_args, available_addons)\n\n for repo_name, addon_name, args in addons:\n if (repo_name, addon_name) not in available_addons:\n click.echo(\"Addon {}/{} not found\".format(repo_name, addon_name))\n continue\n if (repo_name, addon_name) in xabled_addons:\n click.echo(\"Addon {}/{} is already {}d\".format(repo_name, addon_name, action))\n continue\n\n wait_for_ready(timeout=30)\n p = subprocess.run(\n [snap_common() / \"addons\" / repo_name / \"addons\" / addon_name / action, *args]\n )\n if p.returncode:\n sys.exit(p.returncode)\n wait_for_ready(timeout=30)\n\n\ndef is_enabled(addon, item):\n if addon in item:\n return True\n else:\n filepath = os.path.expandvars(addon)\n return os.path.isfile(filepath)\n\n\ndef get_status(available_addons, isReady):\n enabled = []\n disabled = []\n if isReady:\n # 'all' does not include ingress\n kube_output = kubectl_get(\"all,ingress\")\n cluster_output = kubectl_get_clusterroles()\n kube_output = kube_output + cluster_output\n for addon in available_addons:\n found = False\n for row in kube_output.split(\"\\n\"):\n if is_enabled(addon[\"check_status\"], row):\n enabled.append(addon)\n found = True\n break\n if not found:\n disabled.append(addon)\n\n return enabled, disabled\n\n\ndef is_within_directory(directory, target):\n\n abs_directory = os.path.abspath(directory)\n abs_target = os.path.abspath(target)\n\n prefix = os.path.commonprefix([abs_directory, abs_target])\n\n return prefix == abs_directory\n\n\ndef safe_extract(tar, path=\".\", members=None, *, numeric_owner=False):\n\n for member in tar.getmembers():\n member_path = os.path.join(path, member.name)\n if not is_within_directory(path, member_path):\n raise Exception(\"Attempted Path Traversal in Tar File\")\n\n tar.extractall(path, members, numeric_owner=numeric_owner)\n", "path": "scripts/wrappers/common/utils.py" } ]
[ { "content": "import getpass\nimport json\nimport os\nimport platform\nimport subprocess\nimport sys\nimport time\nfrom pathlib import Path\nimport logging\n\nimport click\nimport yaml\n\nLOG = logging.getLogger(__name__)\n\nKUBECTL = os.path.expandvars(\"$SNAP/microk8s-kubectl.wrapper\")\n\n\ndef get_group():\n return \"snap_microk8s\" if is_strict() else \"microk8s\"\n\n\ndef is_strict():\n snap_yaml = snap() / \"meta/snap.yaml\"\n with open(snap_yaml) as f:\n snap_meta = yaml.safe_load(f)\n return snap_meta[\"confinement\"] == \"strict\"\n\n\ndef get_current_arch():\n # architecture mapping\n arch_mapping = {\n \"aarch64\": \"arm64\",\n \"armv7l\": \"armhf\",\n \"x86_64\": \"amd64\",\n \"s390x\": \"s390x\",\n \"ppc64le\": \"ppc64le\",\n \"ppc64el\": \"ppc64le\",\n }\n\n return arch_mapping[platform.machine()]\n\n\ndef snap() -> Path:\n try:\n return Path(os.environ[\"SNAP\"])\n except KeyError:\n return Path(\"/snap/microk8s/current\")\n\n\ndef snap_data() -> Path:\n try:\n return Path(os.environ[\"SNAP_DATA\"])\n except KeyError:\n return Path(\"/var/snap/microk8s/current\")\n\n\ndef snap_common() -> Path:\n try:\n return Path(os.environ[\"SNAP_COMMON\"])\n except KeyError:\n return Path(\"/var/snap/microk8s/common\")\n\n\ndef run(*args, die=True):\n # Add wrappers to $PATH\n env = os.environ.copy()\n env[\"PATH\"] += \":%s\" % os.environ[\"SNAP\"]\n result = subprocess.run(\n args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env\n )\n\n try:\n result.check_returncode()\n except subprocess.CalledProcessError as err:\n if die:\n if result.stderr:\n print(result.stderr.decode(\"utf-8\"))\n print(err)\n sys.exit(1)\n else:\n raise\n\n return result.stdout.decode(\"utf-8\")\n\n\ndef is_cluster_ready():\n try:\n service_output = kubectl_get(\"all\")\n node_output = kubectl_get(\"nodes\")\n # Make sure to compare with the word \" Ready \" with spaces.\n if \" Ready \" in node_output and \"service/kubernetes\" in service_output:\n return True\n else:\n return False\n except Exception:\n return False\n\n\ndef is_ha_enabled():\n ha_lock = os.path.expandvars(\"${SNAP_DATA}/var/lock/ha-cluster\")\n return os.path.isfile(ha_lock)\n\n\ndef get_dqlite_info():\n cluster_dir = os.path.expandvars(\"${SNAP_DATA}/var/kubernetes/backend\")\n snap_path = os.environ.get(\"SNAP\")\n\n info = []\n\n if not is_ha_enabled():\n return info\n\n waits = 10\n while waits > 0:\n try:\n with open(\"{}/info.yaml\".format(cluster_dir), mode=\"r\") as f:\n data = yaml.safe_load(f)\n out = subprocess.check_output(\n \"{snappath}/bin/dqlite -s file://{dbdir}/cluster.yaml -c {dbdir}/cluster.crt \"\n \"-k {dbdir}/cluster.key -f json k8s .cluster\".format(\n snappath=snap_path, dbdir=cluster_dir\n ).split(),\n timeout=4,\n stderr=subprocess.DEVNULL,\n )\n if data[\"Address\"] in out.decode():\n break\n else:\n time.sleep(5)\n waits -= 1\n except (subprocess.CalledProcessError, subprocess.TimeoutExpired):\n time.sleep(2)\n waits -= 1\n\n if waits == 0:\n return info\n\n nodes = json.loads(out.decode())\n for n in nodes:\n if n[\"Role\"] == 0:\n info.append((n[\"Address\"], \"voter\"))\n if n[\"Role\"] == 1:\n info.append((n[\"Address\"], \"standby\"))\n if n[\"Role\"] == 2:\n info.append((n[\"Address\"], \"spare\"))\n return info\n\n\ndef is_cluster_locked():\n if (snap_data() / \"var/lock/clustered.lock\").exists():\n click.echo(\"This MicroK8s deployment is acting as a node in a cluster.\")\n click.echo(\"Please use the master node.\")\n sys.exit(1)\n\n\ndef wait_for_ready(timeout):\n 
start_time = time.time()\n\n while True:\n if is_cluster_ready():\n return True\n elif timeout and time.time() > start_time + timeout:\n return False\n else:\n time.sleep(2)\n\n\ndef exit_if_no_root():\n \"\"\"\n Exit if the user is not root\n \"\"\"\n if not os.geteuid() == 0:\n click.echo(\n \"Elevated permissions is needed for this operation. Please run this command with sudo.\"\n )\n exit(50)\n\n\ndef exit_if_stopped():\n stoppedLockFile = os.path.expandvars(\"${SNAP_DATA}/var/lock/stopped.lock\")\n if os.path.isfile(stoppedLockFile):\n print(\"microk8s is not running, try microk8s start\")\n exit(0)\n\n\ndef exit_if_no_permission():\n user = getpass.getuser()\n # test if we can access the default kubeconfig\n clientConfigFile = os.path.expandvars(\"${SNAP_DATA}/credentials/client.config\")\n if not os.access(clientConfigFile, os.R_OK):\n print(\"Insufficient permissions to access MicroK8s.\")\n print(\n \"You can either try again with sudo or add the user {} to the 'microk8s' group:\".format(\n user\n )\n )\n print(\"\")\n print(\" sudo usermod -a -G microk8s {}\".format(user))\n print(\" sudo chown -R $USER ~/.kube\")\n print(\"\")\n print(\n \"After this, reload the user groups either via a reboot or by running 'newgrp microk8s'.\"\n )\n exit(1)\n\n\ndef ensure_started():\n if (snap_data() / \"var/lock/stopped.lock\").exists():\n click.echo(\"microk8s is not running, try microk8s start\", err=True)\n sys.exit(1)\n\n\ndef kubectl_get(cmd, namespace=\"--all-namespaces\"):\n if namespace == \"--all-namespaces\":\n return run(KUBECTL, \"get\", cmd, \"--all-namespaces\", die=False)\n else:\n return run(KUBECTL, \"get\", cmd, \"-n\", namespace, die=False)\n\n\ndef kubectl_get_clusterroles():\n return run(\n KUBECTL,\n \"get\",\n \"clusterroles\",\n \"--show-kind\",\n \"--no-headers\",\n die=False,\n )\n\n\ndef is_community_addon(arch, addon_name):\n \"\"\"\n Check if an addon is part of the community repo.\n\n :param arch: architecture of the addon we are looking for\n :param addon_name: name of the addon we are looking for\n :return: True if the addon is in the community repo\n \"\"\"\n try:\n addons_yaml = f\"{os.environ['SNAP']}/addons/community/addons.yaml\"\n with open(addons_yaml, \"r\") as fin:\n addons = yaml.safe_load(fin)\n\n for addon in addons[\"microk8s-addons\"][\"addons\"]:\n if arch in addon[\"supported_architectures\"]:\n if addon_name == addon[\"name\"]:\n return True\n except Exception:\n LOG.exception(\"could not load addons from %s\", addons_yaml)\n\n return False\n\n\ndef get_available_addons(arch):\n available = []\n strict = is_strict()\n for dir in os.listdir(snap_common() / \"addons\"):\n try:\n addons_yaml = snap_common() / \"addons\" / dir / \"addons.yaml\"\n with open(addons_yaml, \"r\") as fin:\n addons = yaml.safe_load(fin)\n\n for addon in addons[\"microk8s-addons\"][\"addons\"]:\n if arch not in addon[\"supported_architectures\"]:\n continue\n\n if \"confinement\" in addon:\n if strict and \"strict\" not in addon[\"confinement\"]:\n continue\n if not strict and \"classic\" not in addon[\"confinement\"]:\n continue\n\n available.append({**addon, \"repository\": dir})\n\n except Exception:\n LOG.exception(\"could not load addons from %s\", addons_yaml)\n\n available = sorted(available, key=lambda k: (k[\"repository\"], k[\"name\"]))\n return available\n\n\ndef get_addon_by_name(addons, name):\n filtered_addon = []\n\n parts = name.split(\"/\")\n if len(parts) == 1:\n repo_name, addon_name = None, parts[0]\n elif len(parts) == 2:\n repo_name, addon_name = 
parts[0], parts[1]\n else:\n # just fallback to the addon name\n repo_name, addon_name = None, name\n\n for addon in addons:\n if addon_name == addon[\"name\"] and (repo_name == addon[\"repository\"] or not repo_name):\n filtered_addon.append(addon)\n\n return filtered_addon\n\n\ndef is_service_expected_to_start(service):\n \"\"\"\n Check if a service is supposed to start\n :param service: the service name\n :return: True if the service is meant to start\n \"\"\"\n lock_path = os.path.expandvars(\"${SNAP_DATA}/var/lock\")\n lock = \"{}/{}\".format(lock_path, service)\n return os.path.exists(lock_path) and not os.path.isfile(lock)\n\n\ndef set_service_expected_to_start(service, start=True):\n \"\"\"\n Check if a service is not expected to start.\n :param service: the service name\n :param start: should the service start or not\n \"\"\"\n lock_path = os.path.expandvars(\"${SNAP_DATA}/var/lock\")\n lock = \"{}/{}\".format(lock_path, service)\n if start:\n os.remove(lock)\n else:\n fd = os.open(lock, os.O_CREAT, mode=0o700)\n os.close(fd)\n\n\ndef check_help_flag(addons: list) -> bool:\n \"\"\"Checks to see if a help message needs to be printed for an addon.\n\n Not all addons check for help flags themselves. Until they do, intercept\n calls to print help text and print out a generic message to that effect.\n \"\"\"\n addon = addons[0]\n if any(help_arg in addons for help_arg in (\"-h\", \"--help\")):\n print(\"Addon %s does not yet have a help message.\" % addon)\n print(\"For more information about it, visit https://microk8s.io/docs/addons\")\n return True\n return False\n\n\ndef parse_xable_addon_args(addon_args: list, available_addons: list):\n \"\"\"\n Parse the list of addons passed into the microk8s enable or disable commands.\n Further, it will infer the repository name for addons when possible.\n If any errors are encountered, we print them to stderr and exit.\n\n :param addon_args: The parameters passed to the microk8s enable command\n :param available_addons: List of available addons as (repo_name, addon_name) tuples\n\n Handles the following cases:\n - microk8s enable foo bar:--baz # enable many addons, inline arguments\n - microk8s enable bar --baz # enable one addon, unix style command line arguments\n\n :return: a list of (repo_name, addon_name, args) tuples\n \"\"\"\n\n # Backwards compatibility with enabling multiple addons at once, e.g.\n # `microk8s.enable foo bar:\"baz\"`\n available_addon_names = [addon_name for (_, addon_name) in available_addons]\n available_addon_names += [\n \"/\".join([repo_name, addon_name]) for (repo_name, addon_name) in available_addons\n ]\n addon_names = [arg.split(\":\")[0] for arg in addon_args]\n if set(addon_names) < set(available_addon_names):\n return [parse_xable_single_arg(addon_arg, available_addons) for addon_arg in addon_args]\n\n # The new way of xabling addons, that allows for unix-style argument passing,\n # such as `microk8s.enable foo --bar`.\n repo_name, addon_name, args = parse_xable_single_arg(addon_args[0], available_addons)\n if args and addon_args[1:]:\n click.echo(\n \"Can't pass string arguments and flag arguments simultaneously!\\n\"\n \"Enable or disable addons with only one argument style at a time:\\n\"\n \"\\n\"\n \" microk8s enable foo:'bar'\\n\"\n \"or\\n\"\n \" microk8s enable foo --bar\\n\"\n )\n sys.exit(1)\n\n return [(repo_name, addon_name, addon_args[1:])]\n\n\ndef parse_xable_single_arg(addon_arg: str, available_addons: list):\n \"\"\"\n Parse an addon arg of the following form: 
`(repo_name/)addon_name(:args)`\n It will automatically infer the repository name if not specified. If multiple repositories\n are found for the addon, we print an error and exit.\n\n :param addon_arg: A parameter passed to the microk8s enable command\n :param available_addons: List of available addons as (repo_name, addon_name) tuples\n\n :return: a (repo_name, addon_name, args) tuple\n \"\"\"\n addon_name, *args = addon_arg.split(\":\")\n parts = addon_name.split(\"/\")\n if len(parts) == 2:\n return (parts[0], parts[1], args)\n elif len(parts) == 1:\n matching_repos = [repo for (repo, addon) in available_addons if addon == addon_name]\n if len(matching_repos) == 0:\n click.echo(\"Addon {} was not found in any repository\".format(addon_name), err=True)\n if is_community_addon(get_current_arch(), addon_name):\n click.echo(\n \"To use the community maintained flavor enable the respective repository:\"\n )\n click.echo(\"\")\n click.echo(\" microk8s enable community\")\n click.echo(\"\")\n\n sys.exit(1)\n elif len(matching_repos) == 1:\n click.echo(\n \"Infer repository {} for addon {}\".format(matching_repos[0], addon_name), err=True\n )\n return (matching_repos[0], addon_name, args)\n else:\n click.echo(\n \"Addon {} exists in more than repository. Please explicitly specify\\n\"\n \"the repository using any of:\\n\".format(addon_name),\n err=True,\n )\n for repo in matching_repos:\n click.echo(\" {}/{}\".format(repo, addon_name), err=True)\n click.echo(\"\", err=True)\n sys.exit(1)\n\n else:\n click.echo(\"Invalid addon name {}\".format(addon_name))\n sys.exit(1)\n\n\ndef xable(action: str, addon_args: list):\n \"\"\"Enables or disables the given addons.\n\n Collated into a single function since the logic is identical other than\n the script names.\n\n :param action: \"enable\" or \"disable\"\n :param addons: List of addons to enable. Each addon may be prefixed with `repository/`\n to specify which addon repository it will be sourced from.\n \"\"\"\n available_addons_info = get_available_addons(get_current_arch())\n enabled_addons_info, disabled_addons_info = get_status(available_addons_info, True)\n if action == \"enable\":\n xabled_addons_info = enabled_addons_info\n elif action == \"disable\":\n xabled_addons_info = disabled_addons_info\n else:\n click.echo(\"Invalid action {}. 
Only enable and disable are supported\".format(action))\n sys.exit(1)\n\n # available_addons is a list of (repo_name, addon_name) tuples for all available addons\n available_addons = [(addon[\"repository\"], addon[\"name\"]) for addon in available_addons_info]\n # xabled_addons is a list (repo_name, addon_name) tuples of already xabled addons\n xabled_addons = [(addon[\"repository\"], addon[\"name\"]) for addon in xabled_addons_info]\n\n addons = parse_xable_addon_args(addon_args, available_addons)\n\n for repo_name, addon_name, args in addons:\n if (repo_name, addon_name) not in available_addons:\n click.echo(\"Addon {}/{} not found\".format(repo_name, addon_name))\n continue\n if (repo_name, addon_name) in xabled_addons:\n click.echo(\"Addon {}/{} is already {}d\".format(repo_name, addon_name, action))\n continue\n\n wait_for_ready(timeout=30)\n p = subprocess.run(\n [snap_common() / \"addons\" / repo_name / \"addons\" / addon_name / action, *args]\n )\n if p.returncode:\n sys.exit(p.returncode)\n wait_for_ready(timeout=30)\n\n\ndef is_enabled(addon, item):\n if addon in item:\n return True\n else:\n filepath = os.path.expandvars(addon)\n return os.path.isfile(filepath)\n\n\ndef get_status(available_addons, isReady):\n enabled = []\n disabled = []\n if isReady:\n # 'all' does not include ingress\n kube_output = kubectl_get(\"all,ingress\")\n cluster_output = kubectl_get_clusterroles()\n kube_output = kube_output + cluster_output\n for addon in available_addons:\n found = False\n for row in kube_output.split(\"\\n\"):\n if is_enabled(addon[\"check_status\"], row):\n enabled.append(addon)\n found = True\n break\n if not found:\n disabled.append(addon)\n\n return enabled, disabled\n\n\ndef is_within_directory(directory, target):\n\n abs_directory = os.path.abspath(directory)\n abs_target = os.path.abspath(target)\n\n prefix = os.path.commonprefix([abs_directory, abs_target])\n\n return prefix == abs_directory\n\n\ndef safe_extract(tar, path=\".\", members=None, *, numeric_owner=False):\n\n for member in tar.getmembers():\n member_path = os.path.join(path, member.name)\n if not is_within_directory(path, member_path):\n raise Exception(\"Attempted Path Traversal in Tar File\")\n\n tar.extractall(path, members, numeric_owner=numeric_owner)\n", "path": "scripts/wrappers/common/utils.py" } ]
diff --git a/microk8s-resources/actions/common/utils.sh b/microk8s-resources/actions/common/utils.sh
index 84ecaf5e90..8bb2001383 100644
--- a/microk8s-resources/actions/common/utils.sh
+++ b/microk8s-resources/actions/common/utils.sh
@@ -18,7 +18,7 @@ exit_if_no_permissions() {
   echo "You can either try again with sudo or add the user $USER to the '${group}' group:" >&2
   echo "" >&2
   echo "  sudo usermod -a -G ${group} $USER" >&2
-  echo "  sudo chown -f -R $USER ~/.kube" >&2
+  echo "  sudo chown -R $USER ~/.kube" >&2
   echo "" >&2
   echo "After this, reload the user groups either via a reboot or by running 'newgrp ${group}'." >&2
   exit 1
diff --git a/scripts/wrappers/common/utils.py b/scripts/wrappers/common/utils.py
index b1b08e3c3f..0f3d30f378 100644
--- a/scripts/wrappers/common/utils.py
+++ b/scripts/wrappers/common/utils.py
@@ -197,7 +197,7 @@ def exit_if_no_permission():
         )
         print("")
         print("  sudo usermod -a -G microk8s {}".format(user))
-        print("  sudo chown -f -R $USER ~/.kube")
+        print("  sudo chown -R $USER ~/.kube")
         print("")
         print(
             "After this, reload the user groups either via a reboot or by running 'newgrp microk8s'."
microk8s suggests command hiding error info

<!-- Thank you for submitting an issue. Please fill in the template below information about the bug you encountered. -->

#### Summary

<!-- Please explain the bug in a few short sentences -->

When you try to run microk8s with insufficient permissions, you get an error and suggestion:

```console
$ microk8s status
Insufficient permissions to access MicroK8s.
You can either try again with sudo or add the user runner to the 'snap_microk8s' group:

  sudo usermod -a -G snap_microk8s runner
  sudo chown -f -R runner ~/.kube

After this, reload the user groups either via a reboot or by running 'newgrp snap_microk8s'.
```

However, if you don't have a `~/.kube` file, the `chown` command will fail silently and suppress the error message. This can cause failures e.g. in GitHub runners / scripts with `set -e`, and the `-f` option will hide the error message. This makes it very hard to debug the failure as there are no log messages.

#### What Should Happen Instead?

<!-- Please explain what the expected behavior is -->

Suggest to the user not to use the `-f` flag in `chown`. There is no reason to suppress the error message.

```console
$ microk8s status
Insufficient permissions to access MicroK8s.
You can either try again with sudo or add the user runner to the 'snap_microk8s' group:

  sudo usermod -a -G snap_microk8s runner
  sudo chown -R runner ~/.kube

After this, reload the user groups either via a reboot or by running 'newgrp snap_microk8s'.
```
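Note on the issue above: the `-f` flag only suppresses `chown`'s diagnostics; the exit status is still non-zero, which is exactly why a `set -e` script dies with nothing in the logs. A small illustration of the difference (assumes GNU coreutils; the missing path below is made up for the demo):

```python
import subprocess

# Hypothetical path that does not exist, mimicking a machine without ~/.kube.
missing = "/tmp/no-such-home/.kube"

# Without -f: non-zero exit *and* a diagnostic on stderr.
noisy = subprocess.run(["chown", "-R", "nobody", missing],
                       capture_output=True, text=True)
print(noisy.returncode, repr(noisy.stderr))  # e.g. 1 "chown: cannot access ..."

# With -f: the exit code is still non-zero, but stderr is empty, so a
# `set -e` script aborts with nothing in the logs explaining why.
quiet = subprocess.run(["chown", "-f", "-R", "nobody", missing],
                       capture_output=True, text=True)
print(quiet.returncode, repr(quiet.stderr))  # e.g. 1 ''
```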
Flexget__Flexget-3648
[ { "content": "import datetime\nimport hashlib\nimport logging\nimport os\nimport struct\n\nfrom apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom apscheduler.triggers.cron import CronTrigger\nfrom loguru import logger\n\nfrom flexget.config_schema import format_checker, register_config_key, register_schema\nfrom flexget.event import event\nfrom flexget.manager import manager\nfrom flexget.utils import json\n\nlogger = logger.bind(name='scheduler')\n\n\n# Add a format checker for more detailed errors on cron type schedules\n@format_checker.checks('cron_schedule', raises=ValueError)\ndef is_cron_schedule(instance):\n if not isinstance(instance, dict):\n return True\n try:\n return CronTrigger(**instance)\n except TypeError:\n # A more specific error message about which key will also be shown by properties schema keyword\n raise ValueError('Invalid key for schedule.')\n\n\nDEFAULT_SCHEDULES = [{'tasks': ['*'], 'interval': {'hours': 1}}]\n\nUNITS = ['minutes', 'hours', 'days', 'weeks']\ninterval_schema = {\n 'type': 'object',\n 'title': 'Simple Interval',\n 'properties': {\n 'minutes': {'type': 'number'},\n 'hours': {'type': 'number'},\n 'days': {'type': 'number'},\n 'weeks': {'type': 'number'},\n 'jitter': {'type': 'integer'},\n },\n 'anyOf': [{'required': [unit]} for unit in UNITS],\n 'error_anyOf': 'Interval must be specified as one or more of %s' % ', '.join(UNITS),\n 'additionalProperties': False,\n}\n\ncron_schema = {\n 'type': 'object',\n 'title': 'Advanced Cron Interval',\n 'properties': {\n 'year': {'type': ['integer', 'string']},\n 'month': {'type': ['integer', 'string']},\n 'day': {'type': ['integer', 'string']},\n 'week': {'type': ['integer', 'string']},\n 'day_of_week': {'type': ['integer', 'string']},\n 'hour': {'type': ['integer', 'string']},\n 'minute': {'type': ['integer', 'string']},\n 'jitter': {'type': 'integer'},\n },\n 'additionalProperties': False,\n}\n\nschedule_schema = {\n 'type': 'object',\n 'title': 'Schedule',\n 'description': 'A schedule which runs specified tasks periodically.',\n 'properties': {\n 'tasks': {'type': ['array', 'string'], 'items': {'type': 'string'}},\n 'interval': interval_schema,\n 'schedule': cron_schema,\n },\n 'required': ['tasks'],\n 'minProperties': 2,\n 'maxProperties': 2,\n 'error_minProperties': 'Either `cron` or `interval` must be defined.',\n 'error_maxProperties': 'Either `cron` or `interval` must be defined.',\n 'additionalProperties': False,\n}\n\nmain_schema = {\n 'oneOf': [\n {'type': 'array', 'title': 'Enable', 'items': schedule_schema},\n {'type': 'boolean', 'title': 'Disable', 'description': 'Disable task schedules'},\n ]\n}\n\nscheduler = None\nscheduler_job_map = {}\n\n\ndef job_id(conf):\n \"\"\"Create a unique id for a schedule item in config.\"\"\"\n return hashlib.sha1(json.dumps(conf, sort_keys=True).encode('utf-8')).hexdigest()\n\n\ndef run_job(tasks):\n \"\"\"Add the execution to the queue and waits until it is finished\"\"\"\n logger.debug('executing tasks: {}', tasks)\n finished_events = manager.execute(\n options={'tasks': tasks, 'cron': True, 'allow_manual': False}, priority=5\n )\n for _, task_name, event_ in finished_events:\n logger.debug('task finished executing: {}', task_name)\n event_.wait()\n logger.debug('all tasks in schedule finished executing')\n\n\n@event('manager.daemon.started')\ndef setup_scheduler(manager):\n \"\"\"Configure and start apscheduler\"\"\"\n global scheduler\n if logger.level(manager.options.loglevel).no 
> logger.level('DEBUG').no:\n logging.getLogger('apscheduler').setLevel(logging.WARNING)\n # Since APScheduler runs in a separate thread, slower devices can sometimes get a DB lock, so use a separate db\n # for the jobs to avoid this\n db_filename = os.path.join(manager.config_base, 'db-%s-jobs.sqlite' % manager.config_name)\n # in case running on windows, needs double \\\\\n db_filename = db_filename.replace('\\\\', '\\\\\\\\')\n database_uri = 'sqlite:///%s' % db_filename\n jobstores = {'default': SQLAlchemyJobStore(url=database_uri)}\n # If job was meant to run within last day while daemon was shutdown, run it once when continuing\n job_defaults = {'coalesce': True, 'misfire_grace_time': 60 * 60 * 24}\n scheduler = BackgroundScheduler(\n jobstores=jobstores,\n job_defaults=job_defaults,\n timezone=datetime.datetime.now().astimezone().tzinfo,\n )\n setup_jobs(manager)\n\n\n@event('manager.config_updated')\ndef setup_jobs(manager):\n \"\"\"Set up the jobs for apscheduler to run.\"\"\"\n if not manager.is_daemon:\n return\n\n global scheduler_job_map\n scheduler_job_map = {}\n\n if 'schedules' not in manager.config:\n logger.info(\n 'No schedules defined in config. Defaulting to run all tasks on a 1 hour interval.'\n )\n config = manager.config.get('schedules', True)\n if config is True:\n config = DEFAULT_SCHEDULES\n elif not config: # Schedules are disabled with `schedules: no`\n if scheduler.running:\n logger.info('Shutting down scheduler')\n scheduler.shutdown()\n return\n if not scheduler.running:\n logger.info('Starting scheduler')\n scheduler.start(paused=True)\n existing_job_ids = [job.id for job in scheduler.get_jobs()]\n configured_job_ids = []\n for job_config in config:\n jid = job_id(job_config)\n configured_job_ids.append(jid)\n scheduler_job_map[id(job_config)] = jid\n if jid in existing_job_ids:\n continue\n if 'interval' in job_config:\n trigger, trigger_args = 'interval', job_config['interval']\n else:\n trigger, trigger_args = 'cron', job_config['schedule']\n tasks = job_config['tasks']\n if not isinstance(tasks, list):\n tasks = [tasks]\n name = ','.join(tasks)\n scheduler.add_job(\n run_job, args=(tasks,), id=jid, name=name, trigger=trigger, **trigger_args\n )\n # Remove jobs no longer in config\n for jid in existing_job_ids:\n if jid not in configured_job_ids:\n scheduler.remove_job(jid)\n scheduler.resume()\n\n\n@event('manager.shutdown_requested')\ndef shutdown_requested(manager):\n if scheduler and scheduler.running:\n scheduler.shutdown(wait=True)\n\n\n@event('manager.shutdown')\ndef stop_scheduler(manager):\n if scheduler and scheduler.running:\n scheduler.shutdown(wait=False)\n\n\n@event('config.register')\ndef register_config():\n register_config_key('schedules', main_schema)\n register_schema('/schema/config/schedule', schedule_schema)\n", "path": "flexget/components/scheduler/scheduler.py" } ]
[ { "content": "import hashlib\nimport logging\nimport os\nimport struct\n\nfrom apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom apscheduler.triggers.cron import CronTrigger\nfrom loguru import logger\n\nfrom flexget.config_schema import format_checker, register_config_key, register_schema\nfrom flexget.event import event\nfrom flexget.manager import manager\nfrom flexget.utils import json\n\nlogger = logger.bind(name='scheduler')\n\n\n# Add a format checker for more detailed errors on cron type schedules\n@format_checker.checks('cron_schedule', raises=ValueError)\ndef is_cron_schedule(instance):\n if not isinstance(instance, dict):\n return True\n try:\n return CronTrigger(**instance)\n except TypeError:\n # A more specific error message about which key will also be shown by properties schema keyword\n raise ValueError('Invalid key for schedule.')\n\n\nDEFAULT_SCHEDULES = [{'tasks': ['*'], 'interval': {'hours': 1}}]\n\nUNITS = ['minutes', 'hours', 'days', 'weeks']\ninterval_schema = {\n 'type': 'object',\n 'title': 'Simple Interval',\n 'properties': {\n 'minutes': {'type': 'number'},\n 'hours': {'type': 'number'},\n 'days': {'type': 'number'},\n 'weeks': {'type': 'number'},\n 'jitter': {'type': 'integer'},\n },\n 'anyOf': [{'required': [unit]} for unit in UNITS],\n 'error_anyOf': 'Interval must be specified as one or more of %s' % ', '.join(UNITS),\n 'additionalProperties': False,\n}\n\ncron_schema = {\n 'type': 'object',\n 'title': 'Advanced Cron Interval',\n 'properties': {\n 'year': {'type': ['integer', 'string']},\n 'month': {'type': ['integer', 'string']},\n 'day': {'type': ['integer', 'string']},\n 'week': {'type': ['integer', 'string']},\n 'day_of_week': {'type': ['integer', 'string']},\n 'hour': {'type': ['integer', 'string']},\n 'minute': {'type': ['integer', 'string']},\n 'jitter': {'type': 'integer'},\n },\n 'additionalProperties': False,\n}\n\nschedule_schema = {\n 'type': 'object',\n 'title': 'Schedule',\n 'description': 'A schedule which runs specified tasks periodically.',\n 'properties': {\n 'tasks': {'type': ['array', 'string'], 'items': {'type': 'string'}},\n 'interval': interval_schema,\n 'schedule': cron_schema,\n },\n 'required': ['tasks'],\n 'minProperties': 2,\n 'maxProperties': 2,\n 'error_minProperties': 'Either `cron` or `interval` must be defined.',\n 'error_maxProperties': 'Either `cron` or `interval` must be defined.',\n 'additionalProperties': False,\n}\n\nmain_schema = {\n 'oneOf': [\n {'type': 'array', 'title': 'Enable', 'items': schedule_schema},\n {'type': 'boolean', 'title': 'Disable', 'description': 'Disable task schedules'},\n ]\n}\n\nscheduler = None\nscheduler_job_map = {}\n\n\ndef job_id(conf):\n \"\"\"Create a unique id for a schedule item in config.\"\"\"\n return hashlib.sha1(json.dumps(conf, sort_keys=True).encode('utf-8')).hexdigest()\n\n\ndef run_job(tasks):\n \"\"\"Add the execution to the queue and waits until it is finished\"\"\"\n logger.debug('executing tasks: {}', tasks)\n finished_events = manager.execute(\n options={'tasks': tasks, 'cron': True, 'allow_manual': False}, priority=5\n )\n for _, task_name, event_ in finished_events:\n logger.debug('task finished executing: {}', task_name)\n event_.wait()\n logger.debug('all tasks in schedule finished executing')\n\n\n@event('manager.daemon.started')\ndef setup_scheduler(manager):\n \"\"\"Configure and start apscheduler\"\"\"\n global scheduler\n if logger.level(manager.options.loglevel).no > 
logger.level('DEBUG').no:\n logging.getLogger('apscheduler').setLevel(logging.WARNING)\n # Since APScheduler runs in a separate thread, slower devices can sometimes get a DB lock, so use a separate db\n # for the jobs to avoid this\n db_filename = os.path.join(manager.config_base, 'db-%s-jobs.sqlite' % manager.config_name)\n # in case running on windows, needs double \\\\\n db_filename = db_filename.replace('\\\\', '\\\\\\\\')\n database_uri = 'sqlite:///%s' % db_filename\n jobstores = {'default': SQLAlchemyJobStore(url=database_uri)}\n # If job was meant to run within last day while daemon was shutdown, run it once when continuing\n job_defaults = {'coalesce': True, 'misfire_grace_time': 60 * 60 * 24}\n scheduler = BackgroundScheduler(\n jobstores=jobstores,\n job_defaults=job_defaults,\n )\n setup_jobs(manager)\n\n\n@event('manager.config_updated')\ndef setup_jobs(manager):\n \"\"\"Set up the jobs for apscheduler to run.\"\"\"\n if not manager.is_daemon:\n return\n\n global scheduler_job_map\n scheduler_job_map = {}\n\n if 'schedules' not in manager.config:\n logger.info(\n 'No schedules defined in config. Defaulting to run all tasks on a 1 hour interval.'\n )\n config = manager.config.get('schedules', True)\n if config is True:\n config = DEFAULT_SCHEDULES\n elif not config: # Schedules are disabled with `schedules: no`\n if scheduler.running:\n logger.info('Shutting down scheduler')\n scheduler.shutdown()\n return\n if not scheduler.running:\n logger.info('Starting scheduler')\n scheduler.start(paused=True)\n existing_job_ids = [job.id for job in scheduler.get_jobs()]\n configured_job_ids = []\n for job_config in config:\n jid = job_id(job_config)\n configured_job_ids.append(jid)\n scheduler_job_map[id(job_config)] = jid\n if jid in existing_job_ids:\n continue\n if 'interval' in job_config:\n trigger, trigger_args = 'interval', job_config['interval']\n else:\n trigger, trigger_args = 'cron', job_config['schedule']\n tasks = job_config['tasks']\n if not isinstance(tasks, list):\n tasks = [tasks]\n name = ','.join(tasks)\n scheduler.add_job(\n run_job, args=(tasks,), id=jid, name=name, trigger=trigger, **trigger_args\n )\n # Remove jobs no longer in config\n for jid in existing_job_ids:\n if jid not in configured_job_ids:\n scheduler.remove_job(jid)\n scheduler.resume()\n\n\n@event('manager.shutdown_requested')\ndef shutdown_requested(manager):\n if scheduler and scheduler.running:\n scheduler.shutdown(wait=True)\n\n\n@event('manager.shutdown')\ndef stop_scheduler(manager):\n if scheduler and scheduler.running:\n scheduler.shutdown(wait=False)\n\n\n@event('config.register')\ndef register_config():\n register_config_key('schedules', main_schema)\n register_schema('/schema/config/schedule', schedule_schema)\n", "path": "flexget/components/scheduler/scheduler.py" } ]
diff --git a/flexget/components/scheduler/scheduler.py b/flexget/components/scheduler/scheduler.py
index df8419f9d1..e097cb2e74 100644
--- a/flexget/components/scheduler/scheduler.py
+++ b/flexget/components/scheduler/scheduler.py
@@ -1,4 +1,3 @@
-import datetime
 import hashlib
 import logging
 import os
@@ -126,7 +125,6 @@ def setup_scheduler(manager):
     scheduler = BackgroundScheduler(
         jobstores=jobstores,
         job_defaults=job_defaults,
-        timezone=datetime.datetime.now().astimezone().tzinfo,
     )
     setup_jobs(manager)
Error on scheduler: Only timezones from the pytz library are supported

### Steps to reproduce:
- Step 1: `flexget -L verbose daemon start`

#### Config:
```yaml
schedules:
  - tasks: ['some-task']
    interval:
      hours: 1
```

#### Backtrace:
```
File "/home/pi/.local/lib/python3.9/site-packages/flexget/__init__.py", line 44, in main
    manager.start()
File "/home/pi/.local/lib/python3.9/site-packages/flexget/manager.py", line 383, in start
    self.handle_cli()
File "/home/pi/.local/lib/python3.9/site-packages/flexget/manager.py", line 409, in handle_cli
    self.daemon_command(command_options)
File "/home/pi/.local/lib/python3.9/site-packages/flexget/manager.py", line 506, in daemon_command
    run_daemon()
File "/home/pi/.local/lib/python3.9/site-packages/flexget/manager.py", line 487, in run_daemon
    fire_event('manager.daemon.started', self)
File "/home/pi/.local/lib/python3.9/site-packages/flexget/event.py", line 109, in fire_event
    result = event(*args, **kwargs)
File "/home/pi/.local/lib/python3.9/site-packages/flexget/event.py", line 20, in __call__
    return self.func(*args, **kwargs)
File "/home/pi/.local/lib/python3.9/site-packages/flexget/components/scheduler/scheduler.py", line 126, in setup_scheduler
    scheduler = BackgroundScheduler(
File "/home/pi/.local/lib/python3.9/site-packages/apscheduler/schedulers/base.py", line 87, in __init__
    self.configure(gconfig, **options)
File "/home/pi/.local/lib/python3.9/site-packages/apscheduler/schedulers/base.py", line 126, in configure
    self._configure(config)
File "/home/pi/.local/lib/python3.9/site-packages/apscheduler/schedulers/background.py", line 29, in _configure
    super(BackgroundScheduler, self)._configure(config)
File "/home/pi/.local/lib/python3.9/site-packages/apscheduler/schedulers/base.py", line 697, in _configure
    self.timezone = astimezone(config.pop('timezone', None)) or get_localzone()
File "/home/pi/.local/lib/python3.9/site-packages/apscheduler/util.py", line 93, in astimezone
    raise TypeError('Only timezones from the pytz library are supported')
TypeError: Only timezones from the pytz library are supported
```

### Additional information:
- FlexGet version: 3.5.2
- Python version: 3.9.2
- Installation method:
- Using daemon (yes/no): yes

It seems to have started after https://github.com/Flexget/Flexget/pull/3453 that changed the timezone argument to a non-pytz compatible object.
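Note on the fix above: the merged diff simply drops the explicit `timezone=` argument and lets APScheduler 3.x pick the local zone itself. For comparison, a sketch of the other way to satisfy its `astimezone()` check, passing a pytz timezone (or an Olson zone-name string) explicitly; the `"Europe/London"` zone here is only an illustrative choice:

```python
import pytz
from apscheduler.schedulers.background import BackgroundScheduler

# APScheduler 3.x accepts a pytz tzinfo (it checks for .localize/.normalize)
# or a zone-name string, which it resolves through pytz itself.
scheduler = BackgroundScheduler(timezone=pytz.timezone("Europe/London"))

# A plain string works too:
# scheduler = BackgroundScheduler(timezone="UTC")
```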
ray-project__ray-3109
[ { "content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nif \"pyarrow\" in sys.modules:\n raise ImportError(\"Ray must be imported before pyarrow because Ray \"\n \"requires a specific version of pyarrow (which is \"\n \"packaged along with Ray).\")\n\n# Add the directory containing pyarrow to the Python path so that we find the\n# pyarrow version packaged with ray and not a pre-existing pyarrow.\npyarrow_path = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"pyarrow_files\")\nsys.path.insert(0, pyarrow_path)\n\n# See https://github.com/ray-project/ray/issues/131.\nhelpful_message = \"\"\"\n\nIf you are using Anaconda, try fixing this problem by running:\n\n conda install libgcc\n\"\"\"\n\ntry:\n import pyarrow # noqa: F401\nexcept ImportError as e:\n if ((hasattr(e, \"msg\") and isinstance(e.msg, str)\n and (\"libstdc++\" in e.msg or \"CXX\" in e.msg))):\n # This code path should be taken with Python 3.\n e.msg += helpful_message\n elif (hasattr(e, \"message\") and isinstance(e.message, str)\n and (\"libstdc++\" in e.message or \"CXX\" in e.message)):\n # This code path should be taken with Python 2.\n condition = (hasattr(e, \"args\") and isinstance(e.args, tuple)\n and len(e.args) == 1 and isinstance(e.args[0], str))\n if condition:\n e.args = (e.args[0] + helpful_message, )\n else:\n if not hasattr(e, \"args\"):\n e.args = ()\n elif not isinstance(e.args, tuple):\n e.args = (e.args, )\n e.args += (helpful_message, )\n raise\n\nfrom ray.raylet import ObjectID, _config # noqa: E402\nfrom ray.profiling import profile # noqa: E402\nfrom ray.worker import (error_info, init, connect, disconnect, get, put, wait,\n remote, get_gpu_ids, get_resource_ids, get_webui_url,\n register_custom_serializer, shutdown,\n is_initialized) # noqa: E402\nfrom ray.worker import (SCRIPT_MODE, WORKER_MODE, LOCAL_MODE,\n PYTHON_MODE) # noqa: E402\nfrom ray.worker import global_state # noqa: E402\nimport ray.internal # noqa: E402\n# We import ray.actor because some code is run in actor.py which initializes\n# some functions in the worker.\nimport ray.actor # noqa: F401\nfrom ray.actor import method # noqa: E402\n\n# Ray version string.\n__version__ = \"0.5.3\"\n\n__all__ = [\n \"error_info\", \"init\", \"connect\", \"disconnect\", \"get\", \"put\", \"wait\",\n \"remote\", \"profile\", \"actor\", \"method\", \"get_gpu_ids\", \"get_resource_ids\",\n \"get_webui_url\", \"register_custom_serializer\", \"shutdown\",\n \"is_initialized\", \"SCRIPT_MODE\", \"WORKER_MODE\", \"LOCAL_MODE\",\n \"PYTHON_MODE\", \"global_state\", \"ObjectID\", \"_config\", \"__version__\",\n \"internal\"\n]\n\nimport ctypes # noqa: E402\n# Windows only\nif hasattr(ctypes, \"windll\"):\n # Makes sure that all child processes die when we die. Also makes sure that\n # fatal crashes result in process termination rather than an error dialog\n # (the latter is annoying since we have a lot of processes). 
This is done\n # by associating all child processes with a \"job\" object that imposes this\n # behavior.\n (lambda kernel32: (lambda job: (lambda n: kernel32.SetInformationJobObject(job, 9, \"\\0\" * 17 + chr(0x8 | 0x4 | 0x20) + \"\\0\" * (n - 18), n))(0x90 if ctypes.sizeof(ctypes.c_void_p) > ctypes.sizeof(ctypes.c_int) else 0x70) and kernel32.AssignProcessToJobObject(job, ctypes.c_void_p(kernel32.GetCurrentProcess())))(ctypes.c_void_p(kernel32.CreateJobObjectW(None, None))) if kernel32 is not None else None)(ctypes.windll.kernel32) # noqa: E501\n", "path": "python/ray/__init__.py" } ]
[ { "content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nif \"pyarrow\" in sys.modules:\n raise ImportError(\"Ray must be imported before pyarrow because Ray \"\n \"requires a specific version of pyarrow (which is \"\n \"packaged along with Ray).\")\n\n# Add the directory containing pyarrow to the Python path so that we find the\n# pyarrow version packaged with ray and not a pre-existing pyarrow.\npyarrow_path = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"pyarrow_files\")\nsys.path.insert(0, pyarrow_path)\n\n# See https://github.com/ray-project/ray/issues/131.\nhelpful_message = \"\"\"\n\nIf you are using Anaconda, try fixing this problem by running:\n\n conda install libgcc\n\"\"\"\n\ntry:\n import pyarrow # noqa: F401\nexcept ImportError as e:\n if ((hasattr(e, \"msg\") and isinstance(e.msg, str)\n and (\"libstdc++\" in e.msg or \"CXX\" in e.msg))):\n # This code path should be taken with Python 3.\n e.msg += helpful_message\n elif (hasattr(e, \"message\") and isinstance(e.message, str)\n and (\"libstdc++\" in e.message or \"CXX\" in e.message)):\n # This code path should be taken with Python 2.\n condition = (hasattr(e, \"args\") and isinstance(e.args, tuple)\n and len(e.args) == 1 and isinstance(e.args[0], str))\n if condition:\n e.args = (e.args[0] + helpful_message, )\n else:\n if not hasattr(e, \"args\"):\n e.args = ()\n elif not isinstance(e.args, tuple):\n e.args = (e.args, )\n e.args += (helpful_message, )\n raise\n\nmodin_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"modin\")\nsys.path.insert(0, modin_path)\n\nfrom ray.raylet import ObjectID, _config # noqa: E402\nfrom ray.profiling import profile # noqa: E402\nfrom ray.worker import (error_info, init, connect, disconnect, get, put, wait,\n remote, get_gpu_ids, get_resource_ids, get_webui_url,\n register_custom_serializer, shutdown,\n is_initialized) # noqa: E402\nfrom ray.worker import (SCRIPT_MODE, WORKER_MODE, LOCAL_MODE,\n PYTHON_MODE) # noqa: E402\nfrom ray.worker import global_state # noqa: E402\nimport ray.internal # noqa: E402\n# We import ray.actor because some code is run in actor.py which initializes\n# some functions in the worker.\nimport ray.actor # noqa: F401\nfrom ray.actor import method # noqa: E402\n\n# Ray version string.\n__version__ = \"0.5.3\"\n\n__all__ = [\n \"error_info\", \"init\", \"connect\", \"disconnect\", \"get\", \"put\", \"wait\",\n \"remote\", \"profile\", \"actor\", \"method\", \"get_gpu_ids\", \"get_resource_ids\",\n \"get_webui_url\", \"register_custom_serializer\", \"shutdown\",\n \"is_initialized\", \"SCRIPT_MODE\", \"WORKER_MODE\", \"LOCAL_MODE\",\n \"PYTHON_MODE\", \"global_state\", \"ObjectID\", \"_config\", \"__version__\",\n \"internal\"\n]\n\nimport ctypes # noqa: E402\n# Windows only\nif hasattr(ctypes, \"windll\"):\n # Makes sure that all child processes die when we die. Also makes sure that\n # fatal crashes result in process termination rather than an error dialog\n # (the latter is annoying since we have a lot of processes). 
This is done\n # by associating all child processes with a \"job\" object that imposes this\n # behavior.\n (lambda kernel32: (lambda job: (lambda n: kernel32.SetInformationJobObject(job, 9, \"\\0\" * 17 + chr(0x8 | 0x4 | 0x20) + \"\\0\" * (n - 18), n))(0x90 if ctypes.sizeof(ctypes.c_void_p) > ctypes.sizeof(ctypes.c_int) else 0x70) and kernel32.AssignProcessToJobObject(job, ctypes.c_void_p(kernel32.GetCurrentProcess())))(ctypes.c_void_p(kernel32.CreateJobObjectW(None, None))) if kernel32 is not None else None)(ctypes.windll.kernel32) # noqa: E501\n", "path": "python/ray/__init__.py" } ]
diff --git a/.gitignore b/.gitignore index f8130b3a2f85e..91189b6f9c41a 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,9 @@ /src/ray/object_manager/format/*_generated.h /src/ray/raylet/format/*_generated.h +# Modin source files +/python/ray/modin + # Redis temporary files *dump.rdb diff --git a/.travis.yml b/.travis.yml index debf450738a79..b48089d52ecdb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -171,6 +171,9 @@ script: # ray temp file tests - python -m pytest -v test/tempfile_test.py + # modin test files + - python python/ray/test/test_modin.py + deploy: - provider: s3 access_key_id: AKIAJ2L7XDUSZVTXI5QA diff --git a/.travis/install-dependencies.sh b/.travis/install-dependencies.sh index 293c1b8b6b04d..0fb597d4686f8 100755 --- a/.travis/install-dependencies.sh +++ b/.travis/install-dependencies.sh @@ -24,7 +24,7 @@ if [[ "$PYTHON" == "2.7" ]] && [[ "$platform" == "linux" ]]; then wget https://repo.continuum.io/miniconda/Miniconda2-4.5.4-Linux-x86_64.sh -O miniconda.sh -nv bash miniconda.sh -b -p $HOME/miniconda export PATH="$HOME/miniconda/bin:$PATH" - pip install -q cython==0.27.3 cmake tensorflow gym opencv-python pyyaml pandas==0.22 requests \ + pip install -q cython==0.27.3 cmake tensorflow gym opencv-python pyyaml pandas==0.23.4 requests \ feather-format lxml openpyxl xlrd py-spy setproctitle faulthandler pytest-timeout elif [[ "$PYTHON" == "3.5" ]] && [[ "$platform" == "linux" ]]; then sudo apt-get update @@ -33,7 +33,7 @@ elif [[ "$PYTHON" == "3.5" ]] && [[ "$platform" == "linux" ]]; then wget https://repo.continuum.io/miniconda/Miniconda3-4.5.4-Linux-x86_64.sh -O miniconda.sh -nv bash miniconda.sh -b -p $HOME/miniconda export PATH="$HOME/miniconda/bin:$PATH" - pip install -q cython==0.27.3 cmake tensorflow gym opencv-python pyyaml pandas==0.22 requests \ + pip install -q cython==0.27.3 cmake tensorflow gym opencv-python pyyaml pandas==0.23.4 requests \ feather-format lxml openpyxl xlrd py-spy setproctitle pytest-timeout elif [[ "$PYTHON" == "2.7" ]] && [[ "$platform" == "macosx" ]]; then # check that brew is installed @@ -50,7 +50,7 @@ elif [[ "$PYTHON" == "2.7" ]] && [[ "$platform" == "macosx" ]]; then wget https://repo.continuum.io/miniconda/Miniconda2-4.5.4-MacOSX-x86_64.sh -O miniconda.sh -nv bash miniconda.sh -b -p $HOME/miniconda export PATH="$HOME/miniconda/bin:$PATH" - pip install -q cython==0.27.3 cmake tensorflow gym opencv-python pyyaml pandas==0.22 requests \ + pip install -q cython==0.27.3 cmake tensorflow gym opencv-python pyyaml pandas==0.23.4 requests \ feather-format lxml openpyxl xlrd py-spy setproctitle faulthandler pytest-timeout elif [[ "$PYTHON" == "3.5" ]] && [[ "$platform" == "macosx" ]]; then # check that brew is installed @@ -67,7 +67,7 @@ elif [[ "$PYTHON" == "3.5" ]] && [[ "$platform" == "macosx" ]]; then wget https://repo.continuum.io/miniconda/Miniconda3-4.5.4-MacOSX-x86_64.sh -O miniconda.sh -nv bash miniconda.sh -b -p $HOME/miniconda export PATH="$HOME/miniconda/bin:$PATH" - pip install -q cython==0.27.3 cmake tensorflow gym opencv-python pyyaml pandas==0.22 requests \ + pip install -q cython==0.27.3 cmake tensorflow gym opencv-python pyyaml pandas==0.23.4 requests \ feather-format lxml openpyxl xlrd py-spy setproctitle pytest-timeout elif [[ "$LINT" == "1" ]]; then sudo apt-get update diff --git a/.travis/test-wheels.sh b/.travis/test-wheels.sh index 1765135ec9bef..f7870ea52d496 100755 --- a/.travis/test-wheels.sh +++ b/.travis/test-wheels.sh @@ -59,7 +59,7 @@ if [[ "$platform" == "linux" ]]; then if [[ "$NUMBER_OF_WHEELS" != "5" ]]; 
then echo "Wrong number of wheels found." ls -l $ROOT_DIR/../.whl/ - exit 1 + exit 2 fi elif [[ "$platform" == "macosx" ]]; then @@ -94,5 +94,5 @@ elif [[ "$platform" == "macosx" ]]; then done else echo "Unrecognized environment." - exit 1 + exit 3 fi diff --git a/python/ray/__init__.py b/python/ray/__init__.py index a507cdd2e7a2b..6d4e0ba9a6b92 100644 --- a/python/ray/__init__.py +++ b/python/ray/__init__.py @@ -46,6 +46,9 @@ e.args += (helpful_message, ) raise +modin_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "modin") +sys.path.insert(0, modin_path) + from ray.raylet import ObjectID, _config # noqa: E402 from ray.profiling import profile # noqa: E402 from ray.worker import (error_info, init, connect, disconnect, get, put, wait, diff --git a/python/ray/test/test_modin.py b/python/ray/test/test_modin.py new file mode 100644 index 0000000000000..83c11895ec7b8 --- /dev/null +++ b/python/ray/test/test_modin.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import ray # noqa F401 + + +def test_modin_import(): + import modin.pandas as pd + frame_data = [1, 2, 3, 4, 5, 6, 7, 8] + frame = pd.DataFrame(frame_data) + assert frame.sum().squeeze() == sum(frame_data) diff --git a/thirdparty/scripts/build_modin.sh b/thirdparty/scripts/build_modin.sh new file mode 100755 index 0000000000000..96563fdb21067 --- /dev/null +++ b/thirdparty/scripts/build_modin.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +set -x + +# Cause the script to exit if a single command fails. +set -e + +if [[ -z "$1" ]]; then + PYTHON_EXECUTABLE=`which python` +else + PYTHON_EXECUTABLE=$1 +fi + +PYTHON_VERSION="$($PYTHON_EXECUTABLE -c 'import sys; print(sys.version_info[0])')" + +TP_DIR=$(cd "$(dirname "${BASH_SOURCE:-$0}")"; pwd)/../ +MODIN_VERSION=0.2.4 +MODIN_WHEELS_FNAME="modin-$MODIN_VERSION-py$PYTHON_VERSION-none-any.whl" +MODIN_WHEELS_URL="https://github.com/modin-project/modin/releases/download/v$MODIN_VERSION/" + +pushd $TP_DIR/../python/ray/ +rm -rf modin +mkdir modin +pushd modin +curl -kL "$MODIN_WHEELS_URL$MODIN_WHEELS_FNAME" -o "$MODIN_WHEELS_FNAME" +unzip "$MODIN_WHEELS_FNAME" +rm "$MODIN_WHEELS_FNAME" +popd +popd diff --git a/thirdparty/scripts/setup.sh b/thirdparty/scripts/setup.sh index 27f1ef0e3ed51..da283bd3b2bb7 100755 --- a/thirdparty/scripts/setup.sh +++ b/thirdparty/scripts/setup.sh @@ -67,3 +67,8 @@ bash "$TP_SCRIPT_DIR/build_ui.sh" # rDSN (optional) ############################################## # bash "$TP_SCRIPT_DIR/build_rdsn.sh" + +############################################## +# modin +############################################## +bash "$TP_SCRIPT_DIR/build_modin.sh" $PYTHON_EXECUTABLE
Ship Modin with Ray ### Describe the problem I think it makes sense to ship Modin with Ray. I suggest doing this similarly to how pyarrow is shipped with Ray. We don't need to rely on the dependencies of Modin, but some of the Modin source will have to be updated to make sure that the pandas version is correct.
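The test added in the diff above doubles as the usage this change enables; a near-verbatim sketch, assuming a Ray build that bundles the Modin wheel:

```python
# Mirrors python/ray/test/test_modin.py from the diff: importing ray first
# places its vendored modin/ directory on sys.path, so modin.pandas resolves
# to the bundled copy rather than a separately installed one.
import ray  # noqa: F401
import modin.pandas as pd

frame_data = [1, 2, 3, 4, 5, 6, 7, 8]
frame = pd.DataFrame(frame_data)
assert frame.sum().squeeze() == sum(frame_data)
```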
web2py__web2py-1871
[ { "content": "#!/usr/bin/env python\n\nfrom setuptools import setup\nfrom gluon.fileutils import tar, untar, read_file, write_file\nimport tarfile\nimport sys\n\n\ndef tar(file, filelist, expression='^.+$'):\n \"\"\"\n tars dir/files into file, only tars file that match expression\n \"\"\"\n\n tar = tarfile.TarFile(file, 'w')\n try:\n for element in filelist:\n try:\n for file in listdir(element, expression, add_dirs=True):\n tar.add(os.path.join(element, file), file, False)\n except:\n tar.add(element)\n finally:\n tar.close()\n\n\ndef start():\n if 'sdist' in sys.argv:\n tar('gluon/env.tar', ['applications', 'VERSION',\n 'extras/icons/splashlogo.gif'])\n\n setup(name='web2py',\n version=read_file(\"VERSION\").split()[1],\n description=\"\"\"full-stack framework for rapid development and prototyping\n of secure database-driven web-based applications, written and\n programmable in Python.\"\"\",\n long_description=\"\"\"\n Everything in one package with no dependencies. Development, deployment,\n debugging, testing, database administration and maintenance of applications can\n be done via the provided web interface. web2py has no configuration files,\n requires no installation, can run off a USB drive. web2py uses Python for the\n Model, the Views and the Controllers, has a built-in ticketing system to manage\n errors, an internationalization engine, works with SQLite, PostgreSQL, MySQL,\n MSSQL, FireBird, Oracle, IBM DB2, Informix, Ingres, sybase and Google App Engine via a\n Database Abstraction Layer. web2py includes libraries to handle\n HTML/XML, RSS, ATOM, CSV, RTF, JSON, AJAX, XMLRPC, WIKI markup. Production\n ready, capable of upload/download streaming of very large files, and always\n backward compatible.\n \"\"\",\n author='Massimo Di Pierro',\n author_email='[email protected]',\n license='http://web2py.com/examples/default/license',\n classifiers=[\"Development Status :: 5 - Production/Stable\"],\n url='http://web2py.com',\n platforms='Windows, Linux, Mac, Unix,Windows Mobile',\n packages=['gluon',\n 'gluon/contrib',\n 'gluon/contrib/gateways',\n 'gluon/contrib/login_methods',\n 'gluon/contrib/markdown',\n 'gluon/contrib/markmin',\n 'gluon/contrib/memcache',\n 'gluon/contrib/fpdf',\n 'gluon/contrib/pymysql',\n 'gluon/contrib/pyrtf',\n 'gluon/contrib/pysimplesoap',\n 'gluon/contrib/pg8000',\n 'gluon/contrib/plural_rules',\n 'gluon/contrib/minify',\n 'gluon/contrib/pyaes',\n 'gluon/contrib/pyuca',\n 'gluon/tests',\n ],\n package_data={'gluon': ['env.tar']},\n# scripts=['w2p_apps', 'w2p_run', 'w2p_clone'],\n )\n\nif __name__ == '__main__':\n #print \"web2py does not require installation and\"\n #print \"you should just start it with:\"\n #print\n #print \"$ python web2py.py\"\n #print\n #print \"are you sure you want to install it anyway (y/n)?\"\n #s = raw_input('>')\n #if s.lower()[:1]=='y':\n start()\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n\nfrom setuptools import setup\nfrom gluon.fileutils import tar, untar, read_file, write_file\nimport tarfile\nimport sys\n\n\ndef tar(file, filelist, expression='^.+$'):\n \"\"\"\n tars dir/files into file, only tars file that match expression\n \"\"\"\n\n tar = tarfile.TarFile(file, 'w')\n try:\n for element in filelist:\n try:\n for file in listdir(element, expression, add_dirs=True):\n tar.add(os.path.join(element, file), file, False)\n except:\n tar.add(element)\n finally:\n tar.close()\n\n\ndef start():\n if 'sdist' in sys.argv:\n tar('gluon/env.tar', ['applications', 'VERSION',\n 'extras/icons/splashlogo.gif'])\n\n setup(name='web2py',\n version=read_file(\"VERSION\").split()[1],\n description=\"\"\"full-stack framework for rapid development and prototyping\n of secure database-driven web-based applications, written and\n programmable in Python.\"\"\",\n long_description=\"\"\"\n Everything in one package with no dependencies. Development, deployment,\n debugging, testing, database administration and maintenance of applications can\n be done via the provided web interface. web2py has no configuration files,\n requires no installation, can run off a USB drive. web2py uses Python for the\n Model, the Views and the Controllers, has a built-in ticketing system to manage\n errors, an internationalization engine, works with SQLite, PostgreSQL, MySQL,\n MSSQL, FireBird, Oracle, IBM DB2, Informix, Ingres, sybase and Google App Engine via a\n Database Abstraction Layer. web2py includes libraries to handle\n HTML/XML, RSS, ATOM, CSV, RTF, JSON, AJAX, XMLRPC, WIKI markup. Production\n ready, capable of upload/download streaming of very large files, and always\n backward compatible.\n \"\"\",\n author='Massimo Di Pierro',\n author_email='[email protected]',\n license='http://web2py.com/examples/default/license',\n classifiers=[\"Development Status :: 5 - Production/Stable\"],\n url='http://web2py.com',\n platforms='Windows, Linux, Mac, Unix,Windows Mobile',\n packages=['gluon',\n 'gluon/contrib',\n 'gluon/contrib/gateways',\n 'gluon/contrib/login_methods',\n 'gluon/contrib/markdown',\n 'gluon/contrib/markmin',\n 'gluon/contrib/memcache',\n 'gluon/contrib/fpdf',\n 'gluon/contrib/pymysql',\n 'gluon/contrib/pyrtf',\n 'gluon/contrib/pysimplesoap',\n 'gluon/contrib/plural_rules',\n 'gluon/contrib/minify',\n 'gluon/contrib/pyaes',\n 'gluon/contrib/pyuca',\n 'gluon/tests',\n ],\n package_data={'gluon': ['env.tar']},\n# scripts=['w2p_apps', 'w2p_run', 'w2p_clone'],\n )\n\nif __name__ == '__main__':\n #print \"web2py does not require installation and\"\n #print \"you should just start it with:\"\n #print\n #print \"$ python web2py.py\"\n #print\n #print \"are you sure you want to install it anyway (y/n)?\"\n #s = raw_input('>')\n #if s.lower()[:1]=='y':\n start()\n", "path": "setup.py" } ]
diff --git a/gluon/tests/test_dal.py b/gluon/tests/test_dal.py index d4e09df3a..52701641e 100644 --- a/gluon/tests/test_dal.py +++ b/gluon/tests/test_dal.py @@ -107,11 +107,3 @@ def test_mysql(self): os.environ["DB"] = "mysql://root:@localhost/pydal" result = self._run_tests() self.assertTrue(result) - - def test_pg8000(self): - if os.environ.get('APPVEYOR'): - return - if os.environ.get('TRAVIS'): - os.environ["DB"] = "postgres:pg8000://postgres:@localhost/pydal" - result = self._run_tests() - self.assertTrue(result) diff --git a/setup.py b/setup.py index 499cf4c29..1bb1f3b11 100644 --- a/setup.py +++ b/setup.py @@ -63,7 +63,6 @@ def start(): 'gluon/contrib/pymysql', 'gluon/contrib/pyrtf', 'gluon/contrib/pysimplesoap', - 'gluon/contrib/pg8000', 'gluon/contrib/plural_rules', 'gluon/contrib/minify', 'gluon/contrib/pyaes',
contrib/pg8000 is old and causes weird postgres errors Please update the contrib/pg8000 driver to the current version. Otherwise errors like Broken Pipe and OperationalError occur, at least for postgres 9.6, and especially for long-running tasks (i.e. the scheduler), where they are not properly handled (scheduler workers restart and the earlier run remains marked as RUNNING). related links: https://github.com/mfenniak/pg8000/issues/73 https://groups.google.com/forum/#!topic/web2py/HAeJevtGtdU Cross-posted to issues: web2py/web2py, web2py/pydal
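With the vendored copy dropped from packaging, the driver would come from PyPI instead; a sketch of selecting it explicitly through pydal, reusing the connection URI from the test removed in the diff (the local database name and empty password are assumptions, and a reachable postgres server is required):

```python
# Sketch only: requires `pip install pg8000` and a running postgres instance.
from pydal import DAL

# URI copied from the removed test_pg8000 case; credentials/DB name assumed.
db = DAL('postgres:pg8000://postgres:@localhost/pydal', migrate=False)
db.close()
```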
holoviz__panel-1044
[ { "content": "\"\"\"\nContains Image panes including renderers for PNG, SVG, GIF and JPG\nfile types.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport base64\n\nfrom io import BytesIO\nfrom six import string_types\n\nimport param\n\nfrom .markup import escape, DivPaneBase\nfrom ..util import isfile, isurl\n\n\nclass ImageBase(DivPaneBase):\n \"\"\"\n Encodes an image as base64 and wraps it in a Bokeh Div model.\n This is an abstract base class that needs the image type\n to be specified and specific code for determining the image shape.\n\n The imgtype determines the filetype, extension, and MIME type for\n this image. Each image type (png,jpg,gif) has a base class that\n supports anything with a `_repr_X_` method (where X is `png`,\n `gif`, etc.), a local file with the given file extension, or a\n HTTP(S) url with the given extension. Subclasses of each type can\n provide their own way of obtaining or generating a PNG.\n \"\"\"\n\n alt_text = param.String(default=None, doc=\"\"\"\n alt text to add to the image tag. The alt text is shown when a\n user cannot load or display the image.\"\"\")\n\n link_url = param.String(default=None, doc=\"\"\"\n A link URL to make the image clickable and link to some other\n website.\"\"\")\n\n embed = param.Boolean(default=True, doc=\"\"\"\n Whether to embed the image as base64.\"\"\")\n\n imgtype = 'None'\n\n _rerender_params = ['alt_text', 'link_url', 'embed', 'object', 'style']\n\n _target_transforms = {'object': \"\"\"'<img src=\"' + value + '\"></img>'\"\"\"}\n\n __abstract = True\n\n @classmethod\n def applies(cls, obj):\n imgtype = cls.imgtype\n if hasattr(obj, '_repr_{}_'.format(imgtype)):\n return True\n if isinstance(obj, string_types):\n if isfile(obj) and obj.endswith('.'+imgtype):\n return True\n if isurl(obj, [cls.imgtype]):\n return True\n if hasattr(obj, 'read'): # Check for file like object\n return True\n return False\n\n def _type_error(self, object):\n if isinstance(object, string_types):\n raise ValueError(\"%s pane cannot parse string that is not a filename \"\n \"or URL.\" % type(self).__name__)\n super(ImageBase, self)._type_error(object)\n\n def _img(self):\n if hasattr(self.object, '_repr_{}_'.format(self.imgtype)):\n return getattr(self.object, '_repr_' + self.imgtype + '_')()\n if isinstance(self.object, string_types):\n if isfile(self.object):\n with open(self.object, 'rb') as f:\n return f.read()\n if hasattr(self.object, 'read'):\n return self.object.read()\n if isurl(self.object, [self.imgtype]):\n import requests\n r = requests.request(url=self.object, method='GET')\n return r.content\n\n def _imgshape(self, data):\n \"\"\"Calculate and return image width,height\"\"\"\n raise NotImplementedError\n\n def _get_properties(self):\n p = super(ImageBase, self)._get_properties()\n if self.object is None:\n return dict(p, text='<img></img>')\n data = self._img()\n if not isinstance(data, bytes):\n data = base64.b64decode(data)\n width, height = self._imgshape(data)\n if self.width is not None:\n if self.height is None:\n height = int((self.width/width)*height)\n else:\n height = self.height\n width = self.width\n elif self.height is not None:\n width = int((self.height/height)*width)\n height = self.height\n if not self.embed:\n src = self.object\n else:\n b64 = base64.b64encode(data).decode(\"utf-8\")\n src = \"data:image/\"+self.imgtype+\";base64,{b64}\".format(b64=b64)\n\n smode = self.sizing_mode\n if smode in ['fixed', None]:\n w, h = '%spx' % width, '%spx' % height\n elif smode == 
'stretch_both':\n w, h = '100%', '100%'\n elif smode == 'stretch_height':\n w, h = '%spx' % width, '100%'\n elif smode == 'stretch_height':\n w, h = '100%', '%spx' % height\n elif smode == 'scale_height':\n w, h = 'auto', '100%'\n else:\n w, h = '100%', 'auto'\n\n html = '<img src=\"{src}\" width=\"{width}\" height=\"{height}\" alt=\"{alt}\"></img>'.format(\n src=src, width=w, height=h, alt=self.alt_text or '')\n\n if self.link_url:\n html = '<a href=\"{url}\" target=\"_blank\">{html}</a>'.format(\n url=self.link_url, html=html)\n\n return dict(p, width=width, height=height, text=escape(html))\n\n\nclass PNG(ImageBase):\n\n imgtype = 'png'\n\n @classmethod\n def _imgshape(cls, data):\n import struct\n w, h = struct.unpack('>LL', data[16:24])\n return int(w), int(h)\n\n\nclass GIF(ImageBase):\n\n imgtype = 'gif'\n\n @classmethod\n def _imgshape(cls, data):\n import struct\n w, h = struct.unpack(\"<HH\", data[6:10])\n return int(w), int(h)\n\n\nclass JPG(ImageBase):\n\n imgtype = 'jpg'\n\n @classmethod\n def _imgshape(cls, data):\n import struct\n b = BytesIO(data)\n b.read(2)\n c = b.read(1)\n while (c and ord(c) != 0xDA):\n while (ord(c) != 0xFF): c = b.read(1)\n while (ord(c) == 0xFF): c = b.read(1)\n if (ord(c) >= 0xC0 and ord(c) <= 0xC3):\n b.read(3)\n h, w = struct.unpack(\">HH\", b.read(4))\n break\n else:\n b.read(int(struct.unpack(\">H\", b.read(2))[0])-2)\n c = b.read(1)\n return int(w), int(h)\n\n\nclass SVG(ImageBase):\n\n encode = param.Boolean(default=False, doc=\"\"\"\n Whether to enable base64 encoding of the SVG, base64 encoded\n SVGs do not support links.\"\"\")\n\n imgtype = 'svg'\n\n _rerender_params = ImageBase._rerender_params + ['encode']\n\n @classmethod\n def applies(cls, obj):\n return (super(SVG, cls).applies(obj) or\n (isinstance(obj, string_types) and obj.lstrip().startswith('<svg')))\n\n def _type_error(self, object):\n if isinstance(object, string_types):\n raise ValueError(\"%s pane cannot parse string that is not a filename, \"\n \"URL or a SVG XML contents.\" % type(self).__name__)\n super(SVG, self)._type_error(object)\n\n def _img(self):\n if (isinstance(self.object, string_types) and\n self.object.lstrip().startswith('<svg')):\n return self.object\n return super(SVG, self)._img()\n\n def _imgshape(self, data):\n return (self.width, self.height)\n\n def _get_properties(self):\n p = super(ImageBase, self)._get_properties()\n if self.object is None:\n return dict(p, text='<img></img>')\n data = self._img()\n width, height = self._imgshape(data)\n if not isinstance(data, bytes):\n data = data.encode('utf-8')\n\n if self.encode:\n b64 = base64.b64encode(data).decode(\"utf-8\")\n src = \"data:image/svg+xml;base64,{b64}\".format(b64=b64)\n html = \"<img src='{src}' width={width} height={height}></img>\".format(\n src=src, width=width, height=height\n )\n else:\n html = data.decode(\"utf-8\")\n return dict(p, width=width, height=height, text=escape(html))\n", "path": "panel/pane/image.py" } ]
[ { "content": "\"\"\"\nContains Image panes including renderers for PNG, SVG, GIF and JPG\nfile types.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport base64\n\nfrom io import BytesIO\nfrom six import string_types\n\nimport param\n\nfrom .markup import escape, DivPaneBase\nfrom ..util import isfile, isurl\n\n\nclass ImageBase(DivPaneBase):\n \"\"\"\n Encodes an image as base64 and wraps it in a Bokeh Div model.\n This is an abstract base class that needs the image type\n to be specified and specific code for determining the image shape.\n\n The imgtype determines the filetype, extension, and MIME type for\n this image. Each image type (png,jpg,gif) has a base class that\n supports anything with a `_repr_X_` method (where X is `png`,\n `gif`, etc.), a local file with the given file extension, or a\n HTTP(S) url with the given extension. Subclasses of each type can\n provide their own way of obtaining or generating a PNG.\n \"\"\"\n\n alt_text = param.String(default=None, doc=\"\"\"\n alt text to add to the image tag. The alt text is shown when a\n user cannot load or display the image.\"\"\")\n\n link_url = param.String(default=None, doc=\"\"\"\n A link URL to make the image clickable and link to some other\n website.\"\"\")\n\n embed = param.Boolean(default=True, doc=\"\"\"\n Whether to embed the image as base64.\"\"\")\n\n imgtype = 'None'\n\n _rerender_params = ['alt_text', 'link_url', 'embed', 'object', 'style']\n\n _target_transforms = {'object': \"\"\"'<img src=\"' + value + '\"></img>'\"\"\"}\n\n __abstract = True\n\n @classmethod\n def applies(cls, obj):\n imgtype = cls.imgtype\n if hasattr(obj, '_repr_{}_'.format(imgtype)):\n return True\n if isinstance(obj, string_types):\n if isfile(obj) and obj.endswith('.'+imgtype):\n return True\n if isurl(obj, [cls.imgtype]):\n return True\n if hasattr(obj, 'read'): # Check for file like object\n return True\n return False\n\n def _type_error(self, object):\n if isinstance(object, string_types):\n raise ValueError(\"%s pane cannot parse string that is not a filename \"\n \"or URL.\" % type(self).__name__)\n super(ImageBase, self)._type_error(object)\n\n def _img(self):\n if hasattr(self.object, '_repr_{}_'.format(self.imgtype)):\n return getattr(self.object, '_repr_' + self.imgtype + '_')()\n if isinstance(self.object, string_types):\n if isfile(self.object):\n with open(self.object, 'rb') as f:\n return f.read()\n if hasattr(self.object, 'read'):\n return self.object.read()\n if isurl(self.object, [self.imgtype]):\n import requests\n r = requests.request(url=self.object, method='GET')\n return r.content\n\n def _imgshape(self, data):\n \"\"\"Calculate and return image width,height\"\"\"\n raise NotImplementedError\n\n def _get_properties(self):\n p = super(ImageBase, self)._get_properties()\n if self.object is None:\n return dict(p, text='<img></img>')\n data = self._img()\n if not isinstance(data, bytes):\n data = base64.b64decode(data)\n width, height = self._imgshape(data)\n if self.width is not None:\n if self.height is None:\n height = int((self.width/width)*height)\n else:\n height = self.height\n width = self.width\n elif self.height is not None:\n width = int((self.height/height)*width)\n height = self.height\n if not self.embed:\n src = self.object\n else:\n b64 = base64.b64encode(data).decode(\"utf-8\")\n src = \"data:image/\"+self.imgtype+\";base64,{b64}\".format(b64=b64)\n\n smode = self.sizing_mode\n if smode in ['fixed', None]:\n w, h = '%spx' % width, '%spx' % height\n elif smode == 
'stretch_both':\n w, h = '100%', '100%'\n elif smode == 'stretch_width':\n w, h = '%spx' % width, '100%'\n elif smode == 'stretch_height':\n w, h = '100%', '%spx' % height\n elif smode == 'scale_height':\n w, h = 'auto', '100%'\n else:\n w, h = '100%', 'auto'\n\n html = '<img src=\"{src}\" width=\"{width}\" height=\"{height}\" alt=\"{alt}\"></img>'.format(\n src=src, width=w, height=h, alt=self.alt_text or '')\n\n if self.link_url:\n html = '<a href=\"{url}\" target=\"_blank\">{html}</a>'.format(\n url=self.link_url, html=html)\n\n return dict(p, width=width, height=height, text=escape(html))\n\n\nclass PNG(ImageBase):\n\n imgtype = 'png'\n\n @classmethod\n def _imgshape(cls, data):\n import struct\n w, h = struct.unpack('>LL', data[16:24])\n return int(w), int(h)\n\n\nclass GIF(ImageBase):\n\n imgtype = 'gif'\n\n @classmethod\n def _imgshape(cls, data):\n import struct\n w, h = struct.unpack(\"<HH\", data[6:10])\n return int(w), int(h)\n\n\nclass JPG(ImageBase):\n\n imgtype = 'jpg'\n\n @classmethod\n def _imgshape(cls, data):\n import struct\n b = BytesIO(data)\n b.read(2)\n c = b.read(1)\n while (c and ord(c) != 0xDA):\n while (ord(c) != 0xFF): c = b.read(1)\n while (ord(c) == 0xFF): c = b.read(1)\n if (ord(c) >= 0xC0 and ord(c) <= 0xC3):\n b.read(3)\n h, w = struct.unpack(\">HH\", b.read(4))\n break\n else:\n b.read(int(struct.unpack(\">H\", b.read(2))[0])-2)\n c = b.read(1)\n return int(w), int(h)\n\n\nclass SVG(ImageBase):\n\n encode = param.Boolean(default=False, doc=\"\"\"\n Whether to enable base64 encoding of the SVG, base64 encoded\n SVGs do not support links.\"\"\")\n\n imgtype = 'svg'\n\n _rerender_params = ImageBase._rerender_params + ['encode']\n\n @classmethod\n def applies(cls, obj):\n return (super(SVG, cls).applies(obj) or\n (isinstance(obj, string_types) and obj.lstrip().startswith('<svg')))\n\n def _type_error(self, object):\n if isinstance(object, string_types):\n raise ValueError(\"%s pane cannot parse string that is not a filename, \"\n \"URL or a SVG XML contents.\" % type(self).__name__)\n super(SVG, self)._type_error(object)\n\n def _img(self):\n if (isinstance(self.object, string_types) and\n self.object.lstrip().startswith('<svg')):\n return self.object\n return super(SVG, self)._img()\n\n def _imgshape(self, data):\n return (self.width, self.height)\n\n def _get_properties(self):\n p = super(ImageBase, self)._get_properties()\n if self.object is None:\n return dict(p, text='<img></img>')\n data = self._img()\n width, height = self._imgshape(data)\n if not isinstance(data, bytes):\n data = data.encode('utf-8')\n\n if self.encode:\n b64 = base64.b64encode(data).decode(\"utf-8\")\n src = \"data:image/svg+xml;base64,{b64}\".format(b64=b64)\n html = \"<img src='{src}' width={width} height={height}></img>\".format(\n src=src, width=width, height=height\n )\n else:\n html = data.decode(\"utf-8\")\n return dict(p, width=width, height=height, text=escape(html))\n", "path": "panel/pane/image.py" } ]
diff --git a/panel/pane/image.py b/panel/pane/image.py index eae3441a97..547ad17c4d 100644 --- a/panel/pane/image.py +++ b/panel/pane/image.py @@ -114,7 +114,7 @@ def _get_properties(self): w, h = '%spx' % width, '%spx' % height elif smode == 'stretch_both': w, h = '100%', '100%' - elif smode == 'stretch_height': + elif smode == 'stretch_width': w, h = '%spx' % width, '100%' elif smode == 'stretch_height': w, h = '100%', '%spx' % height
Support src URLs and alt texts on Images #### My Pain I would like to use images in Panel via `pn.pane.Markdown` and/or `pn.pane.PNG`. Currently the Bokeh layout engine does not lay out markdown with images well. See https://github.com/holoviz/panel/issues/835. So I need to stick to `pn.pane.PNG` for images. But the `ImageBase` class does not support parameters like the `src` URL and `alt` text from the HTML `img` tag. So I cannot provide image links or alt texts. #### Solution Add `src` and `alt` parameters to the `ImageBase` class.
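For reference, the `ImageBase` subclasses shown in the module above already expose `alt_text`, `link_url` and `embed`; a usage sketch in which the file name and URL are placeholders:

```python
# Placeholder file and URL; parameter names match the ImageBase definition above.
import panel as pn

logo = pn.pane.PNG(
    'cover.png',
    alt_text='Project cover image',  # rendered into the <img alt="..."> attribute
    link_url='https://example.com',  # wraps the image in <a target="_blank">
    embed=True,                      # inline the bytes as a base64 data URI
    width=300,
)
logo.servable()
```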
quantumlib__Cirq-423
[ { "content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"An optimization pass that combines adjacent single-qubit rotations.\"\"\"\n\nfrom typing import List, Tuple, Optional, cast\n\nimport numpy as np\n\nfrom cirq import ops\nfrom cirq.circuits import (\n Circuit,\n PointOptimizer,\n PointOptimizationSummary,\n)\nfrom cirq.extension import Extensions\nfrom cirq.google.decompositions import two_qubit_matrix_to_native_gates\n\n\nclass MergeInteractions(PointOptimizer):\n \"\"\"Combines adjacent constant single-qubit rotations.\"\"\"\n\n def __init__(self,\n tolerance: float = 1e-8,\n allow_partial_czs: bool = True,\n extensions: Extensions = None) -> None:\n self.tolerance = tolerance\n self.allow_partial_czs = allow_partial_czs\n self.extensions = extensions or Extensions()\n\n def optimization_at(self, circuit, index, op):\n if len(op.qubits) != 2:\n return None\n\n interaction_count, indices, matrix = (\n self._scan_two_qubit_ops_into_matrix(circuit, index, op.qubits))\n if interaction_count <= 1:\n return None\n\n # Find a max-3-cz construction.\n operations = two_qubit_matrix_to_native_gates(\n op.qubits[0],\n op.qubits[1],\n matrix,\n self.allow_partial_czs,\n self.tolerance)\n\n # TODO: don't replace if there's no benefit in CZ depth.\n\n return PointOptimizationSummary(\n clear_span=max(indices) + 1 - index,\n clear_qubits=op.qubits,\n new_operations=operations)\n\n def _op_to_matrix(self,\n op: ops.Operation,\n qubits: Tuple[ops.QubitId, ...]\n ) -> Optional[Tuple[np.ndarray, bool]]:\n \"\"\"Determines the effect of an operation on the given qubits.\n\n The operation must be a 1-qubit operation on one of the given qubits,\n or a 2-qubit operation on both of the given qubits. Also, the operation\n must have a known matrix. Otherwise None is returned.\n\n Args:\n op: The operation to understand.\n qubits: The qubits we care about. 
Order determines matrix tensor\n order.\n\n Returns:\n None, or else a tuple containing a matrix equivalent to the effect\n of the operation and a boolean indicating if the operation is a\n 2-qubit interaction.\n \"\"\"\n q1, q2 = qubits\n\n known = self.extensions.try_cast(op.gate, ops.KnownMatrixGate)\n if known is None:\n return None\n m = known.matrix()\n\n if op.qubits == qubits:\n return m, True\n if op.qubits == (q2, q1):\n return MergeInteractions._flip_kron_order(m), True\n if op.qubits == (q1,):\n return np.kron(np.eye(2), m), False\n if op.qubits == (q2,):\n return np.kron(m, np.eye(2)), False\n\n return None\n\n def _scan_two_qubit_ops_into_matrix(\n self,\n circuit: Circuit,\n index: Optional[int],\n qubits: Tuple[ops.QubitId, ...]\n ) -> Tuple[int, List[int], np.ndarray]:\n \"\"\"Accumulates operations affecting the given pair of qubits.\n\n The scan terminates when it hits the end of the circuit, finds an\n operation without a known matrix, or finds an operation that interacts\n the given qubits with other qubits.\n\n Args:\n circuit: The circuit to scan for operations.\n index: The index to start scanning forward from.\n qubits: The pair of qubits we care about.\n\n Returns:\n A tuple containing:\n 0. The number of 2-qubit operations that were scanned.\n 1. The moment indices those operations were on.\n 2. A matrix equivalent to the effect of the scanned operations.\n \"\"\"\n\n product = np.eye(4, dtype=np.complex128)\n interaction_count = 0\n touched_indices = []\n\n while index is not None:\n operations = {circuit.operation_at(q, index) for q in qubits}\n op_data = [\n self._op_to_matrix(op, qubits)\n for op in operations\n if op\n ]\n\n # Stop at any non-constant or non-local interaction.\n if any(e is None for e in op_data):\n break\n present_op_data = cast(List[Tuple[np.ndarray, bool]], op_data)\n\n for op_mat, interacts in present_op_data:\n product = np.dot(op_mat, product)\n if interacts:\n interaction_count += 1\n\n touched_indices.append(index)\n index = circuit.next_moment_operating_on(qubits, index + 1)\n\n return interaction_count, touched_indices, product\n\n @staticmethod\n def _flip_kron_order(mat4x4: np.ndarray) -> np.ndarray:\n \"\"\"Given M = sum(kron(a_i, b_i)), returns M' = sum(kron(b_i, a_i)).\"\"\"\n result = np.array([[0] * 4] * 4, dtype=np.complex128)\n order = [0, 2, 1, 3]\n for i in range(4):\n for j in range(4):\n result[order[i], order[j]] = mat4x4[i, j]\n return result\n", "path": "cirq/google/merge_interactions.py" } ]
[ { "content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"An optimization pass that combines adjacent single-qubit rotations.\"\"\"\n\nfrom typing import List, Tuple, Optional, cast\n\nimport numpy as np\n\nfrom cirq import ops\nfrom cirq.circuits import (\n Circuit,\n PointOptimizer,\n PointOptimizationSummary,\n)\nfrom cirq.extension import Extensions\nfrom cirq.google.decompositions import two_qubit_matrix_to_native_gates\n\n\nclass MergeInteractions(PointOptimizer):\n \"\"\"Combines adjacent constant single-qubit rotations.\"\"\"\n\n def __init__(self,\n tolerance: float = 1e-8,\n allow_partial_czs: bool = True,\n extensions: Extensions = None) -> None:\n self.tolerance = tolerance\n self.allow_partial_czs = allow_partial_czs\n self.extensions = extensions or Extensions()\n\n def optimization_at(self, circuit, index, op):\n if len(op.qubits) != 2:\n return None\n\n interaction_count, indices, matrix = (\n self._scan_two_qubit_ops_into_matrix(circuit, index, op.qubits))\n if interaction_count <= 1:\n return None\n\n # Find a max-3-cz construction.\n operations = two_qubit_matrix_to_native_gates(\n op.qubits[0],\n op.qubits[1],\n matrix,\n self.allow_partial_czs,\n self.tolerance)\n\n # TODO: don't replace if there's no benefit in CZ depth.\n\n return PointOptimizationSummary(\n clear_span=max(indices) + 1 - index,\n clear_qubits=op.qubits,\n new_operations=operations)\n\n def _op_to_matrix(self,\n op: ops.Operation,\n qubits: Tuple[ops.QubitId, ...]\n ) -> Optional[Tuple[np.ndarray, bool]]:\n \"\"\"Determines the effect of an operation on the given qubits.\n\n The operation must be a 1-qubit operation on one of the given qubits,\n or a 2-qubit operation on both of the given qubits. Also, the operation\n must have a known matrix. Otherwise None is returned.\n\n Args:\n op: The operation to understand.\n qubits: The qubits we care about. 
Order determines matrix tensor\n order.\n\n Returns:\n None, or else a tuple containing a matrix equivalent to the effect\n of the operation and a boolean indicating if the operation is a\n 2-qubit interaction.\n \"\"\"\n q1, q2 = qubits\n\n known = self.extensions.try_cast(op.gate, ops.KnownMatrixGate)\n if known is None:\n return None\n m = known.matrix()\n\n if op.qubits == qubits:\n return m, True\n if op.qubits == (q2, q1):\n return MergeInteractions._flip_kron_order(m), True\n if op.qubits == (q1,):\n return np.kron(m, np.eye(2)), False\n if op.qubits == (q2,):\n return np.kron(np.eye(2), m), False\n\n return None\n\n def _scan_two_qubit_ops_into_matrix(\n self,\n circuit: Circuit,\n index: Optional[int],\n qubits: Tuple[ops.QubitId, ...]\n ) -> Tuple[int, List[int], np.ndarray]:\n \"\"\"Accumulates operations affecting the given pair of qubits.\n\n The scan terminates when it hits the end of the circuit, finds an\n operation without a known matrix, or finds an operation that interacts\n the given qubits with other qubits.\n\n Args:\n circuit: The circuit to scan for operations.\n index: The index to start scanning forward from.\n qubits: The pair of qubits we care about.\n\n Returns:\n A tuple containing:\n 0. The number of 2-qubit operations that were scanned.\n 1. The moment indices those operations were on.\n 2. A matrix equivalent to the effect of the scanned operations.\n \"\"\"\n\n product = np.eye(4, dtype=np.complex128)\n interaction_count = 0\n touched_indices = []\n\n while index is not None:\n operations = {circuit.operation_at(q, index) for q in qubits}\n op_data = [\n self._op_to_matrix(op, qubits)\n for op in operations\n if op\n ]\n\n # Stop at any non-constant or non-local interaction.\n if any(e is None for e in op_data):\n break\n present_op_data = cast(List[Tuple[np.ndarray, bool]], op_data)\n\n for op_mat, interacts in present_op_data:\n product = np.dot(op_mat, product)\n if interacts:\n interaction_count += 1\n\n touched_indices.append(index)\n index = circuit.next_moment_operating_on(qubits, index + 1)\n\n return interaction_count, touched_indices, product\n\n @staticmethod\n def _flip_kron_order(mat4x4: np.ndarray) -> np.ndarray:\n \"\"\"Given M = sum(kron(a_i, b_i)), returns M' = sum(kron(b_i, a_i)).\"\"\"\n result = np.array([[0] * 4] * 4, dtype=np.complex128)\n order = [0, 2, 1, 3]\n for i in range(4):\n for j in range(4):\n result[order[i], order[j]] = mat4x4[i, j]\n return result\n", "path": "cirq/google/merge_interactions.py" } ]
diff --git a/cirq/google/merge_interactions.py b/cirq/google/merge_interactions.py index 0af943cba29..b293038adba 100644 --- a/cirq/google/merge_interactions.py +++ b/cirq/google/merge_interactions.py @@ -95,9 +95,9 @@ def _op_to_matrix(self, if op.qubits == (q2, q1): return MergeInteractions._flip_kron_order(m), True if op.qubits == (q1,): - return np.kron(np.eye(2), m), False - if op.qubits == (q2,): return np.kron(m, np.eye(2)), False + if op.qubits == (q2,): + return np.kron(np.eye(2), m), False return None diff --git a/cirq/google/merge_interactions_test.py b/cirq/google/merge_interactions_test.py index 66e1c44886a..4a8651ae979 100644 --- a/cirq/google/merge_interactions_test.py +++ b/cirq/google/merge_interactions_test.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from cirq import testing from cirq import circuits from cirq import ops from cirq.google import ExpZGate, MergeInteractions, MergeRotations @@ -39,6 +40,16 @@ def assert_optimizes(before, after): print(after) assert before == after +def assert_optimization_not_broken(circuit): + """Check that the unitary matrix for the input circuit is the same (up to + global phase and rounding error) as the unitary matrix of the optimized + circuit.""" + u_before = circuit.to_unitary_matrix() + MergeInteractions().optimize_circuit(circuit) + u_after = circuit.to_unitary_matrix() + + testing.assert_allclose_up_to_global_phase(u_before, u_after, atol=1e-8) + def test_clears_paired_cnot(): q0 = ops.QubitId() @@ -84,3 +95,49 @@ def test_ignores_czs_separated_by_outer_cz(): circuits.Moment([ops.CZ(q00, q10)]), circuits.Moment([ops.CZ(q00, q01)]), ])) + + +def test_cnots_separated_by_single_gates_correct(): + q0 = ops.QubitId() + q1 = ops.QubitId() + assert_optimization_not_broken( + circuits.Circuit.from_ops( + ops.CNOT(q0, q1), + ops.H(q1), + ops.CNOT(q0, q1), + )) + + +def test_czs_separated_by_single_gates_correct(): + q0 = ops.QubitId() + q1 = ops.QubitId() + assert_optimization_not_broken( + circuits.Circuit.from_ops( + ops.CZ(q0, q1), + ops.X(q1), + ops.X(q1), + ops.X(q1), + ops.CZ(q0, q1), + )) + + +def test_inefficient_circuit_correct(): + t = 0.1 + v = 0.11 + q0 = ops.QubitId() + q1 = ops.QubitId() + assert_optimization_not_broken( + circuits.Circuit.from_ops( + ops.H(q1), + ops.CNOT(q0, q1), + ops.H(q1), + ops.CNOT(q0, q1), + ops.CNOT(q1, q0), + ops.H(q0), + ops.CNOT(q0, q1), + ops.Z(q0)**t, ops.Z(q1)**-t, + ops.CNOT(q0, q1), + ops.H(q0), ops.Z(q1)**v, + ops.CNOT(q0, q1), + ops.Z(q0)**-v, ops.Z(q1)**-v, + ))
MergeInteractions optimization is creating bad circuits Not sure how this made it past the tests yet. ```python def main(): circuit = make_inefficient_circuit() print("BEFORE:") print(circuit.to_unitary_matrix().round(3)) print() cirq.google.MergeInteractions().optimize_circuit(circuit) print("AFTER:") print(circuit.to_unitary_matrix().round(3)) def make_inefficient_circuit(t=0.1, v=0.11): from cirq import H, CNOT, Z a = cirq.NamedQubit('a') b = cirq.NamedQubit('b') return cirq.Circuit.from_ops( H(b), CNOT(a, b), H(b), CNOT(a, b), CNOT(b, a), H(a), CNOT(a, b), Z(a)**t, Z(b)**-t, CNOT(a, b), H(a), Z(b)**v, CNOT(a, b), Z(a)**-v, Z(b)**-v) ``` ``` BEFORE: [[ 1. -0.j 0. -0.j 0. +0.j 0. -0.j ] [-0. -0.j -0. -0.309j 0.951-0.j -0. +0.j ] [ 0. -0.j 0.951-0.j -0. -0.309j 0. +0.j ] [-0. +0.j 0. +0.j 0. -0.j -0.771+0.637j]] AFTER: [[-0.354-0.354j -0.354-0.354j -0.227-0.446j 0.227+0.446j] [-0.452-0.213j -0.452-0.213j 0.364+0.342j -0.364-0.342j] [-0.354-0.354j 0.354+0.354j -0.446-0.227j -0.446-0.227j] [-0.452-0.213j 0.452+0.213j 0.496+0.063j 0.496+0.063j]] ```
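The one-line fix in the diff is purely about Kronecker-factor ordering: for qubit order (q1, q2), a single-qubit matrix acting on q1 belongs in the first tensor factor. A plain numpy illustration of why the swap matters:

```python
# For qubit order (q1, q2): kron(m, I) acts on q1, kron(I, m) acts on q2.
# The pre-fix code had these swapped, silently applying gates to the wrong qubit.
import numpy as np

X = np.array([[0, 1], [1, 0]], dtype=complex)
I2 = np.eye(2, dtype=complex)

on_q1 = np.kron(X, I2)  # correct placement for an op on q1
on_q2 = np.kron(I2, X)  # correct placement for an op on q2
assert not np.allclose(on_q1, on_q2)
```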
kivy__kivy-4584
[ { "content": "'''\nWidget class\n============\n\nThe :class:`Widget` class is the base class required for creating Widgets.\nThis widget class was designed with a couple of principles in mind:\n\n* *Event Driven*\n\n Widget interaction is built on top of events that occur. If a property\n changes, the widget can respond to the change in the 'on_<propname>'\n callback. If nothing changes, nothing will be done. That's the main\n goal of the :class:`~kivy.properties.Property` class.\n\n* *Separation Of Concerns (the widget and its graphical representation)*\n\n Widgets don't have a `draw()` method. This is done on purpose: The idea\n is to allow you to create your own graphical representation outside the\n widget class.\n Obviously you can still use all the available properties to do that, so\n that your representation properly reflects the widget's current state.\n Every widget has its own :class:`~kivy.graphics.Canvas` that you\n can use to draw. This separation allows Kivy to run your\n application in a very efficient manner.\n\n* *Bounding Box / Collision*\n\n Often you want to know if a certain point is within the bounds of your\n widget. An example would be a button widget where you only want to\n trigger an action when the button itself is actually touched.\n For this, you can use the :meth:`~Widget.collide_point` method, which\n will return True if the point you pass to it is inside the axis-aligned\n bounding box defined by the widget's position and size.\n If a simple AABB is not sufficient, you can override the method to\n perform the collision checks with more complex shapes, e.g. a polygon.\n You can also check if a widget collides with another widget with\n :meth:`~Widget.collide_widget`.\n\n\nWe also have some default values and behaviors that you should be aware of:\n\n* A :class:`Widget` is not a :class:`~kivy.uix.layout.Layout`: it will not\n change the position or the size of its children. If you want control over\n positioning or sizing, use a :class:`~kivy.uix.layout.Layout`.\n\n* The default size of a widget is (100, 100). This is only changed if the\n parent is a :class:`~kivy.uix.layout.Layout`.\n For example, if you add a :class:`Label` inside a\n :class:`Button`, the label will not inherit the button's size or position\n because the button is not a *Layout*: it's just another *Widget*.\n\n* The default size_hint is (1, 1). If the parent is a :class:`Layout`, then the\n widget size will be the parent layout's size.\n\n* :meth:`~Widget.on_touch_down`, :meth:`~Widget.on_touch_move`,\n :meth:`~Widget.on_touch_up` don't do any sort of collisions. If you want to\n know if the touch is inside your widget, use :meth:`~Widget.collide_point`.\n\nUsing Properties\n----------------\n\nWhen you read the documentation, all properties are described in the format::\n\n <name> is a <property class> and defaults to <default value>.\n\ne.g.\n\n :attr:`~kivy.uix.label.Label.text` is a\n :class:`~kivy.properties.StringProperty` and defaults to ''.\n\nIf you want to be notified when the pos attribute changes, i.e. when the\nwidget moves, you can bind your own callback function like this::\n\n def callback_pos(instance, value):\n print('The widget', instance, 'moved to', value)\n\n wid = Widget()\n wid.bind(pos=callback_pos)\n\nRead more about :doc:`/api-kivy.properties`.\n\nBasic drawing\n-------------\n\nWidgets support a range of drawing instructions that you can use to customize\nthe look of your widgets and layouts. 
For example, to draw a background image\nfor your widget, you can do the following:\n\n.. code-block:: python\n\n def redraw(self, args):\n self.bg_rect.size = self.size\n self.bg_rect.pos = self.pos\n\n widget = Widget()\n with widget.canvas:\n widget.bg_rect = Rectangle(source=\"cover.jpg\", pos=self.pos, \\\nsize=self.size)\n widget.bind(pos=redraw, size=redraw)\n\nTo draw a background in kv:\n\n.. code-block:: kv\n\n Widget:\n canvas:\n Rectangle:\n source: \"cover.jpg\"\n size: self.size\n pos: self.pos\n\nThese examples only scratch the surface. Please see the :mod:`kivy.graphics`\ndocumentation for more information.\n\n.. _widget-event-bubbling:\n\nWidget touch event bubbling\n---------------------------\n\nWhen you catch touch events between multiple widgets, you often\nneed to be aware of the order in which these events are propagated. In Kivy,\nevents bubble up from the first child upwards through the other children.\nIf a widget has children, the event is passed through its children before\nbeing passed on to the widget after it.\n\nAs the :meth:`~kivy.uix.widget.Widget.on_touch_up` method inserts widgets at\nindex 0 by default, this means the event goes from the most recently added\nwidget back to the first one added. Consider the following:\n\n.. code-block:: python\n\n box = BoxLayout()\n box.add_widget(Label(text=\"a\"))\n box.add_widget(Label(text=\"b\"))\n box.add_widget(Label(text=\"c\"))\n\nThe label with text \"c\" gets the event first, \"b\" second and \"a\" last. You can\nreverse this order by manually specifying the index:\n\n.. code-block:: python\n\n box = BoxLayout()\n box.add_widget(Label(text=\"a\"), index=0)\n box.add_widget(Label(text=\"b\"), index=1)\n box.add_widget(Label(text=\"c\"), index=2)\n\nNow the order would be \"a\", \"b\" then \"c\". One thing to keep in mind when using\nkv is that declaring a widget uses the\n:meth:`~kivy.uix.widget.Widget.add_widget` method for insertion. Hence, using\n\n.. code-block:: kv\n\n BoxLayout:\n MyLabel:\n text: \"a\"\n MyLabel:\n text: \"b\"\n MyLabel:\n text: \"c\"\n\nwould result in the event order \"c\", \"b\" then \"a\" as \"c\" was actually the last\nadded widget. It thus has index 0, \"b\" index 1 and \"a\" index 2. Effectively,\nthe child order is the reverse of its listed order.\n\nThis ordering is the same for the :meth:`~kivy.uix.widget.Widget.on_touch_move`\nand :meth:`~kivy.uix.widget.Widget.on_touch_up` events.\n\nIn order to stop this event bubbling, a method can return `True`. This tells\nKivy the event has been handled and the event propagation stops. For example:\n\n.. code-block:: python\n\n class MyWidget(Widget):\n def on_touch_down(self, touch):\n If <some_condition>:\n # Do stuff here and kill the event\n return True\n else:\n return super(MyWidget, self).on_touch_down(touch)\n\nThis approach gives you good control over exactly how events are dispatched\nand managed. Sometimes, however, you may wish to let the event be completely\npropagated before taking action. You can use the\n:class:`~kivy.clock.Clock` to help you here:\n\n.. 
code-block:: python\n\n class MyWidget(Label):\n def on_touch_down(self, touch, after=False):\n if after:\n print \"Fired after the event has been dispatched!\"\n else:\n Clock.schedule_once(lambda dt: self.on_touch_down(touch, True))\n return super(MyWidget, self).on_touch_down(touch)\n\nUsage of :attr:`Widget.center`, :attr:`Widget.right`, and :attr:`Widget.top`\n----------------------------------------------------------------------------\n\nA common mistake when using one of the computed properties such as\n:attr:`Widget.right` is to use it to make a widget follow its parent with a\nKV rule such as `right: self.parent.right`. Consider, for example:\n\n.. code-block:: kv\n\n FloatLayout:\n id: layout\n width: 100\n Widget:\n id: wid\n right: layout.right\n\nThe (mistaken) expectation is that this rule ensures that wid's right will\nalways be whatever layout's right is - that is wid.right and layout.right will\nalways be identical. In actual fact, this rule only says that \"whenever\nlayout's `right` changes, wid's right will be set to that value\". The\ndifference being that as long as `layout.right` doesn't change, `wid.right`\ncould be anything, even a value that will make them different.\n\nSpecifically, for the KV code above, consider the following example::\n\n >>> print(layout.right, wid.right)\n (100, 100)\n >>> wid.x = 200\n >>> print(layout.right, wid.right)\n (100, 300)\n\nAs can be seen, initially they are in sync, however, when we change `wid.x`\nthey go out of sync because `layout.right` is not changed and the rule is not\ntriggered.\n\nThe proper way to make the widget follow its parent's right is to use\n:attr:`Widget.pos_hint`. If instead of `right: layout.right` we did\n`pos_hint: {'right': 1}`, then the widgets right will always be set to be\nat the parent's right at each layout update.\n'''\n\n__all__ = ('Widget', 'WidgetException')\n\nfrom kivy.event import EventDispatcher\nfrom kivy.factory import Factory\nfrom kivy.properties import (\n NumericProperty, StringProperty, AliasProperty, ReferenceListProperty,\n ObjectProperty, ListProperty, DictProperty, BooleanProperty)\nfrom kivy.graphics import (\n Canvas, Translate, Fbo, ClearColor, ClearBuffers, Scale)\nfrom kivy.graphics.transformation import Matrix\nfrom kivy.base import EventLoop\nfrom kivy.lang import Builder\nfrom kivy.context import get_current_context\nfrom kivy.weakproxy import WeakProxy\nfrom functools import partial\nfrom itertools import islice\n\n\n# References to all the widget destructors (partial method with widget uid as\n# key).\n_widget_destructors = {}\n\n\ndef _widget_destructor(uid, r):\n # Internal method called when a widget is deleted from memory. the only\n # thing we remember about it is its uid. Clear all the associated callbacks\n # created in kv language.\n del _widget_destructors[uid]\n Builder.unbind_widget(uid)\n\n\nclass WidgetException(Exception):\n '''Fired when the widget gets an exception.\n '''\n pass\n\n\nclass WidgetMetaclass(type):\n '''Metaclass to automatically register new widgets for the\n :class:`~kivy.factory.Factory`.\n\n .. warning::\n This metaclass is used by the Widget. Do not use it directly!\n '''\n def __init__(mcs, name, bases, attrs):\n super(WidgetMetaclass, mcs).__init__(name, bases, attrs)\n Factory.register(name, cls=mcs)\n\n\n#: Base class used for Widget, that inherits from :class:`EventDispatcher`\nWidgetBase = WidgetMetaclass('WidgetBase', (EventDispatcher, ), {})\n\n\nclass Widget(WidgetBase):\n '''Widget class. 
See module documentation for more information.\n\n :Events:\n `on_touch_down`:\n Fired when a new touch event occurs\n `on_touch_move`:\n Fired when an existing touch moves\n `on_touch_up`:\n Fired when an existing touch disappears\n\n .. warning::\n Adding a `__del__` method to a class derived from Widget with Python\n prior to 3.4 will disable automatic garbage collection for instances\n of that class. This is because the Widget class creates reference\n cycles, thereby `preventing garbage collection\n <https://docs.python.org/2/library/gc.html#gc.garbage>`_.\n\n .. versionchanged:: 1.0.9\n Everything related to event properties has been moved to the\n :class:`~kivy.event.EventDispatcher`. Event properties can now be used\n when contructing a simple class without subclassing :class:`Widget`.\n\n .. versionchanged:: 1.5.0\n The constructor now accepts on_* arguments to automatically bind\n callbacks to properties or events, as in the Kv language.\n '''\n\n __metaclass__ = WidgetMetaclass\n __events__ = ('on_touch_down', 'on_touch_move', 'on_touch_up')\n _proxy_ref = None\n\n def __init__(self, **kwargs):\n # Before doing anything, ensure the windows exist.\n EventLoop.ensure_window()\n\n # Assign the default context of the widget creation.\n if not hasattr(self, '_context'):\n self._context = get_current_context()\n\n no_builder = '__no_builder' in kwargs\n if no_builder:\n del kwargs['__no_builder']\n on_args = {k: v for k, v in kwargs.items() if k[:3] == 'on_'}\n for key in on_args:\n del kwargs[key]\n\n super(Widget, self).__init__(**kwargs)\n\n # Create the default canvas if it does not exist.\n if self.canvas is None:\n self.canvas = Canvas(opacity=self.opacity)\n\n # Apply all the styles.\n if not no_builder:\n Builder.apply(self, ignored_consts=self._kwargs_applied_init)\n\n # Bind all the events.\n if on_args:\n self.bind(**on_args)\n\n @property\n def proxy_ref(self):\n '''Return a proxy reference to the widget, i.e. without creating a\n reference to the widget. See `weakref.proxy\n <http://docs.python.org/2/library/weakref.html?highlight\\\n =proxy#weakref.proxy>`_ for more information.\n\n .. versionadded:: 1.7.2\n '''\n _proxy_ref = self._proxy_ref\n if _proxy_ref is not None:\n return _proxy_ref\n\n f = partial(_widget_destructor, self.uid)\n self._proxy_ref = _proxy_ref = WeakProxy(self, f)\n # Only f should be enough here, but it appears that is a very\n # specific case, the proxy destructor is not called if both f and\n # _proxy_ref are not together in a tuple.\n _widget_destructors[self.uid] = (f, _proxy_ref)\n return _proxy_ref\n\n def __hash__(self):\n return id(self)\n\n @property\n def __self__(self):\n return self\n\n #\n # Collision\n #\n def collide_point(self, x, y):\n '''\n Check if a point (x, y) is inside the widget's axis aligned bounding\n box.\n\n :Parameters:\n `x`: numeric\n x position of the point (in window coordinates)\n `y`: numeric\n y position of the point (in window coordinates)\n\n :Returns:\n A bool. True if the point is inside the bounding box, False\n otherwise.\n\n .. code-block:: python\n\n >>> Widget(pos=(10, 10), size=(50, 50)).collide_point(40, 40)\n True\n '''\n return self.x <= x <= self.right and self.y <= y <= self.top\n\n def collide_widget(self, wid):\n '''\n Check if another widget collides with this widget. This function\n performs an axis-aligned bounding box intersection test by default.\n\n :Parameters:\n `wid`: :class:`Widget` class\n Widget to collide with.\n\n :Returns:\n bool. 
True if the other widget collides with this widget, False\n otherwise.\n\n .. code-block:: python\n\n >>> wid = Widget(size=(50, 50))\n >>> wid2 = Widget(size=(50, 50), pos=(25, 25))\n >>> wid.collide_widget(wid2)\n True\n >>> wid2.pos = (55, 55)\n >>> wid.collide_widget(wid2)\n False\n '''\n if self.right < wid.x:\n return False\n if self.x > wid.right:\n return False\n if self.top < wid.y:\n return False\n if self.y > wid.top:\n return False\n return True\n\n #\n # Default event handlers\n #\n def on_touch_down(self, touch):\n '''Receive a touch down event.\n\n :Parameters:\n `touch`: :class:`~kivy.input.motionevent.MotionEvent` class\n Touch received. The touch is in parent coordinates. See\n :mod:`~kivy.uix.relativelayout` for a discussion on\n coordinate systems.\n\n :Returns:\n bool. If True, the dispatching of the touch event will stop.\n If False, the event will continue to be dispatched to the rest\n of the widget tree.\n '''\n if self.disabled and self.collide_point(*touch.pos):\n return True\n for child in self.children[:]:\n if child.dispatch('on_touch_down', touch):\n return True\n\n def on_touch_move(self, touch):\n '''Receive a touch move event. The touch is in parent coordinates.\n\n See :meth:`on_touch_down` for more information.\n '''\n if self.disabled:\n return\n for child in self.children[:]:\n if child.dispatch('on_touch_move', touch):\n return True\n\n def on_touch_up(self, touch):\n '''Receive a touch up event. The touch is in parent coordinates.\n\n See :meth:`on_touch_down` for more information.\n '''\n if self.disabled:\n return\n for child in self.children[:]:\n if child.dispatch('on_touch_up', touch):\n return True\n\n def on_disabled(self, instance, value):\n for child in self.children:\n child.disabled = value\n\n #\n # Tree management\n #\n def add_widget(self, widget, index=0, canvas=None):\n '''Add a new widget as a child of this widget.\n\n :Parameters:\n `widget`: :class:`Widget`\n Widget to add to our list of children.\n `index`: int, defaults to 0\n Index to insert the widget in the list. Notice that the default\n of 0 means the widget is inserted at the beginning of the list\n and will thus be drawn on top of other sibling widgets. For a\n full discussion of the index and widget hierarchy, please see\n the :doc:`Widgets Programming Guide <guide/widgets>`.\n\n .. versionadded:: 1.0.5\n `canvas`: str, defaults to None\n Canvas to add widget's canvas to. Can be 'before', 'after' or\n None for the default canvas.\n\n .. versionadded:: 1.9.0\n\n .. 
code-block:: python\n\n >>> from kivy.uix.button import Button\n >>> from kivy.uix.slider import Slider\n >>> root = Widget()\n >>> root.add_widget(Button())\n >>> slider = Slider()\n >>> root.add_widget(slider)\n\n '''\n if not isinstance(widget, Widget):\n raise WidgetException(\n 'add_widget() can be used only with instances'\n ' of the Widget class.')\n\n widget = widget.__self__\n if widget is self:\n raise WidgetException(\n 'Widget instances cannot be added to themselves.')\n parent = widget.parent\n # Check if the widget is already a child of another widget.\n if parent:\n raise WidgetException('Cannot add %r, it already has a parent %r'\n % (widget, parent))\n widget.parent = parent = self\n # Child will be disabled if added to a disabled parent.\n if parent.disabled:\n widget.disabled = True\n\n canvas = self.canvas.before if canvas == 'before' else \\\n self.canvas.after if canvas == 'after' else self.canvas\n\n if index == 0 or len(self.children) == 0:\n self.children.insert(0, widget)\n canvas.add(widget.canvas)\n else:\n canvas = self.canvas\n children = self.children\n if index >= len(children):\n index = len(children)\n next_index = 0\n else:\n next_child = children[index]\n next_index = canvas.indexof(next_child.canvas)\n if next_index == -1:\n next_index = canvas.length()\n else:\n next_index += 1\n\n children.insert(index, widget)\n # We never want to insert widget _before_ canvas.before.\n if next_index == 0 and canvas.has_before:\n next_index = 1\n canvas.insert(next_index, widget.canvas)\n\n def remove_widget(self, widget):\n '''Remove a widget from the children of this widget.\n\n :Parameters:\n `widget`: :class:`Widget`\n Widget to remove from our children list.\n\n .. code-block:: python\n\n >>> from kivy.uix.button import Button\n >>> root = Widget()\n >>> button = Button()\n >>> root.add_widget(button)\n >>> root.remove_widget(button)\n '''\n if widget not in self.children:\n return\n self.children.remove(widget)\n if widget.canvas in self.canvas.children:\n self.canvas.remove(widget.canvas)\n elif widget.canvas in self.canvas.after.children:\n self.canvas.after.remove(widget.canvas)\n elif widget.canvas in self.canvas.before.children:\n self.canvas.before.remove(widget.canvas)\n widget.parent = None\n\n def clear_widgets(self, children=None):\n '''\n Remove all (or the specified) :attr:`~Widget.children` of this widget.\n If the 'children' argument is specified, it should be a list (or\n filtered list) of children of the current widget.\n\n .. versionchanged:: 1.8.0\n The `children` argument can be used to specify the children you\n want to remove.\n '''\n\n if not children:\n children = self.children\n remove_widget = self.remove_widget\n for child in children[:]:\n remove_widget(child)\n\n def export_to_png(self, filename, *args):\n '''Saves an image of the widget and its children in png format at the\n specified filename. Works by removing the widget canvas from its\n parent, rendering to an :class:`~kivy.graphics.fbo.Fbo`, and calling\n :meth:`~kivy.graphics.texture.Texture.save`.\n\n .. note::\n\n The image includes only this widget and its children. If you want\n to include widgets elsewhere in the tree, you must call\n :meth:`~Widget.export_to_png` from their common parent, or use\n :meth:`~kivy.core.window.WindowBase.screenshot` to capture the whole\n window.\n\n .. note::\n\n The image will be saved in png format, you should include the\n extension in your filename.\n\n .. 
versionadded:: 1.9.0\n '''\n\n if self.parent is not None:\n canvas_parent_index = self.parent.canvas.indexof(self.canvas)\n if canvas_parent_index > -1:\n self.parent.canvas.remove(self.canvas)\n\n fbo = Fbo(size=self.size, with_stencilbuffer=True)\n\n with fbo:\n ClearColor(0, 0, 0, 1)\n ClearBuffers()\n Scale(1, -1, 1)\n Translate(-self.x, -self.y - self.height, 0)\n\n fbo.add(self.canvas)\n fbo.draw()\n fbo.texture.save(filename, flipped=False)\n fbo.remove(self.canvas)\n\n if self.parent is not None and canvas_parent_index > -1:\n self.parent.canvas.insert(canvas_parent_index, self.canvas)\n\n return True\n\n def get_root_window(self):\n '''Return the root window.\n\n :Returns:\n Instance of the root window. Can be a\n :class:`~kivy.core.window.WindowBase` or\n :class:`Widget`.\n '''\n if self.parent:\n return self.parent.get_root_window()\n\n def get_parent_window(self):\n '''Return the parent window.\n\n :Returns:\n Instance of the parent window. Can be a\n :class:`~kivy.core.window.WindowBase` or\n :class:`Widget`.\n '''\n if self.parent:\n return self.parent.get_parent_window()\n\n def _walk(self, restrict=False, loopback=False, index=None):\n # We pass index only when we are going on the parent\n # so don't yield the parent as well.\n if index is None:\n index = len(self.children)\n yield self\n\n for child in reversed(self.children[:index]):\n for walk_child in child._walk(restrict=True):\n yield walk_child\n\n # If we want to continue with our parent, just do it.\n if not restrict:\n parent = self.parent\n try:\n if parent is None or not isinstance(parent, Widget):\n raise ValueError\n index = parent.children.index(self)\n except ValueError:\n # Self is root, if we want to loopback from the first element:\n if not loopback:\n return\n # If we started with root (i.e. index==None), then we have to\n # start from root again, so we return self again. Otherwise, we\n # never returned it, so return it now starting with it.\n parent = self\n index = None\n for walk_child in parent._walk(loopback=loopback, index=index):\n yield walk_child\n\n def walk(self, restrict=False, loopback=False):\n ''' Iterator that walks the widget tree starting with this widget and\n goes forward returning widgets in the order in which layouts display\n them.\n\n :Parameters:\n `restrict`: bool, defaults to False\n If True, it will only iterate through the widget and its\n children (or children of its children etc.). Defaults to False.\n `loopback`: bool, defaults to False\n If True, when the last widget in the tree is reached,\n it'll loop back to the uppermost root and start walking until\n we hit this widget again. Naturally, it can only loop back when\n `restrict` is False. Defaults to False.\n\n :return:\n A generator that walks the tree, returning widgets in the\n forward layout order.\n\n For example, given a tree with the following structure:\n\n .. code-block:: kv\n\n GridLayout:\n Button\n BoxLayout:\n id: box\n Widget\n Button\n Widget\n\n walking this tree:\n\n .. 
code-block:: python\n\n >>> # Call walk on box with loopback True, and restrict False\n >>> [type(widget) for widget in box.walk(loopback=True)]\n [<class 'BoxLayout'>, <class 'Widget'>, <class 'Button'>,\n <class 'Widget'>, <class 'GridLayout'>, <class 'Button'>]\n >>> # Now with loopback False, and restrict False\n >>> [type(widget) for widget in box.walk()]\n [<class 'BoxLayout'>, <class 'Widget'>, <class 'Button'>,\n <class 'Widget'>]\n >>> # Now with restrict True\n >>> [type(widget) for widget in box.walk(restrict=True)]\n [<class 'BoxLayout'>, <class 'Widget'>, <class 'Button'>]\n\n .. versionadded:: 1.9.0\n '''\n gen = self._walk(restrict, loopback)\n yield next(gen)\n for node in gen:\n if node is self:\n return\n yield node\n\n def _walk_reverse(self, loopback=False, go_up=False):\n # process is walk up level, walk down its children tree, then walk up\n # next level etc.\n # default just walk down the children tree\n root = self\n index = 0\n # we need to go up a level before walking tree\n if go_up:\n root = self.parent\n try:\n if root is None or not isinstance(root, Widget):\n raise ValueError\n index = root.children.index(self) + 1\n except ValueError:\n if not loopback:\n return\n index = 0\n go_up = False\n root = self\n\n # now walk children tree starting with last-most child\n for child in islice(root.children, index, None):\n for walk_child in child._walk_reverse(loopback=loopback):\n yield walk_child\n # we need to return ourself last, in all cases\n yield root\n\n # if going up, continue walking up the parent tree\n if go_up:\n for walk_child in root._walk_reverse(loopback=loopback,\n go_up=go_up):\n yield walk_child\n\n def walk_reverse(self, loopback=False):\n ''' Iterator that walks the widget tree backwards starting with the\n widget before this, and going backwards returning widgets in the\n reverse order in which layouts display them.\n\n This walks in the opposite direction of :meth:`walk`, so a list of the\n tree generated with :meth:`walk` will be in reverse order compared\n to the list generated with this, provided `loopback` is True.\n\n :Parameters:\n `loopback`: bool, defaults to False\n If True, when the uppermost root in the tree is\n reached, it'll loop back to the last widget and start walking\n back until after we hit widget again. Defaults to False.\n\n :return:\n A generator that walks the tree, returning widgets in the\n reverse layout order.\n\n For example, given a tree with the following structure:\n\n .. code-block:: kv\n\n GridLayout:\n Button\n BoxLayout:\n id: box\n Widget\n Button\n Widget\n\n walking this tree:\n\n .. code-block:: python\n\n >>> # Call walk on box with loopback True\n >>> [type(widget) for widget in box.walk_reverse(loopback=True)]\n [<class 'Button'>, <class 'GridLayout'>, <class 'Widget'>,\n <class 'Button'>, <class 'Widget'>, <class 'BoxLayout'>]\n >>> # Now with loopback False\n >>> [type(widget) for widget in box.walk_reverse()]\n [<class 'Button'>, <class 'GridLayout'>]\n >>> forward = [w for w in box.walk(loopback=True)]\n >>> backward = [w for w in box.walk_reverse(loopback=True)]\n >>> forward == backward[::-1]\n True\n\n .. versionadded:: 1.9.0\n\n '''\n for node in self._walk_reverse(loopback=loopback, go_up=True):\n yield node\n if node is self:\n return\n\n def to_widget(self, x, y, relative=False):\n '''Convert the given coordinate from window to local widget\n coordinates. 
See :mod:`~kivy.uix.relativelayout` for details on the\n coordinate systems.\n '''\n if self.parent:\n x, y = self.parent.to_widget(x, y)\n return self.to_local(x, y, relative=relative)\n\n def to_window(self, x, y, initial=True, relative=False):\n '''Transform local coordinates to window coordinates. See\n :mod:`~kivy.uix.relativelayout` for details on the coordinate systems.\n '''\n if not initial:\n x, y = self.to_parent(x, y, relative=relative)\n if self.parent:\n return self.parent.to_window(x, y, initial=False,\n relative=relative)\n return (x, y)\n\n def to_parent(self, x, y, relative=False):\n '''Transform local coordinates to parent coordinates. See\n :mod:`~kivy.uix.relativelayout` for details on the coordinate systems.\n\n :Parameters:\n `relative`: bool, defaults to False\n Change to True if you want to translate relative positions from\n a widget to its parent coordinates.\n '''\n if relative:\n return (x + self.x, y + self.y)\n return (x, y)\n\n def to_local(self, x, y, relative=False):\n '''Transform parent coordinates to local coordinates. See\n :mod:`~kivy.uix.relativelayout` for details on the coordinate systems.\n\n :Parameters:\n `relative`: bool, defaults to False\n Change to True if you want to translate coordinates to\n relative widget coordinates.\n '''\n if relative:\n return (x - self.x, y - self.y)\n return (x, y)\n\n def _apply_transform(self, m, pos=None):\n if self.parent:\n x, y = self.parent.to_widget(relative=True,\n *self.to_window(*(pos or self.pos)))\n m.translate(x, y, 0)\n m = self.parent._apply_transform(m) if self.parent else m\n return m\n\n def get_window_matrix(self, x=0, y=0):\n '''Calculate the transformation matrix to convert between window and\n widget coordinates.\n\n :Parameters:\n `x`: float, defaults to 0\n Translates the matrix on the x axis.\n `y`: float, defaults to 0\n Translates the matrix on the y axis.\n '''\n m = Matrix()\n m.translate(x, y, 0)\n m = self._apply_transform(m)\n return m\n\n x = NumericProperty(0)\n '''X position of the widget.\n\n :attr:`x` is a :class:`~kivy.properties.NumericProperty` and defaults to 0.\n '''\n\n y = NumericProperty(0)\n '''Y position of the widget.\n\n :attr:`y` is a :class:`~kivy.properties.NumericProperty` and defaults to 0.\n '''\n\n width = NumericProperty(100)\n '''Width of the widget.\n\n :attr:`width` is a :class:`~kivy.properties.NumericProperty` and defaults\n to 100.\n\n .. warning::\n Keep in mind that the `width` property is subject to layout logic and\n that this has not yet happened at the time of the widget's `__init__`\n method.\n '''\n\n height = NumericProperty(100)\n '''Height of the widget.\n\n :attr:`height` is a :class:`~kivy.properties.NumericProperty` and defaults\n to 100.\n\n .. 
warning::\n Keep in mind that the `height` property is subject to layout logic and\n that this has not yet happened at the time of the widget's `__init__`\n method.\n '''\n\n pos = ReferenceListProperty(x, y)\n '''Position of the widget.\n\n :attr:`pos` is a :class:`~kivy.properties.ReferenceListProperty` of\n (:attr:`x`, :attr:`y`) properties.\n '''\n\n size = ReferenceListProperty(width, height)\n '''Size of the widget.\n\n :attr:`size` is a :class:`~kivy.properties.ReferenceListProperty` of\n (:attr:`width`, :attr:`height`) properties.\n '''\n\n def get_right(self):\n return self.x + self.width\n\n def set_right(self, value):\n self.x = value - self.width\n\n right = AliasProperty(get_right, set_right, bind=('x', 'width'))\n '''Right position of the widget.\n\n :attr:`right` is an :class:`~kivy.properties.AliasProperty` of\n (:attr:`x` + :attr:`width`).\n '''\n\n def get_top(self):\n return self.y + self.height\n\n def set_top(self, value):\n self.y = value - self.height\n\n top = AliasProperty(get_top, set_top, bind=('y', 'height'))\n '''Top position of the widget.\n\n :attr:`top` is an :class:`~kivy.properties.AliasProperty` of\n (:attr:`y` + :attr:`height`).\n '''\n\n def get_center_x(self):\n return self.x + self.width / 2.\n\n def set_center_x(self, value):\n self.x = value - self.width / 2.\n\n center_x = AliasProperty(get_center_x, set_center_x, bind=('x', 'width'))\n '''X center position of the widget.\n\n :attr:`center_x` is an :class:`~kivy.properties.AliasProperty` of\n (:attr:`x` + :attr:`width` / 2.).\n '''\n\n def get_center_y(self):\n return self.y + self.height / 2.\n\n def set_center_y(self, value):\n self.y = value - self.height / 2.\n\n center_y = AliasProperty(get_center_y, set_center_y, bind=('y', 'height'))\n '''Y center position of the widget.\n\n :attr:`center_y` is an :class:`~kivy.properties.AliasProperty` of\n (:attr:`y` + :attr:`height` / 2.).\n '''\n\n center = ReferenceListProperty(center_x, center_y)\n '''Center position of the widget.\n\n :attr:`center` is a :class:`~kivy.properties.ReferenceListProperty` of\n (:attr:`center_x`, :attr:`center_y`) properties.\n '''\n\n cls = ListProperty([])\n '''Class of the widget, used for styling.\n '''\n\n id = StringProperty(None, allownone=True)\n '''Unique identifier of the widget in the tree.\n\n :attr:`id` is a :class:`~kivy.properties.StringProperty` and defaults to\n None.\n\n .. warning::\n\n If the :attr:`id` is already used in the tree, an exception will\n be raised.\n '''\n\n children = ListProperty([])\n '''List of children of this widget.\n\n :attr:`children` is a :class:`~kivy.properties.ListProperty` and\n defaults to an empty list.\n\n Use :meth:`add_widget` and :meth:`remove_widget` for manipulating the\n children list. Don't manipulate the children list directly unless you know\n what you are doing.\n '''\n\n parent = ObjectProperty(None, allownone=True, rebind=True)\n '''Parent of this widget. The parent of a widget is set when the widget\n is added to another widget and unset when the widget is removed from its\n parent.\n\n :attr:`parent` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to None.\n '''\n\n size_hint_x = NumericProperty(1, allownone=True)\n '''X size hint. 
Represents how much space the widget should use in the\n direction of the X axis relative to its parent's width.\n Only the :class:`~kivy.uix.layout.Layout` and\n :class:`~kivy.core.window.Window` classes make use of the hint.\n\n The size_hint is used by layouts for two purposes:\n\n - When the layout considers widgets on their own rather than in\n relation to its other children, the size_hint_x is a direct proportion\n of the parent width, normally between 0.0 and 1.0. For instance, a\n widget with ``size_hint_x=0.5`` in\n a vertical BoxLayout will take up half the BoxLayout's width, or\n a widget in a FloatLayout with ``size_hint_x=0.2`` will take up 20%\n of the FloatLayout width. If the size_hint is greater than 1, the\n widget will be wider than the parent.\n - When multiple widgets can share a row of a layout, such as in a\n horizontal BoxLayout, their widths will be their size_hint_x as a\n fraction of the sum of widget size_hints. For instance, if the\n size_hint_xs are (0.5, 1.0, 0.5), the first widget will have a\n width of 25% of the parent width.\n\n :attr:`size_hint_x` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 1.\n '''\n\n size_hint_y = NumericProperty(1, allownone=True)\n '''Y size hint.\n\n :attr:`size_hint_y` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 1.\n\n See :attr:`size_hint_x` for more information, but with widths and heights\n swapped.\n '''\n\n size_hint = ReferenceListProperty(size_hint_x, size_hint_y)\n '''Size hint.\n\n :attr:`size_hint` is a :class:`~kivy.properties.ReferenceListProperty` of\n (:attr:`size_hint_x`, :attr:`size_hint_y`) properties.\n\n See :attr:`size_hint_x` for more information.\n '''\n\n pos_hint = ObjectProperty({})\n '''Position hint. This property allows you to set the position of\n the widget inside its parent layout, in percent (similar to\n size_hint).\n\n For example, if you want to set the top of the widget to be at 90%\n height of its parent layout, you can write::\n\n widget = Widget(pos_hint={'top': 0.9})\n\n The keys 'x', 'right' and 'center_x' will use the parent width.\n The keys 'y', 'top' and 'center_y' will use the parent height.\n\n See :doc:`api-kivy.uix.floatlayout` for further reference.\n\n .. note::\n :attr:`pos_hint` is not used by all layouts. Check the documentation\n of the layout in question to see if it supports pos_hint.\n\n :attr:`pos_hint` is an :class:`~kivy.properties.ObjectProperty`\n containing a dict.\n '''\n\n ids = DictProperty({})\n '''This is a dictionary of ids defined in your kv language. This will only\n be populated if you use ids in your kv language code.\n\n .. versionadded:: 1.7.0\n\n :attr:`ids` is a :class:`~kivy.properties.DictProperty` and defaults to an\n empty dict {}.\n\n The :attr:`ids` are populated for each root level widget definition. For\n example:\n\n .. code-block:: kv\n\n # in kv\n <MyWidget@Widget>:\n id: my_widget\n Label:\n id: label_widget\n Widget:\n id: inner_widget\n Label:\n id: inner_label\n TextInput:\n id: text_input\n OtherWidget:\n id: other_widget\n\n\n <OtherWidget@Widget>\n id: other_widget\n Label:\n id: other_label\n TextInput:\n id: other_textinput\n\n Then, in python:\n\n .. 
code-block:: python\n\n >>> widget = MyWidget()\n >>> print(widget.ids)\n {'other_widget': <weakproxy at 041CFED0 to OtherWidget at 041BEC38>,\n 'inner_widget': <weakproxy at 04137EA0 to Widget at 04138228>,\n 'inner_label': <weakproxy at 04143540 to Label at 04138260>,\n 'label_widget': <weakproxy at 04137B70 to Label at 040F97A0>,\n 'text_input': <weakproxy at 041BB5D0 to TextInput at 041BEC00>}\n >>> print(widget.ids['other_widget'].ids)\n {'other_textinput': <weakproxy at 041DBB40 to TextInput at 041BEF48>,\n 'other_label': <weakproxy at 041DB570 to Label at 041BEEA0>}\n >>> print(widget.ids['label_widget'].ids)\n {}\n '''\n\n opacity = NumericProperty(1.0)\n '''Opacity of the widget and all its children.\n\n .. versionadded:: 1.4.1\n\n The opacity attribute controls the opacity of the widget and its children.\n Be careful, it's a cumulative attribute: the value is multiplied by the\n current global opacity and the result is applied to the current context\n color.\n\n For example, if the parent has an opacity of 0.5 and a child has an\n opacity of 0.2, the real opacity of the child will be 0.5 * 0.2 = 0.1.\n\n Then, the opacity is applied by the shader as:\n\n .. code-block:: python\n\n frag_color = color * vec4(1.0, 1.0, 1.0, opacity);\n\n :attr:`opacity` is a :class:`~kivy.properties.NumericProperty` and defaults\n to 1.0.\n '''\n\n def on_opacity(self, instance, value):\n canvas = self.canvas\n if canvas is not None:\n canvas.opacity = value\n\n canvas = None\n '''Canvas of the widget.\n\n The canvas is a graphics object that contains all the drawing instructions\n for the graphical representation of the widget.\n\n There are no general properties for the Widget class, such as background\n color, to keep the design simple and lean. Some derived classes, such as\n Button, do add such convenience properties but generally the developer is\n responsible for implementing the graphics representation for a custom\n widget from the ground up. See the derived widget classes for patterns to\n follow and extend.\n\n See :class:`~kivy.graphics.Canvas` for more information about the usage.\n '''\n\n disabled = BooleanProperty(False)\n '''Indicates whether this widget can interact with input or not.\n\n .. note::\n\n 1. Child Widgets, when added to a disabled widget, will be disabled\n automatically.\n 2. Disabling/enabling a parent disables/enables all\n of its children.\n\n .. versionadded:: 1.8.0\n\n :attr:`disabled` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to False.\n '''\n", "path": "kivy/uix/widget.py" } ]
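The file content above documents how ``add_widget`` threads a new child both into ``children`` (which drives touch-dispatch order) and into the parent canvas (which drives draw order). A minimal sketch of the ``index`` behaviour described in that docstring follows; it assumes a Kivy installation with a usable window provider, since instantiating ``Widget`` subclasses requires a window.

.. code-block:: python

    from kivy.uix.boxlayout import BoxLayout
    from kivy.uix.label import Label

    box = BoxLayout()
    for text in ("a", "b", "c"):
        # Default index=0: each new child is inserted at the front of
        # `children`, so it receives touch events before older siblings.
        box.add_widget(Label(text=text))
    print([child.text for child in box.children])   # ['c', 'b', 'a']

    box.clear_widgets()
    for i, text in enumerate(("a", "b", "c")):
        # Explicit, increasing indices append instead, reversing the
        # dispatch order relative to the default.
        box.add_widget(Label(text=text), index=i)
    print([child.text for child in box.children])   # ['a', 'b', 'c']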
[ { "content": "'''\nWidget class\n============\n\nThe :class:`Widget` class is the base class required for creating Widgets.\nThis widget class was designed with a couple of principles in mind:\n\n* *Event Driven*\n\n Widget interaction is built on top of events that occur. If a property\n changes, the widget can respond to the change in the 'on_<propname>'\n callback. If nothing changes, nothing will be done. That's the main\n goal of the :class:`~kivy.properties.Property` class.\n\n* *Separation Of Concerns (the widget and its graphical representation)*\n\n Widgets don't have a `draw()` method. This is done on purpose: The idea\n is to allow you to create your own graphical representation outside the\n widget class.\n Obviously you can still use all the available properties to do that, so\n that your representation properly reflects the widget's current state.\n Every widget has its own :class:`~kivy.graphics.Canvas` that you\n can use to draw. This separation allows Kivy to run your\n application in a very efficient manner.\n\n* *Bounding Box / Collision*\n\n Often you want to know if a certain point is within the bounds of your\n widget. An example would be a button widget where you only want to\n trigger an action when the button itself is actually touched.\n For this, you can use the :meth:`~Widget.collide_point` method, which\n will return True if the point you pass to it is inside the axis-aligned\n bounding box defined by the widget's position and size.\n If a simple AABB is not sufficient, you can override the method to\n perform the collision checks with more complex shapes, e.g. a polygon.\n You can also check if a widget collides with another widget with\n :meth:`~Widget.collide_widget`.\n\n\nWe also have some default values and behaviors that you should be aware of:\n\n* A :class:`Widget` is not a :class:`~kivy.uix.layout.Layout`: it will not\n change the position or the size of its children. If you want control over\n positioning or sizing, use a :class:`~kivy.uix.layout.Layout`.\n\n* The default size of a widget is (100, 100). This is only changed if the\n parent is a :class:`~kivy.uix.layout.Layout`.\n For example, if you add a :class:`Label` inside a\n :class:`Button`, the label will not inherit the button's size or position\n because the button is not a *Layout*: it's just another *Widget*.\n\n* The default size_hint is (1, 1). If the parent is a :class:`Layout`, then the\n widget size will be the parent layout's size.\n\n* :meth:`~Widget.on_touch_down`, :meth:`~Widget.on_touch_move`,\n :meth:`~Widget.on_touch_up` don't do any sort of collisions. If you want to\n know if the touch is inside your widget, use :meth:`~Widget.collide_point`.\n\nUsing Properties\n----------------\n\nWhen you read the documentation, all properties are described in the format::\n\n <name> is a <property class> and defaults to <default value>.\n\ne.g.\n\n :attr:`~kivy.uix.label.Label.text` is a\n :class:`~kivy.properties.StringProperty` and defaults to ''.\n\nIf you want to be notified when the pos attribute changes, i.e. when the\nwidget moves, you can bind your own callback function like this::\n\n def callback_pos(instance, value):\n print('The widget', instance, 'moved to', value)\n\n wid = Widget()\n wid.bind(pos=callback_pos)\n\nRead more about :doc:`/api-kivy.properties`.\n\nBasic drawing\n-------------\n\nWidgets support a range of drawing instructions that you can use to customize\nthe look of your widgets and layouts. 
For example, to draw a background image\nfor your widget, you can do the following:\n\n.. code-block:: python\n\n def redraw(self, args):\n self.bg_rect.size = self.size\n self.bg_rect.pos = self.pos\n\n widget = Widget()\n with widget.canvas:\n widget.bg_rect = Rectangle(source=\"cover.jpg\", pos=self.pos, \\\nsize=self.size)\n widget.bind(pos=redraw, size=redraw)\n\nTo draw a background in kv:\n\n.. code-block:: kv\n\n Widget:\n canvas:\n Rectangle:\n source: \"cover.jpg\"\n size: self.size\n pos: self.pos\n\nThese examples only scratch the surface. Please see the :mod:`kivy.graphics`\ndocumentation for more information.\n\n.. _widget-event-bubbling:\n\nWidget touch event bubbling\n---------------------------\n\nWhen you catch touch events between multiple widgets, you often\nneed to be aware of the order in which these events are propagated. In Kivy,\nevents bubble up from the first child upwards through the other children.\nIf a widget has children, the event is passed through its children before\nbeing passed on to the widget after it.\n\nAs the :meth:`~kivy.uix.widget.Widget.on_touch_up` method inserts widgets at\nindex 0 by default, this means the event goes from the most recently added\nwidget back to the first one added. Consider the following:\n\n.. code-block:: python\n\n box = BoxLayout()\n box.add_widget(Label(text=\"a\"))\n box.add_widget(Label(text=\"b\"))\n box.add_widget(Label(text=\"c\"))\n\nThe label with text \"c\" gets the event first, \"b\" second and \"a\" last. You can\nreverse this order by manually specifying the index:\n\n.. code-block:: python\n\n box = BoxLayout()\n box.add_widget(Label(text=\"a\"), index=0)\n box.add_widget(Label(text=\"b\"), index=1)\n box.add_widget(Label(text=\"c\"), index=2)\n\nNow the order would be \"a\", \"b\" then \"c\". One thing to keep in mind when using\nkv is that declaring a widget uses the\n:meth:`~kivy.uix.widget.Widget.add_widget` method for insertion. Hence, using\n\n.. code-block:: kv\n\n BoxLayout:\n MyLabel:\n text: \"a\"\n MyLabel:\n text: \"b\"\n MyLabel:\n text: \"c\"\n\nwould result in the event order \"c\", \"b\" then \"a\" as \"c\" was actually the last\nadded widget. It thus has index 0, \"b\" index 1 and \"a\" index 2. Effectively,\nthe child order is the reverse of its listed order.\n\nThis ordering is the same for the :meth:`~kivy.uix.widget.Widget.on_touch_move`\nand :meth:`~kivy.uix.widget.Widget.on_touch_up` events.\n\nIn order to stop this event bubbling, a method can return `True`. This tells\nKivy the event has been handled and the event propagation stops. For example:\n\n.. code-block:: python\n\n class MyWidget(Widget):\n def on_touch_down(self, touch):\n If <some_condition>:\n # Do stuff here and kill the event\n return True\n else:\n return super(MyWidget, self).on_touch_down(touch)\n\nThis approach gives you good control over exactly how events are dispatched\nand managed. Sometimes, however, you may wish to let the event be completely\npropagated before taking action. You can use the\n:class:`~kivy.clock.Clock` to help you here:\n\n.. 
code-block:: python\n\n class MyWidget(Label):\n def on_touch_down(self, touch, after=False):\n if after:\n print \"Fired after the event has been dispatched!\"\n else:\n Clock.schedule_once(lambda dt: self.on_touch_down(touch, True))\n return super(MyWidget, self).on_touch_down(touch)\n\nUsage of :attr:`Widget.center`, :attr:`Widget.right`, and :attr:`Widget.top`\n----------------------------------------------------------------------------\n\nA common mistake when using one of the computed properties such as\n:attr:`Widget.right` is to use it to make a widget follow its parent with a\nKV rule such as `right: self.parent.right`. Consider, for example:\n\n.. code-block:: kv\n\n FloatLayout:\n id: layout\n width: 100\n Widget:\n id: wid\n right: layout.right\n\nThe (mistaken) expectation is that this rule ensures that wid's right will\nalways be whatever layout's right is - that is wid.right and layout.right will\nalways be identical. In actual fact, this rule only says that \"whenever\nlayout's `right` changes, wid's right will be set to that value\". The\ndifference being that as long as `layout.right` doesn't change, `wid.right`\ncould be anything, even a value that will make them different.\n\nSpecifically, for the KV code above, consider the following example::\n\n >>> print(layout.right, wid.right)\n (100, 100)\n >>> wid.x = 200\n >>> print(layout.right, wid.right)\n (100, 300)\n\nAs can be seen, initially they are in sync, however, when we change `wid.x`\nthey go out of sync because `layout.right` is not changed and the rule is not\ntriggered.\n\nThe proper way to make the widget follow its parent's right is to use\n:attr:`Widget.pos_hint`. If instead of `right: layout.right` we did\n`pos_hint: {'right': 1}`, then the widgets right will always be set to be\nat the parent's right at each layout update.\n'''\n\n__all__ = ('Widget', 'WidgetException')\n\nfrom kivy.event import EventDispatcher\nfrom kivy.factory import Factory\nfrom kivy.properties import (\n NumericProperty, StringProperty, AliasProperty, ReferenceListProperty,\n ObjectProperty, ListProperty, DictProperty, BooleanProperty)\nfrom kivy.graphics import (\n Canvas, Translate, Fbo, ClearColor, ClearBuffers, Scale)\nfrom kivy.graphics.transformation import Matrix\nfrom kivy.base import EventLoop\nfrom kivy.lang import Builder\nfrom kivy.context import get_current_context\nfrom kivy.weakproxy import WeakProxy\nfrom functools import partial\nfrom itertools import islice\n\n\n# References to all the widget destructors (partial method with widget uid as\n# key).\n_widget_destructors = {}\n\n\ndef _widget_destructor(uid, r):\n # Internal method called when a widget is deleted from memory. the only\n # thing we remember about it is its uid. Clear all the associated callbacks\n # created in kv language.\n del _widget_destructors[uid]\n Builder.unbind_widget(uid)\n\n\nclass WidgetException(Exception):\n '''Fired when the widget gets an exception.\n '''\n pass\n\n\nclass WidgetMetaclass(type):\n '''Metaclass to automatically register new widgets for the\n :class:`~kivy.factory.Factory`.\n\n .. warning::\n This metaclass is used by the Widget. Do not use it directly!\n '''\n def __init__(mcs, name, bases, attrs):\n super(WidgetMetaclass, mcs).__init__(name, bases, attrs)\n Factory.register(name, cls=mcs)\n\n\n#: Base class used for Widget, that inherits from :class:`EventDispatcher`\nWidgetBase = WidgetMetaclass('WidgetBase', (EventDispatcher, ), {})\n\n\nclass Widget(WidgetBase):\n '''Widget class. 
See module documentation for more information.\n\n :Events:\n `on_touch_down`:\n Fired when a new touch event occurs\n `on_touch_move`:\n Fired when an existing touch moves\n `on_touch_up`:\n Fired when an existing touch disappears\n\n .. warning::\n Adding a `__del__` method to a class derived from Widget with Python\n prior to 3.4 will disable automatic garbage collection for instances\n of that class. This is because the Widget class creates reference\n cycles, thereby `preventing garbage collection\n <https://docs.python.org/2/library/gc.html#gc.garbage>`_.\n\n .. versionchanged:: 1.0.9\n Everything related to event properties has been moved to the\n :class:`~kivy.event.EventDispatcher`. Event properties can now be used\n when contructing a simple class without subclassing :class:`Widget`.\n\n .. versionchanged:: 1.5.0\n The constructor now accepts on_* arguments to automatically bind\n callbacks to properties or events, as in the Kv language.\n '''\n\n __metaclass__ = WidgetMetaclass\n __events__ = ('on_touch_down', 'on_touch_move', 'on_touch_up')\n _proxy_ref = None\n\n def __init__(self, **kwargs):\n # Before doing anything, ensure the windows exist.\n EventLoop.ensure_window()\n\n # Assign the default context of the widget creation.\n if not hasattr(self, '_context'):\n self._context = get_current_context()\n\n no_builder = '__no_builder' in kwargs\n if no_builder:\n del kwargs['__no_builder']\n on_args = {k: v for k, v in kwargs.items() if k[:3] == 'on_'}\n for key in on_args:\n del kwargs[key]\n\n super(Widget, self).__init__(**kwargs)\n\n # Create the default canvas if it does not exist.\n if self.canvas is None:\n self.canvas = Canvas(opacity=self.opacity)\n\n # Apply all the styles.\n if not no_builder:\n Builder.apply(self, ignored_consts=self._kwargs_applied_init)\n\n # Bind all the events.\n if on_args:\n self.bind(**on_args)\n\n @property\n def proxy_ref(self):\n '''Return a proxy reference to the widget, i.e. without creating a\n reference to the widget. See `weakref.proxy\n <http://docs.python.org/2/library/weakref.html?highlight\\\n =proxy#weakref.proxy>`_ for more information.\n\n .. versionadded:: 1.7.2\n '''\n _proxy_ref = self._proxy_ref\n if _proxy_ref is not None:\n return _proxy_ref\n\n f = partial(_widget_destructor, self.uid)\n self._proxy_ref = _proxy_ref = WeakProxy(self, f)\n # Only f should be enough here, but it appears that is a very\n # specific case, the proxy destructor is not called if both f and\n # _proxy_ref are not together in a tuple.\n _widget_destructors[self.uid] = (f, _proxy_ref)\n return _proxy_ref\n\n def __hash__(self):\n return id(self)\n\n @property\n def __self__(self):\n return self\n\n #\n # Collision\n #\n def collide_point(self, x, y):\n '''\n Check if a point (x, y) is inside the widget's axis aligned bounding\n box.\n\n :Parameters:\n `x`: numeric\n x position of the point (in window coordinates)\n `y`: numeric\n y position of the point (in window coordinates)\n\n :Returns:\n A bool. True if the point is inside the bounding box, False\n otherwise.\n\n .. code-block:: python\n\n >>> Widget(pos=(10, 10), size=(50, 50)).collide_point(40, 40)\n True\n '''\n return self.x <= x <= self.right and self.y <= y <= self.top\n\n def collide_widget(self, wid):\n '''\n Check if another widget collides with this widget. This function\n performs an axis-aligned bounding box intersection test by default.\n\n :Parameters:\n `wid`: :class:`Widget` class\n Widget to collide with.\n\n :Returns:\n bool. 
True if the other widget collides with this widget, False\n otherwise.\n\n .. code-block:: python\n\n >>> wid = Widget(size=(50, 50))\n >>> wid2 = Widget(size=(50, 50), pos=(25, 25))\n >>> wid.collide_widget(wid2)\n True\n >>> wid2.pos = (55, 55)\n >>> wid.collide_widget(wid2)\n False\n '''\n if self.right < wid.x:\n return False\n if self.x > wid.right:\n return False\n if self.top < wid.y:\n return False\n if self.y > wid.top:\n return False\n return True\n\n #\n # Default event handlers\n #\n def on_touch_down(self, touch):\n '''Receive a touch down event.\n\n :Parameters:\n `touch`: :class:`~kivy.input.motionevent.MotionEvent` class\n Touch received. The touch is in parent coordinates. See\n :mod:`~kivy.uix.relativelayout` for a discussion on\n coordinate systems.\n\n :Returns:\n bool. If True, the dispatching of the touch event will stop.\n If False, the event will continue to be dispatched to the rest\n of the widget tree.\n '''\n if self.disabled and self.collide_point(*touch.pos):\n return True\n for child in self.children[:]:\n if child.dispatch('on_touch_down', touch):\n return True\n\n def on_touch_move(self, touch):\n '''Receive a touch move event. The touch is in parent coordinates.\n\n See :meth:`on_touch_down` for more information.\n '''\n if self.disabled:\n return\n for child in self.children[:]:\n if child.dispatch('on_touch_move', touch):\n return True\n\n def on_touch_up(self, touch):\n '''Receive a touch up event. The touch is in parent coordinates.\n\n See :meth:`on_touch_down` for more information.\n '''\n if self.disabled:\n return\n for child in self.children[:]:\n if child.dispatch('on_touch_up', touch):\n return True\n\n def on_disabled(self, instance, value):\n for child in self.children:\n child.disabled = value\n\n #\n # Tree management\n #\n def add_widget(self, widget, index=0, canvas=None):\n '''Add a new widget as a child of this widget.\n\n :Parameters:\n `widget`: :class:`Widget`\n Widget to add to our list of children.\n `index`: int, defaults to 0\n Index to insert the widget in the list. Notice that the default\n of 0 means the widget is inserted at the beginning of the list\n and will thus be drawn on top of other sibling widgets. For a\n full discussion of the index and widget hierarchy, please see\n the :doc:`Widgets Programming Guide <guide/widgets>`.\n\n .. versionadded:: 1.0.5\n `canvas`: str, defaults to None\n Canvas to add widget's canvas to. Can be 'before', 'after' or\n None for the default canvas.\n\n .. versionadded:: 1.9.0\n\n .. 
code-block:: python\n\n >>> from kivy.uix.button import Button\n >>> from kivy.uix.slider import Slider\n >>> root = Widget()\n >>> root.add_widget(Button())\n >>> slider = Slider()\n >>> root.add_widget(slider)\n\n '''\n if not isinstance(widget, Widget):\n raise WidgetException(\n 'add_widget() can be used only with instances'\n ' of the Widget class.')\n\n widget = widget.__self__\n if widget is self:\n raise WidgetException(\n 'Widget instances cannot be added to themselves.')\n parent = widget.parent\n # Check if the widget is already a child of another widget.\n if parent:\n raise WidgetException('Cannot add %r, it already has a parent %r'\n % (widget, parent))\n widget.parent = parent = self\n # Child will be disabled if added to a disabled parent.\n if parent.disabled:\n widget.disabled = True\n\n canvas = self.canvas.before if canvas == 'before' else \\\n self.canvas.after if canvas == 'after' else self.canvas\n\n if index == 0 or len(self.children) == 0:\n self.children.insert(0, widget)\n canvas.add(widget.canvas)\n else:\n canvas = self.canvas\n children = self.children\n if index >= len(children):\n index = len(children)\n next_index = canvas.indexof(children[-1].canvas)\n else:\n next_child = children[index]\n next_index = canvas.indexof(next_child.canvas)\n if next_index == -1:\n next_index = canvas.length()\n else:\n next_index += 1\n\n children.insert(index, widget)\n # We never want to insert widget _before_ canvas.before.\n if next_index == 0 and canvas.has_before:\n next_index = 1\n canvas.insert(next_index, widget.canvas)\n\n def remove_widget(self, widget):\n '''Remove a widget from the children of this widget.\n\n :Parameters:\n `widget`: :class:`Widget`\n Widget to remove from our children list.\n\n .. code-block:: python\n\n >>> from kivy.uix.button import Button\n >>> root = Widget()\n >>> button = Button()\n >>> root.add_widget(button)\n >>> root.remove_widget(button)\n '''\n if widget not in self.children:\n return\n self.children.remove(widget)\n if widget.canvas in self.canvas.children:\n self.canvas.remove(widget.canvas)\n elif widget.canvas in self.canvas.after.children:\n self.canvas.after.remove(widget.canvas)\n elif widget.canvas in self.canvas.before.children:\n self.canvas.before.remove(widget.canvas)\n widget.parent = None\n\n def clear_widgets(self, children=None):\n '''\n Remove all (or the specified) :attr:`~Widget.children` of this widget.\n If the 'children' argument is specified, it should be a list (or\n filtered list) of children of the current widget.\n\n .. versionchanged:: 1.8.0\n The `children` argument can be used to specify the children you\n want to remove.\n '''\n\n if not children:\n children = self.children\n remove_widget = self.remove_widget\n for child in children[:]:\n remove_widget(child)\n\n def export_to_png(self, filename, *args):\n '''Saves an image of the widget and its children in png format at the\n specified filename. Works by removing the widget canvas from its\n parent, rendering to an :class:`~kivy.graphics.fbo.Fbo`, and calling\n :meth:`~kivy.graphics.texture.Texture.save`.\n\n .. note::\n\n The image includes only this widget and its children. If you want\n to include widgets elsewhere in the tree, you must call\n :meth:`~Widget.export_to_png` from their common parent, or use\n :meth:`~kivy.core.window.WindowBase.screenshot` to capture the whole\n window.\n\n .. note::\n\n The image will be saved in png format, you should include the\n extension in your filename.\n\n .. 
versionadded:: 1.9.0\n '''\n\n if self.parent is not None:\n canvas_parent_index = self.parent.canvas.indexof(self.canvas)\n if canvas_parent_index > -1:\n self.parent.canvas.remove(self.canvas)\n\n fbo = Fbo(size=self.size, with_stencilbuffer=True)\n\n with fbo:\n ClearColor(0, 0, 0, 1)\n ClearBuffers()\n Scale(1, -1, 1)\n Translate(-self.x, -self.y - self.height, 0)\n\n fbo.add(self.canvas)\n fbo.draw()\n fbo.texture.save(filename, flipped=False)\n fbo.remove(self.canvas)\n\n if self.parent is not None and canvas_parent_index > -1:\n self.parent.canvas.insert(canvas_parent_index, self.canvas)\n\n return True\n\n def get_root_window(self):\n '''Return the root window.\n\n :Returns:\n Instance of the root window. Can be a\n :class:`~kivy.core.window.WindowBase` or\n :class:`Widget`.\n '''\n if self.parent:\n return self.parent.get_root_window()\n\n def get_parent_window(self):\n '''Return the parent window.\n\n :Returns:\n Instance of the parent window. Can be a\n :class:`~kivy.core.window.WindowBase` or\n :class:`Widget`.\n '''\n if self.parent:\n return self.parent.get_parent_window()\n\n def _walk(self, restrict=False, loopback=False, index=None):\n # We pass index only when we are going on the parent\n # so don't yield the parent as well.\n if index is None:\n index = len(self.children)\n yield self\n\n for child in reversed(self.children[:index]):\n for walk_child in child._walk(restrict=True):\n yield walk_child\n\n # If we want to continue with our parent, just do it.\n if not restrict:\n parent = self.parent\n try:\n if parent is None or not isinstance(parent, Widget):\n raise ValueError\n index = parent.children.index(self)\n except ValueError:\n # Self is root, if we want to loopback from the first element:\n if not loopback:\n return\n # If we started with root (i.e. index==None), then we have to\n # start from root again, so we return self again. Otherwise, we\n # never returned it, so return it now starting with it.\n parent = self\n index = None\n for walk_child in parent._walk(loopback=loopback, index=index):\n yield walk_child\n\n def walk(self, restrict=False, loopback=False):\n ''' Iterator that walks the widget tree starting with this widget and\n goes forward returning widgets in the order in which layouts display\n them.\n\n :Parameters:\n `restrict`: bool, defaults to False\n If True, it will only iterate through the widget and its\n children (or children of its children etc.). Defaults to False.\n `loopback`: bool, defaults to False\n If True, when the last widget in the tree is reached,\n it'll loop back to the uppermost root and start walking until\n we hit this widget again. Naturally, it can only loop back when\n `restrict` is False. Defaults to False.\n\n :return:\n A generator that walks the tree, returning widgets in the\n forward layout order.\n\n For example, given a tree with the following structure:\n\n .. code-block:: kv\n\n GridLayout:\n Button\n BoxLayout:\n id: box\n Widget\n Button\n Widget\n\n walking this tree:\n\n .. 
code-block:: python\n\n >>> # Call walk on box with loopback True, and restrict False\n >>> [type(widget) for widget in box.walk(loopback=True)]\n [<class 'BoxLayout'>, <class 'Widget'>, <class 'Button'>,\n <class 'Widget'>, <class 'GridLayout'>, <class 'Button'>]\n >>> # Now with loopback False, and restrict False\n >>> [type(widget) for widget in box.walk()]\n [<class 'BoxLayout'>, <class 'Widget'>, <class 'Button'>,\n <class 'Widget'>]\n >>> # Now with restrict True\n >>> [type(widget) for widget in box.walk(restrict=True)]\n [<class 'BoxLayout'>, <class 'Widget'>, <class 'Button'>]\n\n .. versionadded:: 1.9.0\n '''\n gen = self._walk(restrict, loopback)\n yield next(gen)\n for node in gen:\n if node is self:\n return\n yield node\n\n def _walk_reverse(self, loopback=False, go_up=False):\n # process is walk up level, walk down its children tree, then walk up\n # next level etc.\n # default just walk down the children tree\n root = self\n index = 0\n # we need to go up a level before walking tree\n if go_up:\n root = self.parent\n try:\n if root is None or not isinstance(root, Widget):\n raise ValueError\n index = root.children.index(self) + 1\n except ValueError:\n if not loopback:\n return\n index = 0\n go_up = False\n root = self\n\n # now walk children tree starting with last-most child\n for child in islice(root.children, index, None):\n for walk_child in child._walk_reverse(loopback=loopback):\n yield walk_child\n # we need to return ourself last, in all cases\n yield root\n\n # if going up, continue walking up the parent tree\n if go_up:\n for walk_child in root._walk_reverse(loopback=loopback,\n go_up=go_up):\n yield walk_child\n\n def walk_reverse(self, loopback=False):\n ''' Iterator that walks the widget tree backwards starting with the\n widget before this, and going backwards returning widgets in the\n reverse order in which layouts display them.\n\n This walks in the opposite direction of :meth:`walk`, so a list of the\n tree generated with :meth:`walk` will be in reverse order compared\n to the list generated with this, provided `loopback` is True.\n\n :Parameters:\n `loopback`: bool, defaults to False\n If True, when the uppermost root in the tree is\n reached, it'll loop back to the last widget and start walking\n back until after we hit widget again. Defaults to False.\n\n :return:\n A generator that walks the tree, returning widgets in the\n reverse layout order.\n\n For example, given a tree with the following structure:\n\n .. code-block:: kv\n\n GridLayout:\n Button\n BoxLayout:\n id: box\n Widget\n Button\n Widget\n\n walking this tree:\n\n .. code-block:: python\n\n >>> # Call walk on box with loopback True\n >>> [type(widget) for widget in box.walk_reverse(loopback=True)]\n [<class 'Button'>, <class 'GridLayout'>, <class 'Widget'>,\n <class 'Button'>, <class 'Widget'>, <class 'BoxLayout'>]\n >>> # Now with loopback False\n >>> [type(widget) for widget in box.walk_reverse()]\n [<class 'Button'>, <class 'GridLayout'>]\n >>> forward = [w for w in box.walk(loopback=True)]\n >>> backward = [w for w in box.walk_reverse(loopback=True)]\n >>> forward == backward[::-1]\n True\n\n .. versionadded:: 1.9.0\n\n '''\n for node in self._walk_reverse(loopback=loopback, go_up=True):\n yield node\n if node is self:\n return\n\n def to_widget(self, x, y, relative=False):\n '''Convert the given coordinate from window to local widget\n coordinates. 
See :mod:`~kivy.uix.relativelayout` for details on the\n coordinate systems.\n '''\n if self.parent:\n x, y = self.parent.to_widget(x, y)\n return self.to_local(x, y, relative=relative)\n\n def to_window(self, x, y, initial=True, relative=False):\n '''Transform local coordinates to window coordinates. See\n :mod:`~kivy.uix.relativelayout` for details on the coordinate systems.\n '''\n if not initial:\n x, y = self.to_parent(x, y, relative=relative)\n if self.parent:\n return self.parent.to_window(x, y, initial=False,\n relative=relative)\n return (x, y)\n\n def to_parent(self, x, y, relative=False):\n '''Transform local coordinates to parent coordinates. See\n :mod:`~kivy.uix.relativelayout` for details on the coordinate systems.\n\n :Parameters:\n `relative`: bool, defaults to False\n Change to True if you want to translate relative positions from\n a widget to its parent coordinates.\n '''\n if relative:\n return (x + self.x, y + self.y)\n return (x, y)\n\n def to_local(self, x, y, relative=False):\n '''Transform parent coordinates to local coordinates. See\n :mod:`~kivy.uix.relativelayout` for details on the coordinate systems.\n\n :Parameters:\n `relative`: bool, defaults to False\n Change to True if you want to translate coordinates to\n relative widget coordinates.\n '''\n if relative:\n return (x - self.x, y - self.y)\n return (x, y)\n\n def _apply_transform(self, m, pos=None):\n if self.parent:\n x, y = self.parent.to_widget(relative=True,\n *self.to_window(*(pos or self.pos)))\n m.translate(x, y, 0)\n m = self.parent._apply_transform(m) if self.parent else m\n return m\n\n def get_window_matrix(self, x=0, y=0):\n '''Calculate the transformation matrix to convert between window and\n widget coordinates.\n\n :Parameters:\n `x`: float, defaults to 0\n Translates the matrix on the x axis.\n `y`: float, defaults to 0\n Translates the matrix on the y axis.\n '''\n m = Matrix()\n m.translate(x, y, 0)\n m = self._apply_transform(m)\n return m\n\n x = NumericProperty(0)\n '''X position of the widget.\n\n :attr:`x` is a :class:`~kivy.properties.NumericProperty` and defaults to 0.\n '''\n\n y = NumericProperty(0)\n '''Y position of the widget.\n\n :attr:`y` is a :class:`~kivy.properties.NumericProperty` and defaults to 0.\n '''\n\n width = NumericProperty(100)\n '''Width of the widget.\n\n :attr:`width` is a :class:`~kivy.properties.NumericProperty` and defaults\n to 100.\n\n .. warning::\n Keep in mind that the `width` property is subject to layout logic and\n that this has not yet happened at the time of the widget's `__init__`\n method.\n '''\n\n height = NumericProperty(100)\n '''Height of the widget.\n\n :attr:`height` is a :class:`~kivy.properties.NumericProperty` and defaults\n to 100.\n\n .. 
warning::\n Keep in mind that the `height` property is subject to layout logic and\n that this has not yet happened at the time of the widget's `__init__`\n method.\n '''\n\n pos = ReferenceListProperty(x, y)\n '''Position of the widget.\n\n :attr:`pos` is a :class:`~kivy.properties.ReferenceListProperty` of\n (:attr:`x`, :attr:`y`) properties.\n '''\n\n size = ReferenceListProperty(width, height)\n '''Size of the widget.\n\n :attr:`size` is a :class:`~kivy.properties.ReferenceListProperty` of\n (:attr:`width`, :attr:`height`) properties.\n '''\n\n def get_right(self):\n return self.x + self.width\n\n def set_right(self, value):\n self.x = value - self.width\n\n right = AliasProperty(get_right, set_right, bind=('x', 'width'))\n '''Right position of the widget.\n\n :attr:`right` is an :class:`~kivy.properties.AliasProperty` of\n (:attr:`x` + :attr:`width`).\n '''\n\n def get_top(self):\n return self.y + self.height\n\n def set_top(self, value):\n self.y = value - self.height\n\n top = AliasProperty(get_top, set_top, bind=('y', 'height'))\n '''Top position of the widget.\n\n :attr:`top` is an :class:`~kivy.properties.AliasProperty` of\n (:attr:`y` + :attr:`height`).\n '''\n\n def get_center_x(self):\n return self.x + self.width / 2.\n\n def set_center_x(self, value):\n self.x = value - self.width / 2.\n\n center_x = AliasProperty(get_center_x, set_center_x, bind=('x', 'width'))\n '''X center position of the widget.\n\n :attr:`center_x` is an :class:`~kivy.properties.AliasProperty` of\n (:attr:`x` + :attr:`width` / 2.).\n '''\n\n def get_center_y(self):\n return self.y + self.height / 2.\n\n def set_center_y(self, value):\n self.y = value - self.height / 2.\n\n center_y = AliasProperty(get_center_y, set_center_y, bind=('y', 'height'))\n '''Y center position of the widget.\n\n :attr:`center_y` is an :class:`~kivy.properties.AliasProperty` of\n (:attr:`y` + :attr:`height` / 2.).\n '''\n\n center = ReferenceListProperty(center_x, center_y)\n '''Center position of the widget.\n\n :attr:`center` is a :class:`~kivy.properties.ReferenceListProperty` of\n (:attr:`center_x`, :attr:`center_y`) properties.\n '''\n\n cls = ListProperty([])\n '''Class of the widget, used for styling.\n '''\n\n id = StringProperty(None, allownone=True)\n '''Unique identifier of the widget in the tree.\n\n :attr:`id` is a :class:`~kivy.properties.StringProperty` and defaults to\n None.\n\n .. warning::\n\n If the :attr:`id` is already used in the tree, an exception will\n be raised.\n '''\n\n children = ListProperty([])\n '''List of children of this widget.\n\n :attr:`children` is a :class:`~kivy.properties.ListProperty` and\n defaults to an empty list.\n\n Use :meth:`add_widget` and :meth:`remove_widget` for manipulating the\n children list. Don't manipulate the children list directly unless you know\n what you are doing.\n '''\n\n parent = ObjectProperty(None, allownone=True, rebind=True)\n '''Parent of this widget. The parent of a widget is set when the widget\n is added to another widget and unset when the widget is removed from its\n parent.\n\n :attr:`parent` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to None.\n '''\n\n size_hint_x = NumericProperty(1, allownone=True)\n '''X size hint. 
Represents how much space the widget should use in the\n direction of the X axis relative to its parent's width.\n Only the :class:`~kivy.uix.layout.Layout` and\n :class:`~kivy.core.window.Window` classes make use of the hint.\n\n The size_hint is used by layouts for two purposes:\n\n - When the layout considers widgets on their own rather than in\n relation to its other children, the size_hint_x is a direct proportion\n of the parent width, normally between 0.0 and 1.0. For instance, a\n widget with ``size_hint_x=0.5`` in\n a vertical BoxLayout will take up half the BoxLayout's width, or\n a widget in a FloatLayout with ``size_hint_x=0.2`` will take up 20%\n of the FloatLayout width. If the size_hint is greater than 1, the\n widget will be wider than the parent.\n - When multiple widgets can share a row of a layout, such as in a\n horizontal BoxLayout, their widths will be their size_hint_x as a\n fraction of the sum of widget size_hints. For instance, if the\n size_hint_xs are (0.5, 1.0, 0.5), the first widget will have a\n width of 25% of the parent width.\n\n :attr:`size_hint_x` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 1.\n '''\n\n size_hint_y = NumericProperty(1, allownone=True)\n '''Y size hint.\n\n :attr:`size_hint_y` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 1.\n\n See :attr:`size_hint_x` for more information, but with widths and heights\n swapped.\n '''\n\n size_hint = ReferenceListProperty(size_hint_x, size_hint_y)\n '''Size hint.\n\n :attr:`size_hint` is a :class:`~kivy.properties.ReferenceListProperty` of\n (:attr:`size_hint_x`, :attr:`size_hint_y`) properties.\n\n See :attr:`size_hint_x` for more information.\n '''\n\n pos_hint = ObjectProperty({})\n '''Position hint. This property allows you to set the position of\n the widget inside its parent layout, in percent (similar to\n size_hint).\n\n For example, if you want to set the top of the widget to be at 90%\n height of its parent layout, you can write::\n\n widget = Widget(pos_hint={'top': 0.9})\n\n The keys 'x', 'right' and 'center_x' will use the parent width.\n The keys 'y', 'top' and 'center_y' will use the parent height.\n\n See :doc:`api-kivy.uix.floatlayout` for further reference.\n\n .. note::\n :attr:`pos_hint` is not used by all layouts. Check the documentation\n of the layout in question to see if it supports pos_hint.\n\n :attr:`pos_hint` is an :class:`~kivy.properties.ObjectProperty`\n containing a dict.\n '''\n\n ids = DictProperty({})\n '''This is a dictionary of ids defined in your kv language. This will only\n be populated if you use ids in your kv language code.\n\n .. versionadded:: 1.7.0\n\n :attr:`ids` is a :class:`~kivy.properties.DictProperty` and defaults to an\n empty dict {}.\n\n The :attr:`ids` are populated for each root level widget definition. For\n example:\n\n .. code-block:: kv\n\n # in kv\n <MyWidget@Widget>:\n id: my_widget\n Label:\n id: label_widget\n Widget:\n id: inner_widget\n Label:\n id: inner_label\n TextInput:\n id: text_input\n OtherWidget:\n id: other_widget\n\n\n <OtherWidget@Widget>\n id: other_widget\n Label:\n id: other_label\n TextInput:\n id: other_textinput\n\n Then, in python:\n\n .. 
code-block:: python\n\n >>> widget = MyWidget()\n >>> print(widget.ids)\n {'other_widget': <weakproxy at 041CFED0 to OtherWidget at 041BEC38>,\n 'inner_widget': <weakproxy at 04137EA0 to Widget at 04138228>,\n 'inner_label': <weakproxy at 04143540 to Label at 04138260>,\n 'label_widget': <weakproxy at 04137B70 to Label at 040F97A0>,\n 'text_input': <weakproxy at 041BB5D0 to TextInput at 041BEC00>}\n >>> print(widget.ids['other_widget'].ids)\n {'other_textinput': <weakproxy at 041DBB40 to TextInput at 041BEF48>,\n 'other_label': <weakproxy at 041DB570 to Label at 041BEEA0>}\n >>> print(widget.ids['label_widget'].ids)\n {}\n '''\n\n opacity = NumericProperty(1.0)\n '''Opacity of the widget and all its children.\n\n .. versionadded:: 1.4.1\n\n The opacity attribute controls the opacity of the widget and its children.\n Be careful, it's a cumulative attribute: the value is multiplied by the\n current global opacity and the result is applied to the current context\n color.\n\n For example, if the parent has an opacity of 0.5 and a child has an\n opacity of 0.2, the real opacity of the child will be 0.5 * 0.2 = 0.1.\n\n Then, the opacity is applied by the shader as:\n\n .. code-block:: python\n\n frag_color = color * vec4(1.0, 1.0, 1.0, opacity);\n\n :attr:`opacity` is a :class:`~kivy.properties.NumericProperty` and defaults\n to 1.0.\n '''\n\n def on_opacity(self, instance, value):\n canvas = self.canvas\n if canvas is not None:\n canvas.opacity = value\n\n canvas = None\n '''Canvas of the widget.\n\n The canvas is a graphics object that contains all the drawing instructions\n for the graphical representation of the widget.\n\n There are no general properties for the Widget class, such as background\n color, to keep the design simple and lean. Some derived classes, such as\n Button, do add such convenience properties but generally the developer is\n responsible for implementing the graphics representation for a custom\n widget from the ground up. See the derived widget classes for patterns to\n follow and extend.\n\n See :class:`~kivy.graphics.Canvas` for more information about the usage.\n '''\n\n disabled = BooleanProperty(False)\n '''Indicates whether this widget can interact with input or not.\n\n .. note::\n\n 1. Child Widgets, when added to a disabled widget, will be disabled\n automatically.\n 2. Disabling/enabling a parent disables/enables all\n of its children.\n\n .. versionadded:: 1.8.0\n\n :attr:`disabled` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to False.\n '''\n", "path": "kivy/uix/widget.py" } ]
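The `size_hint` and `pos_hint` docstrings in the widget source above are easier to follow with a concrete, hedged illustration. The small app below is not taken from Kivy's codebase or from the report; the widget choice, fractions, and hint keys are illustrative only.

```python
# Illustrative sketch: a FloatLayout child sized and positioned purely via
# hints, as described by the size_hint_x and pos_hint docstrings above.
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.floatlayout import FloatLayout


class HintDemoApp(App):
    def build(self):
        root = FloatLayout()
        root.add_widget(Button(
            text='20% x 10%, pinned top-right',
            # Fractions of the parent FloatLayout's width and height.
            size_hint=(0.2, 0.1),
            # 'right' and 'top' are resolved against the parent's size.
            pos_hint={'right': 1, 'top': 1}))
        return root


if __name__ == '__main__':
    HintDemoApp().run()
```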
diff --git a/kivy/uix/widget.py b/kivy/uix/widget.py
index 5b53db1828..31dbb4c38e 100644
--- a/kivy/uix/widget.py
+++ b/kivy/uix/widget.py
@@ -546,7 +546,7 @@ def add_widget(self, widget, index=0, canvas=None):
             children = self.children
             if index >= len(children):
                 index = len(children)
-                next_index = 0
+                next_index = canvas.indexof(children[-1].canvas)
             else:
                 next_child = children[index]
                 next_index = canvas.indexof(next_child.canvas)
Remove and re-add of the highest (first-added) widget in a layout leaves the widget invisible

I have noticed that adding widgets to a layout manager does not seem to work correctly. I have tested with BoxLayout, GridLayout, and FloatLayout, and with both 3 and 4 widgets. I am using Kivy 1.9.0 with Python 2.7 on Fedora 22.

I start by adding widgets to a layout; it looks fine. Then I remove a widget: the "first" one, which is list index 3 in the list of children in my FloatLayout example (index 2 in the GridLayout example). See the attachments.

[app2-float.txt](https://github.com/kivy/kivy/files/388380/app2-float.txt)
[app2-grid.txt](https://github.com/kivy/kivy/files/388383/app2-grid.txt)

When I re-add the widget in a BoxLayout or GridLayout, its space is still taken up, but the widget is not displayed. Testing in my "real" app (code not given here) shows that the widget is indeed in place (I can perform actions on it); it is just not drawn. This problem only seems to happen with the leftmost position.
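For anyone trying to reproduce the report without the attached apps, a minimal sketch along the following lines exercises the same remove/re-add path. The widget count, button texts, and use of Button are my own illustrative choices, not taken from the reporter's code. Note that the diff above touches exactly this path: it replaces the hard-coded canvas index of 0 with the index of the last child's canvas.

```python
# Minimal repro sketch: add three children to a BoxLayout, remove the
# first-added one (children[-1], since Kivy prepends new children), then
# add it back. On affected versions the re-added child reportedly keeps
# its slot but is never drawn.
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button


class ReAddApp(App):
    def build(self):
        root = BoxLayout()
        for i in range(3):
            root.add_widget(Button(text='button %d' % i))
        first_added = root.children[-1]
        root.remove_widget(first_added)
        root.add_widget(first_added)
        return root


if __name__ == '__main__':
    ReAddApp().run()
```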
oppia__oppia-14800
[ { "content": "# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Controllers for the creator dashboard, notifications, and creating new\nactivities.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\n\nfrom core import feconf\nfrom core import utils\nfrom core.constants import constants\nfrom core.controllers import acl_decorators\nfrom core.controllers import base\nfrom core.domain import collection_domain\nfrom core.domain import collection_services\nfrom core.domain import exp_domain\nfrom core.domain import exp_fetchers\nfrom core.domain import exp_services\nfrom core.domain import feedback_services\nfrom core.domain import role_services\nfrom core.domain import subscription_services\nfrom core.domain import suggestion_services\nfrom core.domain import summary_services\nfrom core.domain import topic_fetchers\nfrom core.domain import user_services\n\nEXPLORATION_ID_KEY = 'exploration_id'\nCOLLECTION_ID_KEY = 'collection_id'\n\n\nclass OldContributorDashboardRedirectPage(base.BaseHandler):\n \"\"\"Redirects the old contributor dashboard URL to the new one.\"\"\"\n\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {'GET': {}}\n\n @acl_decorators.open_access\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n self.redirect('/contributor-dashboard', permanent=True)\n\n\nclass OldCreatorDashboardRedirectPage(base.BaseHandler):\n \"\"\"Redirects the old creator dashboard URL to the new one.\"\"\"\n\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {'GET': {}}\n\n @acl_decorators.open_access\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n self.redirect(feconf.CREATOR_DASHBOARD_URL, permanent=True)\n\n\nclass CreatorDashboardPage(base.BaseHandler):\n \"\"\"Page showing the user's creator dashboard.\"\"\"\n\n ADDITIONAL_DEPENDENCY_IDS = ['codemirror']\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {'GET': {}}\n\n @acl_decorators.can_access_creator_dashboard\n def get(self):\n\n self.render_template('creator-dashboard-page.mainpage.html')\n\n\nclass CreatorDashboardHandler(base.BaseHandler):\n \"\"\"Provides data for the user's creator dashboard page.\"\"\"\n\n GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {\n 'GET': {},\n 'POST': {\n 'display_preference': {\n 'schema': {\n 'type': 'basestring',\n 'choices': (\n constants.ALLOWED_CREATOR_DASHBOARD_DISPLAY_PREFS\n .values()\n )\n }\n }\n }\n }\n\n @acl_decorators.can_access_creator_dashboard\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n\n def _round_average_ratings(rating):\n \"\"\"Returns the rounded average rating to display on the creator\n dashboard.\n\n Args:\n rating: float. The rating of the lesson.\n\n Returns:\n float. 
The rounded average value of rating.\n \"\"\"\n return round(\n rating, feconf.AVERAGE_RATINGS_DASHBOARD_PRECISION)\n\n subscribed_exploration_summaries = (\n exp_fetchers.get_exploration_summaries_subscribed_to(\n self.user_id))\n subscribed_collection_summaries = (\n collection_services.get_collection_summaries_subscribed_to(\n self.user_id))\n\n exploration_ids_subscribed_to = [\n summary.id for summary in subscribed_exploration_summaries]\n\n exp_summary_dicts = summary_services.get_displayable_exp_summary_dicts(\n subscribed_exploration_summaries)\n collection_summary_dicts = []\n\n feedback_thread_analytics = (\n feedback_services.get_thread_analytics_multi(\n exploration_ids_subscribed_to))\n\n # TODO(bhenning): Update this to use unresolved answers from\n # stats_services once the training interface is enabled and it's cheaper\n # to retrieve top answers from stats_services.\n for ind, exploration in enumerate(exp_summary_dicts):\n exploration.update(feedback_thread_analytics[ind].to_dict())\n\n exp_summary_dicts = sorted(\n exp_summary_dicts,\n key=lambda x: (x['num_open_threads'], x['last_updated_msec']),\n reverse=True)\n\n topic_summaries = topic_fetchers.get_all_topic_summaries()\n topic_summary_dicts = [\n summary.to_dict() for summary in topic_summaries]\n\n if role_services.ACTION_CREATE_COLLECTION in self.user.actions:\n for collection_summary in subscribed_collection_summaries:\n # TODO(sll): Reuse _get_displayable_collection_summary_dicts()\n # in summary_services, instead of replicating it like this.\n collection_summary_dicts.append({\n 'id': collection_summary.id,\n 'title': collection_summary.title,\n 'category': collection_summary.category,\n 'objective': collection_summary.objective,\n 'language_code': collection_summary.language_code,\n 'last_updated_msec': utils.get_time_in_millisecs(\n collection_summary.collection_model_last_updated),\n 'created_on': utils.get_time_in_millisecs(\n collection_summary.collection_model_created_on),\n 'status': collection_summary.status,\n 'node_count': collection_summary.node_count,\n 'community_owned': collection_summary.community_owned,\n 'thumbnail_icon_url': (\n utils.get_thumbnail_icon_url_for_category(\n collection_summary.category)),\n 'thumbnail_bg_color': utils.get_hex_color_for_category(\n collection_summary.category),\n })\n\n dashboard_stats = user_services.get_dashboard_stats(self.user_id)\n dashboard_stats.update({\n 'total_open_feedback': feedback_services.get_total_open_threads(\n feedback_thread_analytics)\n })\n if dashboard_stats and dashboard_stats.get('average_ratings'):\n dashboard_stats['average_ratings'] = (\n _round_average_ratings(dashboard_stats['average_ratings']))\n\n last_week_stats = (\n user_services.get_last_week_dashboard_stats(self.user_id))\n\n if last_week_stats and len(list(last_week_stats.keys())) != 1:\n logging.exception(\n '\\'last_week_stats\\' should contain only one key-value pair'\n ' denoting last week dashboard stats of the user keyed by a'\n ' datetime string.')\n last_week_stats = None\n\n if last_week_stats:\n # 'last_week_stats' is a dict with only one key-value pair denoting\n # last week dashboard stats of the user keyed by a datetime string.\n datetime_of_stats = list(last_week_stats.keys())[0]\n last_week_stats_average_ratings = (\n list(last_week_stats.values())[0].get('average_ratings'))\n if last_week_stats_average_ratings:\n last_week_stats[datetime_of_stats]['average_ratings'] = (\n _round_average_ratings(last_week_stats_average_ratings))\n\n subscriber_ids = 
subscription_services.get_all_subscribers_of_creator(\n self.user_id)\n subscribers_settings = user_services.get_users_settings(subscriber_ids)\n subscribers_list = []\n for index, subscriber_settings in enumerate(subscribers_settings):\n subscriber_summary = {\n 'subscriber_picture_data_url': (\n subscriber_settings.profile_picture_data_url),\n 'subscriber_username': subscriber_settings.username,\n 'subscriber_impact': (\n user_services.get_user_impact_score(subscriber_ids[index]))\n }\n\n subscribers_list.append(subscriber_summary)\n\n user_settings = user_services.get_user_settings(\n self.user_id, strict=False)\n creator_dashboard_display_pref = (\n user_settings.creator_dashboard_display_pref)\n\n suggestions_created_by_user = suggestion_services.query_suggestions(\n [('author_id', self.user_id),\n (\n 'suggestion_type',\n feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT)])\n suggestions_which_can_be_reviewed = (\n suggestion_services\n .get_all_suggestions_that_can_be_reviewed_by_user(self.user_id))\n\n for s in suggestions_created_by_user:\n s.populate_old_value_of_change()\n\n for s in suggestions_which_can_be_reviewed:\n s.populate_old_value_of_change()\n\n suggestion_dicts_created_by_user = (\n [s.to_dict() for s in suggestions_created_by_user])\n suggestion_dicts_which_can_be_reviewed = (\n [s.to_dict() for s in suggestions_which_can_be_reviewed])\n\n ids_of_suggestions_created_by_user = (\n [s['suggestion_id'] for s in suggestion_dicts_created_by_user])\n ids_of_suggestions_which_can_be_reviewed = (\n [s['suggestion_id']\n for s in suggestion_dicts_which_can_be_reviewed])\n\n threads_linked_to_suggestions_by_user = (\n [t.to_dict() for t in feedback_services.get_multiple_threads(\n ids_of_suggestions_created_by_user)])\n threads_linked_to_suggestions_which_can_be_reviewed = (\n [t.to_dict() for t in feedback_services.get_multiple_threads(\n ids_of_suggestions_which_can_be_reviewed)])\n\n self.values.update({\n 'explorations_list': exp_summary_dicts,\n 'collections_list': collection_summary_dicts,\n 'dashboard_stats': dashboard_stats,\n 'last_week_stats': last_week_stats,\n 'subscribers_list': subscribers_list,\n 'display_preference': creator_dashboard_display_pref,\n 'threads_for_created_suggestions_list': (\n threads_linked_to_suggestions_by_user),\n 'threads_for_suggestions_to_review_list': (\n threads_linked_to_suggestions_which_can_be_reviewed),\n 'created_suggestions_list': suggestion_dicts_created_by_user,\n 'suggestions_to_review_list': (\n suggestion_dicts_which_can_be_reviewed),\n 'topic_summary_dicts': topic_summary_dicts\n })\n\n self.render_json(self.values)\n\n @acl_decorators.can_access_creator_dashboard\n def post(self):\n creator_dashboard_display_pref = (\n self.normalized_payload.get('display_preference'))\n user_services.update_user_creator_dashboard_display(\n self.user_id, creator_dashboard_display_pref)\n self.render_json({})\n\n\nclass NewExplorationHandler(base.BaseHandler):\n \"\"\"Creates a new exploration.\"\"\"\n\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {\n 'POST': {\n 'title': {\n 'schema': {\n 'type': 'basestring'\n },\n 'default_value': feconf.DEFAULT_EXPLORATION_TITLE\n }\n }\n }\n\n @acl_decorators.can_create_exploration\n def post(self):\n \"\"\"Handles POST requests.\"\"\"\n title = self.normalized_payload.get('title')\n\n new_exploration_id = exp_fetchers.get_new_exploration_id()\n exploration = exp_domain.Exploration.create_default_exploration(\n new_exploration_id, title=title)\n exp_services.save_new_exploration(self.user_id, 
exploration)\n\n self.render_json({\n EXPLORATION_ID_KEY: new_exploration_id\n })\n\n\nclass NewCollectionHandler(base.BaseHandler):\n \"\"\"Creates a new collection.\"\"\"\n\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {\n 'POST': {}\n }\n\n @acl_decorators.can_create_collection\n def post(self):\n \"\"\"Handles POST requests.\"\"\"\n new_collection_id = collection_services.get_new_collection_id()\n collection = collection_domain.Collection.create_default_collection(\n new_collection_id)\n collection_services.save_new_collection(self.user_id, collection)\n\n self.render_json({\n COLLECTION_ID_KEY: new_collection_id\n })\n\n\nclass UploadExplorationHandler(base.BaseHandler):\n \"\"\"Uploads a new exploration.\"\"\"\n\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {\n 'POST': {\n 'yaml_file': {\n 'schema': {\n 'type': 'basestring'\n },\n 'default_value': None\n }\n }\n }\n\n @acl_decorators.can_upload_exploration\n def post(self):\n \"\"\"Handles POST requests.\"\"\"\n yaml_content = self.normalized_payload.get('yaml_file')\n\n new_exploration_id = exp_fetchers.get_new_exploration_id()\n if constants.ALLOW_YAML_FILE_UPLOAD:\n exp_services.save_new_exploration_from_yaml_and_assets(\n self.user_id, yaml_content, new_exploration_id, [],\n strip_voiceovers=True)\n self.render_json({\n EXPLORATION_ID_KEY: new_exploration_id\n })\n else:\n raise self.InvalidInputException(\n 'This server does not allow file uploads.')\n", "path": "core/controllers/creator_dashboard.py" } ]
[ { "content": "# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Controllers for the creator dashboard, notifications, and creating new\nactivities.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\n\nfrom core import feconf\nfrom core import utils\nfrom core.constants import constants\nfrom core.controllers import acl_decorators\nfrom core.controllers import base\nfrom core.domain import collection_domain\nfrom core.domain import collection_services\nfrom core.domain import exp_domain\nfrom core.domain import exp_fetchers\nfrom core.domain import exp_services\nfrom core.domain import feedback_services\nfrom core.domain import role_services\nfrom core.domain import subscription_services\nfrom core.domain import suggestion_services\nfrom core.domain import summary_services\nfrom core.domain import topic_fetchers\nfrom core.domain import user_services\n\nEXPLORATION_ID_KEY = 'exploration_id'\nCOLLECTION_ID_KEY = 'collection_id'\n\n\nclass OldContributorDashboardRedirectPage(base.BaseHandler):\n \"\"\"Redirects the old contributor dashboard URL to the new one.\"\"\"\n\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {'GET': {}}\n\n @acl_decorators.open_access\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n self.redirect('/contributor-dashboard', permanent=True)\n\n\nclass OldCreatorDashboardRedirectPage(base.BaseHandler):\n \"\"\"Redirects the old creator dashboard URL to the new one.\"\"\"\n\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {'GET': {}}\n\n @acl_decorators.open_access\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n self.redirect(feconf.CREATOR_DASHBOARD_URL, permanent=True)\n\n\nclass CreatorDashboardPage(base.BaseHandler):\n \"\"\"Page showing the user's creator dashboard.\"\"\"\n\n ADDITIONAL_DEPENDENCY_IDS = ['codemirror']\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {'GET': {}}\n\n @acl_decorators.can_access_creator_dashboard\n def get(self):\n\n self.render_template('creator-dashboard-page.mainpage.html')\n\n\nclass CreatorDashboardHandler(base.BaseHandler):\n \"\"\"Provides data for the user's creator dashboard page.\"\"\"\n\n GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {\n 'GET': {},\n 'POST': {\n 'display_preference': {\n 'schema': {\n 'type': 'basestring',\n 'choices': (\n constants.ALLOWED_CREATOR_DASHBOARD_DISPLAY_PREFS\n .values()\n )\n }\n }\n }\n }\n\n @acl_decorators.can_access_creator_dashboard\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n\n def _round_average_ratings(rating):\n \"\"\"Returns the rounded average rating to display on the creator\n dashboard.\n\n Args:\n rating: float. The rating of the lesson.\n\n Returns:\n float. 
The rounded average value of rating.\n \"\"\"\n return round(\n rating, feconf.AVERAGE_RATINGS_DASHBOARD_PRECISION)\n\n subscribed_exploration_summaries = (\n exp_fetchers.get_exploration_summaries_subscribed_to(\n self.user_id))\n subscribed_collection_summaries = (\n collection_services.get_collection_summaries_subscribed_to(\n self.user_id))\n\n exploration_ids_subscribed_to = [\n summary.id for summary in subscribed_exploration_summaries]\n\n exp_summary_dicts = summary_services.get_displayable_exp_summary_dicts(\n subscribed_exploration_summaries)\n collection_summary_dicts = []\n\n feedback_thread_analytics = (\n feedback_services.get_thread_analytics_multi(\n exploration_ids_subscribed_to))\n\n # TODO(bhenning): Update this to use unresolved answers from\n # stats_services once the training interface is enabled and it's cheaper\n # to retrieve top answers from stats_services.\n for ind, exploration in enumerate(exp_summary_dicts):\n exploration.update(feedback_thread_analytics[ind].to_dict())\n\n exp_summary_dicts = sorted(\n exp_summary_dicts,\n key=lambda x: (x['num_open_threads'], x['last_updated_msec']),\n reverse=True)\n\n topic_summaries = topic_fetchers.get_all_topic_summaries()\n topic_summary_dicts = [\n summary.to_dict() for summary in topic_summaries]\n\n if role_services.ACTION_CREATE_COLLECTION in self.user.actions:\n for collection_summary in subscribed_collection_summaries:\n # TODO(sll): Reuse _get_displayable_collection_summary_dicts()\n # in summary_services, instead of replicating it like this.\n collection_summary_dicts.append({\n 'id': collection_summary.id,\n 'title': collection_summary.title,\n 'category': collection_summary.category,\n 'objective': collection_summary.objective,\n 'language_code': collection_summary.language_code,\n 'last_updated_msec': utils.get_time_in_millisecs(\n collection_summary.collection_model_last_updated),\n 'created_on': utils.get_time_in_millisecs(\n collection_summary.collection_model_created_on),\n 'status': collection_summary.status,\n 'node_count': collection_summary.node_count,\n 'community_owned': collection_summary.community_owned,\n 'thumbnail_icon_url': (\n utils.get_thumbnail_icon_url_for_category(\n collection_summary.category)),\n 'thumbnail_bg_color': utils.get_hex_color_for_category(\n collection_summary.category),\n })\n\n dashboard_stats = user_services.get_dashboard_stats(self.user_id)\n dashboard_stats.update({\n 'total_open_feedback': feedback_services.get_total_open_threads(\n feedback_thread_analytics)\n })\n if dashboard_stats and dashboard_stats.get('average_ratings'):\n dashboard_stats['average_ratings'] = (\n _round_average_ratings(dashboard_stats['average_ratings']))\n\n last_week_stats = (\n user_services.get_last_week_dashboard_stats(self.user_id))\n\n if last_week_stats and len(list(last_week_stats.keys())) != 1:\n logging.exception(\n '\\'last_week_stats\\' should contain only one key-value pair'\n ' denoting last week dashboard stats of the user keyed by a'\n ' datetime string.')\n last_week_stats = None\n\n if last_week_stats:\n # 'last_week_stats' is a dict with only one key-value pair denoting\n # last week dashboard stats of the user keyed by a datetime string.\n datetime_of_stats = list(last_week_stats.keys())[0]\n last_week_stats_average_ratings = (\n list(last_week_stats.values())[0].get('average_ratings'))\n if last_week_stats_average_ratings:\n last_week_stats[datetime_of_stats]['average_ratings'] = (\n _round_average_ratings(last_week_stats_average_ratings))\n\n subscriber_ids = 
subscription_services.get_all_subscribers_of_creator(\n self.user_id)\n subscribers_settings = user_services.get_users_settings(subscriber_ids)\n subscribers_list = []\n for index, subscriber_settings in enumerate(subscribers_settings):\n subscriber_summary = {\n 'subscriber_picture_data_url': (\n subscriber_settings.profile_picture_data_url),\n 'subscriber_username': subscriber_settings.username,\n 'subscriber_impact': (\n user_services.get_user_impact_score(subscriber_ids[index]))\n }\n\n subscribers_list.append(subscriber_summary)\n\n user_settings = user_services.get_user_settings(\n self.user_id, strict=False)\n creator_dashboard_display_pref = (\n user_settings.creator_dashboard_display_pref)\n\n suggestions_created_by_user = suggestion_services.query_suggestions(\n [('author_id', self.user_id),\n (\n 'suggestion_type',\n feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT)])\n suggestions_which_can_be_reviewed = (\n suggestion_services\n .get_all_suggestions_that_can_be_reviewed_by_user(self.user_id))\n\n for s in suggestions_created_by_user:\n s.populate_old_value_of_change()\n\n for s in suggestions_which_can_be_reviewed:\n s.populate_old_value_of_change()\n\n suggestion_dicts_created_by_user = (\n [s.to_dict() for s in suggestions_created_by_user])\n suggestion_dicts_which_can_be_reviewed = (\n [s.to_dict() for s in suggestions_which_can_be_reviewed])\n\n ids_of_suggestions_created_by_user = (\n [s['suggestion_id'] for s in suggestion_dicts_created_by_user])\n ids_of_suggestions_which_can_be_reviewed = (\n [s['suggestion_id']\n for s in suggestion_dicts_which_can_be_reviewed])\n\n threads_linked_to_suggestions_by_user = (\n [t.to_dict() for t in feedback_services.get_multiple_threads(\n ids_of_suggestions_created_by_user)])\n threads_linked_to_suggestions_which_can_be_reviewed = (\n [t.to_dict() for t in feedback_services.get_multiple_threads(\n ids_of_suggestions_which_can_be_reviewed)])\n\n self.values.update({\n 'explorations_list': exp_summary_dicts,\n 'collections_list': collection_summary_dicts,\n 'dashboard_stats': dashboard_stats,\n 'last_week_stats': last_week_stats,\n 'subscribers_list': subscribers_list,\n 'display_preference': creator_dashboard_display_pref,\n 'threads_for_created_suggestions_list': (\n threads_linked_to_suggestions_by_user),\n 'threads_for_suggestions_to_review_list': (\n threads_linked_to_suggestions_which_can_be_reviewed),\n 'created_suggestions_list': suggestion_dicts_created_by_user,\n 'suggestions_to_review_list': (\n suggestion_dicts_which_can_be_reviewed),\n 'topic_summary_dicts': topic_summary_dicts\n })\n\n self.render_json(self.values)\n\n @acl_decorators.can_access_creator_dashboard\n def post(self):\n creator_dashboard_display_pref = (\n self.normalized_payload.get('display_preference'))\n user_services.update_user_creator_dashboard_display(\n self.user_id, creator_dashboard_display_pref)\n self.render_json({})\n\n\nclass NewExplorationHandler(base.BaseHandler):\n \"\"\"Creates a new exploration.\"\"\"\n\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {\n 'POST': {\n 'title': {\n 'schema': {\n 'type': 'basestring'\n },\n 'default_value': feconf.DEFAULT_EXPLORATION_TITLE\n }\n }\n }\n\n @acl_decorators.can_create_exploration\n def post(self):\n \"\"\"Handles POST requests.\"\"\"\n title = self.normalized_payload.get('title')\n\n new_exploration_id = exp_fetchers.get_new_exploration_id()\n exploration = exp_domain.Exploration.create_default_exploration(\n new_exploration_id, title=title)\n exp_services.save_new_exploration(self.user_id, 
exploration)\n\n self.render_json({\n EXPLORATION_ID_KEY: new_exploration_id\n })\n\n\nclass NewCollectionHandler(base.BaseHandler):\n \"\"\"Creates a new collection.\"\"\"\n\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {\n 'POST': {}\n }\n\n @acl_decorators.can_create_collection\n def post(self):\n \"\"\"Handles POST requests.\"\"\"\n new_collection_id = collection_services.get_new_collection_id()\n collection = collection_domain.Collection.create_default_collection(\n new_collection_id)\n collection_services.save_new_collection(self.user_id, collection)\n\n self.render_json({\n COLLECTION_ID_KEY: new_collection_id\n })\n\n\nclass UploadExplorationHandler(base.BaseHandler):\n \"\"\"Uploads a new exploration.\"\"\"\n\n URL_PATH_ARGS_SCHEMAS = {}\n HANDLER_ARGS_SCHEMAS = {\n 'POST': {\n 'yaml_file': {\n 'schema': {\n 'type': 'basestring'\n },\n 'default_value': None\n }\n }\n }\n\n @acl_decorators.can_upload_exploration\n def post(self):\n \"\"\"Handles POST requests.\"\"\"\n yaml_content = self.normalized_request.get('yaml_file')\n\n new_exploration_id = exp_fetchers.get_new_exploration_id()\n if constants.ALLOW_YAML_FILE_UPLOAD:\n exp_services.save_new_exploration_from_yaml_and_assets(\n self.user_id, yaml_content, new_exploration_id, [],\n strip_voiceovers=True)\n self.render_json({\n EXPLORATION_ID_KEY: new_exploration_id\n })\n else:\n raise self.InvalidInputException(\n 'This server does not allow file uploads.')\n", "path": "core/controllers/creator_dashboard.py" } ]
diff --git a/core/controllers/creator_dashboard.py b/core/controllers/creator_dashboard.py
index fde5748fb4fdd..c2a7440daa2b2 100644
--- a/core/controllers/creator_dashboard.py
+++ b/core/controllers/creator_dashboard.py
@@ -352,7 +352,7 @@ class UploadExplorationHandler(base.BaseHandler):
     @acl_decorators.can_upload_exploration
     def post(self):
         """Handles POST requests."""
-        yaml_content = self.normalized_payload.get('yaml_file')
+        yaml_content = self.normalized_request.get('yaml_file')
 
         new_exploration_id = exp_fetchers.get_new_exploration_id()
         if constants.ALLOW_YAML_FILE_UPLOAD:
diff --git a/core/controllers/creator_dashboard_test.py b/core/controllers/creator_dashboard_test.py
index a6ada26cbede2..386481819b96b 100644
--- a/core/controllers/creator_dashboard_test.py
+++ b/core/controllers/creator_dashboard_test.py
@@ -17,12 +17,13 @@
 from __future__ import annotations
 
 import logging
+import os
 
 from core import feconf
+from core import python_utils
 from core.constants import constants
 from core.controllers import creator_dashboard
 from core.domain import collection_services
-from core.domain import exp_fetchers
 from core.domain import exp_services
 from core.domain import feedback_domain
 from core.domain import feedback_services
@@ -500,6 +501,12 @@ def test_creator_dashboard_page(self):
 
 
 class CreationButtonsTests(test_utils.GenericTestBase):
+    with python_utils.open_file(
+        os.path.join(
+            feconf.SAMPLE_EXPLORATIONS_DIR, 'welcome', 'welcome.yaml'),
+        'rb', encoding=None
+    ) as f:
+        raw_yaml = f.read()
 
     def setUp(self):
         super(CreationButtonsTests, self).setUp()
@@ -542,14 +549,14 @@ def test_can_upload_exploration(self):
         self.assertEqual(explorations_list, [])
         exp_a_id = self.post_json(
             feconf.UPLOAD_EXPLORATION_URL,
-            {'yaml_file': self.SAMPLE_YAML_CONTENT},
-            csrf_token=csrf_token
+            {},
+            csrf_token=csrf_token,
+            upload_files=((
+                'yaml_file', 'unused_filename', self.raw_yaml),)
         )[creator_dashboard.EXPLORATION_ID_KEY]
         explorations_list = self.get_json(
             feconf.CREATOR_DASHBOARD_DATA_URL)['explorations_list']
-        exploration = exp_fetchers.get_exploration_by_id(exp_a_id)
         self.assertEqual(explorations_list[0]['id'], exp_a_id)
-        self.assertEqual(exploration.to_yaml(), self.SAMPLE_YAML_CONTENT)
         self.logout()
 
     def test_can_not_upload_exploration_when_server_does_not_allow_file_upload(
@@ -559,9 +566,10 @@ def test_can_not_upload_exploration_when_server_does_not_allow_file_upload(
             self):
         csrf_token = self.get_new_csrf_token()
         self.post_json(
             feconf.UPLOAD_EXPLORATION_URL,
-            {'yaml_file': self.SAMPLE_YAML_CONTENT},
+            {},
             csrf_token=csrf_token,
+            upload_files=((
+                'yaml_file', 'unused_filename', self.raw_yaml),),
             expected_status_int=400
         )
-        self.logout()
diff --git a/core/templates/components/entity-creation-services/exploration-creation.service.spec.ts b/core/templates/components/entity-creation-services/exploration-creation.service.spec.ts
index 0a9fdb033cf18..f921c47be8208 100644
--- a/core/templates/components/entity-creation-services/exploration-creation.service.spec.ts
+++ b/core/templates/components/entity-creation-services/exploration-creation.service.spec.ts
@@ -168,7 +168,7 @@ describe('ExplorationCreationService', () => {
     spyOn($, 'ajax').and.callFake((options: Promise) => {
       let d = $.Deferred();
       d.resolve(
-        options.dataFilter(')]}\',\n{"explorationId": "expId"}')
+        options.dataFilter(')]}\',\n{"exploration_id": "expId"}')
      );
      return d.promise();
    });
diff --git a/core/templates/components/entity-creation-services/exploration-creation.service.ts b/core/templates/components/entity-creation-services/exploration-creation.service.ts
index 54cc77e631315..6df1646e17258 100644
--- a/core/templates/components/entity-creation-services/exploration-creation.service.ts
+++ b/core/templates/components/entity-creation-services/exploration-creation.service.ts
@@ -84,9 +84,8 @@ export class ExplorationCreationService {
     this.loaderService.showLoadingScreen('Creating exploration');
 
     var form = new FormData();
-    form.append('payload', JSON.stringify({
-      yaml_file: yamlFile
-    }));
+    form.append('yaml_file', yamlFile);
+    form.append('payload', JSON.stringify({}));
     this.csrfTokenService.getTokenAsync().then((token) => {
       form.append('csrf_token', token);
       $.ajax({
@@ -104,7 +103,7 @@
           this.windowRef.nativeWindow.location.href =
             this.urlInterpolationService.interpolateUrl(
               this.CREATE_NEW_EXPLORATION_URL_TEMPLATE, {
-                exploration_id: data.explorationId
+                exploration_id: data.exploration_id
               }
             );
         }).fail((data) => {
Schema validation error on uploading an exploration YAML

**Describe the bug**
Uploading a valid exploration YAML file through the creator dashboard fails with a schema validation error.

**To Reproduce**
Steps to reproduce the behavior:
1. Set `ALLOW_YAML_FILE_UPLOAD` to true in `constants.ts`.
2. Go to the creator dashboard.
3. Upload a test exploration YAML file from [here](https://github.com/oppia/oppia/tree/develop/data/explorations).
4. See error.

**Observed behavior**
Valid exploration YAML does not get uploaded.

**Expected behavior**
Valid exploration YAML should be uploaded correctly.

**Screenshots**
https://user-images.githubusercontent.com/11008603/150233493-27963be2-18ac-4c2d-a184-c39cd3268f09.mp4
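Relating the report to the diff above: the YAML used to be embedded in the JSON `payload`, where it appears to have tripped schema validation; after the change it travels as a multipart file field named `yaml_file`, which the handler reads via `self.normalized_request.get('yaml_file')` (both changes are visible in the diff). The sketch below shows that request shape from the outside. It is my own illustration: the endpoint URL, port, and CSRF token are placeholders, not values taken from Oppia's code, and the real frontend builds the same shape with `FormData` in `exploration-creation.service.ts`.

```python
# Hedged sketch of the upload request shape after the fix. The URL and the
# CSRF token below are placeholders, not Oppia constants.
import requests

UPLOAD_URL = 'http://localhost:8181/<upload-exploration-handler>'  # placeholder
CSRF_TOKEN = '<csrf token from an authenticated session>'  # placeholder

with open('welcome.yaml', 'rb') as yaml_file:
    response = requests.post(
        UPLOAD_URL,
        # The JSON payload no longer carries the YAML, so the payload
        # schema has nothing to reject.
        data={'payload': '{}', 'csrf_token': CSRF_TOKEN},
        # The YAML goes in a multipart file field named 'yaml_file'.
        files={'yaml_file': ('welcome.yaml', yaml_file)},
    )
print(response.status_code)
```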
google__pytype-807
[ { "content": "\"\"\"Match pytd types against each other.\n\n\"Matching\" x against y means roughly: If we have a function f(param: y) and\na type x, would we be able to pass (an instance of) x to f. (I.e.,\n\"execute f(x)\"). So for example, str would \"match\" against basestring, and\nlist[int] would match against list[Number].\n\nThis is used for converting structural types to nominal types during type\ninference, but could also be used when merging pytd files, to match existing\nsignatures against new inference results.\n\"\"\"\n\nimport logging\n\nfrom pytype import utils\nfrom pytype.pytd import booleq\nfrom pytype.pytd import escape\nfrom pytype.pytd import pytd\nfrom pytype.pytd import pytd_utils\nfrom pytype.pytd import visitors\nfrom pytype.pytd.parse import node\n\nlog = logging.getLogger(__name__)\n\n\nis_complete = escape.is_complete\n\n\n# Might not be needed anymore once pytd has builtin support for ~unknown.\ndef is_unknown(t):\n \"\"\"Return True if this is an ~unknown.\"\"\"\n if isinstance(t, (pytd.ClassType, pytd.NamedType, pytd.Class, StrictType)):\n return escape.is_unknown(t.name)\n elif isinstance(t, str):\n return escape.is_unknown(t)\n else:\n return False\n\n\ndef get_all_subclasses(asts):\n \"\"\"Compute a class->subclasses mapping.\n\n Args:\n asts: A list of ASTs.\n\n Returns:\n A dictionary, mapping instances of pytd.Type (types) to lists of\n pytd.Class (the derived classes).\n \"\"\"\n hierarchy = {}\n for ast in asts:\n hierarchy.update(ast.Visit(visitors.ExtractSuperClasses()))\n def filter_superclasses(superclasses):\n return [superclass for superclass in superclasses\n if is_complete(superclass)]\n hierarchy = {cls: filter_superclasses(superclasses)\n for cls, superclasses in hierarchy.items() if is_complete(cls)}\n # typically this is a fairly short list, e.g.:\n # [ClassType(basestring), ClassType(int), ClassType(object)]\n return utils.invert_dict(hierarchy)\n\n\nclass StrictType(node.Node(\"name\")):\n \"\"\"A type that doesn't allow sub- or superclasses to match.\n\n For example, \"int\" is considered a valid argument for a function that accepts\n \"object\", but StrictType(\"int\") is not.\n \"\"\"\n\n def __str__(self):\n return self.name\n\n\nclass TypeMatch(pytd_utils.TypeMatcher):\n \"\"\"Class for matching types against other types.\"\"\"\n\n def __init__(self, direct_subclasses=None, any_also_is_bottom=True):\n \"\"\"Construct.\n\n Args:\n direct_subclasses: A dictionary, mapping pytd.Type to lists of pytd.Type.\n any_also_is_bottom: Whether we should, (if True) consider\n pytd.AnythingType() to also be at the bottom of the type hierarchy,\n thus making it a subclass of everything, or (if False) to be only\n at the top.\n \"\"\"\n self.direct_subclasses = direct_subclasses or {}\n self.any_also_is_bottom = any_also_is_bottom\n self.solver = booleq.Solver()\n self._implications = {}\n\n def default_match(self, t1, t2, *unused_args, **unused_kwargs):\n # Don't allow pytd_utils.TypeMatcher to do default matching.\n raise AssertionError(\n \"Can't compare %s and %s\" % (type(t1).__name__, type(t2).__name__))\n\n def get_superclasses(self, t):\n \"\"\"Get all base classes of this type.\n\n Args:\n t: A pytd.Type\n Returns:\n A list of pytd.Type.\n \"\"\"\n if isinstance(t, pytd.ClassType):\n return sum((self.get_superclasses(c) for c in t.cls.parents), [t])\n elif isinstance(t, pytd.AnythingType):\n # All types, even \"?\", inherit from object.\n return [pytd.NamedType(\"builtins.object\")]\n elif isinstance(t, pytd.GenericType):\n return 
self.get_superclasses(t.base_type)\n else:\n log.warning(\"Can't extract superclasses from %s\", type(t))\n return [pytd.NamedType(\"builtins.object\")]\n\n def get_subclasses(self, t):\n \"\"\"Get all classes derived from this type.\n\n Args:\n t: A pytd.Type\n Returns:\n A list of pytd.Type.\n \"\"\"\n if isinstance(t, pytd.ClassType):\n subclasses = self.direct_subclasses.get(t, [])\n return sum((self.get_subclasses(pytd.ClassType(c.name, c))\n for c in subclasses), [t])\n else:\n raise NotImplementedError(\"Can't extract subclasses from %s\" % type(t))\n\n def type_parameter(self, unknown, base_class, item):\n \"\"\"This generates the type parameter when matching against a generic type.\n\n For example, when we match ~unknown1 against list[T], we need an additional\n type to model the T in \"~unknown1[T]\". This type would have the name\n \"~unknown1.list.T\".\n\n Args:\n unknown: An unknown type. This is the type that's matched against\n base_class[T]\n base_class: The base class of the generic we're matching the unknown\n against. E.g. \"list\".\n item: The pytd.TemplateItem, i.e., the actual type parameter. (\"T\" in\n the examples above)\n Returns:\n A type (pytd.Node) to represent this type parameter.\n \"\"\"\n assert is_unknown(unknown)\n assert isinstance(base_class, pytd.Class)\n name = unknown.name + \".\" + base_class.name + \".\" + item.type_param.name\n # We do *not* consider subclasses or superclasses when matching type\n # parameters.\n # So for example, if we pass list[int] to f(x: list[T]), we assume that\n # T can only be \"int\", not \"int + object\". This might be considered\n # incorrect, but typically gives us more intuitive results.\n # Note that this only happens if we match ~unknown against generic types,\n # not for matching of \"known\" types against each other.\n return StrictType(name)\n\n def _get_parameters(self, t1, t2):\n if isinstance(t1, pytd.TupleType) and isinstance(t2, pytd.TupleType):\n # No change needed; the parameters will be compared element-wise.\n return t1.parameters, t2.parameters\n elif isinstance(t2, pytd.TupleType):\n # Since we call _get_parameters after confirming that t1 and t2 have\n # compatible base types, t1 is a homogeneous tuple here.\n return (t1.element_type,) * len(t2.parameters), t2.parameters\n elif isinstance(t1, pytd.TupleType):\n return (pytd_utils.JoinTypes(t1.parameters),), t2.parameters\n elif (isinstance(t1, pytd.CallableType) and\n isinstance(t2, pytd.CallableType)):\n # Flip the arguments, since argument types are contravariant.\n return t2.args + (t1.ret,), t1.args + (t2.ret,)\n elif (t1.base_type.cls.name == \"builtins.type\" and\n t2.base_type.cls.name == \"typing.Callable\"):\n # We'll only check the return type, since getting the argument types for\n # initializing a class is tricky.\n return t1.parameters, (t2.parameters[-1],)\n elif (t1.base_type.cls.name == \"typing.Callable\" and\n t2.base_type.cls.name == \"builtins.type\"):\n return (t1.parameters[-1],), t2.parameters\n elif isinstance(t1, pytd.CallableType):\n # We're matching against GenericType(Callable, (Any, _RET)), so we don't\n # need the argument types.\n return (pytd.AnythingType(), t1.ret), t2.parameters\n elif isinstance(t2, pytd.CallableType):\n return t1.parameters, (pytd.AnythingType(), t2.ret)\n else:\n num_extra_params = len(t1.parameters) - len(t2.parameters)\n # Matching, e.g., Dict[str, int] against Iterable[K] is legitimate.\n assert num_extra_params >= 0, (t1.base_type.cls.name,\n t2.base_type.cls.name)\n t2_parameters = 
t2.parameters + (pytd.AnythingType(),) * num_extra_params\n return t1.parameters, t2_parameters\n\n def match_Generic_against_Generic(self, t1, t2, subst): # pylint: disable=invalid-name\n \"\"\"Match a pytd.GenericType against another pytd.GenericType.\"\"\"\n assert isinstance(t1.base_type, pytd.ClassType), type(t1.base_type)\n assert isinstance(t2.base_type, pytd.ClassType), type(t2.base_type)\n base1 = pytd.ClassType(t1.base_type.cls.name, t1.base_type.cls)\n base2 = pytd.ClassType(t2.base_type.cls.name, t2.base_type.cls)\n base_type_cmp = self.match_type_against_type(base1, base2, subst)\n if base_type_cmp is booleq.FALSE:\n return booleq.FALSE\n t1_parameters, t2_parameters = self._get_parameters(t1, t2)\n if len(t1_parameters) != len(t2_parameters):\n return booleq.FALSE\n # Type parameters are covariant:\n # E.g. passing list[int] as argument for list[object] succeeds.\n param_cmp = [self.match_type_against_type(p1, p2, subst)\n for p1, p2 in zip(t1_parameters, t2_parameters)]\n return booleq.And([base_type_cmp] + param_cmp)\n\n def match_Unknown_against_Generic(self, t1, t2, subst): # pylint: disable=invalid-name\n assert isinstance(t2.base_type, pytd.ClassType)\n # No inheritance for base classes - you can only inherit from an\n # instantiated template, but not from a template itself.\n base_match = booleq.Eq(t1.name, t2.base_type.cls.name)\n type_params = [self.type_parameter(t1, t2.base_type.cls, item)\n for item in t2.base_type.cls.template]\n for type_param in type_params:\n self.solver.register_variable(type_param.name)\n if isinstance(t2, pytd.TupleType):\n t2_parameters = (pytd_utils.JoinTypes(t2.parameters),)\n else:\n t2_parameters = t2.parameters\n params = [self.match_type_against_type(p1, p2, subst)\n for p1, p2 in zip(type_params, t2_parameters)]\n return booleq.And([base_match] + params)\n\n def match_Generic_against_Unknown(self, t1, t2, subst): # pylint: disable=invalid-name\n # Note: This flips p1 and p2 above.\n return self.match_Unknown_against_Generic(t2, t1, subst) # pylint: disable=arguments-out-of-order\n\n def maybe_lookup_type_param(self, t, subst):\n while isinstance(t, pytd.TypeParameter):\n # We can only have type parameters in a class, and if so, we should have\n # added them to the type parameter substitution map (subst) beforehand:\n assert t in subst\n if subst[t] is None:\n # Function type parameter. Can be anything.\n t = pytd.AnythingType()\n else:\n assert subst[t] != t, \"Cyclic type parameter.\"\n t = subst[t]\n return t\n\n def unclass(self, t):\n \"\"\"Prevent further subclass or superclass expansion for this type.\"\"\"\n if isinstance(t, pytd.ClassType):\n # When t.name and t.cls.name differ (e.g., int vs. 
builtins.int), the\n # latter is the complete name.\n return pytd.NamedType(t.cls.name)\n else:\n return t\n\n def expand_superclasses(self, t):\n class_and_superclasses = self.get_superclasses(t)\n return [self.unclass(t) for t in class_and_superclasses]\n\n def expand_subclasses(self, t):\n class_and_subclasses = self.get_subclasses(t)\n return [self.unclass(t) for t in class_and_subclasses]\n\n def match_type_against_type(self, t1, t2, subst):\n types = (t1, t2, frozenset(subst.items()))\n if types in self._implications:\n return self._implications[types]\n implication = self._implications[types] = self._match_type_against_type(\n t1, t2, subst)\n return implication\n\n def _full_name(self, t):\n return t.name\n\n def _match_type_against_type(self, t1, t2, subst):\n \"\"\"Match a pytd.Type against another pytd.Type.\"\"\"\n t1 = self.maybe_lookup_type_param(t1, subst)\n t2 = self.maybe_lookup_type_param(t2, subst)\n # TODO(b/159058933): Use utils:TypeMatcher to simplify this?\n if isinstance(t2, pytd.AnythingType):\n # We can match anything against AnythingType. (It's like top)\n return booleq.TRUE\n elif isinstance(t1, pytd.AnythingType):\n if self.any_also_is_bottom:\n # We can match AnythingType against everything. (It's like bottom)\n return booleq.TRUE\n else:\n return booleq.FALSE\n elif isinstance(t1, pytd.NothingType):\n # nothing as an actual type matches against everything, since it\n # represents an empty value.\n return booleq.TRUE\n elif isinstance(t2, pytd.NothingType):\n # We can't match anything against nothing as an expected type (except\n # nothing itself, above).\n return booleq.FALSE\n elif isinstance(t1, pytd.UnionType):\n return booleq.And(self.match_type_against_type(u, t2, subst)\n for u in t1.type_list)\n elif isinstance(t2, pytd.UnionType):\n return booleq.Or(self.match_type_against_type(t1, u, subst)\n for u in t2.type_list)\n elif (isinstance(t1, pytd.ClassType) and isinstance(t2, StrictType) or\n isinstance(t1, StrictType) and isinstance(t2, pytd.ClassType)):\n # For strict types, avoid subclasses of the left side.\n return booleq.Eq(self._full_name(t1), self._full_name(t2))\n elif isinstance(t1, pytd.ClassType) and t2.name == \"builtins.object\":\n return booleq.TRUE\n elif (t1.name in (\"builtins.type\", \"typing.Callable\") and\n t2.name in (\"builtins.type\", \"typing.Callable\")):\n return booleq.TRUE\n elif isinstance(t1, pytd.ClassType):\n # ClassTypes are similar to Unions, except they're disjunctions: We can\n # match the type or any of its base classes against the formal parameter.\n return booleq.Or(self.match_type_against_type(t, t2, subst)\n for t in self.expand_superclasses(t1))\n elif isinstance(t2, pytd.ClassType):\n # ClassTypes on the right are exactly like Unions: We can match against\n # this type or any of its subclasses.\n return booleq.Or(self.match_type_against_type(t1, t, subst)\n for t in self.expand_subclasses(t2))\n assert not isinstance(t1, pytd.ClassType)\n assert not isinstance(t2, pytd.ClassType)\n if is_unknown(t1) and isinstance(t2, pytd.GenericType):\n return self.match_Unknown_against_Generic(t1, t2, subst)\n elif isinstance(t1, pytd.GenericType) and is_unknown(t2):\n return self.match_Generic_against_Unknown(t1, t2, subst)\n elif isinstance(t1, pytd.GenericType) and isinstance(t2, pytd.GenericType):\n return self.match_Generic_against_Generic(t1, t2, subst)\n elif isinstance(t1, pytd.GenericType):\n # E.g. list[...] 
matches against list, or even object.\n return self.match_type_against_type(t1.base_type, t2, subst)\n elif isinstance(t2, pytd.GenericType):\n if self.any_also_is_bottom:\n # E.g. list (a.k.a. list[Any]) matches against list[str]\n return self.match_type_against_type(t1, t2.base_type, subst)\n else:\n return booleq.FALSE\n elif is_unknown(t1) and is_unknown(t2):\n return booleq.Eq(t1.name, t2.name)\n elif (isinstance(t1, (pytd.NamedType, StrictType)) and\n isinstance(t2, (pytd.NamedType, StrictType))):\n if is_complete(t1) and is_complete(t2) and t1.name != t2.name:\n # Optimization: If we know these two can never be equal, just return\n # false right away.\n return booleq.FALSE\n else:\n return booleq.Eq(t1.name, t2.name)\n elif isinstance(t1, pytd.LateType) or isinstance(t2, pytd.LateType):\n # Unresolved types never match against anything.\n return booleq.FALSE\n else:\n raise AssertionError(\"Don't know how to match %s against %s\" % (\n type(t1), type(t2)))\n\n # pylint: disable=invalid-name\n def match_Signature_against_Signature(self, sig1, sig2, subst,\n skip_self=False):\n \"\"\"Match a pytd.Signature against another pytd.Signature.\n\n Args:\n sig1: The caller\n sig2: The callee\n subst: Current type parameters.\n skip_self: If True, doesn't compare the first parameter, which is\n considered (and verified) to be \"self\".\n Returns:\n An instance of booleq.BooleanTerm, i.e. a boolean formula.\n \"\"\"\n assert not sig1.template\n # Signatures have type parameters, too. We ignore them, since they can\n # be anything. (See maybe_lookup_type_param())\n subst.update({p.type_param: None for p in sig2.template})\n params1 = sig1.params\n params2 = sig2.params\n if skip_self:\n # Methods in an ~unknown need to declare their methods with \"self\"\n assert params1 and params1[0].name == \"self\"\n params1 = params1[1:]\n if params2 and params2[0].name == \"self\":\n params2 = params2[1:]\n equalities = []\n if len(params1) > len(params2) and not sig2.has_optional:\n return booleq.FALSE # extra parameters\n if sig1.starargs is not None and sig2.starargs is not None:\n equalities.append(self.match_type_against_type(\n sig1.starargs.type, sig2.starargs.type, subst))\n if sig1.starstarargs is not None and sig2.starstarargs is not None:\n equalities.append(self.match_type_against_type(\n sig1.starstarargs.type, sig2.starstarargs.type, subst))\n # TODO(b/159058933): Handle kwonly parameters (on either side). 
Presumably,\n # a kwonly on the left side means that it was a keyword param.\n for p1, p2 in zip(params1, params2):\n if p1.optional and not p2.optional:\n return booleq.FALSE # needed for optimize.py:RemoveRedundantSignatures\n for i, p2 in enumerate(params2):\n if i >= len(params1):\n if not p2.optional:\n return booleq.FALSE # missing parameter\n else:\n pass\n else:\n p1 = params1[i]\n if p1.name != p2.name and not (\n pytd_utils.ANON_PARAM.match(p1.name) or\n pytd_utils.ANON_PARAM.match(p2.name)):\n return booleq.FALSE\n equalities.append(self.match_type_against_type(p1.type, p2.type, subst))\n equalities.append(\n self.match_type_against_type(\n sig1.return_type, sig2.return_type, subst))\n return booleq.And(equalities)\n\n def match_Signature_against_Function(self, sig, f, subst, skip_self=False): # pylint: disable=invalid-name\n def make_or(inner):\n return booleq.Or(\n self.match_Signature_against_Signature(inner, s, subst, skip_self)\n for s in f.signatures)\n return booleq.And(make_or(inner) for inner in visitors.ExpandSignature(sig))\n\n def match_Function_against_Function(self, f1, f2, subst, skip_self=False): # pylint: disable=invalid-name\n return booleq.And(\n self.match_Signature_against_Function(s1, f2, subst, skip_self)\n for s1 in f1.signatures)\n\n def match_Function_against_Class(self, f1, cls2, subst, cache):\n cls2_methods = cache.get(id(cls2))\n if cls2_methods is None:\n cls2_methods = cache[id(cls2)] = {f.name: f for f in cls2.methods}\n if f1.name not in cls2_methods:\n # The class itself doesn't have this method, but base classes might.\n # TODO(b/159058933): This should do MRO order, not depth-first.\n for base in cls2.parents:\n if isinstance(base, pytd.AnythingType):\n # AnythingType can contain any method. However, that would mean that\n # a class that inherits from AnythingType contains any method\n # imaginable, and hence is a match for anything. 
To prevent the bad\n # results caused by that, return FALSE here.\n return booleq.FALSE\n elif isinstance(base, (pytd.ClassType, pytd.GenericType)):\n if isinstance(base, pytd.ClassType):\n cls = base.cls\n values = tuple(pytd.AnythingType() for _ in cls.template)\n elif isinstance(base, pytd.TupleType):\n cls = base.base_type.cls\n values = (pytd_utils.JoinTypes(base.parameters),)\n else:\n cls = base.base_type.cls\n values = base.parameters\n if values:\n subst = subst.copy()\n for param, value in zip(cls.template, values):\n subst[param.type_param] = value\n implication = self.match_Function_against_Class(f1, cls, subst, cache)\n if implication is not booleq.FALSE:\n return implication\n else:\n # Funky types like UnionType are hard to match against (and shouldn't\n # appear as a base class) so we treat them as catch-all.\n log.warning(\"Assuming that %s has method %s\",\n pytd_utils.Print(base), f1.name)\n return booleq.TRUE\n return booleq.FALSE\n else:\n f2 = cls2_methods[f1.name]\n return self.match_Function_against_Function(\n f1, f2, subst, skip_self=True)\n\n def match_Class_against_Class(self, cls1, cls2, subst): # pylint: disable=invalid-name\n \"\"\"Match a pytd.Class against another pytd.Class.\"\"\"\n return self.match_Functions_against_Class(\n cls1.methods, cls2, subst)\n\n def match_Protocol_against_Unknown(self, protocol, unknown, subst): # pylint: disable=invalid-name\n \"\"\"Match a typing.Protocol against an unknown class.\"\"\"\n filtered_methods = [f for f in protocol.methods if f.is_abstract]\n return self.match_Functions_against_Class(\n filtered_methods, unknown, subst)\n\n def match_Functions_against_Class(self, methods, cls2, subst):\n implications = []\n cache = {}\n for f1 in methods:\n implication = self.match_Function_against_Class(f1, cls2, subst, cache)\n implications.append(implication)\n if implication is booleq.FALSE:\n break\n # TODO(b/159058933): class attributes\n return booleq.And(implications)\n", "path": "pytype/pytd/type_match.py" } ]
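Before the post-fix version of the file continues below, a small usage sketch may help make the module docstring concrete: matching two types does not return a plain boolean but a `booleq` constraint that the solver can later combine. This is my own illustration against the API visible in the file above, under the assumption that plain dotted `NamedType`s count as "complete" names; the real pipeline matches `ClassType`s resolved from parsed pytd ASTs.

```python
# Illustrative sketch (not part of the file above): drive TypeMatch directly
# on two nominal types and inspect the resulting boolean constraints.
from pytype.pytd import pytd
from pytype.pytd import type_match

matcher = type_match.TypeMatch()

# Identical complete names should yield a (trivially satisfiable) equality.
same = matcher.match_type_against_type(
    pytd.NamedType('foo.A'), pytd.NamedType('foo.A'), {})

# Two different complete names short-circuit to booleq.FALSE.
different = matcher.match_type_against_type(
    pytd.NamedType('foo.A'), pytd.NamedType('foo.B'), {})

print(same, different)
```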
[ { "content": "\"\"\"Match pytd types against each other.\n\n\"Matching\" x against y means roughly: If we have a function f(param: y) and\na type x, would we be able to pass (an instance of) x to f. (I.e.,\n\"execute f(x)\"). So for example, str would \"match\" against basestring, and\nlist[int] would match against list[Number].\n\nThis is used for converting structural types to nominal types during type\ninference, but could also be used when merging pytd files, to match existing\nsignatures against new inference results.\n\"\"\"\n\nimport logging\n\nfrom pytype import utils\nfrom pytype.pytd import booleq\nfrom pytype.pytd import escape\nfrom pytype.pytd import pytd\nfrom pytype.pytd import pytd_utils\nfrom pytype.pytd import visitors\nfrom pytype.pytd.parse import node\n\nlog = logging.getLogger(__name__)\n\n\nis_complete = escape.is_complete\n\n\n# Might not be needed anymore once pytd has builtin support for ~unknown.\ndef is_unknown(t):\n \"\"\"Return True if this is an ~unknown.\"\"\"\n if isinstance(t, (pytd.ClassType, pytd.NamedType, pytd.Class, StrictType)):\n return escape.is_unknown(t.name)\n elif isinstance(t, str):\n return escape.is_unknown(t)\n else:\n return False\n\n\ndef get_all_subclasses(asts):\n \"\"\"Compute a class->subclasses mapping.\n\n Args:\n asts: A list of ASTs.\n\n Returns:\n A dictionary, mapping instances of pytd.Type (types) to lists of\n pytd.Class (the derived classes).\n \"\"\"\n hierarchy = {}\n for ast in asts:\n hierarchy.update(ast.Visit(visitors.ExtractSuperClasses()))\n def filter_superclasses(superclasses):\n return [superclass for superclass in superclasses\n if is_complete(superclass)]\n hierarchy = {cls: filter_superclasses(superclasses)\n for cls, superclasses in hierarchy.items() if is_complete(cls)}\n # typically this is a fairly short list, e.g.:\n # [ClassType(basestring), ClassType(int), ClassType(object)]\n return utils.invert_dict(hierarchy)\n\n\nclass StrictType(node.Node(\"name\")):\n \"\"\"A type that doesn't allow sub- or superclasses to match.\n\n For example, \"int\" is considered a valid argument for a function that accepts\n \"object\", but StrictType(\"int\") is not.\n \"\"\"\n\n def __str__(self):\n return self.name\n\n\nclass TypeMatch(pytd_utils.TypeMatcher):\n \"\"\"Class for matching types against other types.\"\"\"\n\n def __init__(self, direct_subclasses=None, any_also_is_bottom=True):\n \"\"\"Construct.\n\n Args:\n direct_subclasses: A dictionary, mapping pytd.Type to lists of pytd.Type.\n any_also_is_bottom: Whether we should, (if True) consider\n pytd.AnythingType() to also be at the bottom of the type hierarchy,\n thus making it a subclass of everything, or (if False) to be only\n at the top.\n \"\"\"\n self.direct_subclasses = direct_subclasses or {}\n self.any_also_is_bottom = any_also_is_bottom\n self.solver = booleq.Solver()\n self._implications = {}\n\n def default_match(self, t1, t2, *unused_args, **unused_kwargs):\n # Don't allow pytd_utils.TypeMatcher to do default matching.\n raise AssertionError(\n \"Can't compare %s and %s\" % (type(t1).__name__, type(t2).__name__))\n\n def get_superclasses(self, t):\n \"\"\"Get all base classes of this type.\n\n Args:\n t: A pytd.Type\n Returns:\n A list of pytd.Type.\n \"\"\"\n if isinstance(t, pytd.ClassType):\n return sum((self.get_superclasses(c) for c in t.cls.parents), [t])\n elif isinstance(t, pytd.AnythingType):\n # All types, even \"?\", inherit from object.\n return [pytd.NamedType(\"builtins.object\")]\n elif isinstance(t, pytd.GenericType):\n return 
self.get_superclasses(t.base_type)\n else:\n log.warning(\"Can't extract superclasses from %s\", type(t))\n return [pytd.NamedType(\"builtins.object\")]\n\n def get_subclasses(self, t):\n \"\"\"Get all classes derived from this type.\n\n Args:\n t: A pytd.Type\n Returns:\n A list of pytd.Type.\n \"\"\"\n if isinstance(t, pytd.ClassType):\n subclasses = self.direct_subclasses.get(t, [])\n return sum((self.get_subclasses(pytd.ClassType(c.name, c))\n for c in subclasses), [t])\n else:\n raise NotImplementedError(\"Can't extract subclasses from %s\" % type(t))\n\n def type_parameter(self, unknown, base_class, item):\n \"\"\"This generates the type parameter when matching against a generic type.\n\n For example, when we match ~unknown1 against list[T], we need an additional\n type to model the T in \"~unknown1[T]\". This type would have the name\n \"~unknown1.list.T\".\n\n Args:\n unknown: An unknown type. This is the type that's matched against\n base_class[T]\n base_class: The base class of the generic we're matching the unknown\n against. E.g. \"list\".\n item: The pytd.TemplateItem, i.e., the actual type parameter. (\"T\" in\n the examples above)\n Returns:\n A type (pytd.Node) to represent this type parameter.\n \"\"\"\n assert is_unknown(unknown)\n assert isinstance(base_class, pytd.Class)\n name = unknown.name + \".\" + base_class.name + \".\" + item.type_param.name\n # We do *not* consider subclasses or superclasses when matching type\n # parameters.\n # So for example, if we pass list[int] to f(x: list[T]), we assume that\n # T can only be \"int\", not \"int + object\". This might be considered\n # incorrect, but typically gives us more intuitive results.\n # Note that this only happens if we match ~unknown against generic types,\n # not for matching of \"known\" types against each other.\n return StrictType(name)\n\n def _get_parameters(self, t1, t2):\n if isinstance(t1, pytd.TupleType) and isinstance(t2, pytd.TupleType):\n # No change needed; the parameters will be compared element-wise.\n return t1.parameters, t2.parameters\n elif isinstance(t2, pytd.TupleType):\n # Since we call _get_parameters after confirming that t1 and t2 have\n # compatible base types, t1 is a homogeneous tuple here.\n return (t1.element_type,) * len(t2.parameters), t2.parameters\n elif isinstance(t1, pytd.TupleType):\n return (pytd_utils.JoinTypes(t1.parameters),), t2.parameters\n elif (isinstance(t1, pytd.CallableType) and\n isinstance(t2, pytd.CallableType)):\n # Flip the arguments, since argument types are contravariant.\n return t2.args + (t1.ret,), t1.args + (t2.ret,)\n elif (t1.base_type.cls.name == \"builtins.type\" and\n t2.base_type.cls.name == \"typing.Callable\"):\n # We'll only check the return type, since getting the argument types for\n # initializing a class is tricky.\n return t1.parameters, (t2.parameters[-1],)\n elif (t1.base_type.cls.name == \"typing.Callable\" and\n t2.base_type.cls.name == \"builtins.type\"):\n return (t1.parameters[-1],), t2.parameters\n elif isinstance(t1, pytd.CallableType):\n # We're matching against GenericType(Callable, (Any, _RET)), so we don't\n # need the argument types.\n return (pytd.AnythingType(), t1.ret), t2.parameters\n elif isinstance(t2, pytd.CallableType):\n return t1.parameters, (pytd.AnythingType(), t2.ret)\n else:\n num_extra_params = len(t1.parameters) - len(t2.parameters)\n # Matching, e.g., Dict[str, int] against Iterable[K] is legitimate.\n assert num_extra_params >= 0, (t1.base_type.cls.name,\n t2.base_type.cls.name)\n t2_parameters = 
t2.parameters + (pytd.AnythingType(),) * num_extra_params\n return t1.parameters, t2_parameters\n\n def match_Generic_against_Generic(self, t1, t2, subst): # pylint: disable=invalid-name\n \"\"\"Match a pytd.GenericType against another pytd.GenericType.\"\"\"\n assert isinstance(t1.base_type, pytd.ClassType), type(t1.base_type)\n assert isinstance(t2.base_type, pytd.ClassType), type(t2.base_type)\n base1 = pytd.ClassType(t1.base_type.cls.name, t1.base_type.cls)\n base2 = pytd.ClassType(t2.base_type.cls.name, t2.base_type.cls)\n base_type_cmp = self.match_type_against_type(base1, base2, subst)\n if base_type_cmp is booleq.FALSE:\n return booleq.FALSE\n t1_parameters, t2_parameters = self._get_parameters(t1, t2)\n if len(t1_parameters) != len(t2_parameters):\n return booleq.FALSE\n # Type parameters are covariant:\n # E.g. passing list[int] as argument for list[object] succeeds.\n param_cmp = [self.match_type_against_type(p1, p2, subst)\n for p1, p2 in zip(t1_parameters, t2_parameters)]\n return booleq.And([base_type_cmp] + param_cmp)\n\n def match_Unknown_against_Generic(self, t1, t2, subst): # pylint: disable=invalid-name\n assert isinstance(t2.base_type, pytd.ClassType)\n # No inheritance for base classes - you can only inherit from an\n # instantiated template, but not from a template itself.\n base_match = booleq.Eq(t1.name, t2.base_type.cls.name)\n type_params = [self.type_parameter(t1, t2.base_type.cls, item)\n for item in t2.base_type.cls.template]\n for type_param in type_params:\n self.solver.register_variable(type_param.name)\n if isinstance(t2, pytd.TupleType):\n t2_parameters = (pytd_utils.JoinTypes(t2.parameters),)\n else:\n t2_parameters = t2.parameters\n params = [self.match_type_against_type(p1, p2, subst)\n for p1, p2 in zip(type_params, t2_parameters)]\n return booleq.And([base_match] + params)\n\n def match_Generic_against_Unknown(self, t1, t2, subst): # pylint: disable=invalid-name\n # Note: This flips p1 and p2 above.\n return self.match_Unknown_against_Generic(t2, t1, subst) # pylint: disable=arguments-out-of-order\n\n def maybe_lookup_type_param(self, t, subst):\n while isinstance(t, pytd.TypeParameter):\n # We can only have type parameters in a class, and if so, we should have\n # added them to the type parameter substitution map (subst) beforehand:\n assert t in subst\n if subst[t] is None:\n # Function type parameter. Can be anything.\n t = pytd.AnythingType()\n else:\n assert subst[t] != t, \"Cyclic type parameter.\"\n t = subst[t]\n return t\n\n def unclass(self, t):\n \"\"\"Prevent further subclass or superclass expansion for this type.\"\"\"\n if isinstance(t, pytd.ClassType):\n # When t.name and t.cls.name differ (e.g., int vs. 
builtins.int), the\n # latter is the complete name.\n return pytd.NamedType(t.cls.name)\n else:\n return t\n\n def expand_superclasses(self, t):\n class_and_superclasses = self.get_superclasses(t)\n return [self.unclass(t) for t in class_and_superclasses]\n\n def expand_subclasses(self, t):\n class_and_subclasses = self.get_subclasses(t)\n return [self.unclass(t) for t in class_and_subclasses]\n\n def match_type_against_type(self, t1, t2, subst):\n types = (t1, t2, frozenset(subst.items()))\n if types in self._implications:\n return self._implications[types]\n implication = self._implications[types] = self._match_type_against_type(\n t1, t2, subst)\n return implication\n\n def _full_name(self, t):\n return t.name\n\n def _match_type_against_type(self, t1, t2, subst):\n \"\"\"Match a pytd.Type against another pytd.Type.\"\"\"\n t1 = self.maybe_lookup_type_param(t1, subst)\n t2 = self.maybe_lookup_type_param(t2, subst)\n # TODO(b/159058933): Use utils:TypeMatcher to simplify this?\n if isinstance(t2, pytd.AnythingType):\n # We can match anything against AnythingType. (It's like top)\n return booleq.TRUE\n elif isinstance(t1, pytd.AnythingType):\n if self.any_also_is_bottom:\n # We can match AnythingType against everything. (It's like bottom)\n return booleq.TRUE\n else:\n return booleq.FALSE\n elif isinstance(t1, pytd.NothingType):\n # nothing as an actual type matches against everything, since it\n # represents an empty value.\n return booleq.TRUE\n elif isinstance(t2, pytd.NothingType):\n # We can't match anything against nothing as an expected type (except\n # nothing itself, above).\n return booleq.FALSE\n elif isinstance(t1, pytd.UnionType):\n return booleq.And(self.match_type_against_type(u, t2, subst)\n for u in t1.type_list)\n elif isinstance(t2, pytd.UnionType):\n return booleq.Or(self.match_type_against_type(t1, u, subst)\n for u in t2.type_list)\n elif (isinstance(t1, pytd.ClassType) and isinstance(t2, StrictType) or\n isinstance(t1, StrictType) and isinstance(t2, pytd.ClassType)):\n # For strict types, avoid subclasses of the left side.\n return booleq.Eq(self._full_name(t1), self._full_name(t2))\n elif isinstance(t1, pytd.ClassType) and t2.name == \"builtins.object\":\n return booleq.TRUE\n elif (t1.name in (\"builtins.type\", \"typing.Callable\") and\n t2.name in (\"builtins.type\", \"typing.Callable\")):\n return booleq.TRUE\n elif isinstance(t1, pytd.ClassType):\n # ClassTypes are similar to Unions, except they're disjunctions: We can\n # match the type or any of its base classes against the formal parameter.\n return booleq.Or(self.match_type_against_type(t, t2, subst)\n for t in self.expand_superclasses(t1))\n elif isinstance(t2, pytd.ClassType):\n # ClassTypes on the right are exactly like Unions: We can match against\n # this type or any of its subclasses.\n return booleq.Or(self.match_type_against_type(t1, t, subst)\n for t in self.expand_subclasses(t2))\n assert not isinstance(t1, pytd.ClassType)\n assert not isinstance(t2, pytd.ClassType)\n if is_unknown(t1) and isinstance(t2, pytd.GenericType):\n return self.match_Unknown_against_Generic(t1, t2, subst)\n elif isinstance(t1, pytd.GenericType) and is_unknown(t2):\n return self.match_Generic_against_Unknown(t1, t2, subst)\n elif isinstance(t1, pytd.GenericType) and isinstance(t2, pytd.GenericType):\n return self.match_Generic_against_Generic(t1, t2, subst)\n elif isinstance(t1, pytd.GenericType):\n # E.g. list[...] 
matches against list, or even object.\n return self.match_type_against_type(t1.base_type, t2, subst)\n elif isinstance(t2, pytd.GenericType):\n if self.any_also_is_bottom:\n # E.g. list (a.k.a. list[Any]) matches against list[str]\n return self.match_type_against_type(t1, t2.base_type, subst)\n else:\n return booleq.FALSE\n elif is_unknown(t1) and is_unknown(t2):\n return booleq.Eq(t1.name, t2.name)\n elif (isinstance(t1, (pytd.NamedType, StrictType)) and\n isinstance(t2, (pytd.NamedType, StrictType))):\n if is_complete(t1) and is_complete(t2) and t1.name != t2.name:\n # Optimization: If we know these two can never be equal, just return\n # false right away.\n return booleq.FALSE\n else:\n return booleq.Eq(t1.name, t2.name)\n elif isinstance(t1, pytd.NamedType) and isinstance(t2, pytd.Literal):\n return booleq.FALSE\n elif isinstance(t1, pytd.LateType) or isinstance(t2, pytd.LateType):\n # Unresolved types never match against anything.\n return booleq.FALSE\n else:\n raise AssertionError(\"Don't know how to match %s against %s\" % (\n type(t1), type(t2)))\n\n # pylint: disable=invalid-name\n def match_Signature_against_Signature(self, sig1, sig2, subst,\n skip_self=False):\n \"\"\"Match a pytd.Signature against another pytd.Signature.\n\n Args:\n sig1: The caller\n sig2: The callee\n subst: Current type parameters.\n skip_self: If True, doesn't compare the first parameter, which is\n considered (and verified) to be \"self\".\n Returns:\n An instance of booleq.BooleanTerm, i.e. a boolean formula.\n \"\"\"\n assert not sig1.template\n # Signatures have type parameters, too. We ignore them, since they can\n # be anything. (See maybe_lookup_type_param())\n subst.update({p.type_param: None for p in sig2.template})\n params1 = sig1.params\n params2 = sig2.params\n if skip_self:\n # Methods in an ~unknown need to declare their methods with \"self\"\n assert params1 and params1[0].name == \"self\"\n params1 = params1[1:]\n if params2 and params2[0].name == \"self\":\n params2 = params2[1:]\n equalities = []\n if len(params1) > len(params2) and not sig2.has_optional:\n return booleq.FALSE # extra parameters\n if sig1.starargs is not None and sig2.starargs is not None:\n equalities.append(self.match_type_against_type(\n sig1.starargs.type, sig2.starargs.type, subst))\n if sig1.starstarargs is not None and sig2.starstarargs is not None:\n equalities.append(self.match_type_against_type(\n sig1.starstarargs.type, sig2.starstarargs.type, subst))\n # TODO(b/159058933): Handle kwonly parameters (on either side). 
Presumably,\n # a kwonly on the left side means that it was a keyword param.\n for p1, p2 in zip(params1, params2):\n if p1.optional and not p2.optional:\n return booleq.FALSE # needed for optimize.py:RemoveRedundantSignatures\n for i, p2 in enumerate(params2):\n if i >= len(params1):\n if not p2.optional:\n return booleq.FALSE # missing parameter\n else:\n pass\n else:\n p1 = params1[i]\n if p1.name != p2.name and not (\n pytd_utils.ANON_PARAM.match(p1.name) or\n pytd_utils.ANON_PARAM.match(p2.name)):\n return booleq.FALSE\n equalities.append(self.match_type_against_type(p1.type, p2.type, subst))\n equalities.append(\n self.match_type_against_type(\n sig1.return_type, sig2.return_type, subst))\n return booleq.And(equalities)\n\n def match_Signature_against_Function(self, sig, f, subst, skip_self=False): # pylint: disable=invalid-name\n def make_or(inner):\n return booleq.Or(\n self.match_Signature_against_Signature(inner, s, subst, skip_self)\n for s in f.signatures)\n return booleq.And(make_or(inner) for inner in visitors.ExpandSignature(sig))\n\n def match_Function_against_Function(self, f1, f2, subst, skip_self=False): # pylint: disable=invalid-name\n return booleq.And(\n self.match_Signature_against_Function(s1, f2, subst, skip_self)\n for s1 in f1.signatures)\n\n def match_Function_against_Class(self, f1, cls2, subst, cache):\n cls2_methods = cache.get(id(cls2))\n if cls2_methods is None:\n cls2_methods = cache[id(cls2)] = {f.name: f for f in cls2.methods}\n if f1.name not in cls2_methods:\n # The class itself doesn't have this method, but base classes might.\n # TODO(b/159058933): This should do MRO order, not depth-first.\n for base in cls2.parents:\n if isinstance(base, pytd.AnythingType):\n # AnythingType can contain any method. However, that would mean that\n # a class that inherits from AnythingType contains any method\n # imaginable, and hence is a match for anything. 
To prevent the bad\n # results caused by that, return FALSE here.\n return booleq.FALSE\n elif isinstance(base, (pytd.ClassType, pytd.GenericType)):\n if isinstance(base, pytd.ClassType):\n cls = base.cls\n values = tuple(pytd.AnythingType() for _ in cls.template)\n elif isinstance(base, pytd.TupleType):\n cls = base.base_type.cls\n values = (pytd_utils.JoinTypes(base.parameters),)\n else:\n cls = base.base_type.cls\n values = base.parameters\n if values:\n subst = subst.copy()\n for param, value in zip(cls.template, values):\n subst[param.type_param] = value\n implication = self.match_Function_against_Class(f1, cls, subst, cache)\n if implication is not booleq.FALSE:\n return implication\n else:\n # Funky types like UnionType are hard to match against (and shouldn't\n # appear as a base class) so we treat them as catch-all.\n log.warning(\"Assuming that %s has method %s\",\n pytd_utils.Print(base), f1.name)\n return booleq.TRUE\n return booleq.FALSE\n else:\n f2 = cls2_methods[f1.name]\n return self.match_Function_against_Function(\n f1, f2, subst, skip_self=True)\n\n def match_Class_against_Class(self, cls1, cls2, subst): # pylint: disable=invalid-name\n \"\"\"Match a pytd.Class against another pytd.Class.\"\"\"\n return self.match_Functions_against_Class(\n cls1.methods, cls2, subst)\n\n def match_Protocol_against_Unknown(self, protocol, unknown, subst): # pylint: disable=invalid-name\n \"\"\"Match a typing.Protocol against an unknown class.\"\"\"\n filtered_methods = [f for f in protocol.methods if f.is_abstract]\n return self.match_Functions_against_Class(\n filtered_methods, unknown, subst)\n\n def match_Functions_against_Class(self, methods, cls2, subst):\n implications = []\n cache = {}\n for f1 in methods:\n implication = self.match_Function_against_Class(f1, cls2, subst, cache)\n implications.append(implication)\n if implication is booleq.FALSE:\n break\n # TODO(b/159058933): class attributes\n return booleq.And(implications)\n", "path": "pytype/pytd/type_match.py" } ]
diff --git a/pytype/pytd/type_match.py b/pytype/pytd/type_match.py index f5ecd1928..35f639b31 100644 --- a/pytype/pytd/type_match.py +++ b/pytype/pytd/type_match.py @@ -347,6 +347,8 @@ def _match_type_against_type(self, t1, t2, subst): return booleq.FALSE else: return booleq.Eq(t1.name, t2.name) + elif isinstance(t1, pytd.NamedType) and isinstance(t2, pytd.Literal): + return booleq.FALSE elif isinstance(t1, pytd.LateType) or isinstance(t2, pytd.LateType): # Unresolved types never match against anything. return booleq.FALSE
AssertionError: Don't know how to match <class 'pytype.pytd.pytd.NamedType'> against <class 'pytype.pytd.pytd.Literal'>

I opened an issue for this error. If this is a possible fix for the issue, feel free to merge it. Issue: https://github.com/google/pytype/issues/802
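For readers skimming the record, a minimal standalone sketch of the behavior the one-line patch changes: before the fix, matching a `NamedType` against a `Literal` fell through every branch of `_match_type_against_type` and hit the final `raise AssertionError`; with the added branch it returns `booleq.FALSE` instead. The `NamedType`, `Literal` and `FALSE` names below are simplified stand-ins, not the real pytype classes, so this only mirrors the dispatch shape rather than the library API.

```python
# Simplified stand-ins that only mimic the isinstance() dispatch used in
# _match_type_against_type; they are NOT the real pytd/booleq objects.
class NamedType:
    def __init__(self, name):
        self.name = name


class Literal:
    def __init__(self, value):
        self.value = value


FALSE = "FALSE"  # stands in for the booleq.FALSE boolean term


def match(t1, t2):
    """Tail of the dispatch chain, including the branch added by the patch."""
    if isinstance(t1, NamedType) and isinstance(t2, NamedType):
        return ("Eq", t1.name, t2.name)
    elif isinstance(t1, NamedType) and isinstance(t2, Literal):
        # Added branch: a plain named type never matches a Literal[...] type.
        return FALSE
    else:
        raise AssertionError(
            "Don't know how to match %s against %s" % (type(t1), type(t2)))


print(match(NamedType("builtins.str"), Literal(3)))  # FALSE, no AssertionError
```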
electricitymaps__electricitymaps-contrib-2656
[ { "content": "#!/usr/bin/env python3\n\nimport logging\nfrom arrow import get\nfrom requests import Session\nfrom ree import (Formentera, Ibiza,\n Mallorca, Menorca,\n BalearicIslands)\n# package \"ree\" is used to parse data from www.ree.es // maintained on github by @hectorespert\n\nfrom .lib.exceptions import ParserException\nfrom .lib.validation import validate, validate_production_diffs\n\n## Guess we'll need to figure these out later?! Adapted from ES-CN:\n\n# Minimum valid zone demand. This is used to eliminate some cases\n# where generation for one or more modes is obviously missing.\nFLOORS = {\n 'ES-IB': 0,\n 'ES-IB-FO': 0,\n 'ES-IB-IZ': 0,\n 'ES-IB-MA': 0,\n 'ES-IB-ME': 0,\n}\n\n\ndef fetch_island_data(zone_key, session):\n if zone_key == 'ES-IB-FO':\n formentera_data = Formentera(session, verify=False).get_all()\n if not formentera_data:\n raise ParserException(zone_key, \"Formentera doesn't respond\")\n else:\n return formentera_data\n elif zone_key == 'ES-IB-IZ':\n ibiza_data = Ibiza(session, verify=False).get_all()\n if not ibiza_data:\n raise ParserException(zone_key, \"Party is over, Ibiza doesn't respond\")\n else:\n return ibiza_data\n elif zone_key == 'ES-IB-MA':\n mallorca_data = Mallorca(session, verify=False).get_all()\n if not mallorca_data:\n raise ParserException(zone_key, \"Mallorca doesn't respond\")\n else:\n return mallorca_data\n elif zone_key == 'ES-IB-ME':\n menorca_data = Menorca(session, verify=False).get_all()\n if not menorca_data:\n raise ParserException(zone_key, \"Menorca doesn't respond\")\n else:\n return menorca_data\n elif zone_key == 'ES-IB':\n balearic_islands = BalearicIslands(session, verify=False).get_all()\n if not balearic_islands:\n raise ParserException(zone_key, \"Balearic Islands doesn't respond\")\n else:\n return balearic_islands\n else:\n raise ParserException(zone_key, 'Can\\'t read this country code {0}'.format(zone_key))\n\n\ndef fetch_consumption(zone_key, session=None, target_datetime=None, logger=None):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n ses = session or Session()\n island_data = fetch_island_data(zone_key, ses)\n data = []\n for response in island_data:\n response_data = {\n 'zoneKey': zone_key,\n 'datetime': get(response.timestamp).datetime,\n 'consumption': response.demand,\n 'source': 'demanda.ree.es'\n }\n\n data.append(response_data)\n\n return data\n\n\ndef fetch_production(zone_key, session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n ses = session or Session()\n island_data = fetch_island_data(zone_key, ses)\n data = []\n\n if zone_key == 'ES-IB':\n expected_range = {'coal': (50,600)}\n else:\n expected_range = None\n\n for response in island_data:\n if response.production() >= 0:\n response_data = {\n 'zoneKey': zone_key,\n 'datetime': get(response.timestamp).datetime,\n 'production': {\n 'coal': response.carbon,\n 'gas': round(response.gas + response.combined, 2),\n 'solar': response.solar,\n 'oil': round(response.vapor + response.diesel, 2),\n 'wind': response.wind,\n 'hydro': response.hydraulic,\n 'biomass': response.waste,\n 'nuclear': 0.0,\n 'geothermal': 0.0,\n 'unknown': response.other\n },\n 'storage': {\n 'hydro': 0.0,\n 'battery': 0.0\n },\n 'source': 'demanda.ree.es',\n }\n\n response_data = validate(response_data, logger,\n floor=FLOORS[zone_key],\n expected_range = expected_range)\n\n if response_data:\n # 
append if valid\n data.append(response_data)\n\n if len(data) > 1:\n # granularity is 10 minutes, drops points with change in coal > 100MW\n data = validate_production_diffs(data, {'coal': 150}, logger)\n\n return data\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):\n\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))\n\n ses = session or Session()\n\n if sorted_zone_keys == 'ES->ES-IB':\n responses = BalearicIslands(ses, verify=False).get_all()\n if not responses:\n raise ParserException(\"ES-IB\", \"No responses\")\n elif sorted_zone_keys == 'ES->ES-IB-MA' or sorted_zone_keys == 'ES-IB-MA->ES-IB-ME' or sorted_zone_keys == 'ES-IB-IZ->ES-IB-MA':\n responses = Mallorca(ses, verify=False).get_all()\n if not responses:\n raise ParserException(\"ES-IB-MA\", \"No responses\")\n elif sorted_zone_keys == 'ES-IB-FO->ES-IB-IZ':\n responses = Formentera(ses, verify=False).get_all()\n if not responses:\n raise ParserException(\"ES-IB-FO\", \"No responses\")\n else:\n raise NotImplementedError('This exchange pair is not implemented')\n\n exchanges = []\n for response in responses:\n\n if sorted_zone_keys == 'ES-IB-MA->ES-IB-ME':\n net_flow = response.link['ma_me']\n elif sorted_zone_keys == 'ES-IB-IZ->ES-IB-MA':\n net_flow = response.link['ma_ib']\n elif sorted_zone_keys == 'ES-IB-FO->ES-IB-IZ':\n net_flow = -1 * response.link['ib_fo']\n else:\n net_flow = response.link['pe_ma']\n\n exchange = {\n 'sortedZoneKeys': sorted_zone_keys,\n 'datetime': get(response.timestamp).datetime,\n 'netFlow': net_flow,\n 'source': 'demanda.ree.es',\n }\n\n exchanges.append(exchange)\n\n return exchanges\n\n\nif __name__ == '__main__':\n session = Session\n print(\"fetch_consumption(ES-IB)\")\n print(fetch_consumption('ES-IB', session))\n\n print(\"fetch_production(ES-IB)\")\n print(fetch_production('ES-IB', session))\n\n print(\"fetch_exchange(ES, ES-IB)\")\n print(fetch_exchange('ES', 'ES-IB', session))\n\n print(\"fetch_consumption(ES-IB-FO)\")\n print(fetch_consumption('ES-IB-FO'))\n print(\"fetch_production(ES-IB-FO)\")\n print(fetch_production('ES-IB-FO'))\n print(\"fetch_consumption(ES-IB-IZ)\")\n print(fetch_consumption('ES-IB-IZ'))\n print(\"fetch_production(ES-IB-IZ)\")\n print(fetch_production('ES-IB-IZ'))\n print(\"fetch_consumption(ES-IB-MA)\")\n print(fetch_consumption('ES-IB-MA'))\n print(\"fetch_production(ES-IB-MA)\")\n print(fetch_production('ES-IB-MA'))\n print(\"fetch_consumption(ES-IB-ME)\")\n print(fetch_consumption('ES-IB-ME'))\n print(\"fetch_production(ES-IB-ME)\")\n print(fetch_production('ES-IB-ME'))\n print(\"fetch_exchange(ES, ES-IB-MA)\")\n print(fetch_exchange('ES', 'ES-IB-MA'))\n print(\"fetch_exchange(ES-IB-MA, ES-IB-ME)\")\n print(fetch_exchange('ES-IB-MA', 'ES-IB-ME'))\n print(\"fetch_exchange(ES-IB-MA, ES-IB-IZ)\")\n print(fetch_exchange('ES-IB-MA', 'ES-IB-IZ'))\n print(\"fetch_exchange(ES-IB-IZ, ES-IB-FO)\")\n print(fetch_exchange('ES-IB-IZ', 'ES-IB-FO'))\n", "path": "parsers/ES_IB.py" } ]
[ { "content": "#!/usr/bin/env python3\n\nimport logging\nfrom arrow import get\nfrom requests import Session\nfrom ree import (Formentera, Ibiza,\n Mallorca, Menorca,\n BalearicIslands)\n# package \"ree\" is used to parse data from www.ree.es // maintained on github by @hectorespert\n\nfrom .lib.exceptions import ParserException\nfrom .lib.validation import validate, validate_production_diffs\n\n## Guess we'll need to figure these out later?! Adapted from ES-CN:\n\n# Minimum valid zone demand. This is used to eliminate some cases\n# where generation for one or more modes is obviously missing.\nFLOORS = {\n 'ES-IB': 0,\n 'ES-IB-FO': 0,\n 'ES-IB-IZ': 0,\n 'ES-IB-MA': 0,\n 'ES-IB-ME': 0,\n}\n\n\ndef fetch_island_data(zone_key, session):\n if zone_key == 'ES-IB-FO':\n formentera_data = Formentera(session, verify=False).get_all()\n if not formentera_data:\n raise ParserException(zone_key, \"Formentera doesn't respond\")\n else:\n return formentera_data\n elif zone_key == 'ES-IB-IZ':\n ibiza_data = Ibiza(session, verify=False).get_all()\n if not ibiza_data:\n raise ParserException(zone_key, \"Party is over, Ibiza doesn't respond\")\n else:\n return ibiza_data\n elif zone_key == 'ES-IB-MA':\n mallorca_data = Mallorca(session, verify=False).get_all()\n if not mallorca_data:\n raise ParserException(zone_key, \"Mallorca doesn't respond\")\n else:\n return mallorca_data\n elif zone_key == 'ES-IB-ME':\n menorca_data = Menorca(session, verify=False).get_all()\n if not menorca_data:\n raise ParserException(zone_key, \"Menorca doesn't respond\")\n else:\n return menorca_data\n elif zone_key == 'ES-IB':\n balearic_islands = BalearicIslands(session, verify=False).get_all()\n if not balearic_islands:\n raise ParserException(zone_key, \"Balearic Islands doesn't respond\")\n else:\n return balearic_islands\n else:\n raise ParserException(zone_key, 'Can\\'t read this country code {0}'.format(zone_key))\n\n\ndef fetch_consumption(zone_key, session=None, target_datetime=None, logger=None):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n ses = session or Session()\n island_data = fetch_island_data(zone_key, ses)\n data = []\n for response in island_data:\n response_data = {\n 'zoneKey': zone_key,\n 'datetime': get(response.timestamp).datetime,\n 'consumption': response.demand,\n 'source': 'demanda.ree.es'\n }\n\n data.append(response_data)\n\n return data\n\n\ndef fetch_production(zone_key, session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n ses = session or Session()\n island_data = fetch_island_data(zone_key, ses)\n data = []\n\n if zone_key == 'ES-IB':\n expected_range = {'coal': (50,600)}\n else:\n expected_range = None\n\n for response in island_data:\n if response.production() >= 0:\n response_data = {\n 'zoneKey': zone_key,\n 'datetime': get(response.timestamp).datetime,\n 'production': {\n 'coal': response.carbon,\n 'gas': round(response.gas + response.combined, 2),\n 'solar': response.solar,\n 'oil': round(response.vapor + response.diesel, 2),\n 'wind': response.wind,\n 'hydro': response.hydraulic,\n 'biomass': response.waste,\n 'nuclear': 0.0,\n 'geothermal': 0.0,\n 'unknown': response.other\n },\n 'storage': {\n 'hydro': 0.0,\n 'battery': 0.0\n },\n 'source': 'demanda.ree.es',\n }\n\n response_data = validate(response_data, logger,\n floor=FLOORS[zone_key],\n expected_range = expected_range)\n\n if response_data:\n # 
append if valid\n data.append(response_data)\n\n if len(data) > 1:\n # granularity is 10 minutes, drops points with change in coal > 100MW\n data = validate_production_diffs(data, {'coal': 150}, logger)\n\n return data\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):\n\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))\n\n ses = session or Session()\n\n if sorted_zone_keys == 'ES->ES-IB':\n responses = BalearicIslands(ses, verify=False).get_all()\n if not responses:\n raise ParserException(\"ES-IB\", \"No responses\")\n elif sorted_zone_keys == 'ES->ES-IB-MA' or sorted_zone_keys == 'ES-IB-MA->ES-IB-ME' or sorted_zone_keys == 'ES-IB-IZ->ES-IB-MA':\n responses = Mallorca(ses, verify=False).get_all()\n if not responses:\n raise ParserException(\"ES-IB-MA\", \"No responses\")\n elif sorted_zone_keys == 'ES-IB-FO->ES-IB-IZ':\n responses = Formentera(ses, verify=False).get_all()\n if not responses:\n raise ParserException(\"ES-IB-FO\", \"No responses\")\n else:\n raise NotImplementedError('This exchange pair is not implemented')\n\n exchanges = []\n for response in responses:\n\n if sorted_zone_keys == 'ES-IB-MA->ES-IB-ME':\n net_flow = -1 * response.link['ma_me']\n elif sorted_zone_keys == 'ES-IB-IZ->ES-IB-MA':\n net_flow = response.link['ma_ib']\n elif sorted_zone_keys == 'ES-IB-FO->ES-IB-IZ':\n net_flow = -1 * response.link['ib_fo']\n else:\n net_flow = response.link['pe_ma']\n\n exchange = {\n 'sortedZoneKeys': sorted_zone_keys,\n 'datetime': get(response.timestamp).datetime,\n 'netFlow': net_flow,\n 'source': 'demanda.ree.es',\n }\n\n exchanges.append(exchange)\n\n return exchanges\n\n\nif __name__ == '__main__':\n session = Session\n print(\"fetch_consumption(ES-IB)\")\n print(fetch_consumption('ES-IB', session))\n\n print(\"fetch_production(ES-IB)\")\n print(fetch_production('ES-IB', session))\n\n print(\"fetch_exchange(ES, ES-IB)\")\n print(fetch_exchange('ES', 'ES-IB', session))\n\n print(\"fetch_consumption(ES-IB-FO)\")\n print(fetch_consumption('ES-IB-FO'))\n print(\"fetch_production(ES-IB-FO)\")\n print(fetch_production('ES-IB-FO'))\n print(\"fetch_consumption(ES-IB-IZ)\")\n print(fetch_consumption('ES-IB-IZ'))\n print(\"fetch_production(ES-IB-IZ)\")\n print(fetch_production('ES-IB-IZ'))\n print(\"fetch_consumption(ES-IB-MA)\")\n print(fetch_consumption('ES-IB-MA'))\n print(\"fetch_production(ES-IB-MA)\")\n print(fetch_production('ES-IB-MA'))\n print(\"fetch_consumption(ES-IB-ME)\")\n print(fetch_consumption('ES-IB-ME'))\n print(\"fetch_production(ES-IB-ME)\")\n print(fetch_production('ES-IB-ME'))\n print(\"fetch_exchange(ES, ES-IB-MA)\")\n print(fetch_exchange('ES', 'ES-IB-MA'))\n print(\"fetch_exchange(ES-IB-MA, ES-IB-ME)\")\n print(fetch_exchange('ES-IB-MA', 'ES-IB-ME'))\n print(\"fetch_exchange(ES-IB-MA, ES-IB-IZ)\")\n print(fetch_exchange('ES-IB-MA', 'ES-IB-IZ'))\n print(\"fetch_exchange(ES-IB-IZ, ES-IB-FO)\")\n print(fetch_exchange('ES-IB-IZ', 'ES-IB-FO'))\n", "path": "parsers/ES_IB.py" } ]
diff --git a/parsers/ES_IB.py b/parsers/ES_IB.py index 116587d597..93dfe140c9 100644 --- a/parsers/ES_IB.py +++ b/parsers/ES_IB.py @@ -160,7 +160,7 @@ def fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, log for response in responses: if sorted_zone_keys == 'ES-IB-MA->ES-IB-ME': - net_flow = response.link['ma_me'] + net_flow = -1 * response.link['ma_me'] elif sorted_zone_keys == 'ES-IB-IZ->ES-IB-MA': net_flow = response.link['ma_ib'] elif sorted_zone_keys == 'ES-IB-FO->ES-IB-IZ':
The import/export between Menorca and Mallorca appears reversed. Our data is showing the export going one way, but our data source is showing the reverse, according to one user. See screenshots:
![IMG_9768](https://user-images.githubusercontent.com/8129157/86447078-17d4b400-bd15-11ea-87da-15f44842d645.PNG)
![image](https://user-images.githubusercontent.com/8129157/86447080-186d4a80-bd15-11ea-8f22-9ab474bb070b.png)
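For context on why flipping the sign fixes this, here is a small self-contained sketch of the exchange convention the parser follows (assuming, as the patch implies, that `netFlow` should be positive when power flows from the first zone key of the sorted pair towards the second; the 35 MW sample value is made up for illustration and is not real feed data):

```python
def exchange_point(zone_key1, zone_key2, raw_link_value):
    """Build an exchange data point using the sortedZoneKeys convention.

    netFlow is assumed positive when power flows from the first sorted zone
    key towards the second, so a feed value reported with the opposite sign
    has to be negated, which is what the one-character fix above does for
    the Mallorca/Menorca link.
    """
    sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))
    if sorted_zone_keys == 'ES-IB-MA->ES-IB-ME':
        net_flow = -1 * raw_link_value  # feed sign is opposite to the convention
    else:
        net_flow = raw_link_value
    return {'sortedZoneKeys': sorted_zone_keys, 'netFlow': net_flow}


# A hypothetical feed value of 35 MW now yields netFlow == -35, i.e. roughly
# 35 MW flowing from Menorca towards Mallorca rather than the other way round.
print(exchange_point('ES-IB-ME', 'ES-IB-MA', 35))
```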
pypi__warehouse-6337
[ { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport email\nimport hashlib\nimport hmac\nimport os.path\nimport re\nimport tarfile\nimport tempfile\nimport zipfile\n\nfrom cgi import FieldStorage, parse_header\nfrom itertools import chain\n\nimport packaging.requirements\nimport packaging.specifiers\nimport packaging.utils\nimport packaging.version\nimport pkg_resources\nimport requests\nimport stdlib_list\nimport wtforms\nimport wtforms.validators\n\nfrom pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPGone\nfrom pyramid.response import Response\nfrom pyramid.view import view_config\nfrom sqlalchemy import exists, func, orm\nfrom sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound\n\nfrom warehouse import forms\nfrom warehouse.admin.squats import Squat\nfrom warehouse.classifiers.models import Classifier\nfrom warehouse.metrics import IMetricsService\nfrom warehouse.packaging.interfaces import IFileStorage\nfrom warehouse.packaging.models import (\n BlacklistedProject,\n Dependency,\n DependencyKind,\n Description,\n File,\n Filename,\n JournalEntry,\n Project,\n Release,\n Role,\n)\nfrom warehouse.utils import http, readme\n\nMAX_FILESIZE = 60 * 1024 * 1024 # 60M\nMAX_SIGSIZE = 8 * 1024 # 8K\n\nPATH_HASHER = \"blake2_256\"\n\n\ndef namespace_stdlib_list(module_list):\n for module_name in module_list:\n parts = module_name.split(\".\")\n for i, part in enumerate(parts):\n yield \".\".join(parts[: i + 1])\n\n\nSTDLIB_PROHIBITTED = {\n packaging.utils.canonicalize_name(s.rstrip(\"-_.\").lstrip(\"-_.\"))\n for s in chain.from_iterable(\n namespace_stdlib_list(stdlib_list.stdlib_list(version))\n for version in stdlib_list.short_versions\n )\n}\n\n# Wheel platform checking\n\n# Note: defining new platform ABI compatibility tags that don't\n# have a python.org binary release to anchor them is a\n# complex task that needs more than just OS+architecture info.\n# For Linux specifically, the platform ABI is defined by each\n# individual distro version, so wheels built on one version may\n# not even work on older versions of the same distro, let alone\n# a completely different distro.\n#\n# That means new entries should only be added given an\n# accompanying ABI spec that explains how to build a\n# compatible binary (see the manylinux specs as examples).\n\n# These platforms can be handled by a simple static list:\n_allowed_platforms = {\n \"any\",\n \"win32\",\n \"win_amd64\",\n \"win_ia64\",\n \"manylinux1_x86_64\",\n \"manylinux1_i686\",\n \"manylinux2010_x86_64\",\n \"manylinux2010_i686\",\n \"linux_armv6l\",\n \"linux_armv7l\",\n}\n# macosx is a little more complicated:\n_macosx_platform_re = re.compile(r\"macosx_10_(\\d+)+_(?P<arch>.*)\")\n_macosx_arches = {\n \"ppc\",\n \"ppc64\",\n \"i386\",\n \"x86_64\",\n \"intel\",\n \"fat\",\n \"fat32\",\n \"fat64\",\n \"universal\",\n}\n\n\n# Actual checking code;\ndef _valid_platform_tag(platform_tag):\n if platform_tag in _allowed_platforms:\n return True\n m = _macosx_platform_re.match(platform_tag)\n if m 
and m.group(\"arch\") in _macosx_arches:\n return True\n return False\n\n\n_error_message_order = [\"metadata_version\", \"name\", \"version\"]\n\n\n_dist_file_regexes = {\n # True/False is for legacy or not.\n True: re.compile(r\".+?\\.(exe|tar\\.gz|bz2|rpm|deb|zip|tgz|egg|dmg|msi|whl)$\", re.I),\n False: re.compile(r\".+?\\.(tar\\.gz|zip|whl|egg)$\", re.I),\n}\n\n\n_wheel_file_re = re.compile(\n r\"\"\"\n ^\n (?P<namever>(?P<name>.+?)(-(?P<ver>\\d.+?))?)\n (\n (-(?P<build>\\d.*?))?\n -(?P<pyver>.+?)\n -(?P<abi>.+?)\n -(?P<plat>.+?)\n (?:\\.whl|\\.dist-info)\n )\n $\n \"\"\",\n re.VERBOSE,\n)\n\n\n_project_name_re = re.compile(\n r\"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$\", re.IGNORECASE\n)\n\n\n_legacy_specifier_re = re.compile(r\"^(?P<name>\\S+)(?: \\((?P<specifier>\\S+)\\))?$\")\n\n\n_valid_description_content_types = {\"text/plain\", \"text/x-rst\", \"text/markdown\"}\n\n_valid_markdown_variants = {\"CommonMark\", \"GFM\"}\n\n\ndef _exc_with_message(exc, message):\n # The crappy old API that PyPI offered uses the status to pass down\n # messages to the client. So this function will make that easier to do.\n resp = exc(message)\n resp.status = \"{} {}\".format(resp.status_code, message)\n return resp\n\n\ndef _validate_pep440_version(form, field):\n parsed = packaging.version.parse(field.data)\n\n # Check that this version is a valid PEP 440 version at all.\n if not isinstance(parsed, packaging.version.Version):\n raise wtforms.validators.ValidationError(\n \"Start and end with a letter or numeral containing only \"\n \"ASCII numeric and '.', '_' and '-'.\"\n )\n\n # Check that this version does not have a PEP 440 local segment attached\n # to it.\n if parsed.local is not None:\n raise wtforms.validators.ValidationError(\"Can't use PEP 440 local versions.\")\n\n\ndef _parse_legacy_requirement(requirement):\n parsed = _legacy_specifier_re.search(requirement)\n if parsed is None:\n raise ValueError(\"Invalid requirement.\")\n return parsed.groupdict()[\"name\"], parsed.groupdict()[\"specifier\"]\n\n\ndef _validate_pep440_specifier(specifier):\n try:\n packaging.specifiers.SpecifierSet(specifier)\n except packaging.specifiers.InvalidSpecifier:\n raise wtforms.validators.ValidationError(\n \"Invalid specifier in requirement.\"\n ) from None\n\n\ndef _validate_pep440_specifier_field(form, field):\n return _validate_pep440_specifier(field.data)\n\n\ndef _validate_legacy_non_dist_req(requirement):\n try:\n req = packaging.requirements.Requirement(requirement.replace(\"_\", \"\"))\n except packaging.requirements.InvalidRequirement:\n raise wtforms.validators.ValidationError(\n \"Invalid requirement: {!r}\".format(requirement)\n ) from None\n\n if req.url is not None:\n raise wtforms.validators.ValidationError(\n \"Can't direct dependency: {!r}\".format(requirement)\n )\n\n if any(\n not identifier.isalnum() or identifier[0].isdigit()\n for identifier in req.name.split(\".\")\n ):\n raise wtforms.validators.ValidationError(\"Use a valid Python identifier.\")\n\n\ndef _validate_legacy_non_dist_req_list(form, field):\n for datum in field.data:\n _validate_legacy_non_dist_req(datum)\n\n\ndef _validate_legacy_dist_req(requirement):\n try:\n req = packaging.requirements.Requirement(requirement)\n except packaging.requirements.InvalidRequirement:\n raise wtforms.validators.ValidationError(\n \"Invalid requirement: {!r}.\".format(requirement)\n ) from None\n\n if req.url is not None:\n raise wtforms.validators.ValidationError(\n \"Can't have direct dependency: {!r}\".format(requirement)\n )\n\n if 
any(packaging.version.Version(spec.version).local for spec in req.specifier):\n raise wtforms.validators.ValidationError(\n \"Can't have dependency with local version: {!r}\".format(requirement)\n )\n\n\ndef _validate_legacy_dist_req_list(form, field):\n for datum in field.data:\n _validate_legacy_dist_req(datum)\n\n\ndef _validate_requires_external(requirement):\n name, specifier = _parse_legacy_requirement(requirement)\n\n # TODO: Is it really reasonable to parse the specifier using PEP 440?\n if specifier is not None:\n _validate_pep440_specifier(specifier)\n\n\ndef _validate_requires_external_list(form, field):\n for datum in field.data:\n _validate_requires_external(datum)\n\n\ndef _validate_project_url(value):\n try:\n label, url = value.split(\", \", 1)\n except ValueError:\n raise wtforms.validators.ValidationError(\n \"Use both a label and an URL.\"\n ) from None\n\n if not label:\n raise wtforms.validators.ValidationError(\"Use a label.\")\n\n if len(label) > 32:\n raise wtforms.validators.ValidationError(\"Use 32 characters or less.\")\n\n if not url:\n raise wtforms.validators.ValidationError(\"Use an URL.\")\n\n if not http.is_valid_uri(url, require_authority=False):\n raise wtforms.validators.ValidationError(\"Use valid URL.\")\n\n\ndef _validate_project_url_list(form, field):\n for datum in field.data:\n _validate_project_url(datum)\n\n\ndef _validate_rfc822_email_field(form, field):\n email_validator = wtforms.validators.Email(message=\"Use a valid email address\")\n addresses = email.utils.getaddresses([field.data])\n\n for real_name, address in addresses:\n email_validator(form, type(\"field\", (), {\"data\": address}))\n\n\ndef _validate_description_content_type(form, field):\n def _raise(message):\n raise wtforms.validators.ValidationError(\n f\"Invalid description content type: {message}\"\n )\n\n content_type, parameters = parse_header(field.data)\n if content_type not in _valid_description_content_types:\n _raise(\"type/subtype is not valid\")\n\n charset = parameters.get(\"charset\")\n if charset and charset != \"UTF-8\":\n _raise(\"Use a valid charset\")\n\n variant = parameters.get(\"variant\")\n if (\n content_type == \"text/markdown\"\n and variant\n and variant not in _valid_markdown_variants\n ):\n _raise(\n \"Use a valid variant, expected one of {}\".format(\n \", \".join(_valid_markdown_variants)\n )\n )\n\n\ndef _construct_dependencies(form, types):\n for name, kind in types.items():\n for item in getattr(form, name).data:\n yield Dependency(kind=kind.value, specifier=item)\n\n\nclass ListField(wtforms.Field):\n def process_formdata(self, valuelist):\n self.data = [v.strip() for v in valuelist if v.strip()]\n\n\n# TODO: Eventually this whole validation thing should move to the packaging\n# library and we should just call that. 
However until PEP 426 is done\n# that library won't have an API for this.\nclass MetadataForm(forms.Form):\n\n # Metadata version\n metadata_version = wtforms.StringField(\n description=\"Metadata-Version\",\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.AnyOf(\n # Note: This isn't really Metadata 2.0, however bdist_wheel\n # claims it is producing a Metadata 2.0 metadata when in\n # reality it's more like 1.2 with some extensions.\n [\"1.0\", \"1.1\", \"1.2\", \"2.0\", \"2.1\"],\n message=\"Use a known metadata version.\",\n ),\n ],\n )\n\n # Identity Project and Release\n name = wtforms.StringField(\n description=\"Name\",\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.Regexp(\n _project_name_re,\n re.IGNORECASE,\n message=(\n \"Start and end with a letter or numeral containing \"\n \"only ASCII numeric and '.', '_' and '-'.\"\n ),\n ),\n ],\n )\n version = wtforms.StringField(\n description=\"Version\",\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.Regexp(\n r\"^(?!\\s).*(?<!\\s)$\",\n message=\"Can't have leading or trailing whitespace.\",\n ),\n _validate_pep440_version,\n ],\n )\n\n # Additional Release metadata\n summary = wtforms.StringField(\n description=\"Summary\",\n validators=[\n wtforms.validators.Optional(),\n wtforms.validators.Length(max=512),\n wtforms.validators.Regexp(\n r\"^.+$\", # Rely on the fact that . doesn't match a newline.\n message=\"Use a single line only.\",\n ),\n ],\n )\n description = wtforms.StringField(\n description=\"Description\", validators=[wtforms.validators.Optional()]\n )\n author = wtforms.StringField(\n description=\"Author\", validators=[wtforms.validators.Optional()]\n )\n description_content_type = wtforms.StringField(\n description=\"Description-Content-Type\",\n validators=[wtforms.validators.Optional(), _validate_description_content_type],\n )\n author_email = wtforms.StringField(\n description=\"Author-email\",\n validators=[wtforms.validators.Optional(), _validate_rfc822_email_field],\n )\n maintainer = wtforms.StringField(\n description=\"Maintainer\", validators=[wtforms.validators.Optional()]\n )\n maintainer_email = wtforms.StringField(\n description=\"Maintainer-email\",\n validators=[wtforms.validators.Optional(), _validate_rfc822_email_field],\n )\n license = wtforms.StringField(\n description=\"License\", validators=[wtforms.validators.Optional()]\n )\n keywords = wtforms.StringField(\n description=\"Keywords\", validators=[wtforms.validators.Optional()]\n )\n classifiers = wtforms.fields.SelectMultipleField(description=\"Classifier\")\n platform = wtforms.StringField(\n description=\"Platform\", validators=[wtforms.validators.Optional()]\n )\n\n # URLs\n home_page = wtforms.StringField(\n description=\"Home-Page\",\n validators=[wtforms.validators.Optional(), forms.URIValidator()],\n )\n download_url = wtforms.StringField(\n description=\"Download-URL\",\n validators=[wtforms.validators.Optional(), forms.URIValidator()],\n )\n\n # Dependency Information\n requires_python = wtforms.StringField(\n description=\"Requires-Python\",\n validators=[wtforms.validators.Optional(), _validate_pep440_specifier_field],\n )\n\n # File information\n pyversion = wtforms.StringField(validators=[wtforms.validators.Optional()])\n filetype = wtforms.StringField(\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.AnyOf(\n [\n \"bdist_dmg\",\n \"bdist_dumb\",\n \"bdist_egg\",\n \"bdist_msi\",\n \"bdist_rpm\",\n \"bdist_wheel\",\n 
\"bdist_wininst\",\n \"sdist\",\n ],\n message=\"Use a known file type.\",\n ),\n ]\n )\n comment = wtforms.StringField(validators=[wtforms.validators.Optional()])\n md5_digest = wtforms.StringField(validators=[wtforms.validators.Optional()])\n sha256_digest = wtforms.StringField(\n validators=[\n wtforms.validators.Optional(),\n wtforms.validators.Regexp(\n r\"^[A-F0-9]{64}$\",\n re.IGNORECASE,\n message=\"Use a valid, hex-encoded, SHA256 message digest.\",\n ),\n ]\n )\n blake2_256_digest = wtforms.StringField(\n validators=[\n wtforms.validators.Optional(),\n wtforms.validators.Regexp(\n r\"^[A-F0-9]{64}$\",\n re.IGNORECASE,\n message=\"Use a valid, hex-encoded, BLAKE2 message digest.\",\n ),\n ]\n )\n\n # Legacy dependency information\n requires = ListField(\n validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]\n )\n provides = ListField(\n validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]\n )\n obsoletes = ListField(\n validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]\n )\n\n # Newer dependency information\n requires_dist = ListField(\n description=\"Requires-Dist\",\n validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],\n )\n provides_dist = ListField(\n description=\"Provides-Dist\",\n validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],\n )\n obsoletes_dist = ListField(\n description=\"Obsoletes-Dist\",\n validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],\n )\n requires_external = ListField(\n description=\"Requires-External\",\n validators=[wtforms.validators.Optional(), _validate_requires_external_list],\n )\n\n # Newer metadata information\n project_urls = ListField(\n description=\"Project-URL\",\n validators=[wtforms.validators.Optional(), _validate_project_url_list],\n )\n\n def full_validate(self):\n # All non source releases *must* have a pyversion\n if (\n self.filetype.data\n and self.filetype.data != \"sdist\"\n and not self.pyversion.data\n ):\n raise wtforms.validators.ValidationError(\n \"Python version is required for binary distribution uploads.\"\n )\n\n # All source releases *must* have a pyversion of \"source\"\n if self.filetype.data == \"sdist\":\n if not self.pyversion.data:\n self.pyversion.data = \"source\"\n elif self.pyversion.data != \"source\":\n raise wtforms.validators.ValidationError(\n \"Use 'source' as Python version for an sdist.\"\n )\n\n # We *must* have at least one digest to verify against.\n if not self.md5_digest.data and not self.sha256_digest.data:\n raise wtforms.validators.ValidationError(\n \"Include at least one message digest.\"\n )\n\n\n_safe_zipnames = re.compile(r\"(purelib|platlib|headers|scripts|data).+\", re.I)\n# .tar uncompressed, .tar.gz .tgz, .tar.bz2 .tbz2\n_tar_filenames_re = re.compile(r\"\\.(?:tar$|t(?:ar\\.)?(?P<z_type>gz|bz2)$)\")\n\n\ndef _is_valid_dist_file(filename, filetype):\n \"\"\"\n Perform some basic checks to see whether the indicated file could be\n a valid distribution file.\n \"\"\"\n\n # If our file is a zipfile, then ensure that it's members are only\n # compressed with supported compression methods.\n if zipfile.is_zipfile(filename):\n with zipfile.ZipFile(filename) as zfp:\n for zinfo in zfp.infolist():\n if zinfo.compress_type not in {\n zipfile.ZIP_STORED,\n zipfile.ZIP_DEFLATED,\n }:\n return False\n\n tar_fn_match = _tar_filenames_re.search(filename)\n if tar_fn_match:\n # Ensure that this is a valid tar file, and that it contains PKG-INFO.\n 
z_type = tar_fn_match.group(\"z_type\") or \"\"\n try:\n with tarfile.open(filename, f\"r:{z_type}\") as tar:\n # This decompresses the entire stream to validate it and the\n # tar within. Easy CPU DoS attack. :/\n bad_tar = True\n member = tar.next()\n while member:\n parts = os.path.split(member.name)\n if len(parts) == 2 and parts[1] == \"PKG-INFO\":\n bad_tar = False\n member = tar.next()\n if bad_tar:\n return False\n except tarfile.ReadError:\n return False\n elif filename.endswith(\".exe\"):\n # The only valid filetype for a .exe file is \"bdist_wininst\".\n if filetype != \"bdist_wininst\":\n return False\n\n # Ensure that the .exe is a valid zip file, and that all of the files\n # contained within it have safe filenames.\n try:\n with zipfile.ZipFile(filename, \"r\") as zfp:\n # We need the no branch below to work around a bug in\n # coverage.py where it's detecting a missed branch where there\n # isn't one.\n for zipname in zfp.namelist(): # pragma: no branch\n if not _safe_zipnames.match(zipname):\n return False\n except zipfile.BadZipFile:\n return False\n elif filename.endswith(\".msi\"):\n # The only valid filetype for a .msi is \"bdist_msi\"\n if filetype != \"bdist_msi\":\n return False\n\n # Check the first 8 bytes of the MSI file. This was taken from the\n # legacy implementation of PyPI which itself took it from the\n # implementation of `file` I believe.\n with open(filename, \"rb\") as fp:\n if fp.read(8) != b\"\\xD0\\xCF\\x11\\xE0\\xA1\\xB1\\x1A\\xE1\":\n return False\n elif filename.endswith(\".zip\") or filename.endswith(\".egg\"):\n # Ensure that the .zip/.egg is a valid zip file, and that it has a\n # PKG-INFO file.\n try:\n with zipfile.ZipFile(filename, \"r\") as zfp:\n for zipname in zfp.namelist():\n parts = os.path.split(zipname)\n if len(parts) == 2 and parts[1] == \"PKG-INFO\":\n # We need the no branch below to work around a bug in\n # coverage.py where it's detecting a missed branch\n # where there isn't one.\n break # pragma: no branch\n else:\n return False\n except zipfile.BadZipFile:\n return False\n elif filename.endswith(\".whl\"):\n # Ensure that the .whl is a valid zip file, and that it has a WHEEL\n # file.\n try:\n with zipfile.ZipFile(filename, \"r\") as zfp:\n for zipname in zfp.namelist():\n parts = os.path.split(zipname)\n if len(parts) == 2 and parts[1] == \"WHEEL\":\n # We need the no branch below to work around a bug in\n # coverage.py where it's detecting a missed branch\n # where there isn't one.\n break # pragma: no branch\n else:\n return False\n except zipfile.BadZipFile:\n return False\n\n # If we haven't yet decided it's not valid, then we'll assume it is and\n # allow it.\n return True\n\n\ndef _is_duplicate_file(db_session, filename, hashes):\n \"\"\"\n Check to see if file already exists, and if it's content matches.\n A file is considered to exist if its filename *or* blake2 digest are\n present in a file row in the database.\n\n Returns:\n - True: This file is a duplicate and all further processing should halt.\n - False: This file exists, but it is not a duplicate.\n - None: This file does not exist.\n \"\"\"\n\n file_ = (\n db_session.query(File)\n .filter(\n (File.filename == filename)\n | (File.blake2_256_digest == hashes[\"blake2_256\"])\n )\n .first()\n )\n\n if file_ is not None:\n return (\n file_.filename == filename\n and file_.sha256_digest == hashes[\"sha256\"]\n and file_.md5_digest == hashes[\"md5\"]\n and file_.blake2_256_digest == hashes[\"blake2_256\"]\n )\n\n return None\n\n\ndef 
_no_deprecated_classifiers(request):\n deprecated_classifiers = {\n classifier.classifier\n for classifier in (\n request.db.query(Classifier.classifier)\n .filter(Classifier.deprecated.is_(True))\n .all()\n )\n }\n\n def validate_no_deprecated_classifiers(form, field):\n invalid_classifiers = set(field.data or []) & deprecated_classifiers\n if invalid_classifiers:\n first_invalid_classifier = sorted(invalid_classifiers)[0]\n host = request.registry.settings.get(\"warehouse.domain\")\n classifiers_url = request.route_url(\"classifiers\", _host=host)\n\n raise wtforms.validators.ValidationError(\n f\"Classifier {first_invalid_classifier!r} has been \"\n f\"deprecated, see {classifiers_url} for a list of valid \"\n \"classifiers.\"\n )\n\n return validate_no_deprecated_classifiers\n\n\n@view_config(\n route_name=\"forklift.legacy.file_upload\",\n uses_session=True,\n require_csrf=False,\n require_methods=[\"POST\"],\n)\ndef file_upload(request):\n # If we're in read-only mode, let upload clients know\n if request.flags.enabled(\"read-only\"):\n raise _exc_with_message(\n HTTPForbidden, \"Read-only mode: Uploads are temporarily disabled\"\n )\n\n # Log an attempt to upload\n metrics = request.find_service(IMetricsService, context=None)\n metrics.increment(\"warehouse.upload.attempt\")\n\n # Before we do anything, if there isn't an authenticated user with this\n # request, then we'll go ahead and bomb out.\n if request.authenticated_userid is None:\n raise _exc_with_message(\n HTTPForbidden, \"Invalid or non-existent authentication information.\"\n )\n\n # Ensure that user has a verified, primary email address. This should both\n # reduce the ease of spam account creation and activity, as well as act as\n # a forcing function for https://github.com/pypa/warehouse/issues/3632.\n # TODO: Once https://github.com/pypa/warehouse/issues/3632 has been solved,\n # we might consider a different condition, possibly looking at\n # User.is_active instead.\n if not (request.user.primary_email and request.user.primary_email.verified):\n raise _exc_with_message(\n HTTPBadRequest,\n (\n \"User {!r} does not have a verified primary email address. \"\n \"Please add a verified primary email before attempting to \"\n \"upload to PyPI. See {project_help} for more information.\"\n \"for more information.\"\n ).format(\n request.user.username,\n project_help=request.help_url(_anchor=\"verified-email\"),\n ),\n ) from None\n\n # Do some cleanup of the various form fields\n for key in list(request.POST):\n value = request.POST.get(key)\n if isinstance(value, str):\n # distutils \"helpfully\" substitutes unknown, but \"required\" values\n # with the string \"UNKNOWN\". This is basically never what anyone\n # actually wants so we'll just go ahead and delete anything whose\n # value is UNKNOWN.\n if value.strip() == \"UNKNOWN\":\n del request.POST[key]\n\n # Escape NUL characters, which psycopg doesn't like\n if \"\\x00\" in value:\n request.POST[key] = value.replace(\"\\x00\", \"\\\\x00\")\n\n # We require protocol_version 1, it's the only supported version however\n # passing a different version should raise an error.\n if request.POST.get(\"protocol_version\", \"1\") != \"1\":\n raise _exc_with_message(HTTPBadRequest, \"Unknown protocol version.\")\n\n # Check if any fields were supplied as a tuple and have become a\n # FieldStorage. 
The 'content' and 'gpg_signature' fields _should_ be a\n # FieldStorage, however.\n # ref: https://github.com/pypa/warehouse/issues/2185\n # ref: https://github.com/pypa/warehouse/issues/2491\n for field in set(request.POST) - {\"content\", \"gpg_signature\"}:\n values = request.POST.getall(field)\n if any(isinstance(value, FieldStorage) for value in values):\n raise _exc_with_message(HTTPBadRequest, f\"{field}: Should not be a tuple.\")\n\n # Look up all of the valid classifiers\n all_classifiers = request.db.query(Classifier).all()\n\n # Validate and process the incoming metadata.\n form = MetadataForm(request.POST)\n\n # Add a validator for deprecated classifiers\n form.classifiers.validators.append(_no_deprecated_classifiers(request))\n\n form.classifiers.choices = [(c.classifier, c.classifier) for c in all_classifiers]\n if not form.validate():\n for field_name in _error_message_order:\n if field_name in form.errors:\n break\n else:\n field_name = sorted(form.errors.keys())[0]\n\n if field_name in form:\n field = form[field_name]\n if field.description and isinstance(field, wtforms.StringField):\n error_message = (\n \"{value!r} is an invalid value for {field}. \".format(\n value=field.data, field=field.description\n )\n + \"Error: {} \".format(form.errors[field_name][0])\n + \"See \"\n \"https://packaging.python.org/specifications/core-metadata\"\n )\n else:\n error_message = \"Invalid value for {field}. Error: {msgs[0]}\".format(\n field=field_name, msgs=form.errors[field_name]\n )\n else:\n error_message = \"Error: {}\".format(form.errors[field_name][0])\n\n raise _exc_with_message(HTTPBadRequest, error_message)\n\n # Ensure that we have file data in the request.\n if \"content\" not in request.POST:\n raise _exc_with_message(HTTPBadRequest, \"Upload payload does not have a file.\")\n\n # Look up the project first before doing anything else, this is so we can\n # automatically register it if we need to and can check permissions before\n # going any further.\n try:\n project = (\n request.db.query(Project)\n .filter(\n Project.normalized_name == func.normalize_pep426_name(form.name.data)\n )\n .one()\n )\n except NoResultFound:\n # Check for AdminFlag set by a PyPI Administrator disabling new project\n # registration, reasons for this include Spammers, security\n # vulnerabilities, or just wanting to be lazy and not worry ;)\n if request.flags.enabled(\"disallow-new-project-registration\"):\n raise _exc_with_message(\n HTTPForbidden,\n (\n \"New project registration temporarily disabled. \"\n \"See {projecthelp} for details\"\n ).format(projecthelp=request.help_url(_anchor=\"admin-intervention\")),\n ) from None\n\n # Before we create the project, we're going to check our blacklist to\n # see if this project is even allowed to be registered. If it is not,\n # then we're going to deny the request to create this project.\n if request.db.query(\n exists().where(\n BlacklistedProject.name == func.normalize_pep426_name(form.name.data)\n )\n ).scalar():\n raise _exc_with_message(\n HTTPBadRequest,\n (\n \"The name {name!r} isn't allowed. 
\"\n \"See {projecthelp} \"\n \"for more information.\"\n ).format(\n name=form.name.data,\n projecthelp=request.help_url(_anchor=\"project-name\"),\n ),\n ) from None\n\n # Also check for collisions with Python Standard Library modules.\n if packaging.utils.canonicalize_name(form.name.data) in STDLIB_PROHIBITTED:\n raise _exc_with_message(\n HTTPBadRequest,\n (\n \"The name {name!r} isn't allowed (conflict with Python \"\n \"Standard Library module name). See \"\n \"{projecthelp} for more information.\"\n ).format(\n name=form.name.data,\n projecthelp=request.help_url(_anchor=\"project-name\"),\n ),\n ) from None\n\n # The project doesn't exist in our database, so first we'll check for\n # projects with a similar name\n squattees = (\n request.db.query(Project)\n .filter(\n func.levenshtein(\n Project.normalized_name, func.normalize_pep426_name(form.name.data)\n )\n <= 2\n )\n .all()\n )\n\n # Next we'll create the project\n project = Project(name=form.name.data)\n request.db.add(project)\n\n # Now that the project exists, add any squats which it is the squatter for\n for squattee in squattees:\n request.db.add(Squat(squatter=project, squattee=squattee))\n\n # Then we'll add a role setting the current user as the \"Owner\" of the\n # project.\n request.db.add(Role(user=request.user, project=project, role_name=\"Owner\"))\n # TODO: This should be handled by some sort of database trigger or a\n # SQLAlchemy hook or the like instead of doing it inline in this\n # view.\n request.db.add(\n JournalEntry(\n name=project.name,\n action=\"create\",\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n request.db.add(\n JournalEntry(\n name=project.name,\n action=\"add Owner {}\".format(request.user.username),\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n\n # Check that the user has permission to do things to this project, if this\n # is a new project this will act as a sanity check for the role we just\n # added above.\n if not request.has_permission(\"upload\", project):\n raise _exc_with_message(\n HTTPForbidden,\n (\n \"The credential associated with user '{0}' \"\n \"isn't allowed to upload to project '{1}'. \"\n \"See {2} for more information.\"\n ).format(\n request.user.username,\n project.name,\n request.help_url(_anchor=\"project-name\"),\n ),\n )\n\n # Update name if it differs but is still equivalent. 
We don't need to check if\n # they are equivalent when normalized because that's already been done when we\n # queried for the project.\n if project.name != form.name.data:\n project.name = form.name.data\n\n # Render our description so we can save from having to render this data every time\n # we load a project description page.\n rendered = None\n if form.description.data:\n description_content_type = form.description_content_type.data\n if not description_content_type:\n description_content_type = \"text/x-rst\"\n\n rendered = readme.render(\n form.description.data, description_content_type, use_fallback=False\n )\n\n # Uploading should prevent broken rendered descriptions.\n if rendered is None:\n if form.description_content_type.data:\n message = (\n \"The description failed to render \"\n \"for '{description_content_type}'.\"\n ).format(description_content_type=description_content_type)\n else:\n message = (\n \"The description failed to render \"\n \"in the default format of reStructuredText.\"\n )\n raise _exc_with_message(\n HTTPBadRequest,\n \"{message} See {projecthelp} for more information.\".format(\n message=message,\n projecthelp=request.help_url(_anchor=\"description-content-type\"),\n ),\n ) from None\n\n try:\n canonical_version = packaging.utils.canonicalize_version(form.version.data)\n release = (\n request.db.query(Release)\n .filter(\n (Release.project == project)\n & (Release.canonical_version == canonical_version)\n )\n .one()\n )\n except MultipleResultsFound:\n # There are multiple releases of this project which have the same\n # canonical version that were uploaded before we checked for\n # canonical version equivalence, so return the exact match instead\n release = (\n request.db.query(Release)\n .filter(\n (Release.project == project) & (Release.version == form.version.data)\n )\n .one()\n )\n except NoResultFound:\n release = Release(\n project=project,\n _classifiers=[\n c for c in all_classifiers if c.classifier in form.classifiers.data\n ],\n dependencies=list(\n _construct_dependencies(\n form,\n {\n \"requires\": DependencyKind.requires,\n \"provides\": DependencyKind.provides,\n \"obsoletes\": DependencyKind.obsoletes,\n \"requires_dist\": DependencyKind.requires_dist,\n \"provides_dist\": DependencyKind.provides_dist,\n \"obsoletes_dist\": DependencyKind.obsoletes_dist,\n \"requires_external\": DependencyKind.requires_external,\n \"project_urls\": DependencyKind.project_url,\n },\n )\n ),\n canonical_version=canonical_version,\n description=Description(\n content_type=form.description_content_type.data,\n raw=form.description.data or \"\",\n html=rendered or \"\",\n rendered_by=readme.renderer_version(),\n ),\n **{\n k: getattr(form, k).data\n for k in {\n # This is a list of all the fields in the form that we\n # should pull off and insert into our new release.\n \"version\",\n \"summary\",\n \"license\",\n \"author\",\n \"author_email\",\n \"maintainer\",\n \"maintainer_email\",\n \"keywords\",\n \"platform\",\n \"home_page\",\n \"download_url\",\n \"requires_python\",\n }\n },\n uploader=request.user,\n uploaded_via=request.user_agent,\n )\n request.db.add(release)\n # TODO: This should be handled by some sort of database trigger or\n # a SQLAlchemy hook or the like instead of doing it inline in\n # this view.\n request.db.add(\n JournalEntry(\n name=release.project.name,\n version=release.version,\n action=\"new release\",\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n\n # TODO: We need a better solution to this than to 
just do it inline inside\n # this method. Ideally the version field would just be sortable, but\n # at least this should be some sort of hook or trigger.\n releases = (\n request.db.query(Release)\n .filter(Release.project == project)\n .options(orm.load_only(Release._pypi_ordering))\n .all()\n )\n for i, r in enumerate(\n sorted(releases, key=lambda x: packaging.version.parse(x.version))\n ):\n r._pypi_ordering = i\n\n # Pull the filename out of our POST data.\n filename = request.POST[\"content\"].filename\n\n # Make sure that the filename does not contain any path separators.\n if \"/\" in filename or \"\\\\\" in filename:\n raise _exc_with_message(\n HTTPBadRequest, \"Cannot upload a file with '/' or '\\\\' in the name.\"\n )\n\n # Make sure the filename ends with an allowed extension.\n if _dist_file_regexes[project.allow_legacy_files].search(filename) is None:\n raise _exc_with_message(\n HTTPBadRequest,\n \"Invalid file extension: Use .egg, .tar.gz, .whl or .zip \"\n \"extension. (https://www.python.org/dev/peps/pep-0527)\",\n )\n\n # Make sure that our filename matches the project that it is being uploaded\n # to.\n prefix = pkg_resources.safe_name(project.name).lower()\n if not pkg_resources.safe_name(filename).lower().startswith(prefix):\n raise _exc_with_message(\n HTTPBadRequest,\n \"Start filename for {!r} with {!r}.\".format(project.name, prefix),\n )\n\n # Check the content type of what is being uploaded\n if not request.POST[\"content\"].type or request.POST[\"content\"].type.startswith(\n \"image/\"\n ):\n raise _exc_with_message(HTTPBadRequest, \"Invalid distribution file.\")\n\n # Ensure that the package filetype is allowed.\n # TODO: Once PEP 527 is completely implemented we should be able to delete\n # this and just move it into the form itself.\n if not project.allow_legacy_files and form.filetype.data not in {\n \"sdist\",\n \"bdist_wheel\",\n \"bdist_egg\",\n }:\n raise _exc_with_message(HTTPBadRequest, \"Unknown type of file.\")\n\n # The project may or may not have a file size specified on the project, if\n # it does then it may or may not be smaller or larger than our global file\n # size limits.\n file_size_limit = max(filter(None, [MAX_FILESIZE, project.upload_limit]))\n\n with tempfile.TemporaryDirectory() as tmpdir:\n temporary_filename = os.path.join(tmpdir, filename)\n\n # Buffer the entire file onto disk, checking the hash of the file as we\n # go along.\n with open(temporary_filename, \"wb\") as fp:\n file_size = 0\n file_hashes = {\n \"md5\": hashlib.md5(),\n \"sha256\": hashlib.sha256(),\n \"blake2_256\": hashlib.blake2b(digest_size=256 // 8),\n }\n for chunk in iter(lambda: request.POST[\"content\"].file.read(8096), b\"\"):\n file_size += len(chunk)\n if file_size > file_size_limit:\n raise _exc_with_message(\n HTTPBadRequest,\n \"File too large. \"\n + \"Limit for project {name!r} is {limit} MB. \".format(\n name=project.name, limit=file_size_limit // (1024 * 1024)\n )\n + \"See \"\n + request.help_url(_anchor=\"file-size-limit\"),\n )\n fp.write(chunk)\n for hasher in file_hashes.values():\n hasher.update(chunk)\n\n # Take our hash functions and compute the final hashes for them now.\n file_hashes = {k: h.hexdigest().lower() for k, h in file_hashes.items()}\n\n # Actually verify the digests that we've gotten. We're going to use\n # hmac.compare_digest even though we probably don't actually need to\n # because it's better safe than sorry. 
In the case of multiple digests\n # we expect them all to be given.\n if not all(\n [\n hmac.compare_digest(\n getattr(form, \"{}_digest\".format(digest_name)).data.lower(),\n digest_value,\n )\n for digest_name, digest_value in file_hashes.items()\n if getattr(form, \"{}_digest\".format(digest_name)).data\n ]\n ):\n raise _exc_with_message(\n HTTPBadRequest,\n \"The digest supplied does not match a digest calculated \"\n \"from the uploaded file.\",\n )\n\n # Check to see if the file that was uploaded exists already or not.\n is_duplicate = _is_duplicate_file(request.db, filename, file_hashes)\n if is_duplicate:\n return Response()\n elif is_duplicate is not None:\n raise _exc_with_message(\n HTTPBadRequest,\n # Note: Changing this error message to something that doesn't\n # start with \"File already exists\" will break the\n # --skip-existing functionality in twine\n # ref: https://github.com/pypa/warehouse/issues/3482\n # ref: https://github.com/pypa/twine/issues/332\n \"File already exists. See \"\n + request.help_url(_anchor=\"file-name-reuse\"),\n )\n\n # Check to see if the file that was uploaded exists in our filename log\n if request.db.query(\n request.db.query(Filename).filter(Filename.filename == filename).exists()\n ).scalar():\n raise _exc_with_message(\n HTTPBadRequest,\n \"This filename has already been used, use a \"\n \"different version. \"\n \"See \" + request.help_url(_anchor=\"file-name-reuse\"),\n )\n\n # Check to see if uploading this file would create a duplicate sdist\n # for the current release.\n if (\n form.filetype.data == \"sdist\"\n and request.db.query(\n request.db.query(File)\n .filter((File.release == release) & (File.packagetype == \"sdist\"))\n .exists()\n ).scalar()\n ):\n raise _exc_with_message(\n HTTPBadRequest, \"Only one sdist may be uploaded per release.\"\n )\n\n # Check the file to make sure it is a valid distribution file.\n if not _is_valid_dist_file(temporary_filename, form.filetype.data):\n raise _exc_with_message(HTTPBadRequest, \"Invalid distribution file.\")\n\n # Check that if it's a binary wheel, it's on a supported platform\n if filename.endswith(\".whl\"):\n wheel_info = _wheel_file_re.match(filename)\n plats = wheel_info.group(\"plat\").split(\".\")\n for plat in plats:\n if not _valid_platform_tag(plat):\n raise _exc_with_message(\n HTTPBadRequest,\n \"Binary wheel '{filename}' has an unsupported \"\n \"platform tag '{plat}'.\".format(filename=filename, plat=plat),\n )\n\n # Also buffer the entire signature file to disk.\n if \"gpg_signature\" in request.POST:\n has_signature = True\n with open(os.path.join(tmpdir, filename + \".asc\"), \"wb\") as fp:\n signature_size = 0\n for chunk in iter(\n lambda: request.POST[\"gpg_signature\"].file.read(8096), b\"\"\n ):\n signature_size += len(chunk)\n if signature_size > MAX_SIGSIZE:\n raise _exc_with_message(HTTPBadRequest, \"Signature too large.\")\n fp.write(chunk)\n\n # Check whether signature is ASCII armored\n with open(os.path.join(tmpdir, filename + \".asc\"), \"rb\") as fp:\n if not fp.read().startswith(b\"-----BEGIN PGP SIGNATURE-----\"):\n raise _exc_with_message(\n HTTPBadRequest, \"PGP signature isn't ASCII armored.\"\n )\n else:\n has_signature = False\n\n # TODO: This should be handled by some sort of database trigger or a\n # SQLAlchemy hook or the like instead of doing it inline in this\n # view.\n request.db.add(Filename(filename=filename))\n\n # Store the information about the file in the database.\n file_ = File(\n release=release,\n filename=filename,\n 
python_version=form.pyversion.data,\n packagetype=form.filetype.data,\n comment_text=form.comment.data,\n size=file_size,\n has_signature=bool(has_signature),\n md5_digest=file_hashes[\"md5\"],\n sha256_digest=file_hashes[\"sha256\"],\n blake2_256_digest=file_hashes[\"blake2_256\"],\n # Figure out what our filepath is going to be, we're going to use a\n # directory structure based on the hash of the file contents. This\n # will ensure that the contents of the file cannot change without\n # it also changing the path that the file is saved too.\n path=\"/\".join(\n [\n file_hashes[PATH_HASHER][:2],\n file_hashes[PATH_HASHER][2:4],\n file_hashes[PATH_HASHER][4:],\n filename,\n ]\n ),\n uploaded_via=request.user_agent,\n )\n request.db.add(file_)\n\n # TODO: This should be handled by some sort of database trigger or a\n # SQLAlchemy hook or the like instead of doing it inline in this\n # view.\n request.db.add(\n JournalEntry(\n name=release.project.name,\n version=release.version,\n action=\"add {python_version} file {filename}\".format(\n python_version=file_.python_version, filename=file_.filename\n ),\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n\n # TODO: We need a better answer about how to make this transactional so\n # this won't take affect until after a commit has happened, for\n # now we'll just ignore it and save it before the transaction is\n # committed.\n storage = request.find_service(IFileStorage)\n storage.store(\n file_.path,\n os.path.join(tmpdir, filename),\n meta={\n \"project\": file_.release.project.normalized_name,\n \"version\": file_.release.version,\n \"package-type\": file_.packagetype,\n \"python-version\": file_.python_version,\n },\n )\n if has_signature:\n storage.store(\n file_.pgp_path,\n os.path.join(tmpdir, filename + \".asc\"),\n meta={\n \"project\": file_.release.project.normalized_name,\n \"version\": file_.release.version,\n \"package-type\": file_.packagetype,\n \"python-version\": file_.python_version,\n },\n )\n\n # Log a successful upload\n metrics.increment(\"warehouse.upload.ok\", tags=[f\"filetype:{form.filetype.data}\"])\n\n return Response()\n\n\ndef _legacy_purge(status, *args, **kwargs):\n if status:\n requests.post(*args, **kwargs)\n\n\n@view_config(\n route_name=\"forklift.legacy.submit\", require_csrf=False, require_methods=[\"POST\"]\n)\n@view_config(\n route_name=\"forklift.legacy.submit_pkg_info\",\n require_csrf=False,\n require_methods=[\"POST\"],\n)\ndef submit(request):\n return _exc_with_message(\n HTTPGone,\n (\n \"Project pre-registration is no longer required or supported, \"\n \"upload your files instead.\"\n ),\n )\n\n\n@view_config(\n route_name=\"forklift.legacy.doc_upload\",\n require_csrf=False,\n require_methods=[\"POST\"],\n)\ndef doc_upload(request):\n return _exc_with_message(\n HTTPGone,\n \"Uploading documentation is no longer supported, we recommend using \"\n \"https://readthedocs.org/.\",\n )\n", "path": "warehouse/forklift/legacy.py" } ]
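Before any database work, the `file_upload` view above buffers the incoming distribution to disk in 8096-byte chunks, updating MD5, SHA-256, and BLAKE2b-256 hashers as it goes and rejecting the upload once the effective size limit is exceeded. The following is a minimal, self-contained sketch of that buffer-and-hash pattern only; the function name and the in-memory streams are illustrative and not part of warehouse.

```python
import hashlib
import io

CHUNK_SIZE = 8096  # same chunk size as the read loop in file_upload


def copy_with_hashes(src, dst, size_limit):
    """Copy *src* to *dst* in chunks, hashing as we go.

    Raises ValueError once more than *size_limit* bytes have been read,
    otherwise returns a dict of lower-cased hex digests.
    """
    hashers = {
        "md5": hashlib.md5(),
        "sha256": hashlib.sha256(),
        "blake2_256": hashlib.blake2b(digest_size=256 // 8),
    }
    size = 0
    for chunk in iter(lambda: src.read(CHUNK_SIZE), b""):
        size += len(chunk)
        if size > size_limit:
            raise ValueError("File too large.")
        dst.write(chunk)
        for hasher in hashers.values():
            hasher.update(chunk)
    return {name: h.hexdigest().lower() for name, h in hashers.items()}


digests = copy_with_hashes(io.BytesIO(b"example payload"), io.BytesIO(), 1024)
print(digests["sha256"])
```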
[ { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport email\nimport hashlib\nimport hmac\nimport os.path\nimport re\nimport tarfile\nimport tempfile\nimport zipfile\n\nfrom cgi import FieldStorage, parse_header\nfrom itertools import chain\n\nimport packaging.requirements\nimport packaging.specifiers\nimport packaging.utils\nimport packaging.version\nimport pkg_resources\nimport requests\nimport stdlib_list\nimport wtforms\nimport wtforms.validators\n\nfrom pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPGone\nfrom pyramid.response import Response\nfrom pyramid.view import view_config\nfrom sqlalchemy import exists, func, orm\nfrom sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound\n\nfrom warehouse import forms\nfrom warehouse.admin.squats import Squat\nfrom warehouse.classifiers.models import Classifier\nfrom warehouse.metrics import IMetricsService\nfrom warehouse.packaging.interfaces import IFileStorage\nfrom warehouse.packaging.models import (\n BlacklistedProject,\n Dependency,\n DependencyKind,\n Description,\n File,\n Filename,\n JournalEntry,\n Project,\n Release,\n Role,\n)\nfrom warehouse.utils import http, readme\n\nMAX_FILESIZE = 60 * 1024 * 1024 # 60M\nMAX_SIGSIZE = 8 * 1024 # 8K\n\nPATH_HASHER = \"blake2_256\"\n\n\ndef namespace_stdlib_list(module_list):\n for module_name in module_list:\n parts = module_name.split(\".\")\n for i, part in enumerate(parts):\n yield \".\".join(parts[: i + 1])\n\n\nSTDLIB_PROHIBITTED = {\n packaging.utils.canonicalize_name(s.rstrip(\"-_.\").lstrip(\"-_.\"))\n for s in chain.from_iterable(\n namespace_stdlib_list(stdlib_list.stdlib_list(version))\n for version in stdlib_list.short_versions\n )\n}\n\n# Wheel platform checking\n\n# Note: defining new platform ABI compatibility tags that don't\n# have a python.org binary release to anchor them is a\n# complex task that needs more than just OS+architecture info.\n# For Linux specifically, the platform ABI is defined by each\n# individual distro version, so wheels built on one version may\n# not even work on older versions of the same distro, let alone\n# a completely different distro.\n#\n# That means new entries should only be added given an\n# accompanying ABI spec that explains how to build a\n# compatible binary (see the manylinux specs as examples).\n\n# These platforms can be handled by a simple static list:\n_allowed_platforms = {\n \"any\",\n \"win32\",\n \"win_amd64\",\n \"win_ia64\",\n \"manylinux1_x86_64\",\n \"manylinux1_i686\",\n \"manylinux2010_x86_64\",\n \"manylinux2010_i686\",\n \"linux_armv6l\",\n \"linux_armv7l\",\n}\n# macosx is a little more complicated:\n_macosx_platform_re = re.compile(r\"macosx_10_(\\d+)+_(?P<arch>.*)\")\n_macosx_arches = {\n \"ppc\",\n \"ppc64\",\n \"i386\",\n \"x86_64\",\n \"intel\",\n \"fat\",\n \"fat32\",\n \"fat64\",\n \"universal\",\n}\n\n\n# Actual checking code;\ndef _valid_platform_tag(platform_tag):\n if platform_tag in _allowed_platforms:\n return True\n m = _macosx_platform_re.match(platform_tag)\n if m 
and m.group(\"arch\") in _macosx_arches:\n return True\n return False\n\n\n_error_message_order = [\"metadata_version\", \"name\", \"version\"]\n\n\n_dist_file_regexes = {\n # True/False is for legacy or not.\n True: re.compile(r\".+?\\.(exe|tar\\.gz|bz2|rpm|deb|zip|tgz|egg|dmg|msi|whl)$\", re.I),\n False: re.compile(r\".+?\\.(tar\\.gz|zip|whl|egg)$\", re.I),\n}\n\n\n_wheel_file_re = re.compile(\n r\"\"\"\n ^\n (?P<namever>(?P<name>.+?)(-(?P<ver>\\d.+?))?)\n (\n (-(?P<build>\\d.*?))?\n -(?P<pyver>.+?)\n -(?P<abi>.+?)\n -(?P<plat>.+?)\n (?:\\.whl|\\.dist-info)\n )\n $\n \"\"\",\n re.VERBOSE,\n)\n\n\n_project_name_re = re.compile(\n r\"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$\", re.IGNORECASE\n)\n\n\n_legacy_specifier_re = re.compile(r\"^(?P<name>\\S+)(?: \\((?P<specifier>\\S+)\\))?$\")\n\n\n_valid_description_content_types = {\"text/plain\", \"text/x-rst\", \"text/markdown\"}\n\n_valid_markdown_variants = {\"CommonMark\", \"GFM\"}\n\n\ndef _exc_with_message(exc, message):\n # The crappy old API that PyPI offered uses the status to pass down\n # messages to the client. So this function will make that easier to do.\n resp = exc(message)\n resp.status = \"{} {}\".format(resp.status_code, message)\n return resp\n\n\ndef _validate_pep440_version(form, field):\n parsed = packaging.version.parse(field.data)\n\n # Check that this version is a valid PEP 440 version at all.\n if not isinstance(parsed, packaging.version.Version):\n raise wtforms.validators.ValidationError(\n \"Start and end with a letter or numeral containing only \"\n \"ASCII numeric and '.', '_' and '-'.\"\n )\n\n # Check that this version does not have a PEP 440 local segment attached\n # to it.\n if parsed.local is not None:\n raise wtforms.validators.ValidationError(\"Can't use PEP 440 local versions.\")\n\n\ndef _parse_legacy_requirement(requirement):\n parsed = _legacy_specifier_re.search(requirement)\n if parsed is None:\n raise ValueError(\"Invalid requirement.\")\n return parsed.groupdict()[\"name\"], parsed.groupdict()[\"specifier\"]\n\n\ndef _validate_pep440_specifier(specifier):\n try:\n packaging.specifiers.SpecifierSet(specifier)\n except packaging.specifiers.InvalidSpecifier:\n raise wtforms.validators.ValidationError(\n \"Invalid specifier in requirement.\"\n ) from None\n\n\ndef _validate_pep440_specifier_field(form, field):\n return _validate_pep440_specifier(field.data)\n\n\ndef _validate_legacy_non_dist_req(requirement):\n try:\n req = packaging.requirements.Requirement(requirement.replace(\"_\", \"\"))\n except packaging.requirements.InvalidRequirement:\n raise wtforms.validators.ValidationError(\n \"Invalid requirement: {!r}\".format(requirement)\n ) from None\n\n if req.url is not None:\n raise wtforms.validators.ValidationError(\n \"Can't direct dependency: {!r}\".format(requirement)\n )\n\n if any(\n not identifier.isalnum() or identifier[0].isdigit()\n for identifier in req.name.split(\".\")\n ):\n raise wtforms.validators.ValidationError(\"Use a valid Python identifier.\")\n\n\ndef _validate_legacy_non_dist_req_list(form, field):\n for datum in field.data:\n _validate_legacy_non_dist_req(datum)\n\n\ndef _validate_legacy_dist_req(requirement):\n try:\n req = packaging.requirements.Requirement(requirement)\n except packaging.requirements.InvalidRequirement:\n raise wtforms.validators.ValidationError(\n \"Invalid requirement: {!r}.\".format(requirement)\n ) from None\n\n if req.url is not None:\n raise wtforms.validators.ValidationError(\n \"Can't have direct dependency: {!r}\".format(requirement)\n 
)\n\n\ndef _validate_legacy_dist_req_list(form, field):\n for datum in field.data:\n _validate_legacy_dist_req(datum)\n\n\ndef _validate_requires_external(requirement):\n name, specifier = _parse_legacy_requirement(requirement)\n\n # TODO: Is it really reasonable to parse the specifier using PEP 440?\n if specifier is not None:\n _validate_pep440_specifier(specifier)\n\n\ndef _validate_requires_external_list(form, field):\n for datum in field.data:\n _validate_requires_external(datum)\n\n\ndef _validate_project_url(value):\n try:\n label, url = value.split(\", \", 1)\n except ValueError:\n raise wtforms.validators.ValidationError(\n \"Use both a label and an URL.\"\n ) from None\n\n if not label:\n raise wtforms.validators.ValidationError(\"Use a label.\")\n\n if len(label) > 32:\n raise wtforms.validators.ValidationError(\"Use 32 characters or less.\")\n\n if not url:\n raise wtforms.validators.ValidationError(\"Use an URL.\")\n\n if not http.is_valid_uri(url, require_authority=False):\n raise wtforms.validators.ValidationError(\"Use valid URL.\")\n\n\ndef _validate_project_url_list(form, field):\n for datum in field.data:\n _validate_project_url(datum)\n\n\ndef _validate_rfc822_email_field(form, field):\n email_validator = wtforms.validators.Email(message=\"Use a valid email address\")\n addresses = email.utils.getaddresses([field.data])\n\n for real_name, address in addresses:\n email_validator(form, type(\"field\", (), {\"data\": address}))\n\n\ndef _validate_description_content_type(form, field):\n def _raise(message):\n raise wtforms.validators.ValidationError(\n f\"Invalid description content type: {message}\"\n )\n\n content_type, parameters = parse_header(field.data)\n if content_type not in _valid_description_content_types:\n _raise(\"type/subtype is not valid\")\n\n charset = parameters.get(\"charset\")\n if charset and charset != \"UTF-8\":\n _raise(\"Use a valid charset\")\n\n variant = parameters.get(\"variant\")\n if (\n content_type == \"text/markdown\"\n and variant\n and variant not in _valid_markdown_variants\n ):\n _raise(\n \"Use a valid variant, expected one of {}\".format(\n \", \".join(_valid_markdown_variants)\n )\n )\n\n\ndef _construct_dependencies(form, types):\n for name, kind in types.items():\n for item in getattr(form, name).data:\n yield Dependency(kind=kind.value, specifier=item)\n\n\nclass ListField(wtforms.Field):\n def process_formdata(self, valuelist):\n self.data = [v.strip() for v in valuelist if v.strip()]\n\n\n# TODO: Eventually this whole validation thing should move to the packaging\n# library and we should just call that. 
However until PEP 426 is done\n# that library won't have an API for this.\nclass MetadataForm(forms.Form):\n\n # Metadata version\n metadata_version = wtforms.StringField(\n description=\"Metadata-Version\",\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.AnyOf(\n # Note: This isn't really Metadata 2.0, however bdist_wheel\n # claims it is producing a Metadata 2.0 metadata when in\n # reality it's more like 1.2 with some extensions.\n [\"1.0\", \"1.1\", \"1.2\", \"2.0\", \"2.1\"],\n message=\"Use a known metadata version.\",\n ),\n ],\n )\n\n # Identity Project and Release\n name = wtforms.StringField(\n description=\"Name\",\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.Regexp(\n _project_name_re,\n re.IGNORECASE,\n message=(\n \"Start and end with a letter or numeral containing \"\n \"only ASCII numeric and '.', '_' and '-'.\"\n ),\n ),\n ],\n )\n version = wtforms.StringField(\n description=\"Version\",\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.Regexp(\n r\"^(?!\\s).*(?<!\\s)$\",\n message=\"Can't have leading or trailing whitespace.\",\n ),\n _validate_pep440_version,\n ],\n )\n\n # Additional Release metadata\n summary = wtforms.StringField(\n description=\"Summary\",\n validators=[\n wtforms.validators.Optional(),\n wtforms.validators.Length(max=512),\n wtforms.validators.Regexp(\n r\"^.+$\", # Rely on the fact that . doesn't match a newline.\n message=\"Use a single line only.\",\n ),\n ],\n )\n description = wtforms.StringField(\n description=\"Description\", validators=[wtforms.validators.Optional()]\n )\n author = wtforms.StringField(\n description=\"Author\", validators=[wtforms.validators.Optional()]\n )\n description_content_type = wtforms.StringField(\n description=\"Description-Content-Type\",\n validators=[wtforms.validators.Optional(), _validate_description_content_type],\n )\n author_email = wtforms.StringField(\n description=\"Author-email\",\n validators=[wtforms.validators.Optional(), _validate_rfc822_email_field],\n )\n maintainer = wtforms.StringField(\n description=\"Maintainer\", validators=[wtforms.validators.Optional()]\n )\n maintainer_email = wtforms.StringField(\n description=\"Maintainer-email\",\n validators=[wtforms.validators.Optional(), _validate_rfc822_email_field],\n )\n license = wtforms.StringField(\n description=\"License\", validators=[wtforms.validators.Optional()]\n )\n keywords = wtforms.StringField(\n description=\"Keywords\", validators=[wtforms.validators.Optional()]\n )\n classifiers = wtforms.fields.SelectMultipleField(description=\"Classifier\")\n platform = wtforms.StringField(\n description=\"Platform\", validators=[wtforms.validators.Optional()]\n )\n\n # URLs\n home_page = wtforms.StringField(\n description=\"Home-Page\",\n validators=[wtforms.validators.Optional(), forms.URIValidator()],\n )\n download_url = wtforms.StringField(\n description=\"Download-URL\",\n validators=[wtforms.validators.Optional(), forms.URIValidator()],\n )\n\n # Dependency Information\n requires_python = wtforms.StringField(\n description=\"Requires-Python\",\n validators=[wtforms.validators.Optional(), _validate_pep440_specifier_field],\n )\n\n # File information\n pyversion = wtforms.StringField(validators=[wtforms.validators.Optional()])\n filetype = wtforms.StringField(\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.AnyOf(\n [\n \"bdist_dmg\",\n \"bdist_dumb\",\n \"bdist_egg\",\n \"bdist_msi\",\n \"bdist_rpm\",\n \"bdist_wheel\",\n 
\"bdist_wininst\",\n \"sdist\",\n ],\n message=\"Use a known file type.\",\n ),\n ]\n )\n comment = wtforms.StringField(validators=[wtforms.validators.Optional()])\n md5_digest = wtforms.StringField(validators=[wtforms.validators.Optional()])\n sha256_digest = wtforms.StringField(\n validators=[\n wtforms.validators.Optional(),\n wtforms.validators.Regexp(\n r\"^[A-F0-9]{64}$\",\n re.IGNORECASE,\n message=\"Use a valid, hex-encoded, SHA256 message digest.\",\n ),\n ]\n )\n blake2_256_digest = wtforms.StringField(\n validators=[\n wtforms.validators.Optional(),\n wtforms.validators.Regexp(\n r\"^[A-F0-9]{64}$\",\n re.IGNORECASE,\n message=\"Use a valid, hex-encoded, BLAKE2 message digest.\",\n ),\n ]\n )\n\n # Legacy dependency information\n requires = ListField(\n validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]\n )\n provides = ListField(\n validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]\n )\n obsoletes = ListField(\n validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]\n )\n\n # Newer dependency information\n requires_dist = ListField(\n description=\"Requires-Dist\",\n validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],\n )\n provides_dist = ListField(\n description=\"Provides-Dist\",\n validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],\n )\n obsoletes_dist = ListField(\n description=\"Obsoletes-Dist\",\n validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],\n )\n requires_external = ListField(\n description=\"Requires-External\",\n validators=[wtforms.validators.Optional(), _validate_requires_external_list],\n )\n\n # Newer metadata information\n project_urls = ListField(\n description=\"Project-URL\",\n validators=[wtforms.validators.Optional(), _validate_project_url_list],\n )\n\n def full_validate(self):\n # All non source releases *must* have a pyversion\n if (\n self.filetype.data\n and self.filetype.data != \"sdist\"\n and not self.pyversion.data\n ):\n raise wtforms.validators.ValidationError(\n \"Python version is required for binary distribution uploads.\"\n )\n\n # All source releases *must* have a pyversion of \"source\"\n if self.filetype.data == \"sdist\":\n if not self.pyversion.data:\n self.pyversion.data = \"source\"\n elif self.pyversion.data != \"source\":\n raise wtforms.validators.ValidationError(\n \"Use 'source' as Python version for an sdist.\"\n )\n\n # We *must* have at least one digest to verify against.\n if not self.md5_digest.data and not self.sha256_digest.data:\n raise wtforms.validators.ValidationError(\n \"Include at least one message digest.\"\n )\n\n\n_safe_zipnames = re.compile(r\"(purelib|platlib|headers|scripts|data).+\", re.I)\n# .tar uncompressed, .tar.gz .tgz, .tar.bz2 .tbz2\n_tar_filenames_re = re.compile(r\"\\.(?:tar$|t(?:ar\\.)?(?P<z_type>gz|bz2)$)\")\n\n\ndef _is_valid_dist_file(filename, filetype):\n \"\"\"\n Perform some basic checks to see whether the indicated file could be\n a valid distribution file.\n \"\"\"\n\n # If our file is a zipfile, then ensure that it's members are only\n # compressed with supported compression methods.\n if zipfile.is_zipfile(filename):\n with zipfile.ZipFile(filename) as zfp:\n for zinfo in zfp.infolist():\n if zinfo.compress_type not in {\n zipfile.ZIP_STORED,\n zipfile.ZIP_DEFLATED,\n }:\n return False\n\n tar_fn_match = _tar_filenames_re.search(filename)\n if tar_fn_match:\n # Ensure that this is a valid tar file, and that it contains PKG-INFO.\n 
z_type = tar_fn_match.group(\"z_type\") or \"\"\n try:\n with tarfile.open(filename, f\"r:{z_type}\") as tar:\n # This decompresses the entire stream to validate it and the\n # tar within. Easy CPU DoS attack. :/\n bad_tar = True\n member = tar.next()\n while member:\n parts = os.path.split(member.name)\n if len(parts) == 2 and parts[1] == \"PKG-INFO\":\n bad_tar = False\n member = tar.next()\n if bad_tar:\n return False\n except tarfile.ReadError:\n return False\n elif filename.endswith(\".exe\"):\n # The only valid filetype for a .exe file is \"bdist_wininst\".\n if filetype != \"bdist_wininst\":\n return False\n\n # Ensure that the .exe is a valid zip file, and that all of the files\n # contained within it have safe filenames.\n try:\n with zipfile.ZipFile(filename, \"r\") as zfp:\n # We need the no branch below to work around a bug in\n # coverage.py where it's detecting a missed branch where there\n # isn't one.\n for zipname in zfp.namelist(): # pragma: no branch\n if not _safe_zipnames.match(zipname):\n return False\n except zipfile.BadZipFile:\n return False\n elif filename.endswith(\".msi\"):\n # The only valid filetype for a .msi is \"bdist_msi\"\n if filetype != \"bdist_msi\":\n return False\n\n # Check the first 8 bytes of the MSI file. This was taken from the\n # legacy implementation of PyPI which itself took it from the\n # implementation of `file` I believe.\n with open(filename, \"rb\") as fp:\n if fp.read(8) != b\"\\xD0\\xCF\\x11\\xE0\\xA1\\xB1\\x1A\\xE1\":\n return False\n elif filename.endswith(\".zip\") or filename.endswith(\".egg\"):\n # Ensure that the .zip/.egg is a valid zip file, and that it has a\n # PKG-INFO file.\n try:\n with zipfile.ZipFile(filename, \"r\") as zfp:\n for zipname in zfp.namelist():\n parts = os.path.split(zipname)\n if len(parts) == 2 and parts[1] == \"PKG-INFO\":\n # We need the no branch below to work around a bug in\n # coverage.py where it's detecting a missed branch\n # where there isn't one.\n break # pragma: no branch\n else:\n return False\n except zipfile.BadZipFile:\n return False\n elif filename.endswith(\".whl\"):\n # Ensure that the .whl is a valid zip file, and that it has a WHEEL\n # file.\n try:\n with zipfile.ZipFile(filename, \"r\") as zfp:\n for zipname in zfp.namelist():\n parts = os.path.split(zipname)\n if len(parts) == 2 and parts[1] == \"WHEEL\":\n # We need the no branch below to work around a bug in\n # coverage.py where it's detecting a missed branch\n # where there isn't one.\n break # pragma: no branch\n else:\n return False\n except zipfile.BadZipFile:\n return False\n\n # If we haven't yet decided it's not valid, then we'll assume it is and\n # allow it.\n return True\n\n\ndef _is_duplicate_file(db_session, filename, hashes):\n \"\"\"\n Check to see if file already exists, and if it's content matches.\n A file is considered to exist if its filename *or* blake2 digest are\n present in a file row in the database.\n\n Returns:\n - True: This file is a duplicate and all further processing should halt.\n - False: This file exists, but it is not a duplicate.\n - None: This file does not exist.\n \"\"\"\n\n file_ = (\n db_session.query(File)\n .filter(\n (File.filename == filename)\n | (File.blake2_256_digest == hashes[\"blake2_256\"])\n )\n .first()\n )\n\n if file_ is not None:\n return (\n file_.filename == filename\n and file_.sha256_digest == hashes[\"sha256\"]\n and file_.md5_digest == hashes[\"md5\"]\n and file_.blake2_256_digest == hashes[\"blake2_256\"]\n )\n\n return None\n\n\ndef 
_no_deprecated_classifiers(request):\n deprecated_classifiers = {\n classifier.classifier\n for classifier in (\n request.db.query(Classifier.classifier)\n .filter(Classifier.deprecated.is_(True))\n .all()\n )\n }\n\n def validate_no_deprecated_classifiers(form, field):\n invalid_classifiers = set(field.data or []) & deprecated_classifiers\n if invalid_classifiers:\n first_invalid_classifier = sorted(invalid_classifiers)[0]\n host = request.registry.settings.get(\"warehouse.domain\")\n classifiers_url = request.route_url(\"classifiers\", _host=host)\n\n raise wtforms.validators.ValidationError(\n f\"Classifier {first_invalid_classifier!r} has been \"\n f\"deprecated, see {classifiers_url} for a list of valid \"\n \"classifiers.\"\n )\n\n return validate_no_deprecated_classifiers\n\n\n@view_config(\n route_name=\"forklift.legacy.file_upload\",\n uses_session=True,\n require_csrf=False,\n require_methods=[\"POST\"],\n)\ndef file_upload(request):\n # If we're in read-only mode, let upload clients know\n if request.flags.enabled(\"read-only\"):\n raise _exc_with_message(\n HTTPForbidden, \"Read-only mode: Uploads are temporarily disabled\"\n )\n\n # Log an attempt to upload\n metrics = request.find_service(IMetricsService, context=None)\n metrics.increment(\"warehouse.upload.attempt\")\n\n # Before we do anything, if there isn't an authenticated user with this\n # request, then we'll go ahead and bomb out.\n if request.authenticated_userid is None:\n raise _exc_with_message(\n HTTPForbidden, \"Invalid or non-existent authentication information.\"\n )\n\n # Ensure that user has a verified, primary email address. This should both\n # reduce the ease of spam account creation and activity, as well as act as\n # a forcing function for https://github.com/pypa/warehouse/issues/3632.\n # TODO: Once https://github.com/pypa/warehouse/issues/3632 has been solved,\n # we might consider a different condition, possibly looking at\n # User.is_active instead.\n if not (request.user.primary_email and request.user.primary_email.verified):\n raise _exc_with_message(\n HTTPBadRequest,\n (\n \"User {!r} does not have a verified primary email address. \"\n \"Please add a verified primary email before attempting to \"\n \"upload to PyPI. See {project_help} for more information.\"\n \"for more information.\"\n ).format(\n request.user.username,\n project_help=request.help_url(_anchor=\"verified-email\"),\n ),\n ) from None\n\n # Do some cleanup of the various form fields\n for key in list(request.POST):\n value = request.POST.get(key)\n if isinstance(value, str):\n # distutils \"helpfully\" substitutes unknown, but \"required\" values\n # with the string \"UNKNOWN\". This is basically never what anyone\n # actually wants so we'll just go ahead and delete anything whose\n # value is UNKNOWN.\n if value.strip() == \"UNKNOWN\":\n del request.POST[key]\n\n # Escape NUL characters, which psycopg doesn't like\n if \"\\x00\" in value:\n request.POST[key] = value.replace(\"\\x00\", \"\\\\x00\")\n\n # We require protocol_version 1, it's the only supported version however\n # passing a different version should raise an error.\n if request.POST.get(\"protocol_version\", \"1\") != \"1\":\n raise _exc_with_message(HTTPBadRequest, \"Unknown protocol version.\")\n\n # Check if any fields were supplied as a tuple and have become a\n # FieldStorage. 
The 'content' and 'gpg_signature' fields _should_ be a\n # FieldStorage, however.\n # ref: https://github.com/pypa/warehouse/issues/2185\n # ref: https://github.com/pypa/warehouse/issues/2491\n for field in set(request.POST) - {\"content\", \"gpg_signature\"}:\n values = request.POST.getall(field)\n if any(isinstance(value, FieldStorage) for value in values):\n raise _exc_with_message(HTTPBadRequest, f\"{field}: Should not be a tuple.\")\n\n # Look up all of the valid classifiers\n all_classifiers = request.db.query(Classifier).all()\n\n # Validate and process the incoming metadata.\n form = MetadataForm(request.POST)\n\n # Add a validator for deprecated classifiers\n form.classifiers.validators.append(_no_deprecated_classifiers(request))\n\n form.classifiers.choices = [(c.classifier, c.classifier) for c in all_classifiers]\n if not form.validate():\n for field_name in _error_message_order:\n if field_name in form.errors:\n break\n else:\n field_name = sorted(form.errors.keys())[0]\n\n if field_name in form:\n field = form[field_name]\n if field.description and isinstance(field, wtforms.StringField):\n error_message = (\n \"{value!r} is an invalid value for {field}. \".format(\n value=field.data, field=field.description\n )\n + \"Error: {} \".format(form.errors[field_name][0])\n + \"See \"\n \"https://packaging.python.org/specifications/core-metadata\"\n )\n else:\n error_message = \"Invalid value for {field}. Error: {msgs[0]}\".format(\n field=field_name, msgs=form.errors[field_name]\n )\n else:\n error_message = \"Error: {}\".format(form.errors[field_name][0])\n\n raise _exc_with_message(HTTPBadRequest, error_message)\n\n # Ensure that we have file data in the request.\n if \"content\" not in request.POST:\n raise _exc_with_message(HTTPBadRequest, \"Upload payload does not have a file.\")\n\n # Look up the project first before doing anything else, this is so we can\n # automatically register it if we need to and can check permissions before\n # going any further.\n try:\n project = (\n request.db.query(Project)\n .filter(\n Project.normalized_name == func.normalize_pep426_name(form.name.data)\n )\n .one()\n )\n except NoResultFound:\n # Check for AdminFlag set by a PyPI Administrator disabling new project\n # registration, reasons for this include Spammers, security\n # vulnerabilities, or just wanting to be lazy and not worry ;)\n if request.flags.enabled(\"disallow-new-project-registration\"):\n raise _exc_with_message(\n HTTPForbidden,\n (\n \"New project registration temporarily disabled. \"\n \"See {projecthelp} for details\"\n ).format(projecthelp=request.help_url(_anchor=\"admin-intervention\")),\n ) from None\n\n # Before we create the project, we're going to check our blacklist to\n # see if this project is even allowed to be registered. If it is not,\n # then we're going to deny the request to create this project.\n if request.db.query(\n exists().where(\n BlacklistedProject.name == func.normalize_pep426_name(form.name.data)\n )\n ).scalar():\n raise _exc_with_message(\n HTTPBadRequest,\n (\n \"The name {name!r} isn't allowed. 
\"\n \"See {projecthelp} \"\n \"for more information.\"\n ).format(\n name=form.name.data,\n projecthelp=request.help_url(_anchor=\"project-name\"),\n ),\n ) from None\n\n # Also check for collisions with Python Standard Library modules.\n if packaging.utils.canonicalize_name(form.name.data) in STDLIB_PROHIBITTED:\n raise _exc_with_message(\n HTTPBadRequest,\n (\n \"The name {name!r} isn't allowed (conflict with Python \"\n \"Standard Library module name). See \"\n \"{projecthelp} for more information.\"\n ).format(\n name=form.name.data,\n projecthelp=request.help_url(_anchor=\"project-name\"),\n ),\n ) from None\n\n # The project doesn't exist in our database, so first we'll check for\n # projects with a similar name\n squattees = (\n request.db.query(Project)\n .filter(\n func.levenshtein(\n Project.normalized_name, func.normalize_pep426_name(form.name.data)\n )\n <= 2\n )\n .all()\n )\n\n # Next we'll create the project\n project = Project(name=form.name.data)\n request.db.add(project)\n\n # Now that the project exists, add any squats which it is the squatter for\n for squattee in squattees:\n request.db.add(Squat(squatter=project, squattee=squattee))\n\n # Then we'll add a role setting the current user as the \"Owner\" of the\n # project.\n request.db.add(Role(user=request.user, project=project, role_name=\"Owner\"))\n # TODO: This should be handled by some sort of database trigger or a\n # SQLAlchemy hook or the like instead of doing it inline in this\n # view.\n request.db.add(\n JournalEntry(\n name=project.name,\n action=\"create\",\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n request.db.add(\n JournalEntry(\n name=project.name,\n action=\"add Owner {}\".format(request.user.username),\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n\n # Check that the user has permission to do things to this project, if this\n # is a new project this will act as a sanity check for the role we just\n # added above.\n if not request.has_permission(\"upload\", project):\n raise _exc_with_message(\n HTTPForbidden,\n (\n \"The credential associated with user '{0}' \"\n \"isn't allowed to upload to project '{1}'. \"\n \"See {2} for more information.\"\n ).format(\n request.user.username,\n project.name,\n request.help_url(_anchor=\"project-name\"),\n ),\n )\n\n # Update name if it differs but is still equivalent. 
We don't need to check if\n # they are equivalent when normalized because that's already been done when we\n # queried for the project.\n if project.name != form.name.data:\n project.name = form.name.data\n\n # Render our description so we can save from having to render this data every time\n # we load a project description page.\n rendered = None\n if form.description.data:\n description_content_type = form.description_content_type.data\n if not description_content_type:\n description_content_type = \"text/x-rst\"\n\n rendered = readme.render(\n form.description.data, description_content_type, use_fallback=False\n )\n\n # Uploading should prevent broken rendered descriptions.\n if rendered is None:\n if form.description_content_type.data:\n message = (\n \"The description failed to render \"\n \"for '{description_content_type}'.\"\n ).format(description_content_type=description_content_type)\n else:\n message = (\n \"The description failed to render \"\n \"in the default format of reStructuredText.\"\n )\n raise _exc_with_message(\n HTTPBadRequest,\n \"{message} See {projecthelp} for more information.\".format(\n message=message,\n projecthelp=request.help_url(_anchor=\"description-content-type\"),\n ),\n ) from None\n\n try:\n canonical_version = packaging.utils.canonicalize_version(form.version.data)\n release = (\n request.db.query(Release)\n .filter(\n (Release.project == project)\n & (Release.canonical_version == canonical_version)\n )\n .one()\n )\n except MultipleResultsFound:\n # There are multiple releases of this project which have the same\n # canonical version that were uploaded before we checked for\n # canonical version equivalence, so return the exact match instead\n release = (\n request.db.query(Release)\n .filter(\n (Release.project == project) & (Release.version == form.version.data)\n )\n .one()\n )\n except NoResultFound:\n release = Release(\n project=project,\n _classifiers=[\n c for c in all_classifiers if c.classifier in form.classifiers.data\n ],\n dependencies=list(\n _construct_dependencies(\n form,\n {\n \"requires\": DependencyKind.requires,\n \"provides\": DependencyKind.provides,\n \"obsoletes\": DependencyKind.obsoletes,\n \"requires_dist\": DependencyKind.requires_dist,\n \"provides_dist\": DependencyKind.provides_dist,\n \"obsoletes_dist\": DependencyKind.obsoletes_dist,\n \"requires_external\": DependencyKind.requires_external,\n \"project_urls\": DependencyKind.project_url,\n },\n )\n ),\n canonical_version=canonical_version,\n description=Description(\n content_type=form.description_content_type.data,\n raw=form.description.data or \"\",\n html=rendered or \"\",\n rendered_by=readme.renderer_version(),\n ),\n **{\n k: getattr(form, k).data\n for k in {\n # This is a list of all the fields in the form that we\n # should pull off and insert into our new release.\n \"version\",\n \"summary\",\n \"license\",\n \"author\",\n \"author_email\",\n \"maintainer\",\n \"maintainer_email\",\n \"keywords\",\n \"platform\",\n \"home_page\",\n \"download_url\",\n \"requires_python\",\n }\n },\n uploader=request.user,\n uploaded_via=request.user_agent,\n )\n request.db.add(release)\n # TODO: This should be handled by some sort of database trigger or\n # a SQLAlchemy hook or the like instead of doing it inline in\n # this view.\n request.db.add(\n JournalEntry(\n name=release.project.name,\n version=release.version,\n action=\"new release\",\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n\n # TODO: We need a better solution to this than to 
just do it inline inside\n # this method. Ideally the version field would just be sortable, but\n # at least this should be some sort of hook or trigger.\n releases = (\n request.db.query(Release)\n .filter(Release.project == project)\n .options(orm.load_only(Release._pypi_ordering))\n .all()\n )\n for i, r in enumerate(\n sorted(releases, key=lambda x: packaging.version.parse(x.version))\n ):\n r._pypi_ordering = i\n\n # Pull the filename out of our POST data.\n filename = request.POST[\"content\"].filename\n\n # Make sure that the filename does not contain any path separators.\n if \"/\" in filename or \"\\\\\" in filename:\n raise _exc_with_message(\n HTTPBadRequest, \"Cannot upload a file with '/' or '\\\\' in the name.\"\n )\n\n # Make sure the filename ends with an allowed extension.\n if _dist_file_regexes[project.allow_legacy_files].search(filename) is None:\n raise _exc_with_message(\n HTTPBadRequest,\n \"Invalid file extension: Use .egg, .tar.gz, .whl or .zip \"\n \"extension. (https://www.python.org/dev/peps/pep-0527)\",\n )\n\n # Make sure that our filename matches the project that it is being uploaded\n # to.\n prefix = pkg_resources.safe_name(project.name).lower()\n if not pkg_resources.safe_name(filename).lower().startswith(prefix):\n raise _exc_with_message(\n HTTPBadRequest,\n \"Start filename for {!r} with {!r}.\".format(project.name, prefix),\n )\n\n # Check the content type of what is being uploaded\n if not request.POST[\"content\"].type or request.POST[\"content\"].type.startswith(\n \"image/\"\n ):\n raise _exc_with_message(HTTPBadRequest, \"Invalid distribution file.\")\n\n # Ensure that the package filetype is allowed.\n # TODO: Once PEP 527 is completely implemented we should be able to delete\n # this and just move it into the form itself.\n if not project.allow_legacy_files and form.filetype.data not in {\n \"sdist\",\n \"bdist_wheel\",\n \"bdist_egg\",\n }:\n raise _exc_with_message(HTTPBadRequest, \"Unknown type of file.\")\n\n # The project may or may not have a file size specified on the project, if\n # it does then it may or may not be smaller or larger than our global file\n # size limits.\n file_size_limit = max(filter(None, [MAX_FILESIZE, project.upload_limit]))\n\n with tempfile.TemporaryDirectory() as tmpdir:\n temporary_filename = os.path.join(tmpdir, filename)\n\n # Buffer the entire file onto disk, checking the hash of the file as we\n # go along.\n with open(temporary_filename, \"wb\") as fp:\n file_size = 0\n file_hashes = {\n \"md5\": hashlib.md5(),\n \"sha256\": hashlib.sha256(),\n \"blake2_256\": hashlib.blake2b(digest_size=256 // 8),\n }\n for chunk in iter(lambda: request.POST[\"content\"].file.read(8096), b\"\"):\n file_size += len(chunk)\n if file_size > file_size_limit:\n raise _exc_with_message(\n HTTPBadRequest,\n \"File too large. \"\n + \"Limit for project {name!r} is {limit} MB. \".format(\n name=project.name, limit=file_size_limit // (1024 * 1024)\n )\n + \"See \"\n + request.help_url(_anchor=\"file-size-limit\"),\n )\n fp.write(chunk)\n for hasher in file_hashes.values():\n hasher.update(chunk)\n\n # Take our hash functions and compute the final hashes for them now.\n file_hashes = {k: h.hexdigest().lower() for k, h in file_hashes.items()}\n\n # Actually verify the digests that we've gotten. We're going to use\n # hmac.compare_digest even though we probably don't actually need to\n # because it's better safe than sorry. 
In the case of multiple digests\n # we expect them all to be given.\n if not all(\n [\n hmac.compare_digest(\n getattr(form, \"{}_digest\".format(digest_name)).data.lower(),\n digest_value,\n )\n for digest_name, digest_value in file_hashes.items()\n if getattr(form, \"{}_digest\".format(digest_name)).data\n ]\n ):\n raise _exc_with_message(\n HTTPBadRequest,\n \"The digest supplied does not match a digest calculated \"\n \"from the uploaded file.\",\n )\n\n # Check to see if the file that was uploaded exists already or not.\n is_duplicate = _is_duplicate_file(request.db, filename, file_hashes)\n if is_duplicate:\n return Response()\n elif is_duplicate is not None:\n raise _exc_with_message(\n HTTPBadRequest,\n # Note: Changing this error message to something that doesn't\n # start with \"File already exists\" will break the\n # --skip-existing functionality in twine\n # ref: https://github.com/pypa/warehouse/issues/3482\n # ref: https://github.com/pypa/twine/issues/332\n \"File already exists. See \"\n + request.help_url(_anchor=\"file-name-reuse\"),\n )\n\n # Check to see if the file that was uploaded exists in our filename log\n if request.db.query(\n request.db.query(Filename).filter(Filename.filename == filename).exists()\n ).scalar():\n raise _exc_with_message(\n HTTPBadRequest,\n \"This filename has already been used, use a \"\n \"different version. \"\n \"See \" + request.help_url(_anchor=\"file-name-reuse\"),\n )\n\n # Check to see if uploading this file would create a duplicate sdist\n # for the current release.\n if (\n form.filetype.data == \"sdist\"\n and request.db.query(\n request.db.query(File)\n .filter((File.release == release) & (File.packagetype == \"sdist\"))\n .exists()\n ).scalar()\n ):\n raise _exc_with_message(\n HTTPBadRequest, \"Only one sdist may be uploaded per release.\"\n )\n\n # Check the file to make sure it is a valid distribution file.\n if not _is_valid_dist_file(temporary_filename, form.filetype.data):\n raise _exc_with_message(HTTPBadRequest, \"Invalid distribution file.\")\n\n # Check that if it's a binary wheel, it's on a supported platform\n if filename.endswith(\".whl\"):\n wheel_info = _wheel_file_re.match(filename)\n plats = wheel_info.group(\"plat\").split(\".\")\n for plat in plats:\n if not _valid_platform_tag(plat):\n raise _exc_with_message(\n HTTPBadRequest,\n \"Binary wheel '{filename}' has an unsupported \"\n \"platform tag '{plat}'.\".format(filename=filename, plat=plat),\n )\n\n # Also buffer the entire signature file to disk.\n if \"gpg_signature\" in request.POST:\n has_signature = True\n with open(os.path.join(tmpdir, filename + \".asc\"), \"wb\") as fp:\n signature_size = 0\n for chunk in iter(\n lambda: request.POST[\"gpg_signature\"].file.read(8096), b\"\"\n ):\n signature_size += len(chunk)\n if signature_size > MAX_SIGSIZE:\n raise _exc_with_message(HTTPBadRequest, \"Signature too large.\")\n fp.write(chunk)\n\n # Check whether signature is ASCII armored\n with open(os.path.join(tmpdir, filename + \".asc\"), \"rb\") as fp:\n if not fp.read().startswith(b\"-----BEGIN PGP SIGNATURE-----\"):\n raise _exc_with_message(\n HTTPBadRequest, \"PGP signature isn't ASCII armored.\"\n )\n else:\n has_signature = False\n\n # TODO: This should be handled by some sort of database trigger or a\n # SQLAlchemy hook or the like instead of doing it inline in this\n # view.\n request.db.add(Filename(filename=filename))\n\n # Store the information about the file in the database.\n file_ = File(\n release=release,\n filename=filename,\n 
python_version=form.pyversion.data,\n packagetype=form.filetype.data,\n comment_text=form.comment.data,\n size=file_size,\n has_signature=bool(has_signature),\n md5_digest=file_hashes[\"md5\"],\n sha256_digest=file_hashes[\"sha256\"],\n blake2_256_digest=file_hashes[\"blake2_256\"],\n # Figure out what our filepath is going to be, we're going to use a\n # directory structure based on the hash of the file contents. This\n # will ensure that the contents of the file cannot change without\n # it also changing the path that the file is saved too.\n path=\"/\".join(\n [\n file_hashes[PATH_HASHER][:2],\n file_hashes[PATH_HASHER][2:4],\n file_hashes[PATH_HASHER][4:],\n filename,\n ]\n ),\n uploaded_via=request.user_agent,\n )\n request.db.add(file_)\n\n # TODO: This should be handled by some sort of database trigger or a\n # SQLAlchemy hook or the like instead of doing it inline in this\n # view.\n request.db.add(\n JournalEntry(\n name=release.project.name,\n version=release.version,\n action=\"add {python_version} file {filename}\".format(\n python_version=file_.python_version, filename=file_.filename\n ),\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n\n # TODO: We need a better answer about how to make this transactional so\n # this won't take affect until after a commit has happened, for\n # now we'll just ignore it and save it before the transaction is\n # committed.\n storage = request.find_service(IFileStorage)\n storage.store(\n file_.path,\n os.path.join(tmpdir, filename),\n meta={\n \"project\": file_.release.project.normalized_name,\n \"version\": file_.release.version,\n \"package-type\": file_.packagetype,\n \"python-version\": file_.python_version,\n },\n )\n if has_signature:\n storage.store(\n file_.pgp_path,\n os.path.join(tmpdir, filename + \".asc\"),\n meta={\n \"project\": file_.release.project.normalized_name,\n \"version\": file_.release.version,\n \"package-type\": file_.packagetype,\n \"python-version\": file_.python_version,\n },\n )\n\n # Log a successful upload\n metrics.increment(\"warehouse.upload.ok\", tags=[f\"filetype:{form.filetype.data}\"])\n\n return Response()\n\n\ndef _legacy_purge(status, *args, **kwargs):\n if status:\n requests.post(*args, **kwargs)\n\n\n@view_config(\n route_name=\"forklift.legacy.submit\", require_csrf=False, require_methods=[\"POST\"]\n)\n@view_config(\n route_name=\"forklift.legacy.submit_pkg_info\",\n require_csrf=False,\n require_methods=[\"POST\"],\n)\ndef submit(request):\n return _exc_with_message(\n HTTPGone,\n (\n \"Project pre-registration is no longer required or supported, \"\n \"upload your files instead.\"\n ),\n )\n\n\n@view_config(\n route_name=\"forklift.legacy.doc_upload\",\n require_csrf=False,\n require_methods=[\"POST\"],\n)\ndef doc_upload(request):\n return _exc_with_message(\n HTTPGone,\n \"Uploading documentation is no longer supported, we recommend using \"\n \"https://readthedocs.org/.\",\n )\n", "path": "warehouse/forklift/legacy.py" } ]
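After hashing, the view verifies any digests the client supplied against the ones just computed, using `hmac.compare_digest` and skipping algorithms for which no digest was sent. A standalone sketch of that comparison step; the `supplied` dictionary here is a stand-in for the `md5_digest` / `sha256_digest` / `blake2_256_digest` form fields.

```python
import hashlib
import hmac


def digests_match(computed, supplied):
    """True when every digest the client supplied matches the computed one."""
    return all(
        hmac.compare_digest(supplied[name].lower(), value)
        for name, value in computed.items()
        if supplied.get(name)  # only verify digests that were actually sent
    )


data = b"example payload"
computed = {
    "md5": hashlib.md5(data).hexdigest(),
    "sha256": hashlib.sha256(data).hexdigest(),
}
print(digests_match(computed, {"sha256": hashlib.sha256(data).hexdigest()}))  # True
print(digests_match(computed, {"md5": "0" * 32}))                             # False
```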
diff --git a/tests/unit/forklift/test_legacy.py b/tests/unit/forklift/test_legacy.py index 003610d8d508..ad3a28a33366 100644 --- a/tests/unit/forklift/test_legacy.py +++ b/tests/unit/forklift/test_legacy.py @@ -155,7 +155,8 @@ def test_validate_legacy_non_dist_req_list(self, monkeypatch): assert validator.calls == [pretend.call(datum) for datum in data] @pytest.mark.parametrize( - "requirement", ["foo (>=1.0)", "foo", "foo2", "foo-bar", "foo_bar"] + "requirement", + ["foo (>=1.0)", "foo", "foo2", "foo-bar", "foo_bar", "foo == 2.*"], ) def test_validate_legacy_dist_req_valid(self, requirement): legacy._validate_legacy_dist_req(requirement) @@ -170,7 +171,6 @@ def test_validate_legacy_dist_req_valid(self, requirement): "_foo", "_foo (>=1.0)", "name @ https://github.com/pypa", - "test-pypi-version-specifier-dep==0.0.1+cuda9", ], ) def test_validate_legacy_dist_req_invalid(self, requirement): diff --git a/warehouse/forklift/legacy.py b/warehouse/forklift/legacy.py index f4a33b20a130..74d6dfe4d432 100644 --- a/warehouse/forklift/legacy.py +++ b/warehouse/forklift/legacy.py @@ -252,11 +252,6 @@ def _validate_legacy_dist_req(requirement): "Can't have direct dependency: {!r}".format(requirement) ) - if any(packaging.version.Version(spec.version).local for spec in req.specifier): - raise wtforms.validators.ValidationError( - "Can't have dependency with local version: {!r}".format(requirement) - ) - def _validate_legacy_dist_req_list(form, field): for datum in field.data:
Upload to PyPI fails when dependency version contains * **Describe the bug** PyPI raises `400 Client error` when uploading a package that specifies a requirement using `== 2.*`. This is a valid version specifier according to [PEP 440](https://www.python.org/dev/peps/pep-0440/#compatible-release). The whole error is: ``` HTTPError: 400 Client Error: Invalid value for requires_dist. Error: Invalid version: '2.*' for url: https://test.pypi.org/legacy/ ``` **Expected behavior** Upload should pass with no errors. (Note that in the example below the expected behaviour would be to fail with an authentication error, as you don't have sufficient permissions on the project.) **To Reproduce** This is a minimal reproducer: https://github.com/dblenkus/warehouse-requirements-issue Install twine with `pip install twine` and try to upload the package with `twine upload -r testpypi dist/*`. **My Platform** MacOS 10.14.6 twine 1.13.0 **Additional context** This still worked a few days ago, on July 26th 2019.
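The diff above removes the local-version check because that check assumed every specifier carries a concrete PEP 440 version: it called `packaging.version.Version(spec.version).local`, and a wildcard such as `2.*` is valid inside a `==` clause but is not itself a parseable version. A small sketch with the `packaging` library shows the mismatch; treat it as illustrative rather than the exact warehouse code path:

```python
# Illustrative: a "foo == 2.*" requirement parses fine, but feeding its version text to
# Version() -- as the removed check did via Version(spec.version).local -- raises.
from packaging.requirements import Requirement
from packaging.version import InvalidVersion, Version

req = Requirement("foo == 2.*")
spec = next(iter(req.specifier))
print(spec.operator, spec.version)  # prints: == 2.*

try:
    Version(spec.version).local
except InvalidVersion as exc:
    print("rejected:", exc)  # '2.*' is a wildcard clause, not a concrete version
```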
pypa__pip-10029
[ { "content": "# -*- coding: utf-8 -*-\n# Copyright 2016-2018 Julien Danjou\n# Copyright 2017 Elisey Zanko\n# Copyright 2016 Étienne Bersac\n# Copyright 2016 Joshua Harlow\n# Copyright 2013-2014 Ray Holder\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ntry:\n from inspect import iscoroutinefunction\nexcept ImportError:\n iscoroutinefunction = None\n\ntry:\n import tornado\nexcept ImportError:\n tornado = None\n\nimport sys\nimport threading\nimport typing as t\nimport warnings\nfrom abc import ABCMeta, abstractmethod\nfrom concurrent import futures\n\n\nfrom pip._vendor import six\n\nfrom pip._vendor.tenacity import _utils\n\n# Import all built-in retry strategies for easier usage.\nfrom .retry import retry_base # noqa\nfrom .retry import retry_all # noqa\nfrom .retry import retry_always # noqa\nfrom .retry import retry_any # noqa\nfrom .retry import retry_if_exception # noqa\nfrom .retry import retry_if_exception_type # noqa\nfrom .retry import retry_if_not_result # noqa\nfrom .retry import retry_if_result # noqa\nfrom .retry import retry_never # noqa\nfrom .retry import retry_unless_exception_type # noqa\nfrom .retry import retry_if_exception_message # noqa\nfrom .retry import retry_if_not_exception_message # noqa\n\n# Import all nap strategies for easier usage.\nfrom .nap import sleep # noqa\nfrom .nap import sleep_using_event # noqa\n\n# Import all built-in stop strategies for easier usage.\nfrom .stop import stop_after_attempt # noqa\nfrom .stop import stop_after_delay # noqa\nfrom .stop import stop_all # noqa\nfrom .stop import stop_any # noqa\nfrom .stop import stop_never # noqa\nfrom .stop import stop_when_event_set # noqa\n\n# Import all built-in wait strategies for easier usage.\nfrom .wait import wait_chain # noqa\nfrom .wait import wait_combine # noqa\nfrom .wait import wait_exponential # noqa\nfrom .wait import wait_fixed # noqa\nfrom .wait import wait_incrementing # noqa\nfrom .wait import wait_none # noqa\nfrom .wait import wait_random # noqa\nfrom .wait import wait_random_exponential # noqa\nfrom .wait import wait_random_exponential as wait_full_jitter # noqa\n\n# Import all built-in before strategies for easier usage.\nfrom .before import before_log # noqa\nfrom .before import before_nothing # noqa\n\n# Import all built-in after strategies for easier usage.\nfrom .after import after_log # noqa\nfrom .after import after_nothing # noqa\n\n# Import all built-in after strategies for easier usage.\nfrom .before_sleep import before_sleep_log # noqa\nfrom .before_sleep import before_sleep_nothing # noqa\n\n\nWrappedFn = t.TypeVar(\"WrappedFn\", bound=t.Callable)\n\n\[email protected]\ndef retry(fn):\n # type: (WrappedFn) -> WrappedFn\n \"\"\"Type signature for @retry as a raw decorator.\"\"\"\n pass\n\n\[email protected]\ndef retry(*dargs, **dkw): # noqa\n # type: (...) 
-> t.Callable[[WrappedFn], WrappedFn]\n \"\"\"Type signature for the @retry() decorator constructor.\"\"\"\n pass\n\n\ndef retry(*dargs, **dkw): # noqa\n \"\"\"Wrap a function with a new `Retrying` object.\n\n :param dargs: positional arguments passed to Retrying object\n :param dkw: keyword arguments passed to the Retrying object\n \"\"\"\n # support both @retry and @retry() as valid syntax\n if len(dargs) == 1 and callable(dargs[0]):\n return retry()(dargs[0])\n else:\n\n def wrap(f):\n if isinstance(f, retry_base):\n warnings.warn(\n (\n \"Got retry_base instance ({cls}) as callable argument, \"\n + \"this will probably hang indefinitely (did you mean \"\n + \"retry={cls}(...)?)\"\n ).format(cls=f.__class__.__name__)\n )\n if iscoroutinefunction is not None and iscoroutinefunction(f):\n r = AsyncRetrying(*dargs, **dkw)\n elif (\n tornado\n and hasattr(tornado.gen, \"is_coroutine_function\")\n and tornado.gen.is_coroutine_function(f)\n ):\n r = TornadoRetrying(*dargs, **dkw)\n else:\n r = Retrying(*dargs, **dkw)\n\n return r.wraps(f)\n\n return wrap\n\n\nclass TryAgain(Exception):\n \"\"\"Always retry the executed function when raised.\"\"\"\n\n\nNO_RESULT = object()\n\n\nclass DoAttempt(object):\n pass\n\n\nclass DoSleep(float):\n pass\n\n\nclass BaseAction(object):\n \"\"\"Base class for representing actions to take by retry object.\n\n Concrete implementations must define:\n - __init__: to initialize all necessary fields\n - REPR_ATTRS: class variable specifying attributes to include in repr(self)\n - NAME: for identification in retry object methods and callbacks\n \"\"\"\n\n REPR_FIELDS = ()\n NAME = None\n\n def __repr__(self):\n state_str = \", \".join(\n \"%s=%r\" % (field, getattr(self, field)) for field in self.REPR_FIELDS\n )\n return \"%s(%s)\" % (type(self).__name__, state_str)\n\n def __str__(self):\n return repr(self)\n\n\nclass RetryAction(BaseAction):\n REPR_FIELDS = (\"sleep\",)\n NAME = \"retry\"\n\n def __init__(self, sleep):\n self.sleep = float(sleep)\n\n\n_unset = object()\n\n\ndef _first_set(first, second):\n return second if first is _unset else first\n\n\nclass RetryError(Exception):\n \"\"\"Encapsulates the last attempt instance right before giving up.\"\"\"\n\n def __init__(self, last_attempt):\n self.last_attempt = last_attempt\n super(RetryError, self).__init__(last_attempt)\n\n def reraise(self):\n if self.last_attempt.failed:\n raise self.last_attempt.result()\n raise self\n\n def __str__(self):\n return \"{0}[{1}]\".format(self.__class__.__name__, self.last_attempt)\n\n\nclass AttemptManager(object):\n \"\"\"Manage attempt context.\"\"\"\n\n def __init__(self, retry_state):\n self.retry_state = retry_state\n\n def __enter__(self):\n pass\n\n def __exit__(self, exc_type, exc_value, traceback):\n if isinstance(exc_value, BaseException):\n self.retry_state.set_exception((exc_type, exc_value, traceback))\n return True # Swallow exception.\n else:\n # We don't have the result, actually.\n self.retry_state.set_result(None)\n\n\nclass BaseRetrying(object):\n __metaclass__ = ABCMeta\n\n def __init__(\n self,\n sleep=sleep,\n stop=stop_never,\n wait=wait_none(),\n retry=retry_if_exception_type(),\n before=before_nothing,\n after=after_nothing,\n before_sleep=None,\n reraise=False,\n retry_error_cls=RetryError,\n retry_error_callback=None,\n ):\n self.sleep = sleep\n self.stop = stop\n self.wait = wait\n self.retry = retry\n self.before = before\n self.after = after\n self.before_sleep = before_sleep\n self.reraise = reraise\n self._local = threading.local()\n 
self.retry_error_cls = retry_error_cls\n self.retry_error_callback = retry_error_callback\n\n # This attribute was moved to RetryCallState and is deprecated on\n # Retrying objects but kept for backward compatibility.\n self.fn = None\n\n def copy(\n self,\n sleep=_unset,\n stop=_unset,\n wait=_unset,\n retry=_unset,\n before=_unset,\n after=_unset,\n before_sleep=_unset,\n reraise=_unset,\n retry_error_cls=_unset,\n retry_error_callback=_unset,\n ):\n \"\"\"Copy this object with some parameters changed if needed.\"\"\"\n return self.__class__(\n sleep=_first_set(sleep, self.sleep),\n stop=_first_set(stop, self.stop),\n wait=_first_set(wait, self.wait),\n retry=_first_set(retry, self.retry),\n before=_first_set(before, self.before),\n after=_first_set(after, self.after),\n before_sleep=_first_set(before_sleep, self.before_sleep),\n reraise=_first_set(reraise, self.reraise),\n retry_error_cls=_first_set(retry_error_cls, self.retry_error_cls),\n retry_error_callback=_first_set(\n retry_error_callback, self.retry_error_callback\n ),\n )\n\n def __repr__(self):\n attrs = dict(\n _utils.visible_attrs(self, attrs={\"me\": id(self)}),\n __class__=self.__class__.__name__,\n )\n return (\n \"<%(__class__)s object at 0x%(me)x (stop=%(stop)s, \"\n \"wait=%(wait)s, sleep=%(sleep)s, retry=%(retry)s, \"\n \"before=%(before)s, after=%(after)s)>\"\n ) % (attrs)\n\n @property\n def statistics(self):\n \"\"\"Return a dictionary of runtime statistics.\n\n This dictionary will be empty when the controller has never been\n ran. When it is running or has ran previously it should have (but\n may not) have useful and/or informational keys and values when\n running is underway and/or completed.\n\n .. warning:: The keys in this dictionary **should** be some what\n stable (not changing), but there existence **may**\n change between major releases as new statistics are\n gathered or removed so before accessing keys ensure that\n they actually exist and handle when they do not.\n\n .. 
note:: The values in this dictionary are local to the thread\n running call (so if multiple threads share the same retrying\n object - either directly or indirectly) they will each have\n there own view of statistics they have collected (in the\n future we may provide a way to aggregate the various\n statistics from each thread).\n \"\"\"\n try:\n return self._local.statistics\n except AttributeError:\n self._local.statistics = {}\n return self._local.statistics\n\n def wraps(self, f):\n \"\"\"Wrap a function for retrying.\n\n :param f: A function to wraps for retrying.\n \"\"\"\n\n @_utils.wraps(f)\n def wrapped_f(*args, **kw):\n return self(f, *args, **kw)\n\n def retry_with(*args, **kwargs):\n return self.copy(*args, **kwargs).wraps(f)\n\n wrapped_f.retry = self\n wrapped_f.retry_with = retry_with\n\n return wrapped_f\n\n def begin(self, fn):\n self.statistics.clear()\n self.statistics[\"start_time\"] = _utils.now()\n self.statistics[\"attempt_number\"] = 1\n self.statistics[\"idle_for\"] = 0\n self.fn = fn\n\n def iter(self, retry_state): # noqa\n fut = retry_state.outcome\n if fut is None:\n if self.before is not None:\n self.before(retry_state)\n return DoAttempt()\n\n is_explicit_retry = retry_state.outcome.failed and isinstance(\n retry_state.outcome.exception(), TryAgain\n )\n if not (is_explicit_retry or self.retry(retry_state=retry_state)):\n return fut.result()\n\n if self.after is not None:\n self.after(retry_state=retry_state)\n\n self.statistics[\"delay_since_first_attempt\"] = retry_state.seconds_since_start\n if self.stop(retry_state=retry_state):\n if self.retry_error_callback:\n return self.retry_error_callback(retry_state=retry_state)\n retry_exc = self.retry_error_cls(fut)\n if self.reraise:\n raise retry_exc.reraise()\n six.raise_from(retry_exc, fut.exception())\n\n if self.wait:\n sleep = self.wait(retry_state=retry_state)\n else:\n sleep = 0.0\n retry_state.next_action = RetryAction(sleep)\n retry_state.idle_for += sleep\n self.statistics[\"idle_for\"] += sleep\n self.statistics[\"attempt_number\"] += 1\n\n if self.before_sleep is not None:\n self.before_sleep(retry_state=retry_state)\n\n return DoSleep(sleep)\n\n def __iter__(self):\n self.begin(None)\n\n retry_state = RetryCallState(self, fn=None, args=(), kwargs={})\n while True:\n do = self.iter(retry_state=retry_state)\n if isinstance(do, DoAttempt):\n yield AttemptManager(retry_state=retry_state)\n elif isinstance(do, DoSleep):\n retry_state.prepare_for_next_attempt()\n self.sleep(do)\n else:\n break\n\n @abstractmethod\n def __call__(self, *args, **kwargs):\n pass\n\n def call(self, *args, **kwargs):\n \"\"\"Use ``__call__`` instead because this method is deprecated.\"\"\"\n warnings.warn(\n \"'call()' method is deprecated. 
\" + \"Use '__call__()' instead\",\n DeprecationWarning,\n )\n return self.__call__(*args, **kwargs)\n\n\nclass Retrying(BaseRetrying):\n \"\"\"Retrying controller.\"\"\"\n\n def __call__(self, fn, *args, **kwargs):\n self.begin(fn)\n\n retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)\n while True:\n do = self.iter(retry_state=retry_state)\n if isinstance(do, DoAttempt):\n try:\n result = fn(*args, **kwargs)\n except BaseException: # noqa: B902\n retry_state.set_exception(sys.exc_info())\n else:\n retry_state.set_result(result)\n elif isinstance(do, DoSleep):\n retry_state.prepare_for_next_attempt()\n self.sleep(do)\n else:\n return do\n\n\nclass Future(futures.Future):\n \"\"\"Encapsulates a (future or past) attempted call to a target function.\"\"\"\n\n def __init__(self, attempt_number):\n super(Future, self).__init__()\n self.attempt_number = attempt_number\n\n @property\n def failed(self):\n \"\"\"Return whether a exception is being held in this future.\"\"\"\n return self.exception() is not None\n\n @classmethod\n def construct(cls, attempt_number, value, has_exception):\n \"\"\"Construct a new Future object.\"\"\"\n fut = cls(attempt_number)\n if has_exception:\n fut.set_exception(value)\n else:\n fut.set_result(value)\n return fut\n\n\nclass RetryCallState(object):\n \"\"\"State related to a single call wrapped with Retrying.\"\"\"\n\n def __init__(self, retry_object, fn, args, kwargs):\n #: Retry call start timestamp\n self.start_time = _utils.now()\n #: Retry manager object\n self.retry_object = retry_object\n #: Function wrapped by this retry call\n self.fn = fn\n #: Arguments of the function wrapped by this retry call\n self.args = args\n #: Keyword arguments of the function wrapped by this retry call\n self.kwargs = kwargs\n\n #: The number of the current attempt\n self.attempt_number = 1\n #: Last outcome (result or exception) produced by the function\n self.outcome = None\n #: Timestamp of the last outcome\n self.outcome_timestamp = None\n #: Time spent sleeping in retries\n self.idle_for = 0\n #: Next action as decided by the retry manager\n self.next_action = None\n\n @property\n def seconds_since_start(self):\n if self.outcome_timestamp is None:\n return None\n return self.outcome_timestamp - self.start_time\n\n def prepare_for_next_attempt(self):\n self.outcome = None\n self.outcome_timestamp = None\n self.attempt_number += 1\n self.next_action = None\n\n def set_result(self, val):\n ts = _utils.now()\n fut = Future(self.attempt_number)\n fut.set_result(val)\n self.outcome, self.outcome_timestamp = fut, ts\n\n def set_exception(self, exc_info):\n ts = _utils.now()\n fut = Future(self.attempt_number)\n _utils.capture(fut, exc_info)\n self.outcome, self.outcome_timestamp = fut, ts\n\n\nif iscoroutinefunction:\n from pip._vendor.tenacity._asyncio import AsyncRetrying\n\nif tornado:\n from pip._vendor.tenacity.tornadoweb import TornadoRetrying\n", "path": "src/pip/_vendor/tenacity/__init__.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n# Copyright 2016-2018 Julien Danjou\n# Copyright 2017 Elisey Zanko\n# Copyright 2016 Étienne Bersac\n# Copyright 2016 Joshua Harlow\n# Copyright 2013-2014 Ray Holder\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ntry:\n from inspect import iscoroutinefunction\nexcept ImportError:\n iscoroutinefunction = None\n\n# Replace a conditional import with a hard-coded None so that pip does\n# not attempt to use tornado even if it is present in the environment.\n# If tornado is non-None, tenacity will attempt to execute some code\n# that is sensitive to the version of tornado, which could break pip\n# if an old version is found.\ntornado = None\n\nimport sys\nimport threading\nimport typing as t\nimport warnings\nfrom abc import ABCMeta, abstractmethod\nfrom concurrent import futures\n\n\nfrom pip._vendor import six\n\nfrom pip._vendor.tenacity import _utils\n\n# Import all built-in retry strategies for easier usage.\nfrom .retry import retry_base # noqa\nfrom .retry import retry_all # noqa\nfrom .retry import retry_always # noqa\nfrom .retry import retry_any # noqa\nfrom .retry import retry_if_exception # noqa\nfrom .retry import retry_if_exception_type # noqa\nfrom .retry import retry_if_not_result # noqa\nfrom .retry import retry_if_result # noqa\nfrom .retry import retry_never # noqa\nfrom .retry import retry_unless_exception_type # noqa\nfrom .retry import retry_if_exception_message # noqa\nfrom .retry import retry_if_not_exception_message # noqa\n\n# Import all nap strategies for easier usage.\nfrom .nap import sleep # noqa\nfrom .nap import sleep_using_event # noqa\n\n# Import all built-in stop strategies for easier usage.\nfrom .stop import stop_after_attempt # noqa\nfrom .stop import stop_after_delay # noqa\nfrom .stop import stop_all # noqa\nfrom .stop import stop_any # noqa\nfrom .stop import stop_never # noqa\nfrom .stop import stop_when_event_set # noqa\n\n# Import all built-in wait strategies for easier usage.\nfrom .wait import wait_chain # noqa\nfrom .wait import wait_combine # noqa\nfrom .wait import wait_exponential # noqa\nfrom .wait import wait_fixed # noqa\nfrom .wait import wait_incrementing # noqa\nfrom .wait import wait_none # noqa\nfrom .wait import wait_random # noqa\nfrom .wait import wait_random_exponential # noqa\nfrom .wait import wait_random_exponential as wait_full_jitter # noqa\n\n# Import all built-in before strategies for easier usage.\nfrom .before import before_log # noqa\nfrom .before import before_nothing # noqa\n\n# Import all built-in after strategies for easier usage.\nfrom .after import after_log # noqa\nfrom .after import after_nothing # noqa\n\n# Import all built-in after strategies for easier usage.\nfrom .before_sleep import before_sleep_log # noqa\nfrom .before_sleep import before_sleep_nothing # noqa\n\n\nWrappedFn = t.TypeVar(\"WrappedFn\", bound=t.Callable)\n\n\[email protected]\ndef retry(fn):\n # type: (WrappedFn) -> WrappedFn\n \"\"\"Type signature for @retry as a raw decorator.\"\"\"\n pass\n\n\[email 
protected]\ndef retry(*dargs, **dkw): # noqa\n # type: (...) -> t.Callable[[WrappedFn], WrappedFn]\n \"\"\"Type signature for the @retry() decorator constructor.\"\"\"\n pass\n\n\ndef retry(*dargs, **dkw): # noqa\n \"\"\"Wrap a function with a new `Retrying` object.\n\n :param dargs: positional arguments passed to Retrying object\n :param dkw: keyword arguments passed to the Retrying object\n \"\"\"\n # support both @retry and @retry() as valid syntax\n if len(dargs) == 1 and callable(dargs[0]):\n return retry()(dargs[0])\n else:\n\n def wrap(f):\n if isinstance(f, retry_base):\n warnings.warn(\n (\n \"Got retry_base instance ({cls}) as callable argument, \"\n + \"this will probably hang indefinitely (did you mean \"\n + \"retry={cls}(...)?)\"\n ).format(cls=f.__class__.__name__)\n )\n if iscoroutinefunction is not None and iscoroutinefunction(f):\n r = AsyncRetrying(*dargs, **dkw)\n elif (\n tornado\n and hasattr(tornado.gen, \"is_coroutine_function\")\n and tornado.gen.is_coroutine_function(f)\n ):\n r = TornadoRetrying(*dargs, **dkw)\n else:\n r = Retrying(*dargs, **dkw)\n\n return r.wraps(f)\n\n return wrap\n\n\nclass TryAgain(Exception):\n \"\"\"Always retry the executed function when raised.\"\"\"\n\n\nNO_RESULT = object()\n\n\nclass DoAttempt(object):\n pass\n\n\nclass DoSleep(float):\n pass\n\n\nclass BaseAction(object):\n \"\"\"Base class for representing actions to take by retry object.\n\n Concrete implementations must define:\n - __init__: to initialize all necessary fields\n - REPR_ATTRS: class variable specifying attributes to include in repr(self)\n - NAME: for identification in retry object methods and callbacks\n \"\"\"\n\n REPR_FIELDS = ()\n NAME = None\n\n def __repr__(self):\n state_str = \", \".join(\n \"%s=%r\" % (field, getattr(self, field)) for field in self.REPR_FIELDS\n )\n return \"%s(%s)\" % (type(self).__name__, state_str)\n\n def __str__(self):\n return repr(self)\n\n\nclass RetryAction(BaseAction):\n REPR_FIELDS = (\"sleep\",)\n NAME = \"retry\"\n\n def __init__(self, sleep):\n self.sleep = float(sleep)\n\n\n_unset = object()\n\n\ndef _first_set(first, second):\n return second if first is _unset else first\n\n\nclass RetryError(Exception):\n \"\"\"Encapsulates the last attempt instance right before giving up.\"\"\"\n\n def __init__(self, last_attempt):\n self.last_attempt = last_attempt\n super(RetryError, self).__init__(last_attempt)\n\n def reraise(self):\n if self.last_attempt.failed:\n raise self.last_attempt.result()\n raise self\n\n def __str__(self):\n return \"{0}[{1}]\".format(self.__class__.__name__, self.last_attempt)\n\n\nclass AttemptManager(object):\n \"\"\"Manage attempt context.\"\"\"\n\n def __init__(self, retry_state):\n self.retry_state = retry_state\n\n def __enter__(self):\n pass\n\n def __exit__(self, exc_type, exc_value, traceback):\n if isinstance(exc_value, BaseException):\n self.retry_state.set_exception((exc_type, exc_value, traceback))\n return True # Swallow exception.\n else:\n # We don't have the result, actually.\n self.retry_state.set_result(None)\n\n\nclass BaseRetrying(object):\n __metaclass__ = ABCMeta\n\n def __init__(\n self,\n sleep=sleep,\n stop=stop_never,\n wait=wait_none(),\n retry=retry_if_exception_type(),\n before=before_nothing,\n after=after_nothing,\n before_sleep=None,\n reraise=False,\n retry_error_cls=RetryError,\n retry_error_callback=None,\n ):\n self.sleep = sleep\n self.stop = stop\n self.wait = wait\n self.retry = retry\n self.before = before\n self.after = after\n self.before_sleep = before_sleep\n 
self.reraise = reraise\n self._local = threading.local()\n self.retry_error_cls = retry_error_cls\n self.retry_error_callback = retry_error_callback\n\n # This attribute was moved to RetryCallState and is deprecated on\n # Retrying objects but kept for backward compatibility.\n self.fn = None\n\n def copy(\n self,\n sleep=_unset,\n stop=_unset,\n wait=_unset,\n retry=_unset,\n before=_unset,\n after=_unset,\n before_sleep=_unset,\n reraise=_unset,\n retry_error_cls=_unset,\n retry_error_callback=_unset,\n ):\n \"\"\"Copy this object with some parameters changed if needed.\"\"\"\n return self.__class__(\n sleep=_first_set(sleep, self.sleep),\n stop=_first_set(stop, self.stop),\n wait=_first_set(wait, self.wait),\n retry=_first_set(retry, self.retry),\n before=_first_set(before, self.before),\n after=_first_set(after, self.after),\n before_sleep=_first_set(before_sleep, self.before_sleep),\n reraise=_first_set(reraise, self.reraise),\n retry_error_cls=_first_set(retry_error_cls, self.retry_error_cls),\n retry_error_callback=_first_set(\n retry_error_callback, self.retry_error_callback\n ),\n )\n\n def __repr__(self):\n attrs = dict(\n _utils.visible_attrs(self, attrs={\"me\": id(self)}),\n __class__=self.__class__.__name__,\n )\n return (\n \"<%(__class__)s object at 0x%(me)x (stop=%(stop)s, \"\n \"wait=%(wait)s, sleep=%(sleep)s, retry=%(retry)s, \"\n \"before=%(before)s, after=%(after)s)>\"\n ) % (attrs)\n\n @property\n def statistics(self):\n \"\"\"Return a dictionary of runtime statistics.\n\n This dictionary will be empty when the controller has never been\n ran. When it is running or has ran previously it should have (but\n may not) have useful and/or informational keys and values when\n running is underway and/or completed.\n\n .. warning:: The keys in this dictionary **should** be some what\n stable (not changing), but there existence **may**\n change between major releases as new statistics are\n gathered or removed so before accessing keys ensure that\n they actually exist and handle when they do not.\n\n .. 
note:: The values in this dictionary are local to the thread\n running call (so if multiple threads share the same retrying\n object - either directly or indirectly) they will each have\n there own view of statistics they have collected (in the\n future we may provide a way to aggregate the various\n statistics from each thread).\n \"\"\"\n try:\n return self._local.statistics\n except AttributeError:\n self._local.statistics = {}\n return self._local.statistics\n\n def wraps(self, f):\n \"\"\"Wrap a function for retrying.\n\n :param f: A function to wraps for retrying.\n \"\"\"\n\n @_utils.wraps(f)\n def wrapped_f(*args, **kw):\n return self(f, *args, **kw)\n\n def retry_with(*args, **kwargs):\n return self.copy(*args, **kwargs).wraps(f)\n\n wrapped_f.retry = self\n wrapped_f.retry_with = retry_with\n\n return wrapped_f\n\n def begin(self, fn):\n self.statistics.clear()\n self.statistics[\"start_time\"] = _utils.now()\n self.statistics[\"attempt_number\"] = 1\n self.statistics[\"idle_for\"] = 0\n self.fn = fn\n\n def iter(self, retry_state): # noqa\n fut = retry_state.outcome\n if fut is None:\n if self.before is not None:\n self.before(retry_state)\n return DoAttempt()\n\n is_explicit_retry = retry_state.outcome.failed and isinstance(\n retry_state.outcome.exception(), TryAgain\n )\n if not (is_explicit_retry or self.retry(retry_state=retry_state)):\n return fut.result()\n\n if self.after is not None:\n self.after(retry_state=retry_state)\n\n self.statistics[\"delay_since_first_attempt\"] = retry_state.seconds_since_start\n if self.stop(retry_state=retry_state):\n if self.retry_error_callback:\n return self.retry_error_callback(retry_state=retry_state)\n retry_exc = self.retry_error_cls(fut)\n if self.reraise:\n raise retry_exc.reraise()\n six.raise_from(retry_exc, fut.exception())\n\n if self.wait:\n sleep = self.wait(retry_state=retry_state)\n else:\n sleep = 0.0\n retry_state.next_action = RetryAction(sleep)\n retry_state.idle_for += sleep\n self.statistics[\"idle_for\"] += sleep\n self.statistics[\"attempt_number\"] += 1\n\n if self.before_sleep is not None:\n self.before_sleep(retry_state=retry_state)\n\n return DoSleep(sleep)\n\n def __iter__(self):\n self.begin(None)\n\n retry_state = RetryCallState(self, fn=None, args=(), kwargs={})\n while True:\n do = self.iter(retry_state=retry_state)\n if isinstance(do, DoAttempt):\n yield AttemptManager(retry_state=retry_state)\n elif isinstance(do, DoSleep):\n retry_state.prepare_for_next_attempt()\n self.sleep(do)\n else:\n break\n\n @abstractmethod\n def __call__(self, *args, **kwargs):\n pass\n\n def call(self, *args, **kwargs):\n \"\"\"Use ``__call__`` instead because this method is deprecated.\"\"\"\n warnings.warn(\n \"'call()' method is deprecated. 
\" + \"Use '__call__()' instead\",\n DeprecationWarning,\n )\n return self.__call__(*args, **kwargs)\n\n\nclass Retrying(BaseRetrying):\n \"\"\"Retrying controller.\"\"\"\n\n def __call__(self, fn, *args, **kwargs):\n self.begin(fn)\n\n retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)\n while True:\n do = self.iter(retry_state=retry_state)\n if isinstance(do, DoAttempt):\n try:\n result = fn(*args, **kwargs)\n except BaseException: # noqa: B902\n retry_state.set_exception(sys.exc_info())\n else:\n retry_state.set_result(result)\n elif isinstance(do, DoSleep):\n retry_state.prepare_for_next_attempt()\n self.sleep(do)\n else:\n return do\n\n\nclass Future(futures.Future):\n \"\"\"Encapsulates a (future or past) attempted call to a target function.\"\"\"\n\n def __init__(self, attempt_number):\n super(Future, self).__init__()\n self.attempt_number = attempt_number\n\n @property\n def failed(self):\n \"\"\"Return whether a exception is being held in this future.\"\"\"\n return self.exception() is not None\n\n @classmethod\n def construct(cls, attempt_number, value, has_exception):\n \"\"\"Construct a new Future object.\"\"\"\n fut = cls(attempt_number)\n if has_exception:\n fut.set_exception(value)\n else:\n fut.set_result(value)\n return fut\n\n\nclass RetryCallState(object):\n \"\"\"State related to a single call wrapped with Retrying.\"\"\"\n\n def __init__(self, retry_object, fn, args, kwargs):\n #: Retry call start timestamp\n self.start_time = _utils.now()\n #: Retry manager object\n self.retry_object = retry_object\n #: Function wrapped by this retry call\n self.fn = fn\n #: Arguments of the function wrapped by this retry call\n self.args = args\n #: Keyword arguments of the function wrapped by this retry call\n self.kwargs = kwargs\n\n #: The number of the current attempt\n self.attempt_number = 1\n #: Last outcome (result or exception) produced by the function\n self.outcome = None\n #: Timestamp of the last outcome\n self.outcome_timestamp = None\n #: Time spent sleeping in retries\n self.idle_for = 0\n #: Next action as decided by the retry manager\n self.next_action = None\n\n @property\n def seconds_since_start(self):\n if self.outcome_timestamp is None:\n return None\n return self.outcome_timestamp - self.start_time\n\n def prepare_for_next_attempt(self):\n self.outcome = None\n self.outcome_timestamp = None\n self.attempt_number += 1\n self.next_action = None\n\n def set_result(self, val):\n ts = _utils.now()\n fut = Future(self.attempt_number)\n fut.set_result(val)\n self.outcome, self.outcome_timestamp = fut, ts\n\n def set_exception(self, exc_info):\n ts = _utils.now()\n fut = Future(self.attempt_number)\n _utils.capture(fut, exc_info)\n self.outcome, self.outcome_timestamp = fut, ts\n\n\nif iscoroutinefunction:\n from pip._vendor.tenacity._asyncio import AsyncRetrying\n\nif tornado:\n from pip._vendor.tenacity.tornadoweb import TornadoRetrying\n", "path": "src/pip/_vendor/tenacity/__init__.py" } ]
diff --git a/news/10020.bugfix.rst b/news/10020.bugfix.rst new file mode 100644 index 00000000000..9425626fb07 --- /dev/null +++ b/news/10020.bugfix.rst @@ -0,0 +1 @@ +Remove unused optional ``tornado`` import in vendored ``tenacity`` to prevent old versions of Tornado from breaking pip. diff --git a/src/pip/_vendor/tenacity/__init__.py b/src/pip/_vendor/tenacity/__init__.py index 5f8cb505896..42e9d8940b1 100644 --- a/src/pip/_vendor/tenacity/__init__.py +++ b/src/pip/_vendor/tenacity/__init__.py @@ -22,10 +22,12 @@ except ImportError: iscoroutinefunction = None -try: - import tornado -except ImportError: - tornado = None +# Replace a conditional import with a hard-coded None so that pip does +# not attempt to use tornado even if it is present in the environment. +# If tornado is non-None, tenacity will attempt to execute some code +# that is sensitive to the version of tornado, which could break pip +# if an old version is found. +tornado = None import sys import threading diff --git a/tools/vendoring/patches/tenacity.patch b/tools/vendoring/patches/tenacity.patch new file mode 100644 index 00000000000..006588b3653 --- /dev/null +++ b/tools/vendoring/patches/tenacity.patch @@ -0,0 +1,21 @@ +diff --git a/src/pip/_vendor/tenacity/__init__.py b/src/pip/_vendor/tenacity/__init__.py +index 5f8cb5058..42e9d8940 100644 +--- a/src/pip/_vendor/tenacity/__init__.py ++++ b/src/pip/_vendor/tenacity/__init__.py +@@ -22,10 +22,12 @@ try: + except ImportError: + iscoroutinefunction = None + +-try: +- import tornado +-except ImportError: +- tornado = None ++# Replace a conditional import with a hard-coded None so that pip does ++# not attempt to use tornado even if it is present in the environment. ++# If tornado is non-None, tenacity will attempt to execute some code ++# that is sensitive to the version of tornado, which could break pip ++# if an old version is found. ++tornado = None + + import sys + import threading
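A quick way to see the effect of this patch once it is applied (a hedged sanity-check sketch, not part of the change itself): the module-level `tornado` name is now always `None`, so the guarded `TornadoRetrying` import at the bottom of the file can never run, regardless of which Tornado version is installed next to pip.

```python
# Hedged sanity check against the patched vendored module (requires pip's vendor tree).
import pip._vendor.tenacity as tenacity

assert tenacity.tornado is None                    # hard-coded to None by the patch
assert not hasattr(tenacity, "TornadoRetrying")    # the `if tornado:` guard never fires
print("tenacity no longer reacts to an installed tornado")
```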
Vendoring of `tenacity` is leaky ### Description Tenacity contains a [conditional import of tornado](https://github.com/pypa/pip/blob/88eb4f092e58f3aee1d389ad4f9047df46e44bb4/src/pip/_vendor/tenacity/__init__.py#L25-L28). This makes the behavior of `pip` sensitive to third-party packages outside of its vendor directory. Specifically, if a version of tornado that does not include the `tornado.gen.sleep` function is installed, `pip` will fail to start. (This is unlikely since this function has been around a long time and we have no plans of deprecating it or removing it. But we do have a report of this happening in https://github.com/tornadoweb/tornado/issues/3034) ### Expected behavior Pip should not be affected by the presence or absence of any other third-party packages. Any conditional imports in its vendored dependencies should be modified to unconditionally fail (e.g. replace the above-linked block with `tornado = None`). ### pip version 21.1.2 ### Python version 3.8 ### OS linux ### How to Reproduce 1. `pip install tornado==4.0.0` 2. `pip --version` ### Output ```sh-session ~/ImpressionableVoluminousCategories$ pip --version pip 21.1.2 from /opt/virtualenvs/python3/lib/python3.8/site-packages/pip (python 3.8) ~/ImpressionableVoluminousCategories$ pip install tornado==4.0.0 Collecting tornado==4.0.0 Downloading tornado-4.0.tar.gz (313 kB) |████████████████████████████████| 313 kB 4.5 MB/s Requirement already satisfied: certifi in /opt/virtualenvs/python3/lib/python3.8/site-packages (from tornado==4.0.0) (2020.12.5) Building wheels for collected packages: tornado Building wheel for tornado (setup.py) ... done Created wheel for tornado: filename=tornado-4.0-cp38-cp38-linux_x86_64.whl size=344556 sha256=d9c5e6911e5bdac5b90db4b33d01891562365e235396bd336380dd45cb61a9b7 Stored in directory: /home/runner/.cache/pip/wheels/9a/d7/93/a846246f95067512a78899329bdb84a695d693e67c28a4e71f Successfully built tornado Installing collected packages: tornado Successfully installed tornado-4.0 ~/ImpressionableVoluminousCategories$ pip --version Traceback (most recent call last): File "/opt/virtualenvs/python3/bin/pip", line 5, in <module> from pip._internal.cli.main import main File "/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_internal/cli/main.py", line 9, in <module> from pip._internal.cli.autocompletion import autocomplete File "/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_internal/cli/autocompletion.py", line 10, in <module> from pip._internal.cli.main_parser import create_main_parser File "/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_internal/cli/main_parser.py", line 8, in <module> from pip._internal.cli import cmdoptions File "/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_internal/cli/cmdoptions.py", line 23, in <module> from pip._internal.cli.parser import ConfigOptionParser File "/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_internal/cli/parser.py", line 12, in <module> from pip._internal.configuration import Configuration, ConfigurationError File "/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_internal/configuration.py", line 27, in <module> from pip._internal.utils.misc import ensure_dir, enum File "/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_internal/utils/misc.py", line 38, in <module> from pip._vendor.tenacity import retry, stop_after_delay, wait_fixed File "/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_vendor/tenacity/__init__.py", line 523, in <module> from pip._vendor.tenacity.tornadoweb 
import TornadoRetrying File "/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_vendor/tenacity/tornadoweb.py", line 26, in <module> class TornadoRetrying(BaseRetrying): File "/opt/virtualenvs/python3/lib/python3.8/site-packages/pip/_vendor/tenacity/tornadoweb.py", line 27, in TornadoRetrying def __init__(self, sleep=gen.sleep, **kwargs): AttributeError: module 'tornado.gen' has no attribute 'sleep' ~/ImpressionableVoluminousCategories$ ``` ### Code of Conduct - [X] I agree to follow the [PSF Code of Conduct](https://www.python.org/psf/conduct/).
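The traceback ends inside `def __init__(self, sleep=gen.sleep, **kwargs)` because default argument values are evaluated when the `def` statement runs, i.e. while the class body is being imported; that is why pip fails at startup rather than when Tornado retrying is actually used. A small self-contained sketch of that failure mode (it fakes an old `tornado.gen` without `sleep`, since the real behaviour depends on the installed Tornado version):

```python
# Illustrative: why merely importing tenacity.tornadoweb crashes when tornado.gen
# lacks `sleep` -- the default value is evaluated at import time, inside the class body.
import types

gen = types.SimpleNamespace()  # stands in for an old tornado.gen without `sleep`

try:
    class TornadoRetryingLike:
        def __init__(self, sleep=gen.sleep, **kwargs):  # evaluated right now
            self.sleep = sleep
except AttributeError as exc:
    print("import-time failure:", exc)
```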
spotify__luigi-2323
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2012-2015 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport abc\nimport logging\nimport operator\nimport os\nimport subprocess\nimport tempfile\nimport warnings\n\nfrom luigi import six\n\nimport luigi\nimport luigi.contrib.hadoop\nfrom luigi.target import FileAlreadyExists, FileSystemTarget\nfrom luigi.task import flatten\n\nif six.PY3:\n unicode = str\n\nlogger = logging.getLogger('luigi-interface')\n\n\nclass HiveCommandError(RuntimeError):\n\n def __init__(self, message, out=None, err=None):\n super(HiveCommandError, self).__init__(message, out, err)\n self.message = message\n self.out = out\n self.err = err\n\n\ndef load_hive_cmd():\n return luigi.configuration.get_config().get('hive', 'command', 'hive').split(' ')\n\n\ndef get_hive_syntax():\n return luigi.configuration.get_config().get('hive', 'release', 'cdh4')\n\n\ndef run_hive(args, check_return_code=True):\n \"\"\"\n Runs the `hive` from the command line, passing in the given args, and\n returning stdout.\n\n With the apache release of Hive, so of the table existence checks\n (which are done using DESCRIBE do not exit with a return code of 0\n so we need an option to ignore the return code and just return stdout for parsing\n \"\"\"\n cmd = load_hive_cmd() + args\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = p.communicate()\n if check_return_code and p.returncode != 0:\n raise HiveCommandError(\"Hive command: {0} failed with error code: {1}\".format(\" \".join(cmd), p.returncode),\n stdout, stderr)\n return stdout\n\n\ndef run_hive_cmd(hivecmd, check_return_code=True):\n \"\"\"\n Runs the given hive query and returns stdout.\n \"\"\"\n return run_hive(['-e', hivecmd], check_return_code)\n\n\ndef run_hive_script(script):\n \"\"\"\n Runs the contents of the given script in hive and returns stdout.\n \"\"\"\n if not os.path.isfile(script):\n raise RuntimeError(\"Hive script: {0} does not exist.\".format(script))\n return run_hive(['-f', script])\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass HiveClient(object): # interface\n\n @abc.abstractmethod\n def table_location(self, table, database='default', partition=None):\n \"\"\"\n Returns location of db.table (or db.table.partition). partition is a dict of partition key to\n value.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def table_schema(self, table, database='default'):\n \"\"\"\n Returns list of [(name, type)] for each column in database.table.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def table_exists(self, table, database='default', partition=None):\n \"\"\"\n Returns true if db.table (or db.table.partition) exists. 
partition is a dict of partition key to\n value.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def partition_spec(self, partition):\n \"\"\" Turn a dict into a string partition specification \"\"\"\n pass\n\n\nclass HiveCommandClient(HiveClient):\n \"\"\"\n Uses `hive` invocations to find information.\n \"\"\"\n\n def table_location(self, table, database='default', partition=None):\n cmd = \"use {0}; describe formatted {1}\".format(database, table)\n if partition is not None:\n cmd += \" PARTITION ({0})\".format(self.partition_spec(partition))\n\n stdout = run_hive_cmd(cmd)\n\n for line in stdout.split(\"\\n\"):\n if \"Location:\" in line:\n return line.split(\"\\t\")[1]\n\n def table_exists(self, table, database='default', partition=None):\n if partition is None:\n stdout = run_hive_cmd('use {0}; show tables like \"{1}\";'.format(database, table))\n\n return stdout and table.lower() in stdout\n else:\n stdout = run_hive_cmd(\"\"\"use %s; show partitions %s partition\n (%s)\"\"\" % (database, table, self.partition_spec(partition)))\n\n if stdout:\n return True\n else:\n return False\n\n def table_schema(self, table, database='default'):\n describe = run_hive_cmd(\"use {0}; describe {1}\".format(database, table))\n if not describe or \"does not exist\" in describe:\n return None\n return [tuple([x.strip() for x in line.strip().split(\"\\t\")]) for line in describe.strip().split(\"\\n\")]\n\n def partition_spec(self, partition):\n \"\"\"\n Turns a dict into the a Hive partition specification string.\n \"\"\"\n return ','.join([\"`{0}`='{1}'\".format(k, v) for (k, v) in\n sorted(six.iteritems(partition), key=operator.itemgetter(0))])\n\n\nclass ApacheHiveCommandClient(HiveCommandClient):\n \"\"\"\n A subclass for the HiveCommandClient to (in some cases) ignore the return code from\n the hive command so that we can just parse the output.\n \"\"\"\n\n def table_schema(self, table, database='default'):\n describe = run_hive_cmd(\"use {0}; describe {1}\".format(database, table), False)\n if not describe or \"Table not found\" in describe:\n return None\n return [tuple([x.strip() for x in line.strip().split(\"\\t\")]) for line in describe.strip().split(\"\\n\")]\n\n\nclass MetastoreClient(HiveClient):\n\n def table_location(self, table, database='default', partition=None):\n with HiveThriftContext() as client:\n if partition is not None:\n try:\n import hive_metastore.ttypes\n partition_str = self.partition_spec(partition)\n thrift_table = client.get_partition_by_name(database, table, partition_str)\n except hive_metastore.ttypes.NoSuchObjectException:\n return ''\n else:\n thrift_table = client.get_table(database, table)\n return thrift_table.sd.location\n\n def table_exists(self, table, database='default', partition=None):\n with HiveThriftContext() as client:\n if partition is None:\n return table in client.get_all_tables(database)\n else:\n return partition in self._existing_partitions(table, database, client)\n\n def _existing_partitions(self, table, database, client):\n def _parse_partition_string(partition_string):\n partition_def = {}\n for part in partition_string.split(\"/\"):\n name, value = part.split(\"=\")\n partition_def[name] = value\n return partition_def\n\n # -1 is max_parts, the # of partition names to return (-1 = unlimited)\n partition_strings = client.get_partition_names(database, table, -1)\n return [_parse_partition_string(existing_partition) for existing_partition in partition_strings]\n\n def table_schema(self, table, database='default'):\n with HiveThriftContext() as 
client:\n return [(field_schema.name, field_schema.type) for field_schema in client.get_schema(database, table)]\n\n def partition_spec(self, partition):\n return \"/\".join(\"%s=%s\" % (k, v) for (k, v) in sorted(six.iteritems(partition), key=operator.itemgetter(0)))\n\n\nclass HiveThriftContext(object):\n \"\"\"\n Context manager for hive metastore client.\n \"\"\"\n\n def __enter__(self):\n try:\n from thrift.transport import TSocket\n from thrift.transport import TTransport\n from thrift.protocol import TBinaryProtocol\n # Note that this will only work with a CDH release.\n # This uses the thrift bindings generated by the ThriftHiveMetastore service in Beeswax.\n # If using the Apache release of Hive this import will fail.\n from hive_metastore import ThriftHiveMetastore\n config = luigi.configuration.get_config()\n host = config.get('hive', 'metastore_host')\n port = config.getint('hive', 'metastore_port')\n transport = TSocket.TSocket(host, port)\n transport = TTransport.TBufferedTransport(transport)\n protocol = TBinaryProtocol.TBinaryProtocol(transport)\n transport.open()\n self.transport = transport\n return ThriftHiveMetastore.Client(protocol)\n except ImportError as e:\n raise Exception('Could not import Hive thrift library:' + str(e))\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.transport.close()\n\n\ndef get_default_client():\n syntax = get_hive_syntax()\n if syntax == \"apache\":\n return ApacheHiveCommandClient()\n elif syntax == \"metastore\":\n return MetastoreClient()\n else:\n return HiveCommandClient()\n\n\nclient = get_default_client()\n\n\nclass HiveQueryTask(luigi.contrib.hadoop.BaseHadoopJobTask):\n \"\"\"\n Task to run a hive query.\n \"\"\"\n\n # by default, we let hive figure these out.\n n_reduce_tasks = None\n bytes_per_reducer = None\n reducers_max = None\n\n @abc.abstractmethod\n def query(self):\n \"\"\" Text of query to run in hive \"\"\"\n raise RuntimeError(\"Must implement query!\")\n\n def hiverc(self):\n \"\"\"\n Location of an rc file to run before the query\n if hiverc-location key is specified in luigi.cfg, will default to the value there\n otherwise returns None.\n\n Returning a list of rc files will load all of them in order.\n \"\"\"\n return luigi.configuration.get_config().get('hive', 'hiverc-location', default=None)\n\n def hivevars(self):\n \"\"\"\n Returns a dict of key=value settings to be passed along\n to the hive command line via --hivevar.\n This option can be used as a separated namespace for script local variables.\n See https://cwiki.apache.org/confluence/display/Hive/LanguageManual+VariableSubstitution\n \"\"\"\n return {}\n\n def hiveconfs(self):\n \"\"\"\n Returns a dict of key=value settings to be passed along\n to the hive command line via --hiveconf. 
By default, sets\n mapred.job.name to task_id and if not None, sets:\n\n * mapred.reduce.tasks (n_reduce_tasks)\n * mapred.fairscheduler.pool (pool) or mapred.job.queue.name (pool)\n * hive.exec.reducers.bytes.per.reducer (bytes_per_reducer)\n * hive.exec.reducers.max (reducers_max)\n \"\"\"\n jcs = {}\n jcs['mapred.job.name'] = \"'\" + self.task_id + \"'\"\n if self.n_reduce_tasks is not None:\n jcs['mapred.reduce.tasks'] = self.n_reduce_tasks\n if self.pool is not None:\n # Supporting two schedulers: fair (default) and capacity using the same option\n scheduler_type = luigi.configuration.get_config().get('hadoop', 'scheduler', 'fair')\n if scheduler_type == 'fair':\n jcs['mapred.fairscheduler.pool'] = self.pool\n elif scheduler_type == 'capacity':\n jcs['mapred.job.queue.name'] = self.pool\n if self.bytes_per_reducer is not None:\n jcs['hive.exec.reducers.bytes.per.reducer'] = self.bytes_per_reducer\n if self.reducers_max is not None:\n jcs['hive.exec.reducers.max'] = self.reducers_max\n return jcs\n\n def job_runner(self):\n return HiveQueryRunner()\n\n\nclass HiveQueryRunner(luigi.contrib.hadoop.JobRunner):\n \"\"\"\n Runs a HiveQueryTask by shelling out to hive.\n \"\"\"\n\n def prepare_outputs(self, job):\n \"\"\"\n Called before job is started.\n\n If output is a `FileSystemTarget`, create parent directories so the hive command won't fail\n \"\"\"\n outputs = flatten(job.output())\n for o in outputs:\n if isinstance(o, FileSystemTarget):\n parent_dir = os.path.dirname(o.path)\n if parent_dir and not o.fs.exists(parent_dir):\n logger.info(\"Creating parent directory %r\", parent_dir)\n try:\n # there is a possible race condition\n # which needs to be handled here\n o.fs.mkdir(parent_dir)\n except FileAlreadyExists:\n pass\n\n def get_arglist(self, f_name, job):\n arglist = load_hive_cmd() + ['-f', f_name]\n hiverc = job.hiverc()\n if hiverc:\n if isinstance(hiverc, str):\n hiverc = [hiverc]\n for rcfile in hiverc:\n arglist += ['-i', rcfile]\n hiveconfs = job.hiveconfs()\n if hiveconfs:\n for k, v in six.iteritems(hiveconfs):\n arglist += ['--hiveconf', '{0}={1}'.format(k, v)]\n hivevars = job.hivevars()\n if hivevars:\n for k, v in six.iteritems(hivevars):\n arglist += ['--hivevar', '{0}={1}'.format(k, v)]\n logger.info(arglist)\n return arglist\n\n def run_job(self, job, tracking_url_callback=None):\n if tracking_url_callback is not None:\n warnings.warn(\"tracking_url_callback argument is deprecated, task.set_tracking_url is \"\n \"used instead.\", DeprecationWarning)\n\n self.prepare_outputs(job)\n with tempfile.NamedTemporaryFile() as f:\n query = job.query()\n if isinstance(query, unicode):\n query = query.encode('utf8')\n f.write(query)\n f.flush()\n arglist = self.get_arglist(f.name, job)\n return luigi.contrib.hadoop.run_and_track_hadoop_job(arglist, job.set_tracking_url)\n\n\nclass HiveTableTarget(luigi.Target):\n \"\"\"\n exists returns true if the table exists.\n \"\"\"\n\n def __init__(self, table, database='default', client=None):\n self.database = database\n self.table = table\n self.client = client or get_default_client()\n\n def exists(self):\n logger.debug(\"Checking if Hive table '%s.%s' exists\", self.database, self.table)\n return self.client.table_exists(self.table, self.database)\n\n @property\n def path(self):\n \"\"\"\n Returns the path to this table in HDFS.\n \"\"\"\n location = self.client.table_location(self.table, self.database)\n if not location:\n raise Exception(\"Couldn't find location for table: {0}\".format(str(self)))\n return location\n\n def 
open(self, mode):\n return NotImplementedError(\"open() is not supported for HiveTableTarget\")\n\n\nclass HivePartitionTarget(luigi.Target):\n \"\"\"\n exists returns true if the table's partition exists.\n \"\"\"\n\n def __init__(self, table, partition, database='default', fail_missing_table=True, client=None):\n self.database = database\n self.table = table\n self.partition = partition\n self.client = client or get_default_client()\n\n self.fail_missing_table = fail_missing_table\n\n def exists(self):\n try:\n logger.debug(\"Checking Hive table '{d}.{t}' for partition {p}\".format(d=self.database, t=self.table, p=str(self.partition)))\n return self.client.table_exists(self.table, self.database, self.partition)\n except HiveCommandError:\n if self.fail_missing_table:\n raise\n else:\n if self.client.table_exists(self.table, self.database):\n # a real error occurred\n raise\n else:\n # oh the table just doesn't exist\n return False\n\n @property\n def path(self):\n \"\"\"\n Returns the path for this HiveTablePartitionTarget's data.\n \"\"\"\n location = self.client.table_location(self.table, self.database, self.partition)\n if not location:\n raise Exception(\"Couldn't find location for table: {0}\".format(str(self)))\n return location\n\n def open(self, mode):\n return NotImplementedError(\"open() is not supported for HivePartitionTarget\")\n\n\nclass ExternalHiveTask(luigi.ExternalTask):\n \"\"\"\n External task that depends on a Hive table/partition.\n \"\"\"\n\n database = luigi.Parameter(default='default')\n table = luigi.Parameter()\n partition = luigi.DictParameter(default={}, description='Python dictionary specifying the target partition e.g. {\"date\": \"2013-01-25\"}')\n\n def output(self):\n if len(self.partition) != 0:\n assert self.partition, \"partition required\"\n return HivePartitionTarget(table=self.table,\n partition=self.partition,\n database=self.database)\n else:\n return HiveTableTarget(self.table, self.database)\n", "path": "luigi/contrib/hive.py" } ]
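The after_files version of this module below differs in `run_hive` only by returning `stdout.decode('utf-8')`. The reason is a Python 3 bytes-versus-str mismatch: `Popen.communicate()` returns `bytes`, so the `table.lower() in stdout` membership test in `table_exists` raises `TypeError`. A minimal sketch of the failure and of what the decode fix changes:

```python
# Illustrative: what table_exists sees on Python 3 before and after the decode fix.
stdout = b"mytable\n"     # Popen.communicate() hands back bytes
table = "MyTable"

try:
    print(table.lower() in stdout)
except TypeError as exc:
    print("without decode:", exc)   # a bytes-like object is required, not 'str'

print("with decode:", table.lower() in stdout.decode("utf-8"))  # True
```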
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2012-2015 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport abc\nimport logging\nimport operator\nimport os\nimport subprocess\nimport tempfile\nimport warnings\n\nfrom luigi import six\n\nimport luigi\nimport luigi.contrib.hadoop\nfrom luigi.target import FileAlreadyExists, FileSystemTarget\nfrom luigi.task import flatten\n\nif six.PY3:\n unicode = str\n\nlogger = logging.getLogger('luigi-interface')\n\n\nclass HiveCommandError(RuntimeError):\n\n def __init__(self, message, out=None, err=None):\n super(HiveCommandError, self).__init__(message, out, err)\n self.message = message\n self.out = out\n self.err = err\n\n\ndef load_hive_cmd():\n return luigi.configuration.get_config().get('hive', 'command', 'hive').split(' ')\n\n\ndef get_hive_syntax():\n return luigi.configuration.get_config().get('hive', 'release', 'cdh4')\n\n\ndef run_hive(args, check_return_code=True):\n \"\"\"\n Runs the `hive` from the command line, passing in the given args, and\n returning stdout.\n\n With the apache release of Hive, so of the table existence checks\n (which are done using DESCRIBE do not exit with a return code of 0\n so we need an option to ignore the return code and just return stdout for parsing\n \"\"\"\n cmd = load_hive_cmd() + args\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = p.communicate()\n if check_return_code and p.returncode != 0:\n raise HiveCommandError(\"Hive command: {0} failed with error code: {1}\".format(\" \".join(cmd), p.returncode),\n stdout, stderr)\n return stdout.decode('utf-8')\n\n\ndef run_hive_cmd(hivecmd, check_return_code=True):\n \"\"\"\n Runs the given hive query and returns stdout.\n \"\"\"\n return run_hive(['-e', hivecmd], check_return_code)\n\n\ndef run_hive_script(script):\n \"\"\"\n Runs the contents of the given script in hive and returns stdout.\n \"\"\"\n if not os.path.isfile(script):\n raise RuntimeError(\"Hive script: {0} does not exist.\".format(script))\n return run_hive(['-f', script])\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass HiveClient(object): # interface\n\n @abc.abstractmethod\n def table_location(self, table, database='default', partition=None):\n \"\"\"\n Returns location of db.table (or db.table.partition). partition is a dict of partition key to\n value.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def table_schema(self, table, database='default'):\n \"\"\"\n Returns list of [(name, type)] for each column in database.table.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def table_exists(self, table, database='default', partition=None):\n \"\"\"\n Returns true if db.table (or db.table.partition) exists. 
partition is a dict of partition key to\n value.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def partition_spec(self, partition):\n \"\"\" Turn a dict into a string partition specification \"\"\"\n pass\n\n\nclass HiveCommandClient(HiveClient):\n \"\"\"\n Uses `hive` invocations to find information.\n \"\"\"\n\n def table_location(self, table, database='default', partition=None):\n cmd = \"use {0}; describe formatted {1}\".format(database, table)\n if partition is not None:\n cmd += \" PARTITION ({0})\".format(self.partition_spec(partition))\n\n stdout = run_hive_cmd(cmd)\n\n for line in stdout.split(\"\\n\"):\n if \"Location:\" in line:\n return line.split(\"\\t\")[1]\n\n def table_exists(self, table, database='default', partition=None):\n if partition is None:\n stdout = run_hive_cmd('use {0}; show tables like \"{1}\";'.format(database, table))\n\n return stdout and table.lower() in stdout\n else:\n stdout = run_hive_cmd(\"\"\"use %s; show partitions %s partition\n (%s)\"\"\" % (database, table, self.partition_spec(partition)))\n\n if stdout:\n return True\n else:\n return False\n\n def table_schema(self, table, database='default'):\n describe = run_hive_cmd(\"use {0}; describe {1}\".format(database, table))\n if not describe or \"does not exist\" in describe:\n return None\n return [tuple([x.strip() for x in line.strip().split(\"\\t\")]) for line in describe.strip().split(\"\\n\")]\n\n def partition_spec(self, partition):\n \"\"\"\n Turns a dict into the a Hive partition specification string.\n \"\"\"\n return ','.join([\"`{0}`='{1}'\".format(k, v) for (k, v) in\n sorted(six.iteritems(partition), key=operator.itemgetter(0))])\n\n\nclass ApacheHiveCommandClient(HiveCommandClient):\n \"\"\"\n A subclass for the HiveCommandClient to (in some cases) ignore the return code from\n the hive command so that we can just parse the output.\n \"\"\"\n\n def table_schema(self, table, database='default'):\n describe = run_hive_cmd(\"use {0}; describe {1}\".format(database, table), False)\n if not describe or \"Table not found\" in describe:\n return None\n return [tuple([x.strip() for x in line.strip().split(\"\\t\")]) for line in describe.strip().split(\"\\n\")]\n\n\nclass MetastoreClient(HiveClient):\n\n def table_location(self, table, database='default', partition=None):\n with HiveThriftContext() as client:\n if partition is not None:\n try:\n import hive_metastore.ttypes\n partition_str = self.partition_spec(partition)\n thrift_table = client.get_partition_by_name(database, table, partition_str)\n except hive_metastore.ttypes.NoSuchObjectException:\n return ''\n else:\n thrift_table = client.get_table(database, table)\n return thrift_table.sd.location\n\n def table_exists(self, table, database='default', partition=None):\n with HiveThriftContext() as client:\n if partition is None:\n return table in client.get_all_tables(database)\n else:\n return partition in self._existing_partitions(table, database, client)\n\n def _existing_partitions(self, table, database, client):\n def _parse_partition_string(partition_string):\n partition_def = {}\n for part in partition_string.split(\"/\"):\n name, value = part.split(\"=\")\n partition_def[name] = value\n return partition_def\n\n # -1 is max_parts, the # of partition names to return (-1 = unlimited)\n partition_strings = client.get_partition_names(database, table, -1)\n return [_parse_partition_string(existing_partition) for existing_partition in partition_strings]\n\n def table_schema(self, table, database='default'):\n with HiveThriftContext() as 
client:\n return [(field_schema.name, field_schema.type) for field_schema in client.get_schema(database, table)]\n\n def partition_spec(self, partition):\n return \"/\".join(\"%s=%s\" % (k, v) for (k, v) in sorted(six.iteritems(partition), key=operator.itemgetter(0)))\n\n\nclass HiveThriftContext(object):\n \"\"\"\n Context manager for hive metastore client.\n \"\"\"\n\n def __enter__(self):\n try:\n from thrift.transport import TSocket\n from thrift.transport import TTransport\n from thrift.protocol import TBinaryProtocol\n # Note that this will only work with a CDH release.\n # This uses the thrift bindings generated by the ThriftHiveMetastore service in Beeswax.\n # If using the Apache release of Hive this import will fail.\n from hive_metastore import ThriftHiveMetastore\n config = luigi.configuration.get_config()\n host = config.get('hive', 'metastore_host')\n port = config.getint('hive', 'metastore_port')\n transport = TSocket.TSocket(host, port)\n transport = TTransport.TBufferedTransport(transport)\n protocol = TBinaryProtocol.TBinaryProtocol(transport)\n transport.open()\n self.transport = transport\n return ThriftHiveMetastore.Client(protocol)\n except ImportError as e:\n raise Exception('Could not import Hive thrift library:' + str(e))\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.transport.close()\n\n\ndef get_default_client():\n syntax = get_hive_syntax()\n if syntax == \"apache\":\n return ApacheHiveCommandClient()\n elif syntax == \"metastore\":\n return MetastoreClient()\n else:\n return HiveCommandClient()\n\n\nclient = get_default_client()\n\n\nclass HiveQueryTask(luigi.contrib.hadoop.BaseHadoopJobTask):\n \"\"\"\n Task to run a hive query.\n \"\"\"\n\n # by default, we let hive figure these out.\n n_reduce_tasks = None\n bytes_per_reducer = None\n reducers_max = None\n\n @abc.abstractmethod\n def query(self):\n \"\"\" Text of query to run in hive \"\"\"\n raise RuntimeError(\"Must implement query!\")\n\n def hiverc(self):\n \"\"\"\n Location of an rc file to run before the query\n if hiverc-location key is specified in luigi.cfg, will default to the value there\n otherwise returns None.\n\n Returning a list of rc files will load all of them in order.\n \"\"\"\n return luigi.configuration.get_config().get('hive', 'hiverc-location', default=None)\n\n def hivevars(self):\n \"\"\"\n Returns a dict of key=value settings to be passed along\n to the hive command line via --hivevar.\n This option can be used as a separated namespace for script local variables.\n See https://cwiki.apache.org/confluence/display/Hive/LanguageManual+VariableSubstitution\n \"\"\"\n return {}\n\n def hiveconfs(self):\n \"\"\"\n Returns a dict of key=value settings to be passed along\n to the hive command line via --hiveconf. 
By default, sets\n mapred.job.name to task_id and if not None, sets:\n\n * mapred.reduce.tasks (n_reduce_tasks)\n * mapred.fairscheduler.pool (pool) or mapred.job.queue.name (pool)\n * hive.exec.reducers.bytes.per.reducer (bytes_per_reducer)\n * hive.exec.reducers.max (reducers_max)\n \"\"\"\n jcs = {}\n jcs['mapred.job.name'] = \"'\" + self.task_id + \"'\"\n if self.n_reduce_tasks is not None:\n jcs['mapred.reduce.tasks'] = self.n_reduce_tasks\n if self.pool is not None:\n # Supporting two schedulers: fair (default) and capacity using the same option\n scheduler_type = luigi.configuration.get_config().get('hadoop', 'scheduler', 'fair')\n if scheduler_type == 'fair':\n jcs['mapred.fairscheduler.pool'] = self.pool\n elif scheduler_type == 'capacity':\n jcs['mapred.job.queue.name'] = self.pool\n if self.bytes_per_reducer is not None:\n jcs['hive.exec.reducers.bytes.per.reducer'] = self.bytes_per_reducer\n if self.reducers_max is not None:\n jcs['hive.exec.reducers.max'] = self.reducers_max\n return jcs\n\n def job_runner(self):\n return HiveQueryRunner()\n\n\nclass HiveQueryRunner(luigi.contrib.hadoop.JobRunner):\n \"\"\"\n Runs a HiveQueryTask by shelling out to hive.\n \"\"\"\n\n def prepare_outputs(self, job):\n \"\"\"\n Called before job is started.\n\n If output is a `FileSystemTarget`, create parent directories so the hive command won't fail\n \"\"\"\n outputs = flatten(job.output())\n for o in outputs:\n if isinstance(o, FileSystemTarget):\n parent_dir = os.path.dirname(o.path)\n if parent_dir and not o.fs.exists(parent_dir):\n logger.info(\"Creating parent directory %r\", parent_dir)\n try:\n # there is a possible race condition\n # which needs to be handled here\n o.fs.mkdir(parent_dir)\n except FileAlreadyExists:\n pass\n\n def get_arglist(self, f_name, job):\n arglist = load_hive_cmd() + ['-f', f_name]\n hiverc = job.hiverc()\n if hiverc:\n if isinstance(hiverc, str):\n hiverc = [hiverc]\n for rcfile in hiverc:\n arglist += ['-i', rcfile]\n hiveconfs = job.hiveconfs()\n if hiveconfs:\n for k, v in six.iteritems(hiveconfs):\n arglist += ['--hiveconf', '{0}={1}'.format(k, v)]\n hivevars = job.hivevars()\n if hivevars:\n for k, v in six.iteritems(hivevars):\n arglist += ['--hivevar', '{0}={1}'.format(k, v)]\n logger.info(arglist)\n return arglist\n\n def run_job(self, job, tracking_url_callback=None):\n if tracking_url_callback is not None:\n warnings.warn(\"tracking_url_callback argument is deprecated, task.set_tracking_url is \"\n \"used instead.\", DeprecationWarning)\n\n self.prepare_outputs(job)\n with tempfile.NamedTemporaryFile() as f:\n query = job.query()\n if isinstance(query, unicode):\n query = query.encode('utf8')\n f.write(query)\n f.flush()\n arglist = self.get_arglist(f.name, job)\n return luigi.contrib.hadoop.run_and_track_hadoop_job(arglist, job.set_tracking_url)\n\n\nclass HiveTableTarget(luigi.Target):\n \"\"\"\n exists returns true if the table exists.\n \"\"\"\n\n def __init__(self, table, database='default', client=None):\n self.database = database\n self.table = table\n self.client = client or get_default_client()\n\n def exists(self):\n logger.debug(\"Checking if Hive table '%s.%s' exists\", self.database, self.table)\n return self.client.table_exists(self.table, self.database)\n\n @property\n def path(self):\n \"\"\"\n Returns the path to this table in HDFS.\n \"\"\"\n location = self.client.table_location(self.table, self.database)\n if not location:\n raise Exception(\"Couldn't find location for table: {0}\".format(str(self)))\n return location\n\n def 
open(self, mode):\n return NotImplementedError(\"open() is not supported for HiveTableTarget\")\n\n\nclass HivePartitionTarget(luigi.Target):\n \"\"\"\n exists returns true if the table's partition exists.\n \"\"\"\n\n def __init__(self, table, partition, database='default', fail_missing_table=True, client=None):\n self.database = database\n self.table = table\n self.partition = partition\n self.client = client or get_default_client()\n\n self.fail_missing_table = fail_missing_table\n\n def exists(self):\n try:\n logger.debug(\"Checking Hive table '{d}.{t}' for partition {p}\".format(d=self.database, t=self.table, p=str(self.partition)))\n return self.client.table_exists(self.table, self.database, self.partition)\n except HiveCommandError:\n if self.fail_missing_table:\n raise\n else:\n if self.client.table_exists(self.table, self.database):\n # a real error occurred\n raise\n else:\n # oh the table just doesn't exist\n return False\n\n @property\n def path(self):\n \"\"\"\n Returns the path for this HiveTablePartitionTarget's data.\n \"\"\"\n location = self.client.table_location(self.table, self.database, self.partition)\n if not location:\n raise Exception(\"Couldn't find location for table: {0}\".format(str(self)))\n return location\n\n def open(self, mode):\n return NotImplementedError(\"open() is not supported for HivePartitionTarget\")\n\n\nclass ExternalHiveTask(luigi.ExternalTask):\n \"\"\"\n External task that depends on a Hive table/partition.\n \"\"\"\n\n database = luigi.Parameter(default='default')\n table = luigi.Parameter()\n partition = luigi.DictParameter(default={}, description='Python dictionary specifying the target partition e.g. {\"date\": \"2013-01-25\"}')\n\n def output(self):\n if len(self.partition) != 0:\n assert self.partition, \"partition required\"\n return HivePartitionTarget(table=self.table,\n partition=self.partition,\n database=self.database)\n else:\n return HiveTableTarget(self.table, self.database)\n", "path": "luigi/contrib/hive.py" } ]
diff --git a/luigi/contrib/hive.py b/luigi/contrib/hive.py index a310677b12..91c84e320f 100644 --- a/luigi/contrib/hive.py +++ b/luigi/contrib/hive.py @@ -68,7 +68,7 @@ def run_hive(args, check_return_code=True): if check_return_code and p.returncode != 0: raise HiveCommandError("Hive command: {0} failed with error code: {1}".format(" ".join(cmd), p.returncode), stdout, stderr) - return stdout + return stdout.decode('utf-8') def run_hive_cmd(hivecmd, check_return_code=True): diff --git a/test/contrib/hive_test.py b/test/contrib/hive_test.py index 6ae4138470..4cf6655c66 100644 --- a/test/contrib/hive_test.py +++ b/test/contrib/hive_test.py @@ -32,7 +32,7 @@ class HiveTest(unittest.TestCase): def mock_hive_cmd(self, args, check_return=True): self.last_hive_cmd = args self.count += 1 - return "statement{0}".format(self.count) + return six.u("statement{0}".format(self.count)) def setUp(self): self.run_hive_cmd_saved = luigi.contrib.hive.run_hive @@ -262,7 +262,7 @@ def test_run_hive_command(self, popen): # I'm testing this again to check the return codes # I didn't want to tear up all the existing tests to change how run_hive is mocked comm = mock.Mock(name='communicate_mock') - comm.return_value = "some return stuff", "" + comm.return_value = six.b("some return stuff"), "" preturn = mock.Mock(name='open_mock') preturn.returncode = 0 @@ -275,7 +275,7 @@ def test_run_hive_command(self, popen): preturn.returncode = 17 self.assertRaises(luigi.contrib.hive.HiveCommandError, luigi.contrib.hive.run_hive, ["blah", "blah"]) - comm.return_value = "", "some stderr stuff" + comm.return_value = six.b(""), "some stderr stuff" returned = luigi.contrib.hive.run_hive(["blah", "blah"], False) self.assertEqual("", returned)
TypeError: 'str' does not support the buffer interface in luigi.contrib.hive Hi, I'm running a luigi task with Python 3.4 and trying to call print(HiveTableTarget(table = "tbl", database = "db").exists()). I'm getting an error with the following stacktrace: ``` Traceback (most recent call last): File "/usr/local/lib/python3.4/dist-packages/luigi/worker.py", line 137, in run new_deps = self._run_get_new_deps() File "/usr/local/lib/python3.4/dist-packages/luigi/worker.py", line 88, in _run_get_new_deps task_gen = self.task.run() File "hive.py", line 10, in run print(str(target.exists())) File "/usr/local/lib/python3.4/dist-packages/luigi/contrib/hive.py", line 376, in exists return self.client.table_exists(self.table, self.database) File "/usr/local/lib/python3.4/dist-packages/luigi/contrib/hive.py", line 141, in table_exists return stdout and table.lower() in stdout TypeError: 'str' does not support the buffer interface ``` I changed the last line in the stacktrace into ``` return stdout and table.lower() in str(stdout) ``` and it works now. Is this a bug, am I using the wrong Python version, or is it something else? Thanks
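A note on the record above: the failure is the classic Python 3 bytes-versus-str mismatch, and the patch resolves it by decoding the subprocess output once, next to the Popen call. Below is a minimal, self-contained sketch of the same pitfall; `fake_hive_output` is a made-up stand-in for real `hive` stdout, not data taken from the record.

```python
# Minimal sketch of the bytes-vs-str pitfall behind the luigi issue above.
# `fake_hive_output` is a hypothetical stand-in for subprocess stdout; on
# Python 3, Popen.communicate() returns bytes unless a text mode is requested.

fake_hive_output = b"tbl\n"   # what `hive -e 'show tables like ...'` might print
table = "tbl"

try:
    table.lower() in fake_hive_output   # str needle, bytes haystack
except TypeError as exc:
    # Python 3 refuses to mix str and bytes; the exact message varies by version
    # (on 3.4 it is the "does not support the buffer interface" error above).
    print(exc)

# The approach taken by the patch: decode once, close to the subprocess call,
# so every caller works with text.
decoded = fake_hive_output.decode("utf-8")
print(table.lower() in decoded)          # True
```

Decoding at the subprocess boundary, as the patch does, lets callers such as table_exists keep doing plain substring checks on text instead of each adding its own str()/decode() workaround.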
kivy__kivy-3066
[ { "content": "# found a way to include it more easily.\n'''\nSDL2 Window\n===========\n\nWindowing provider directly based on our own wrapped version of SDL.\n\nTODO:\n - fix keys\n - support scrolling\n - clean code\n - manage correctly all sdl events\n\n'''\n\n__all__ = ('WindowSDL2', )\n\nfrom os.path import join\nfrom kivy import kivy_data_dir\nfrom kivy.logger import Logger\nfrom kivy.base import EventLoop, ExceptionManager, stopTouchApp\nfrom kivy.clock import Clock\nfrom kivy.config import Config\nfrom kivy.core.window import WindowBase\nfrom kivy.core.window._window_sdl2 import _WindowSDL2Storage\nfrom kivy.input.provider import MotionEventProvider\nfrom kivy.input.motionevent import MotionEvent\nfrom kivy.resources import resource_find\nfrom kivy.utils import platform, deprecated\nfrom kivy.compat import unichr\nfrom collections import deque\n\nKMOD_LCTRL = 64\nKMOD_RCTRL = 128\nKMOD_RSHIFT = 2\nKMOD_LSHIFT = 1\nKMOD_RALT = 512\nKMOD_LALT = 256\nKMOD_LMETA = 1024\nKMOD_RMETA = 2048\n\nSDLK_SHIFTL = 1073742049\nSDLK_SHIFTR = 1073742053\nSDLK_LCTRL = 1073742048\nSDLK_RCTRL = 1073742052\nSDLK_LALT = 1073742050\nSDLK_RALT = 1073742054\nSDLK_LEFT = 1073741904\nSDLK_RIGHT = 1073741903\nSDLK_UP = 1073741906\nSDLK_DOWN = 1073741905\nSDLK_HOME = 1073741898\nSDLK_END = 1073741901\nSDLK_PAGEUP = 1073741899\nSDLK_PAGEDOWN = 1073741902\n\n\nclass SDL2MotionEvent(MotionEvent):\n def depack(self, args):\n self.is_touch = True\n self.profile = ('pos', )\n self.sx, self.sy = args\n super(SDL2MotionEvent, self).depack(args)\n\n\nclass SDL2MotionEventProvider(MotionEventProvider):\n win = None\n q = deque()\n touchmap = {}\n\n def update(self, dispatch_fn):\n touchmap = self.touchmap\n while True:\n try:\n value = self.q.pop()\n except IndexError:\n return\n\n action, fid, x, y = value\n x = x / 32768.\n y = 1 - (y / 32768.)\n if fid not in touchmap:\n touchmap[fid] = me = SDL2MotionEvent('sdl', fid, (x, y))\n else:\n me = touchmap[fid]\n me.move((x, y))\n if action == 'fingerdown':\n dispatch_fn('begin', me)\n elif action == 'fingerup':\n me.update_time_end()\n dispatch_fn('end', me)\n del touchmap[fid]\n else:\n dispatch_fn('update', me)\n\n\nclass WindowSDL(WindowBase):\n\n def __init__(self, **kwargs):\n self._win = _WindowSDL2Storage()\n super(WindowSDL, self).__init__()\n self._meta_keys = (KMOD_LCTRL, KMOD_RCTRL, KMOD_RSHIFT,\n KMOD_LSHIFT, KMOD_RALT, KMOD_LALT, KMOD_LMETA,\n KMOD_RMETA)\n self.command_keys = {\n 27: 'escape',\n 9: 'tab',\n 8: 'backspace',\n 13: 'enter',\n 127: 'del',\n 271: 'enter',\n 273: 'up',\n 274: 'down',\n 275: 'right',\n 276: 'left',\n 278: 'home',\n 279: 'end',\n 280: 'pgup',\n 281: 'pgdown'}\n self._mouse_buttons_down = set()\n\n def create_window(self, *largs):\n\n if self._fake_fullscreen:\n if not self.borderless:\n self.fullscreen = self._fake_fullscreen = False\n elif not self.fullscreen or self.fullscreen == 'auto':\n self.borderless = self._fake_fullscreen = False\n\n if self.fullscreen == 'fake':\n self.borderless = self._fake_fullscreen = True\n Logger.warning(\"The 'fake' fullscreen option has been \"\n \"deprecated, use Window.borderless or the \"\n \"borderless Config option instead.\")\n\n if not self.initialized:\n\n if self.position == 'auto':\n pos = None, None\n elif self.position == 'custom':\n pos = self.left, self.top\n\n # setup !\n w, h = self._size\n resizable = Config.getboolean('graphics', 'resizable')\n gl_size = self._win.setup_window(pos[0], pos[1], w, h,\n self.borderless, self.fullscreen,\n resizable)\n # never stay with a None pos, 
application using w.center\n # will be fired.\n self._pos = (0, 0)\n else:\n w, h = self._size\n self._win.resize_window(w, h)\n self._win.set_border_state(self.borderless)\n self._win.set_fullscreen_mode(self.fullscreen)\n\n super(WindowSDL, self).create_window()\n\n # auto add input provider\n Logger.info('Window: auto add sdl input provider')\n from kivy.base import EventLoop\n SDL2MotionEventProvider.win = self\n EventLoop.add_input_provider(SDL2MotionEventProvider('sdl', ''))\n\n # set window icon before calling set_mode\n try:\n filename_icon = self.icon or Config.get('kivy', 'window_icon')\n if filename_icon == '':\n logo_size = 32\n if platform == 'macosx':\n logo_size = 512\n elif platform == 'win':\n logo_size = 64\n filename_icon = 'kivy-icon-{}.png'.format(logo_size)\n filename_icon = resource_find(\n join(kivy_data_dir, 'logo', filename_icon))\n self.set_icon(filename_icon)\n except:\n Logger.exception('Window: cannot set icon')\n\n def close(self):\n self._win.teardown_window()\n self.dispatch('on_close')\n\n def maximize(self):\n if self._is_desktop:\n self._win.maximize_window()\n else:\n Logger.warning('Window: maximize() is used only on desktop OSes.')\n\n def minimize(self):\n if self._is_desktop:\n self._win.minimize_window()\n else:\n Logger.warning('Window: minimize() is used only on desktop OSes.')\n\n def restore(self):\n if self._is_desktop:\n self._win.restore_window()\n else:\n Logger.warning('Window: restore() is used only on desktop OSes.')\n\n def hide(self):\n if self._is_desktop:\n self._win.hide_window()\n else:\n Logger.warning('Window: hide() is used only on desktop OSes.')\n\n def show(self):\n if self._is_desktop:\n self._win.show_window()\n else:\n Logger.warning('Window: show() is used only on desktop OSes.')\n\n @deprecated\n def toggle_fullscreen(self):\n if self.fullscreen in (True, 'auto'):\n self.fullscreen = False\n else:\n self.fullscreen = 'auto'\n\n def set_title(self, title):\n self._win.set_window_title(title)\n\n def set_icon(self, filename):\n self._win.set_window_icon(str(filename))\n\n def screenshot(self, *largs, **kwargs):\n filename = super(WindowSDL, self).screenshot(*largs, **kwargs)\n if filename is None:\n return\n\n from kivy.graphics.opengl import glReadPixels, GL_RGB, GL_UNSIGNED_BYTE\n width, height = self.size\n data = glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE)\n self._win.save_bytes_in_png(filename, data, width, height)\n Logger.debug('Window: Screenshot saved at <%s>' % filename)\n return filename\n\n def flip(self):\n self._win.flip()\n super(WindowSDL, self).flip()\n\n def _mainloop(self):\n EventLoop.idle()\n\n while True:\n event = self._win.poll()\n if event is False:\n break\n if event is None:\n continue\n\n action, args = event[0], event[1:]\n if action == 'quit':\n EventLoop.quit = True\n self.close()\n break\n\n elif action in ('fingermotion', 'fingerdown', 'fingerup'):\n # for finger, pass the raw event to SDL motion event provider\n # XXX this is problematic. On OSX, it generates touches with 0,\n # 0 coordinates, at the same times as mouse. 
But it works.\n # We have a conflict of using either the mouse or the finger.\n # Right now, we have no mechanism that we could use to know\n # which is the preferred one for the application.\n #SDL2MotionEventProvider.q.appendleft(event)\n pass\n\n elif action == 'mousemotion':\n x, y = args\n self.mouse_pos = x, self.system_size[1] - y\n self._mouse_x = x\n self._mouse_y = y\n # don't dispatch motion if no button are pressed\n if len(self._mouse_buttons_down) == 0:\n continue\n self._mouse_meta = self.modifiers\n self.dispatch('on_mouse_move', x, y, self.modifiers)\n\n elif action in ('mousebuttondown', 'mousebuttonup'):\n x, y, button = args\n btn = 'left'\n if button == 3:\n btn = 'right'\n elif button == 2:\n btn = 'middle'\n eventname = 'on_mouse_down'\n self._mouse_buttons_down.add(button)\n if action == 'mousebuttonup':\n eventname = 'on_mouse_up'\n self._mouse_buttons_down.remove(button)\n self._mouse_x = x\n self._mouse_y = y\n self.dispatch(eventname, x, y, btn, self.modifiers)\n elif action.startswith('mousewheel'):\n self._update_modifiers()\n x, y, button = args\n btn = 'scrolldown'\n if action.endswith('up'):\n btn = 'scrollup'\n elif action.endswith('right'):\n btn = 'scrollright'\n elif action.endswith('left'):\n btn = 'scrollleft'\n\n self._mouse_meta = self.modifiers\n self._mouse_btn = btn\n #times = x if y == 0 else y\n #times = min(abs(times), 100)\n #for k in range(times):\n self._mouse_down = True\n self.dispatch('on_mouse_down',\n self._mouse_x, self._mouse_y, btn, self.modifiers)\n self._mouse_down = False\n self.dispatch('on_mouse_up',\n self._mouse_x, self._mouse_y, btn, self.modifiers)\n\n elif action == 'dropfile':\n dropfile = args\n self.dispatch('on_dropfile', dropfile[0])\n # video resize\n elif action == 'windowresized':\n self._size = args\n # don't use trigger here, we want to delay the resize event\n cb = self._do_resize\n Clock.unschedule(cb)\n Clock.schedule_once(cb, .1)\n\n elif action == 'windowresized':\n self.canvas.ask_update()\n\n elif action == 'windowrestored':\n self.canvas.ask_update()\n\n elif action == 'windowexposed':\n self.canvas.ask_update()\n\n elif action == 'windowminimized':\n if Config.getboolean('kivy', 'pause_on_minimize'):\n self.do_pause()\n\n elif action == 'joyaxismotion':\n stickid, axisid, value = args\n self.dispatch('on_joy_axis', stickid, axisid, value)\n elif action == 'joyhatmotion':\n stickid, hatid, value = args\n self.dispatch('on_joy_hat', stickid, hatid, value)\n elif action == 'joyballmotion':\n stickid, ballid, xrel, yrel = args\n self.dispatch('on_joy_ball', stickid, ballid, xrel, yrel)\n elif action == 'joybuttondown':\n stickid, buttonid = args\n self.dispatch('on_joy_button_down', stickid, buttonid)\n elif action == 'joybuttonup':\n stickid, buttonid = args\n self.dispatch('on_joy_button_up', stickid, buttonid)\n\n elif action in ('keydown', 'keyup'):\n mod, key, scancode, kstr = args\n if mod in self._meta_keys:\n try:\n kstr = unichr(key)\n except ValueError:\n pass\n\n key_swap = {\n SDLK_LEFT: 276,\n SDLK_RIGHT: 275,\n SDLK_UP: 273,\n SDLK_DOWN: 274,\n SDLK_HOME: 278,\n SDLK_END: 279,\n SDLK_PAGEDOWN: 281,\n SDLK_PAGEUP: 280,\n SDLK_SHIFTL: 303,\n SDLK_SHIFTR: 304,\n SDLK_LCTRL: KMOD_LCTRL,\n SDLK_RCTRL: KMOD_RCTRL,\n SDLK_LALT: KMOD_LALT,\n SDLK_RALT: KMOD_RALT}\n\n if platform == 'ios':\n # XXX ios keyboard suck, when backspace is hit, the delete\n # keycode is sent. 
fix it.\n key_swap[127] = 8 # back\n\n try:\n key = key_swap[key]\n except KeyError:\n pass\n\n if action == 'keydown':\n self._update_modifiers(mod, key)\n else:\n self._update_modifiers(mod) # ignore the key, it\n # has been released\n if 'shift' in self._modifiers and key\\\n not in self.command_keys.keys():\n return\n\n if action == 'keyup':\n self.dispatch('on_key_up', key, scancode)\n continue\n\n # don't dispatch more key if down event is accepted\n if self.dispatch('on_key_down', key,\n scancode, kstr,\n self.modifiers):\n continue\n self.dispatch('on_keyboard', key,\n scancode, kstr,\n self.modifiers)\n\n elif action == 'textinput':\n key = args[0][0]\n # XXX on IOS, keydown/up don't send unicode anymore.\n # With latest sdl, the text is sent over textinput\n # Right now, redo keydown/up, but we need to seperate both call\n # too. (and adapt on_key_* API.)\n self.dispatch('on_key_down', key, None, args[0],\n self.modifiers)\n self.dispatch('on_keyboard', None, None, args[0],\n self.modifiers)\n self.dispatch('on_key_up', key, None, args[0],\n self.modifiers)\n\n # unhandled event !\n else:\n Logger.trace('WindowSDL: Unhandled event %s' % str(event))\n\n def _do_resize(self, dt):\n Logger.debug('Window: Resize window to %s' % str(self._size))\n self._win.resize_display_mode(*self._size)\n self.dispatch('on_resize', *self._size)\n\n def do_pause(self):\n # should go to app pause mode.\n from kivy.app import App\n from kivy.base import stopTouchApp\n app = App.get_running_app()\n if not app:\n Logger.info('WindowSDL: No running App found, exit.')\n stopTouchApp()\n return\n\n if not app.dispatch('on_pause'):\n Logger.info('WindowSDL: App doesn\\'t support pause mode, stop.')\n stopTouchApp()\n return\n\n # XXX FIXME wait for sdl resume\n while True:\n event = self._win.poll()\n if event is False:\n continue\n if event is None:\n continue\n\n action, args = event[0], event[1:]\n if action == 'quit':\n EventLoop.quit = True\n self.close()\n break\n elif action == 'windowrestored':\n break\n\n app.dispatch('on_resume')\n\n def mainloop(self):\n # don't known why, but pygame required a resize event\n # for opengl, before mainloop... 
window reinit ?\n self.dispatch('on_resize', *self.size)\n\n while not EventLoop.quit and EventLoop.status == 'started':\n try:\n self._mainloop()\n except BaseException as inst:\n # use exception manager first\n r = ExceptionManager.handle_exception(inst)\n if r == ExceptionManager.RAISE:\n stopTouchApp()\n raise\n else:\n pass\n\n # force deletion of window\n self._win.teardown_window()\n\n #\n # Pygame wrapper\n #\n def _update_modifiers(self, mods=None, key=None):\n # Available mod, from dir(pygame)\n # 'KMOD_ALT', 'KMOD_CAPS', 'KMOD_CTRL', 'KMOD_LALT',\n # 'KMOD_LCTRL', 'KMOD_LMETA', 'KMOD_LSHIFT', 'KMOD_META',\n # 'KMOD_MODE', 'KMOD_NONE'\n if mods is None and key is None:\n return\n modifiers = set()\n\n if mods is not None:\n if mods & (KMOD_RSHIFT | KMOD_LSHIFT):\n modifiers.add('shift')\n if mods & (KMOD_RALT | KMOD_LALT):\n modifiers.add('alt')\n if mods & (KMOD_RCTRL | KMOD_LCTRL):\n modifiers.add('ctrl')\n if mods & (KMOD_RMETA | KMOD_LMETA):\n modifiers.add('meta')\n\n if key is not None:\n if key in (KMOD_RSHIFT, KMOD_LSHIFT):\n modifiers.add('shift')\n if key in (KMOD_RALT, KMOD_LALT):\n modifiers.add('alt')\n if key in (KMOD_RCTRL, KMOD_LCTRL):\n modifiers.add('ctrl')\n if key in (KMOD_RMETA, KMOD_LMETA):\n modifiers.add('meta')\n\n self._modifiers = list(modifiers)\n return\n\n def request_keyboard(self, callback, target, input_type='text'):\n self._sdl_keyboard = super(WindowSDL, self).\\\n request_keyboard(callback, target, input_type)\n self._win.show_keyboard()\n Clock.schedule_interval(self._check_keyboard_shown, 1 / 5.)\n return self._sdl_keyboard\n\n def release_keyboard(self, *largs):\n super(WindowSDL, self).release_keyboard(*largs)\n self._win.hide_keyboard()\n self._sdl_keyboard = None\n return True\n\n def _check_keyboard_shown(self, dt):\n if self._sdl_keyboard is None:\n return False\n if not self._win.is_keyboard_shown():\n self._sdl_keyboard.release()\n\n", "path": "kivy/core/window/window_sdl2.py" } ]
[ { "content": "# found a way to include it more easily.\n'''\nSDL2 Window\n===========\n\nWindowing provider directly based on our own wrapped version of SDL.\n\nTODO:\n - fix keys\n - support scrolling\n - clean code\n - manage correctly all sdl events\n\n'''\n\n__all__ = ('WindowSDL2', )\n\nfrom os.path import join\nfrom kivy import kivy_data_dir\nfrom kivy.logger import Logger\nfrom kivy.base import EventLoop, ExceptionManager, stopTouchApp\nfrom kivy.clock import Clock\nfrom kivy.config import Config\nfrom kivy.core.window import WindowBase\nfrom kivy.core.window._window_sdl2 import _WindowSDL2Storage\nfrom kivy.input.provider import MotionEventProvider\nfrom kivy.input.motionevent import MotionEvent\nfrom kivy.resources import resource_find\nfrom kivy.utils import platform, deprecated\nfrom kivy.compat import unichr\nfrom collections import deque\n\nKMOD_LCTRL = 64\nKMOD_RCTRL = 128\nKMOD_RSHIFT = 2\nKMOD_LSHIFT = 1\nKMOD_RALT = 512\nKMOD_LALT = 256\nKMOD_LMETA = 1024\nKMOD_RMETA = 2048\n\nSDLK_SHIFTL = 1073742049\nSDLK_SHIFTR = 1073742053\nSDLK_LCTRL = 1073742048\nSDLK_RCTRL = 1073742052\nSDLK_LALT = 1073742050\nSDLK_RALT = 1073742054\nSDLK_LEFT = 1073741904\nSDLK_RIGHT = 1073741903\nSDLK_UP = 1073741906\nSDLK_DOWN = 1073741905\nSDLK_HOME = 1073741898\nSDLK_END = 1073741901\nSDLK_PAGEUP = 1073741899\nSDLK_PAGEDOWN = 1073741902\n\n\nclass SDL2MotionEvent(MotionEvent):\n def depack(self, args):\n self.is_touch = True\n self.profile = ('pos', )\n self.sx, self.sy = args\n super(SDL2MotionEvent, self).depack(args)\n\n\nclass SDL2MotionEventProvider(MotionEventProvider):\n win = None\n q = deque()\n touchmap = {}\n\n def update(self, dispatch_fn):\n touchmap = self.touchmap\n while True:\n try:\n value = self.q.pop()\n except IndexError:\n return\n\n action, fid, x, y = value\n x = x / 32768.\n y = 1 - (y / 32768.)\n if fid not in touchmap:\n touchmap[fid] = me = SDL2MotionEvent('sdl', fid, (x, y))\n else:\n me = touchmap[fid]\n me.move((x, y))\n if action == 'fingerdown':\n dispatch_fn('begin', me)\n elif action == 'fingerup':\n me.update_time_end()\n dispatch_fn('end', me)\n del touchmap[fid]\n else:\n dispatch_fn('update', me)\n\n\nclass WindowSDL(WindowBase):\n\n def __init__(self, **kwargs):\n self._win = _WindowSDL2Storage()\n super(WindowSDL, self).__init__()\n self._mouse_x = self._mouse_y = -1\n self._meta_keys = (KMOD_LCTRL, KMOD_RCTRL, KMOD_RSHIFT,\n KMOD_LSHIFT, KMOD_RALT, KMOD_LALT, KMOD_LMETA,\n KMOD_RMETA)\n self.command_keys = {\n 27: 'escape',\n 9: 'tab',\n 8: 'backspace',\n 13: 'enter',\n 127: 'del',\n 271: 'enter',\n 273: 'up',\n 274: 'down',\n 275: 'right',\n 276: 'left',\n 278: 'home',\n 279: 'end',\n 280: 'pgup',\n 281: 'pgdown'}\n self._mouse_buttons_down = set()\n\n def create_window(self, *largs):\n\n if self._fake_fullscreen:\n if not self.borderless:\n self.fullscreen = self._fake_fullscreen = False\n elif not self.fullscreen or self.fullscreen == 'auto':\n self.borderless = self._fake_fullscreen = False\n\n if self.fullscreen == 'fake':\n self.borderless = self._fake_fullscreen = True\n Logger.warning(\"The 'fake' fullscreen option has been \"\n \"deprecated, use Window.borderless or the \"\n \"borderless Config option instead.\")\n\n if not self.initialized:\n\n if self.position == 'auto':\n pos = None, None\n elif self.position == 'custom':\n pos = self.left, self.top\n\n # setup !\n w, h = self._size\n resizable = Config.getboolean('graphics', 'resizable')\n gl_size = self._win.setup_window(pos[0], pos[1], w, h,\n self.borderless, self.fullscreen,\n 
resizable)\n # never stay with a None pos, application using w.center\n # will be fired.\n self._pos = (0, 0)\n else:\n w, h = self._size\n self._win.resize_window(w, h)\n self._win.set_border_state(self.borderless)\n self._win.set_fullscreen_mode(self.fullscreen)\n\n super(WindowSDL, self).create_window()\n\n # auto add input provider\n Logger.info('Window: auto add sdl input provider')\n from kivy.base import EventLoop\n SDL2MotionEventProvider.win = self\n EventLoop.add_input_provider(SDL2MotionEventProvider('sdl', ''))\n\n # set window icon before calling set_mode\n try:\n filename_icon = self.icon or Config.get('kivy', 'window_icon')\n if filename_icon == '':\n logo_size = 32\n if platform == 'macosx':\n logo_size = 512\n elif platform == 'win':\n logo_size = 64\n filename_icon = 'kivy-icon-{}.png'.format(logo_size)\n filename_icon = resource_find(\n join(kivy_data_dir, 'logo', filename_icon))\n self.set_icon(filename_icon)\n except:\n Logger.exception('Window: cannot set icon')\n\n def close(self):\n self._win.teardown_window()\n self.dispatch('on_close')\n\n def maximize(self):\n if self._is_desktop:\n self._win.maximize_window()\n else:\n Logger.warning('Window: maximize() is used only on desktop OSes.')\n\n def minimize(self):\n if self._is_desktop:\n self._win.minimize_window()\n else:\n Logger.warning('Window: minimize() is used only on desktop OSes.')\n\n def restore(self):\n if self._is_desktop:\n self._win.restore_window()\n else:\n Logger.warning('Window: restore() is used only on desktop OSes.')\n\n def hide(self):\n if self._is_desktop:\n self._win.hide_window()\n else:\n Logger.warning('Window: hide() is used only on desktop OSes.')\n\n def show(self):\n if self._is_desktop:\n self._win.show_window()\n else:\n Logger.warning('Window: show() is used only on desktop OSes.')\n\n @deprecated\n def toggle_fullscreen(self):\n if self.fullscreen in (True, 'auto'):\n self.fullscreen = False\n else:\n self.fullscreen = 'auto'\n\n def set_title(self, title):\n self._win.set_window_title(title)\n\n def set_icon(self, filename):\n self._win.set_window_icon(str(filename))\n\n def screenshot(self, *largs, **kwargs):\n filename = super(WindowSDL, self).screenshot(*largs, **kwargs)\n if filename is None:\n return\n\n from kivy.graphics.opengl import glReadPixels, GL_RGB, GL_UNSIGNED_BYTE\n width, height = self.size\n data = glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE)\n self._win.save_bytes_in_png(filename, data, width, height)\n Logger.debug('Window: Screenshot saved at <%s>' % filename)\n return filename\n\n def flip(self):\n self._win.flip()\n super(WindowSDL, self).flip()\n\n def _mainloop(self):\n EventLoop.idle()\n\n while True:\n event = self._win.poll()\n if event is False:\n break\n if event is None:\n continue\n\n action, args = event[0], event[1:]\n if action == 'quit':\n EventLoop.quit = True\n self.close()\n break\n\n elif action in ('fingermotion', 'fingerdown', 'fingerup'):\n # for finger, pass the raw event to SDL motion event provider\n # XXX this is problematic. On OSX, it generates touches with 0,\n # 0 coordinates, at the same times as mouse. 
But it works.\n # We have a conflict of using either the mouse or the finger.\n # Right now, we have no mechanism that we could use to know\n # which is the preferred one for the application.\n #SDL2MotionEventProvider.q.appendleft(event)\n pass\n\n elif action == 'mousemotion':\n x, y = args\n self.mouse_pos = x, self.system_size[1] - y\n self._mouse_x = x\n self._mouse_y = y\n # don't dispatch motion if no button are pressed\n if len(self._mouse_buttons_down) == 0:\n continue\n self._mouse_meta = self.modifiers\n self.dispatch('on_mouse_move', x, y, self.modifiers)\n\n elif action in ('mousebuttondown', 'mousebuttonup'):\n x, y, button = args\n btn = 'left'\n if button == 3:\n btn = 'right'\n elif button == 2:\n btn = 'middle'\n eventname = 'on_mouse_down'\n self._mouse_buttons_down.add(button)\n if action == 'mousebuttonup':\n eventname = 'on_mouse_up'\n self._mouse_buttons_down.remove(button)\n self._mouse_x = x\n self._mouse_y = y\n self.dispatch(eventname, x, y, btn, self.modifiers)\n elif action.startswith('mousewheel'):\n self._update_modifiers()\n x, y, button = args\n btn = 'scrolldown'\n if action.endswith('up'):\n btn = 'scrollup'\n elif action.endswith('right'):\n btn = 'scrollright'\n elif action.endswith('left'):\n btn = 'scrollleft'\n\n self._mouse_meta = self.modifiers\n self._mouse_btn = btn\n #times = x if y == 0 else y\n #times = min(abs(times), 100)\n #for k in range(times):\n self._mouse_down = True\n self.dispatch('on_mouse_down',\n self._mouse_x, self._mouse_y, btn, self.modifiers)\n self._mouse_down = False\n self.dispatch('on_mouse_up',\n self._mouse_x, self._mouse_y, btn, self.modifiers)\n\n elif action == 'dropfile':\n dropfile = args\n self.dispatch('on_dropfile', dropfile[0])\n # video resize\n elif action == 'windowresized':\n self._size = args\n # don't use trigger here, we want to delay the resize event\n cb = self._do_resize\n Clock.unschedule(cb)\n Clock.schedule_once(cb, .1)\n\n elif action == 'windowresized':\n self.canvas.ask_update()\n\n elif action == 'windowrestored':\n self.canvas.ask_update()\n\n elif action == 'windowexposed':\n self.canvas.ask_update()\n\n elif action == 'windowminimized':\n if Config.getboolean('kivy', 'pause_on_minimize'):\n self.do_pause()\n\n elif action == 'joyaxismotion':\n stickid, axisid, value = args\n self.dispatch('on_joy_axis', stickid, axisid, value)\n elif action == 'joyhatmotion':\n stickid, hatid, value = args\n self.dispatch('on_joy_hat', stickid, hatid, value)\n elif action == 'joyballmotion':\n stickid, ballid, xrel, yrel = args\n self.dispatch('on_joy_ball', stickid, ballid, xrel, yrel)\n elif action == 'joybuttondown':\n stickid, buttonid = args\n self.dispatch('on_joy_button_down', stickid, buttonid)\n elif action == 'joybuttonup':\n stickid, buttonid = args\n self.dispatch('on_joy_button_up', stickid, buttonid)\n\n elif action in ('keydown', 'keyup'):\n mod, key, scancode, kstr = args\n if mod in self._meta_keys:\n try:\n kstr = unichr(key)\n except ValueError:\n pass\n\n key_swap = {\n SDLK_LEFT: 276,\n SDLK_RIGHT: 275,\n SDLK_UP: 273,\n SDLK_DOWN: 274,\n SDLK_HOME: 278,\n SDLK_END: 279,\n SDLK_PAGEDOWN: 281,\n SDLK_PAGEUP: 280,\n SDLK_SHIFTL: 303,\n SDLK_SHIFTR: 304,\n SDLK_LCTRL: KMOD_LCTRL,\n SDLK_RCTRL: KMOD_RCTRL,\n SDLK_LALT: KMOD_LALT,\n SDLK_RALT: KMOD_RALT}\n\n if platform == 'ios':\n # XXX ios keyboard suck, when backspace is hit, the delete\n # keycode is sent. 
fix it.\n key_swap[127] = 8 # back\n\n try:\n key = key_swap[key]\n except KeyError:\n pass\n\n if action == 'keydown':\n self._update_modifiers(mod, key)\n else:\n self._update_modifiers(mod) # ignore the key, it\n # has been released\n if 'shift' in self._modifiers and key\\\n not in self.command_keys.keys():\n return\n\n if action == 'keyup':\n self.dispatch('on_key_up', key, scancode)\n continue\n\n # don't dispatch more key if down event is accepted\n if self.dispatch('on_key_down', key,\n scancode, kstr,\n self.modifiers):\n continue\n self.dispatch('on_keyboard', key,\n scancode, kstr,\n self.modifiers)\n\n elif action == 'textinput':\n key = args[0][0]\n # XXX on IOS, keydown/up don't send unicode anymore.\n # With latest sdl, the text is sent over textinput\n # Right now, redo keydown/up, but we need to seperate both call\n # too. (and adapt on_key_* API.)\n self.dispatch('on_key_down', key, None, args[0],\n self.modifiers)\n self.dispatch('on_keyboard', None, None, args[0],\n self.modifiers)\n self.dispatch('on_key_up', key, None, args[0],\n self.modifiers)\n\n # unhandled event !\n else:\n Logger.trace('WindowSDL: Unhandled event %s' % str(event))\n\n def _do_resize(self, dt):\n Logger.debug('Window: Resize window to %s' % str(self._size))\n self._win.resize_display_mode(*self._size)\n self.dispatch('on_resize', *self._size)\n\n def do_pause(self):\n # should go to app pause mode.\n from kivy.app import App\n from kivy.base import stopTouchApp\n app = App.get_running_app()\n if not app:\n Logger.info('WindowSDL: No running App found, exit.')\n stopTouchApp()\n return\n\n if not app.dispatch('on_pause'):\n Logger.info('WindowSDL: App doesn\\'t support pause mode, stop.')\n stopTouchApp()\n return\n\n # XXX FIXME wait for sdl resume\n while True:\n event = self._win.poll()\n if event is False:\n continue\n if event is None:\n continue\n\n action, args = event[0], event[1:]\n if action == 'quit':\n EventLoop.quit = True\n self.close()\n break\n elif action == 'windowrestored':\n break\n\n app.dispatch('on_resume')\n\n def mainloop(self):\n # don't known why, but pygame required a resize event\n # for opengl, before mainloop... 
window reinit ?\n self.dispatch('on_resize', *self.size)\n\n while not EventLoop.quit and EventLoop.status == 'started':\n try:\n self._mainloop()\n except BaseException as inst:\n # use exception manager first\n r = ExceptionManager.handle_exception(inst)\n if r == ExceptionManager.RAISE:\n stopTouchApp()\n raise\n else:\n pass\n\n # force deletion of window\n self._win.teardown_window()\n\n #\n # Pygame wrapper\n #\n def _update_modifiers(self, mods=None, key=None):\n # Available mod, from dir(pygame)\n # 'KMOD_ALT', 'KMOD_CAPS', 'KMOD_CTRL', 'KMOD_LALT',\n # 'KMOD_LCTRL', 'KMOD_LMETA', 'KMOD_LSHIFT', 'KMOD_META',\n # 'KMOD_MODE', 'KMOD_NONE'\n if mods is None and key is None:\n return\n modifiers = set()\n\n if mods is not None:\n if mods & (KMOD_RSHIFT | KMOD_LSHIFT):\n modifiers.add('shift')\n if mods & (KMOD_RALT | KMOD_LALT):\n modifiers.add('alt')\n if mods & (KMOD_RCTRL | KMOD_LCTRL):\n modifiers.add('ctrl')\n if mods & (KMOD_RMETA | KMOD_LMETA):\n modifiers.add('meta')\n\n if key is not None:\n if key in (KMOD_RSHIFT, KMOD_LSHIFT):\n modifiers.add('shift')\n if key in (KMOD_RALT, KMOD_LALT):\n modifiers.add('alt')\n if key in (KMOD_RCTRL, KMOD_LCTRL):\n modifiers.add('ctrl')\n if key in (KMOD_RMETA, KMOD_LMETA):\n modifiers.add('meta')\n\n self._modifiers = list(modifiers)\n return\n\n def request_keyboard(self, callback, target, input_type='text'):\n self._sdl_keyboard = super(WindowSDL, self).\\\n request_keyboard(callback, target, input_type)\n self._win.show_keyboard()\n Clock.schedule_interval(self._check_keyboard_shown, 1 / 5.)\n return self._sdl_keyboard\n\n def release_keyboard(self, *largs):\n super(WindowSDL, self).release_keyboard(*largs)\n self._win.hide_keyboard()\n self._sdl_keyboard = None\n return True\n\n def _check_keyboard_shown(self, dt):\n if self._sdl_keyboard is None:\n return False\n if not self._win.is_keyboard_shown():\n self._sdl_keyboard.release()\n\n", "path": "kivy/core/window/window_sdl2.py" } ]
diff --git a/kivy/core/window/window_sdl2.py b/kivy/core/window/window_sdl2.py index cfcbe44faf..96d7e94d70 100644 --- a/kivy/core/window/window_sdl2.py +++ b/kivy/core/window/window_sdl2.py @@ -99,6 +99,7 @@ class WindowSDL(WindowBase): def __init__(self, **kwargs): self._win = _WindowSDL2Storage() super(WindowSDL, self).__init__() + self._mouse_x = self._mouse_y = -1 self._meta_keys = (KMOD_LCTRL, KMOD_RCTRL, KMOD_RSHIFT, KMOD_LSHIFT, KMOD_RALT, KMOD_LALT, KMOD_LMETA, KMOD_RMETA)
SDL2 window crash on (at least) Windows Run the following ``` python from kivy.app import App from kivy.uix.label import Label class TestApp(App): def build(self): return Label(text='Use scroll wheel without having the mouse touch the kivy window first') TestApp().run() ``` The new window gets focus; without touching the window with the mouse pointer, scroll the scroll wheel. Result: ``` Traceback (most recent call last): File "dbg.py", line 11, in <module> TestApp().run() File "C:\dev\python\kivy\kivy\kivy\app.py", line 824, in run runTouchApp() File "C:\dev\python\kivy\kivy\kivy\base.py", line 484, in runTouchApp EventLoop.window.mainloop() File "C:\dev\python\kivy\kivy\kivy\core\window\window_sdl2.py", line 478, in mainloop self._mainloop() File "C:\dev\python\kivy\kivy\kivy\core\window\window_sdl2.py", line 315, in _mainloop self._mouse_x, self._mouse_y, btn, self.modifiers) AttributeError: 'WindowSDL' object has no attribute '_mouse_x' ``` _mouse_x and _mouse_y aren't set; they should probably just be set to self._mouse_x = x and self._mouse_y = y, as in the mousebutton\* or mousemotion cases.
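For context on the one-line fix in the record above: it is an instance of the general pattern of initializing event-state attributes in __init__ so that handlers which may fire in any order never see a missing attribute. The sketch below illustrates the pattern with a plain class; `FakeWindow` is a hypothetical stand-in and not Kivy code.

```python
# Sketch of the "give event-state attributes a default up front" pattern used by
# the kivy fix above. FakeWindow is a hypothetical stand-in for WindowSDL.

class FakeWindow:
    def __init__(self):
        # Without these defaults, a wheel event arriving before any motion or
        # button event would raise AttributeError, as in the traceback above.
        self._mouse_x = self._mouse_y = -1

    def on_mousemotion(self, x, y):
        self._mouse_x, self._mouse_y = x, y

    def on_mousewheel(self):
        # Safe even if the pointer never entered the window: falls back to (-1, -1).
        return self._mouse_x, self._mouse_y


win = FakeWindow()
print(win.on_mousewheel())    # (-1, -1) rather than AttributeError
win.on_mousemotion(10, 20)
print(win.on_mousewheel())    # (10, 20)
```

The (-1, -1) default mirrors what the patch chooses; any sentinel works as long as it is assigned before the event loop starts delivering events.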
conda__conda-5426
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom collections import OrderedDict\nfrom genericpath import exists\nfrom glob import glob\nfrom logging import getLogger\nimport sys\n\nfrom .compat import iteritems, on_win\nfrom .._vendor.auxlib.decorators import memoize\n\nlog = getLogger(__name__)\n\n\ndef is_admin_on_windows(): # pragma: unix no cover\n # http://stackoverflow.com/a/1026626/2127762\n if not on_win: # pragma: no cover\n return False\n try:\n from ctypes import windll\n return windll.shell32.IsUserAnAdmin()() != 0\n except ImportError as e:\n log.debug('%r', e)\n return 'unknown'\n except Exception as e:\n log.warn('%r', e)\n return 'unknown'\n\n\n@memoize\ndef linux_get_libc_version():\n \"\"\"\n If on linux, returns (libc_family, version), otherwise (None, None)\n \"\"\"\n\n if not sys.platform.startswith('linux'):\n return None, None\n\n from os import confstr, confstr_names, readlink\n\n # Python 2.7 does not have either of these keys in confstr_names, so provide\n # hard-coded defaults and assert if the key is in confstr_names but differs.\n # These are defined by POSIX anyway so should never change.\n confstr_names_fallback = OrderedDict([('CS_GNU_LIBC_VERSION', 2),\n ('CS_GNU_LIBPTHREAD_VERSION', 3)])\n\n val = None\n for k, v in iteritems(confstr_names_fallback):\n assert k not in confstr_names or confstr_names[k] == v, (\n \"confstr_names_fallback for %s is %s yet in confstr_names it is %s\"\n \"\" % (k, confstr_names_fallback[k], confstr_names[k])\n )\n try:\n val = str(confstr(v))\n except:\n pass\n else:\n if val:\n break\n\n if not val:\n # Weird, play it safe and assume glibc 2.5\n family, version = 'glibc', '2.5'\n log.warning(\"Failed to detect libc family and version, assuming %s/%s\", family, version)\n return family, version\n family, version = val.split(' ')\n\n # NPTL is just the name of the threading library, even though the\n # version refers to that of uClibc. readlink() can help to try to\n # figure out a better name instead.\n if family == 'NPTL':\n clibs = glob('/lib/libc.so*')\n for clib in clibs:\n clib = readlink(clib)\n if exists(clib):\n if clib.startswith('libuClibc'):\n if version.startswith('0.'):\n family = 'uClibc'\n else:\n family = 'uClibc-ng'\n return family, version\n # This could be some other C library; it is unlikely though.\n family = 'uClibc'\n log.warning(\"Failed to detect non-glibc family, assuming %s (%s)\", family, version)\n return family, version\n return family, version\n", "path": "conda/common/platform.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom collections import OrderedDict\nfrom genericpath import exists\nfrom glob import glob\nfrom logging import getLogger\nimport sys\n\nfrom .compat import iteritems, on_win\nfrom .._vendor.auxlib.decorators import memoize\n\nlog = getLogger(__name__)\n\n\ndef is_admin_on_windows(): # pragma: unix no cover\n # http://stackoverflow.com/a/1026626/2127762\n if not on_win: # pragma: no cover\n return False\n try:\n from ctypes import windll\n return windll.shell32.IsUserAnAdmin() != 0\n except ImportError as e:\n log.debug('%r', e)\n return 'unknown'\n except Exception as e:\n log.info('%r', e)\n return 'unknown'\n\n\n@memoize\ndef linux_get_libc_version():\n \"\"\"\n If on linux, returns (libc_family, version), otherwise (None, None)\n \"\"\"\n\n if not sys.platform.startswith('linux'):\n return None, None\n\n from os import confstr, confstr_names, readlink\n\n # Python 2.7 does not have either of these keys in confstr_names, so provide\n # hard-coded defaults and assert if the key is in confstr_names but differs.\n # These are defined by POSIX anyway so should never change.\n confstr_names_fallback = OrderedDict([('CS_GNU_LIBC_VERSION', 2),\n ('CS_GNU_LIBPTHREAD_VERSION', 3)])\n\n val = None\n for k, v in iteritems(confstr_names_fallback):\n assert k not in confstr_names or confstr_names[k] == v, (\n \"confstr_names_fallback for %s is %s yet in confstr_names it is %s\"\n \"\" % (k, confstr_names_fallback[k], confstr_names[k])\n )\n try:\n val = str(confstr(v))\n except:\n pass\n else:\n if val:\n break\n\n if not val:\n # Weird, play it safe and assume glibc 2.5\n family, version = 'glibc', '2.5'\n log.warning(\"Failed to detect libc family and version, assuming %s/%s\", family, version)\n return family, version\n family, version = val.split(' ')\n\n # NPTL is just the name of the threading library, even though the\n # version refers to that of uClibc. readlink() can help to try to\n # figure out a better name instead.\n if family == 'NPTL':\n clibs = glob('/lib/libc.so*')\n for clib in clibs:\n clib = readlink(clib)\n if exists(clib):\n if clib.startswith('libuClibc'):\n if version.startswith('0.'):\n family = 'uClibc'\n else:\n family = 'uClibc-ng'\n return family, version\n # This could be some other C library; it is unlikely though.\n family = 'uClibc'\n log.warning(\"Failed to detect non-glibc family, assuming %s (%s)\", family, version)\n return family, version\n return family, version\n", "path": "conda/common/platform.py" } ]
diff --git a/conda/common/platform.py b/conda/common/platform.py index 17eced7c47c..3d4cf40105e 100644 --- a/conda/common/platform.py +++ b/conda/common/platform.py @@ -19,12 +19,12 @@ def is_admin_on_windows(): # pragma: unix no cover return False try: from ctypes import windll - return windll.shell32.IsUserAnAdmin()() != 0 + return windll.shell32.IsUserAnAdmin() != 0 except ImportError as e: log.debug('%r', e) return 'unknown' except Exception as e: - log.warn('%r', e) + log.info('%r', e) return 'unknown' diff --git a/tests/common/test_platform.py b/tests/common/test_platform.py new file mode 100644 index 00000000000..2af72ad03b8 --- /dev/null +++ b/tests/common/test_platform.py @@ -0,0 +1,13 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import, division, print_function, unicode_literals + +from conda.common.compat import on_win +from conda.common.platform import is_admin_on_windows + + +def test_is_admin_on_windows(): + result = is_admin_on_windows() + if not on_win: + assert result is False + else: + assert result is False or result is True
Warnings appear when launching Navigator via the prompt _From @RidaZubair on May 24, 2017 9:47_ **OS:** Windows **Anaconda:** 4.4.0 **Actual:** On launching Navigator via the prompt, the following warning appears on the prompt: ![2](https://cloud.githubusercontent.com/assets/27444898/26396930/175ef622-408e-11e7-8e75-e9c2218e15de.png) _Copied from original issue: ContinuumIO/navigator#1189_
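For context on the diff above: IsUserAnAdmin() already returns an int, and the stray second pair of parentheses tries to call that int, raising a TypeError that the broad except Exception handler then logs. The sketch below reproduces the shape of the bug without Windows or ctypes; `is_user_an_admin` is a hypothetical stand-in for windll.shell32.IsUserAnAdmin.

```python
# Sketch of the double-call bug fixed by the conda diff above.
# `is_user_an_admin` stands in for ctypes' windll.shell32.IsUserAnAdmin,
# which returns a non-zero int when the current user is an administrator.

def is_user_an_admin():
    return 1

# Buggy form from the original code: the extra () tries to call the returned int.
try:
    is_user_an_admin()() != 0
except TypeError as exc:
    print(exc)                      # 'int' object is not callable

# Fixed form: call once, then compare.
print(is_user_an_admin() != 0)      # True
```

With the call fixed, the same diff's downgrade from log.warn to log.info only makes any residual failure quieter.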
modin-project__modin-3390
[ { "content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"Define data shapes.\"\"\"\n\nimport os\nimport json\n\nfrom .compatibility import ASV_USE_BACKEND, ASV_DATASET_SIZE\n\nRAND_LOW = 0\nRAND_HIGH = 1_000_000_000 if ASV_USE_BACKEND == \"omnisci\" else 100\n\nBINARY_OP_DATA_SIZE = {\n \"big\": [\n [[5000, 5000], [5000, 5000]],\n # the case extremely inefficient\n # [[20, 500_000], [10, 1_000_000]],\n [[500_000, 20], [1_000_000, 10]],\n ],\n \"small\": [\n [[250, 250], [250, 250]],\n [[20, 10_000], [10, 25_000]],\n [[10_000, 20], [25_000, 10]],\n ],\n}\nUNARY_OP_DATA_SIZE = {\n \"big\": [\n [5000, 5000],\n # the case extremely inefficient\n # [10, 1_000_000],\n [1_000_000, 10],\n ],\n \"small\": [\n [250, 250],\n [10, 10_000],\n [10_000, 10],\n ],\n}\nSERIES_DATA_SIZE = {\n \"big\": [\n (100_000, 1),\n ],\n \"small\": [\n (10_000, 1),\n ],\n}\n\n\nOMNISCI_BINARY_OP_DATA_SIZE = {\n \"big\": [\n [[500_000, 20], [1_000_000, 10]],\n ],\n \"small\": [\n [[10_000, 20], [25_000, 10]],\n ],\n}\nOMNISCI_UNARY_OP_DATA_SIZE = {\n \"big\": [\n [1_000_000, 10],\n ],\n \"small\": [\n [10_000, 10],\n ],\n}\nOMNISCI_SERIES_DATA_SIZE = {\n \"big\": [\n [10_000_000, 1],\n ],\n \"small\": [\n [100_000, 1],\n ],\n}\n\nBINARY_SHAPES = (\n OMNISCI_BINARY_OP_DATA_SIZE[ASV_DATASET_SIZE]\n if ASV_USE_BACKEND == \"omnisci\"\n else BINARY_OP_DATA_SIZE[ASV_DATASET_SIZE]\n)\nUNARY_SHAPES = (\n OMNISCI_UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE]\n if ASV_USE_BACKEND == \"omnisci\"\n else UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE]\n)\nSERIES_SHAPES = (\n OMNISCI_SERIES_DATA_SIZE[ASV_DATASET_SIZE]\n if ASV_USE_BACKEND == \"omnisci\"\n else SERIES_DATA_SIZE[ASV_DATASET_SIZE]\n)\n\nDEFAULT_GROUPBY_NGROUPS = {\n \"big\": [100, \"huge_amount_groups\"],\n \"small\": [5],\n}\nGROUPBY_NGROUPS = DEFAULT_GROUPBY_NGROUPS[ASV_DATASET_SIZE]\n\n_DEFAULT_CONFIG_T = [\n (\n UNARY_SHAPES,\n [\n # Pandas backend benchmarks\n \"TimeGroupByMultiColumn\",\n \"TimeGroupByDefaultAggregations\",\n \"TimeGroupByDictionaryAggregation\",\n \"TimeSetItem\",\n \"TimeInsert\",\n \"TimeArithmetic\",\n \"TimeSortValues\",\n \"TimeDrop\",\n \"TimeHead\",\n \"TimeFillna\",\n \"TimeFillnaDataFrame\",\n \"TimeValueCountsFrame\",\n \"TimeValueCountsSeries\",\n \"TimeIndexing\",\n \"TimeMultiIndexing\",\n \"TimeResetIndex\",\n \"TimeAstype\",\n \"TimeDescribe\",\n \"TimeProperties\",\n # IO benchmarks\n \"TimeReadCsvSkiprows\",\n \"TimeReadCsvTrueFalseValues\",\n \"TimeReadCsvNamesDtype\",\n # Scalability benchmarks\n \"TimeFromPandas\",\n \"TimeToPandas\",\n # OmniSci backend benchmarks\n \"omnisci.TimeJoin\",\n \"omnisci.TimeBinaryOpDataFrame\",\n \"omnisci.TimeArithmetic\",\n \"omnisci.TimeSortValues\",\n \"omnisci.TimeDrop\",\n \"omnisci.TimeHead\",\n \"omnisci.TimeFillna\",\n \"omnisci.TimeIndexing\",\n 
\"omnisci.TimeResetIndex\",\n \"omnisci.TimeAstype\",\n \"omnisci.TimeDescribe\",\n \"omnisci.TimeProperties\",\n \"omnisci.TimeGroupByDefaultAggregations\",\n \"omnisci.TimeGroupByMultiColumn\",\n # OmniSci backend IO benchmarks\n \"omnisci.TimeReadCsvNames\",\n ],\n ),\n (\n BINARY_SHAPES,\n [\n # Pandas backend benchmarks\n \"TimeJoin\",\n \"TimeMerge\",\n \"TimeConcat\",\n \"TimeAppend\",\n \"TimeBinaryOp\",\n # OmniSci backend benchmarks\n \"omnisci.TimeMerge\",\n \"omnisci.TimeAppend\",\n ],\n ),\n (\n SERIES_SHAPES,\n [\n # Pandas backend benchmarks\n \"TimeFillnaSeries\",\n # OmniSci backend benchmarks\n \"omnisci.TimeBinaryOpSeries\",\n \"omnisci.TimeValueCountsSeries\",\n ],\n ),\n]\nDEFAULT_CONFIG = {}\nfor _shape, _names in _DEFAULT_CONFIG_T:\n DEFAULT_CONFIG.update({_name: _shape for _name in _names})\n\nCONFIG_FROM_FILE = None\n\n\ndef get_benchmark_shapes(bench_id: str):\n \"\"\"\n Get custom benchmark shapes from a json file stored in MODIN_ASV_DATASIZE_CONFIG.\n\n If `bench_id` benchmark is not found in the file, then the default value will\n be used.\n\n Parameters\n ----------\n bench_id : str\n Unique benchmark identifier that is used to get shapes.\n\n Returns\n -------\n list\n Benchmark shapes.\n \"\"\"\n global CONFIG_FROM_FILE\n if not CONFIG_FROM_FILE:\n try:\n from modin.config import AsvDataSizeConfig\n\n filename = AsvDataSizeConfig.get()\n except ImportError:\n filename = os.environ.get(\"MODIN_ASV_DATASIZE_CONFIG\", None)\n if filename:\n # should be json\n with open(filename) as _f:\n CONFIG_FROM_FILE = json.load(_f)\n\n if CONFIG_FROM_FILE and bench_id in CONFIG_FROM_FILE:\n # example: \"omnisci.TimeReadCsvNames\": [[5555, 55], [3333, 33]]\n return CONFIG_FROM_FILE[bench_id]\n return DEFAULT_CONFIG[bench_id]\n", "path": "asv_bench/benchmarks/utils/data_shapes.py" } ]
[ { "content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"Define data shapes.\"\"\"\n\nimport os\nimport json\n\nfrom .compatibility import ASV_USE_BACKEND, ASV_DATASET_SIZE\n\nRAND_LOW = 0\nRAND_HIGH = 1_000_000_000 if ASV_USE_BACKEND == \"omnisci\" else 100\n\nBINARY_OP_DATA_SIZE = {\n \"big\": [\n [[5000, 5000], [5000, 5000]],\n # the case extremely inefficient\n # [[20, 500_000], [10, 1_000_000]],\n [[500_000, 20], [1_000_000, 10]],\n ],\n \"small\": [\n [[250, 250], [250, 250]],\n [[10_000, 20], [25_000, 10]],\n ],\n}\nUNARY_OP_DATA_SIZE = {\n \"big\": [\n [5000, 5000],\n # the case extremely inefficient\n # [10, 1_000_000],\n [1_000_000, 10],\n ],\n \"small\": [\n [250, 250],\n [10_000, 10],\n ],\n}\nSERIES_DATA_SIZE = {\n \"big\": [\n (100_000, 1),\n ],\n \"small\": [\n (10_000, 1),\n ],\n}\n\n\nOMNISCI_BINARY_OP_DATA_SIZE = {\n \"big\": [\n [[500_000, 20], [1_000_000, 10]],\n ],\n \"small\": [\n [[10_000, 20], [25_000, 10]],\n ],\n}\nOMNISCI_UNARY_OP_DATA_SIZE = {\n \"big\": [\n [1_000_000, 10],\n ],\n \"small\": [\n [10_000, 10],\n ],\n}\nOMNISCI_SERIES_DATA_SIZE = {\n \"big\": [\n [10_000_000, 1],\n ],\n \"small\": [\n [100_000, 1],\n ],\n}\n\nBINARY_SHAPES = (\n OMNISCI_BINARY_OP_DATA_SIZE[ASV_DATASET_SIZE]\n if ASV_USE_BACKEND == \"omnisci\"\n else BINARY_OP_DATA_SIZE[ASV_DATASET_SIZE]\n)\nUNARY_SHAPES = (\n OMNISCI_UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE]\n if ASV_USE_BACKEND == \"omnisci\"\n else UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE]\n)\nSERIES_SHAPES = (\n OMNISCI_SERIES_DATA_SIZE[ASV_DATASET_SIZE]\n if ASV_USE_BACKEND == \"omnisci\"\n else SERIES_DATA_SIZE[ASV_DATASET_SIZE]\n)\n\nDEFAULT_GROUPBY_NGROUPS = {\n \"big\": [100, \"huge_amount_groups\"],\n \"small\": [5],\n}\nGROUPBY_NGROUPS = DEFAULT_GROUPBY_NGROUPS[ASV_DATASET_SIZE]\n\n_DEFAULT_CONFIG_T = [\n (\n UNARY_SHAPES,\n [\n # Pandas backend benchmarks\n \"TimeGroupByMultiColumn\",\n \"TimeGroupByDefaultAggregations\",\n \"TimeGroupByDictionaryAggregation\",\n \"TimeSetItem\",\n \"TimeInsert\",\n \"TimeArithmetic\",\n \"TimeSortValues\",\n \"TimeDrop\",\n \"TimeHead\",\n \"TimeFillna\",\n \"TimeFillnaDataFrame\",\n \"TimeValueCountsFrame\",\n \"TimeValueCountsSeries\",\n \"TimeIndexing\",\n \"TimeMultiIndexing\",\n \"TimeResetIndex\",\n \"TimeAstype\",\n \"TimeDescribe\",\n \"TimeProperties\",\n # IO benchmarks\n \"TimeReadCsvSkiprows\",\n \"TimeReadCsvTrueFalseValues\",\n \"TimeReadCsvNamesDtype\",\n # Scalability benchmarks\n \"TimeFromPandas\",\n \"TimeToPandas\",\n # OmniSci backend benchmarks\n \"omnisci.TimeJoin\",\n \"omnisci.TimeBinaryOpDataFrame\",\n \"omnisci.TimeArithmetic\",\n \"omnisci.TimeSortValues\",\n \"omnisci.TimeDrop\",\n \"omnisci.TimeHead\",\n \"omnisci.TimeFillna\",\n \"omnisci.TimeIndexing\",\n \"omnisci.TimeResetIndex\",\n \"omnisci.TimeAstype\",\n 
\"omnisci.TimeDescribe\",\n \"omnisci.TimeProperties\",\n \"omnisci.TimeGroupByDefaultAggregations\",\n \"omnisci.TimeGroupByMultiColumn\",\n # OmniSci backend IO benchmarks\n \"omnisci.TimeReadCsvNames\",\n ],\n ),\n (\n BINARY_SHAPES,\n [\n # Pandas backend benchmarks\n \"TimeJoin\",\n \"TimeMerge\",\n \"TimeConcat\",\n \"TimeAppend\",\n \"TimeBinaryOp\",\n # OmniSci backend benchmarks\n \"omnisci.TimeMerge\",\n \"omnisci.TimeAppend\",\n ],\n ),\n (\n SERIES_SHAPES,\n [\n # Pandas backend benchmarks\n \"TimeFillnaSeries\",\n # OmniSci backend benchmarks\n \"omnisci.TimeBinaryOpSeries\",\n \"omnisci.TimeValueCountsSeries\",\n ],\n ),\n]\nDEFAULT_CONFIG = {}\nfor _shape, _names in _DEFAULT_CONFIG_T:\n DEFAULT_CONFIG.update({_name: _shape for _name in _names})\n\nCONFIG_FROM_FILE = None\n\n\ndef get_benchmark_shapes(bench_id: str):\n \"\"\"\n Get custom benchmark shapes from a json file stored in MODIN_ASV_DATASIZE_CONFIG.\n\n If `bench_id` benchmark is not found in the file, then the default value will\n be used.\n\n Parameters\n ----------\n bench_id : str\n Unique benchmark identifier that is used to get shapes.\n\n Returns\n -------\n list\n Benchmark shapes.\n \"\"\"\n global CONFIG_FROM_FILE\n if not CONFIG_FROM_FILE:\n try:\n from modin.config import AsvDataSizeConfig\n\n filename = AsvDataSizeConfig.get()\n except ImportError:\n filename = os.environ.get(\"MODIN_ASV_DATASIZE_CONFIG\", None)\n if filename:\n # should be json\n with open(filename) as _f:\n CONFIG_FROM_FILE = json.load(_f)\n\n if CONFIG_FROM_FILE and bench_id in CONFIG_FROM_FILE:\n # example: \"omnisci.TimeReadCsvNames\": [[5555, 55], [3333, 33]]\n return CONFIG_FROM_FILE[bench_id]\n return DEFAULT_CONFIG[bench_id]\n", "path": "asv_bench/benchmarks/utils/data_shapes.py" } ]
diff --git a/asv_bench/benchmarks/utils/data_shapes.py b/asv_bench/benchmarks/utils/data_shapes.py
index a5987cf7be1..cea86d67703 100644
--- a/asv_bench/benchmarks/utils/data_shapes.py
+++ b/asv_bench/benchmarks/utils/data_shapes.py
@@ -30,7 +30,6 @@
     ],
     "small": [
         [[250, 250], [250, 250]],
-        [[20, 10_000], [10, 25_000]],
         [[10_000, 20], [25_000, 10]],
     ],
 }
@@ -43,7 +42,6 @@
     ],
     "small": [
         [250, 250],
-        [10, 10_000],
         [10_000, 10],
     ],
 }
Do not check ASV benchmarks on test data, where the number of rows is much less than the number of columns

These sizes can be removed because such cases are not used in benchmarking: https://github.com/modin-project/modin/blob/dd91a78ad3f4b8e3e569215e9c8e540ad099d4a8/asv_bench/benchmarks/utils/data_shapes.py#L33 and https://github.com/modin-project/modin/blob/dd91a78ad3f4b8e3e569215e9c8e540ad099d4a8/asv_bench/benchmarks/utils/data_shapes.py#L46
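For context, the shapes being trimmed above feed `get_benchmark_shapes`, which first consults an optional JSON file (pointed to by `MODIN_ASV_DATASIZE_CONFIG`) and only then falls back to the built-in defaults. The snippet below is a simplified, self-contained sketch of that lookup order, not the project's actual code: the fallback table and the example file name are illustrative stand-ins.

```python
import json
import os

# Illustrative fallback table; the real defaults live in DEFAULT_CONFIG above.
DEFAULTS = {"TimeJoin": [[[500_000, 20], [1_000_000, 10]]]}


def get_shapes(bench_id):
    """Return shapes for bench_id, preferring the optional JSON override file."""
    path = os.environ.get("MODIN_ASV_DATASIZE_CONFIG")  # e.g. "my_shapes.json" (hypothetical)
    if path and os.path.exists(path):
        with open(path) as f:
            overrides = json.load(f)  # e.g. {"TimeJoin": [[[5555, 55], [3333, 33]]]}
        if bench_id in overrides:
            return overrides[bench_id]
    return DEFAULTS[bench_id]


print(get_shapes("TimeJoin"))
```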
gratipay__gratipay.com-1750
[ { "content": "import os\n\nfrom gittip import NotSane\nfrom aspen.utils import typecheck\nfrom psycopg2 import IntegrityError\n\n\n# Exceptions\n# ==========\n\nclass UnknownPlatform(Exception): pass\n\nclass NeedConfirmation(Exception):\n \"\"\"Represent the case where we need user confirmation during a merge.\n\n This is used in the workflow for merging one participant into another.\n\n \"\"\"\n\n def __init__(self, a, b, c):\n self.other_is_a_real_participant = a\n self.this_is_others_last_account_elsewhere = b\n self.we_already_have_that_kind_of_account = c\n self._all = (a, b, c)\n\n def __repr__(self):\n return \"<NeedConfirmation: %r %r %r>\" % self._all\n __str__ = __repr__\n\n def __eq__(self, other):\n return self._all == other._all\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __nonzero__(self):\n # bool(need_confirmation)\n A, B, C = self._all\n return A or C\n\n\n# Mixin\n# =====\n\nclass MixinElsewhere(object):\n \"\"\"We use this as a mixin for Participant, and in a hackish way on the\n homepage and community pages.\n\n \"\"\"\n\n def get_accounts_elsewhere(self):\n \"\"\"Return a four-tuple of elsewhere Records.\n \"\"\"\n github_account = None\n twitter_account = None\n bitbucket_account = None\n bountysource_account = None\n\n ACCOUNTS = \"SELECT * FROM elsewhere WHERE participant=%s\"\n accounts = self.db.all(ACCOUNTS, (self.username,))\n\n for account in accounts:\n if account.platform == \"github\":\n github_account = account\n elif account.platform == \"twitter\":\n twitter_account = account\n elif account.platform == \"bitbucket\":\n bitbucket_account = account\n elif account.platform == \"bountysource\":\n bountysource_account = account\n else:\n raise UnknownPlatform(account.platform)\n\n return ( github_account\n , twitter_account\n , bitbucket_account\n , bountysource_account\n )\n\n\n def get_img_src(self, size=128):\n \"\"\"Return a value for <img src=\"...\" />.\n\n Until we have our own profile pics, delegate. XXX Is this an attack\n vector? Can someone inject this value? Don't think so, but if you make\n it happen, let me know, eh? Thanks. :)\n\n https://www.gittip.com/security.txt\n\n \"\"\"\n typecheck(size, int)\n\n src = '/assets/%s/avatar-default.gif' % os.environ['__VERSION__']\n\n github, twitter, bitbucket, bountysource = \\\n self.get_accounts_elsewhere()\n if github is not None:\n # GitHub -> Gravatar: http://en.gravatar.com/site/implement/images/\n if 'gravatar_id' in github.user_info:\n gravatar_hash = github.user_info['gravatar_id']\n src = \"https://www.gravatar.com/avatar/%s.jpg?s=%s\"\n src %= (gravatar_hash, size)\n\n elif twitter is not None:\n # https://dev.twitter.com/docs/api/1.1/get/users/show\n if 'profile_image_url_https' in twitter.user_info:\n src = twitter.user_info['profile_image_url_https']\n\n # For Twitter, we don't have good control over size. The\n # biggest option is 73px(?!), but that's too small. Let's go\n # with the original: even though it may be huge, that's\n # preferrable to guaranteed blurriness. :-/\n\n src = src.replace('_normal.', '.')\n\n return src\n\n\n def take_over(self, account_elsewhere, have_confirmation=False):\n \"\"\"Given an AccountElsewhere and a bool, raise NeedConfirmation or return None.\n\n This method associates an account on another platform (GitHub, Twitter,\n etc.) with the given Gittip participant. 
Every account elsewhere has an\n associated Gittip participant account, even if its only a stub\n participant (it allows us to track pledges to that account should they\n ever decide to join Gittip).\n\n In certain circumstances, we want to present the user with a\n confirmation before proceeding to reconnect the account elsewhere to\n the new Gittip account; NeedConfirmation is the signal to request\n confirmation. If it was the last account elsewhere connected to the old\n Gittip account, then we absorb the old Gittip account into the new one,\n effectively archiving the old account.\n\n Here's what absorbing means:\n\n - consolidated tips to and fro are set up for the new participant\n\n Amounts are summed, so if alice tips bob $1 and carl $1, and\n then bob absorbs carl, then alice tips bob $2(!) and carl $0.\n\n And if bob tips alice $1 and carl tips alice $1, and then bob\n absorbs carl, then bob tips alice $2(!) and carl tips alice $0.\n\n The ctime of each new consolidated tip is the older of the two\n tips that are being consolidated.\n\n If alice tips bob $1, and alice absorbs bob, then alice tips\n bob $0.\n\n If alice tips bob $1, and bob absorbs alice, then alice tips\n bob $0.\n\n - all tips to and from the other participant are set to zero\n - the absorbed username is released for reuse\n - the absorption is recorded in an absorptions table\n\n This is done in one transaction.\n\n \"\"\"\n # Lazy imports to dodge circular imports.\n from gittip.models.participant import reserve_a_random_username\n from gittip.models.participant import gen_random_usernames\n\n platform = account_elsewhere.platform\n user_id = account_elsewhere.user_id\n\n CREATE_TEMP_TABLE_FOR_UNIQUE_TIPS = \"\"\"\n\n CREATE TEMP TABLE __temp_unique_tips ON COMMIT drop AS\n\n -- Get all the latest tips from everyone to everyone.\n\n SELECT DISTINCT ON (tipper, tippee)\n ctime, tipper, tippee, amount\n FROM tips\n ORDER BY tipper, tippee, mtime DESC;\n\n \"\"\"\n\n CONSOLIDATE_TIPS_RECEIVING = \"\"\"\n\n -- Create a new set of tips, one for each current tip *to* either\n -- the dead or the live account. If a user was tipping both the\n -- dead and the live account, then we create one new combined tip\n -- to the live account (via the GROUP BY and sum()).\n\n INSERT INTO tips (ctime, tipper, tippee, amount)\n\n SELECT min(ctime), tipper, %(live)s AS tippee, sum(amount)\n\n FROM __temp_unique_tips\n\n WHERE (tippee = %(dead)s OR tippee = %(live)s)\n -- Include tips *to* either the dead or live account.\n\n AND NOT (tipper = %(dead)s OR tipper = %(live)s)\n -- Don't include tips *from* the dead or live account,\n -- lest we convert cross-tipping to self-tipping.\n\n AND amount > 0\n -- Don't include zeroed out tips, so we avoid a no-op\n -- zero tip entry.\n\n GROUP BY tipper\n\n \"\"\"\n\n CONSOLIDATE_TIPS_GIVING = \"\"\"\n\n -- Create a new set of tips, one for each current tip *from* either\n -- the dead or the live account. 
If both the dead and the live\n -- account were tipping a given user, then we create one new\n -- combined tip from the live account (via the GROUP BY and sum()).\n\n INSERT INTO tips (ctime, tipper, tippee, amount)\n\n SELECT min(ctime), %(live)s AS tipper, tippee, sum(amount)\n\n FROM __temp_unique_tips\n\n WHERE (tipper = %(dead)s OR tipper = %(live)s)\n -- Include tips *from* either the dead or live account.\n\n AND NOT (tippee = %(dead)s OR tippee = %(live)s)\n -- Don't include tips *to* the dead or live account,\n -- lest we convert cross-tipping to self-tipping.\n\n AND amount > 0\n -- Don't include zeroed out tips, so we avoid a no-op\n -- zero tip entry.\n\n GROUP BY tippee\n\n \"\"\"\n\n ZERO_OUT_OLD_TIPS_RECEIVING = \"\"\"\n\n INSERT INTO tips (ctime, tipper, tippee, amount)\n\n SELECT ctime, tipper, tippee, 0 AS amount\n FROM __temp_unique_tips\n WHERE tippee=%s AND amount > 0\n\n \"\"\"\n\n ZERO_OUT_OLD_TIPS_GIVING = \"\"\"\n\n INSERT INTO tips (ctime, tipper, tippee, amount)\n\n SELECT ctime, tipper, tippee, 0 AS amount\n FROM __temp_unique_tips\n WHERE tipper=%s AND amount > 0\n\n \"\"\"\n\n with self.db.get_cursor() as cursor:\n\n # Load the existing connection.\n # =============================\n # Every account elsewhere has at least a stub participant account\n # on Gittip.\n\n rec = cursor.one(\"\"\"\n\n SELECT participant\n , claimed_time IS NULL AS is_stub\n FROM elsewhere\n JOIN participants ON participant=participants.username\n WHERE elsewhere.platform=%s AND elsewhere.user_id=%s\n\n \"\"\", (platform, user_id), default=NotSane)\n\n other_username = rec.participant\n\n\n # Make sure we have user confirmation if needed.\n # ==============================================\n # We need confirmation in whatever combination of the following\n # three cases:\n #\n # - the other participant is not a stub; we are taking the\n # account elsewhere away from another viable Gittip\n # participant\n #\n # - the other participant has no other accounts elsewhere; taking\n # away the account elsewhere will leave the other Gittip\n # participant without any means of logging in, and it will be\n # archived and its tips absorbed by us\n #\n # - we already have an account elsewhere connected from the given\n # platform, and it will be handed off to a new stub\n # participant\n\n # other_is_a_real_participant\n other_is_a_real_participant = not rec.is_stub\n\n # this_is_others_last_account_elsewhere\n nelsewhere = cursor.one( \"SELECT count(*) FROM elsewhere \"\n \"WHERE participant=%s\"\n , (other_username,)\n )\n assert nelsewhere > 0 # sanity check\n this_is_others_last_account_elsewhere = (nelsewhere == 1)\n\n # we_already_have_that_kind_of_account\n nparticipants = cursor.one( \"SELECT count(*) FROM elsewhere \"\n \"WHERE participant=%s AND platform=%s\"\n , (self.username, platform)\n )\n assert nparticipants in (0, 1) # sanity check\n we_already_have_that_kind_of_account = nparticipants == 1\n\n need_confirmation = NeedConfirmation( other_is_a_real_participant\n , this_is_others_last_account_elsewhere\n , we_already_have_that_kind_of_account\n )\n if need_confirmation and not have_confirmation:\n raise need_confirmation\n\n\n # We have user confirmation. Proceed.\n # ===================================\n # There is a race condition here. The last person to call this will\n # win. 
XXX: I'm not sure what will happen to the DB and UI for the\n # loser.\n\n\n # Move any old account out of the way.\n # ====================================\n\n if we_already_have_that_kind_of_account:\n new_stub_username = reserve_a_random_username(cursor)\n cursor.run( \"UPDATE elsewhere SET participant=%s \"\n \"WHERE platform=%s AND participant=%s\"\n , (new_stub_username, platform, self.username)\n )\n\n\n # Do the deal.\n # ============\n # If other_is_not_a_stub, then other will have the account\n # elsewhere taken away from them with this call. If there are other\n # browsing sessions open from that account, they will stay open\n # until they expire (XXX Is that okay?)\n\n cursor.run( \"UPDATE elsewhere SET participant=%s \"\n \"WHERE platform=%s AND user_id=%s\"\n , (self.username, platform, user_id)\n )\n\n\n # Fold the old participant into the new as appropriate.\n # =====================================================\n # We want to do this whether or not other is a stub participant.\n\n if this_is_others_last_account_elsewhere:\n\n # Take over tips.\n # ===============\n\n x, y = self.username, other_username\n cursor.run(CREATE_TEMP_TABLE_FOR_UNIQUE_TIPS)\n cursor.run(CONSOLIDATE_TIPS_RECEIVING, dict(live=x, dead=y))\n cursor.run(CONSOLIDATE_TIPS_GIVING, dict(live=x, dead=y))\n cursor.run(ZERO_OUT_OLD_TIPS_RECEIVING, (other_username,))\n cursor.run(ZERO_OUT_OLD_TIPS_GIVING, (other_username,))\n\n\n # Archive the old participant.\n # ============================\n # We always give them a new, random username. We sign out\n # the old participant.\n\n for archive_username in gen_random_usernames():\n try:\n username = cursor.one(\"\"\"\n\n UPDATE participants\n SET username=%s\n , username_lower=%s\n , session_token=NULL\n , session_expires=now()\n WHERE username=%s\n RETURNING username\n\n \"\"\", ( archive_username\n , archive_username.lower()\n , other_username\n ), default=NotSane)\n except IntegrityError:\n continue # archive_username is already taken;\n # extremely unlikely, but ...\n # XXX But can the UPDATE fail in other ways?\n else:\n assert username == archive_username\n break\n\n\n # Record the absorption.\n # ======================\n # This is for preservation of history.\n\n cursor.run( \"INSERT INTO absorptions \"\n \"(absorbed_was, absorbed_by, archived_as) \"\n \"VALUES (%s, %s, %s)\"\n , ( other_username\n , self.username\n , archive_username\n )\n )\n\n# Utter Hack\n# ==========\n\ndef utter_hack(records):\n for rec in records:\n yield UtterHack(rec)\n\nclass UtterHack(MixinElsewhere):\n def __init__(self, rec):\n for name in rec._fields:\n setattr(self, name, getattr(rec, name))\n", "path": "gittip/models/_mixin_elsewhere.py" } ]
[ { "content": "import os\n\nfrom gittip import NotSane\nfrom aspen.utils import typecheck\nfrom psycopg2 import IntegrityError\n\n\n# Exceptions\n# ==========\n\nclass UnknownPlatform(Exception): pass\n\nclass NeedConfirmation(Exception):\n \"\"\"Represent the case where we need user confirmation during a merge.\n\n This is used in the workflow for merging one participant into another.\n\n \"\"\"\n\n def __init__(self, a, b, c):\n self.other_is_a_real_participant = a\n self.this_is_others_last_account_elsewhere = b\n self.we_already_have_that_kind_of_account = c\n self._all = (a, b, c)\n\n def __repr__(self):\n return \"<NeedConfirmation: %r %r %r>\" % self._all\n __str__ = __repr__\n\n def __eq__(self, other):\n return self._all == other._all\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __nonzero__(self):\n # bool(need_confirmation)\n A, B, C = self._all\n return A or C\n\n\n# Mixin\n# =====\n\nclass MixinElsewhere(object):\n \"\"\"We use this as a mixin for Participant, and in a hackish way on the\n homepage and community pages.\n\n \"\"\"\n\n def get_accounts_elsewhere(self):\n \"\"\"Return a four-tuple of elsewhere Records.\n \"\"\"\n github_account = None\n twitter_account = None\n bitbucket_account = None\n bountysource_account = None\n\n ACCOUNTS = \"SELECT * FROM elsewhere WHERE participant=%s\"\n accounts = self.db.all(ACCOUNTS, (self.username,))\n\n for account in accounts:\n if account.platform == \"github\":\n github_account = account\n elif account.platform == \"twitter\":\n twitter_account = account\n elif account.platform == \"bitbucket\":\n bitbucket_account = account\n elif account.platform == \"bountysource\":\n bountysource_account = account\n else:\n raise UnknownPlatform(account.platform)\n\n return ( github_account\n , twitter_account\n , bitbucket_account\n , bountysource_account\n )\n\n\n def get_img_src(self, size=128):\n \"\"\"Return a value for <img src=\"...\" />.\n\n Until we have our own profile pics, delegate. XXX Is this an attack\n vector? Can someone inject this value? Don't think so, but if you make\n it happen, let me know, eh? Thanks. :)\n\n https://www.gittip.com/security.txt\n\n \"\"\"\n typecheck(size, int)\n\n src = '/assets/%s/avatar-default.gif' % os.environ['__VERSION__']\n\n github, twitter, bitbucket, bountysource = \\\n self.get_accounts_elsewhere()\n if github is not None:\n # GitHub -> Gravatar: http://en.gravatar.com/site/implement/images/\n if 'gravatar_id' in github.user_info:\n gravatar_hash = github.user_info['gravatar_id']\n src = \"https://www.gravatar.com/avatar/%s.jpg?s=%s\"\n src %= (gravatar_hash, size)\n\n elif twitter is not None:\n # https://dev.twitter.com/docs/api/1.1/get/users/show\n if 'profile_image_url_https' in twitter.user_info:\n src = twitter.user_info['profile_image_url_https']\n\n # For Twitter, we don't have good control over size. The\n # biggest option is 73px(?!), but that's too small. Let's go\n # with the original: even though it may be huge, that's\n # preferrable to guaranteed blurriness. :-/\n\n src = src.replace('_normal.', '.')\n\n return src\n\n\n def take_over(self, account_elsewhere, have_confirmation=False):\n \"\"\"Given an AccountElsewhere and a bool, raise NeedConfirmation or return None.\n\n This method associates an account on another platform (GitHub, Twitter,\n etc.) with the given Gittip participant. 
Every account elsewhere has an\n associated Gittip participant account, even if its only a stub\n participant (it allows us to track pledges to that account should they\n ever decide to join Gittip).\n\n In certain circumstances, we want to present the user with a\n confirmation before proceeding to reconnect the account elsewhere to\n the new Gittip account; NeedConfirmation is the signal to request\n confirmation. If it was the last account elsewhere connected to the old\n Gittip account, then we absorb the old Gittip account into the new one,\n effectively archiving the old account.\n\n Here's what absorbing means:\n\n - consolidated tips to and fro are set up for the new participant\n\n Amounts are summed, so if alice tips bob $1 and carl $1, and\n then bob absorbs carl, then alice tips bob $2(!) and carl $0.\n\n And if bob tips alice $1 and carl tips alice $1, and then bob\n absorbs carl, then bob tips alice $2(!) and carl tips alice $0.\n\n The ctime of each new consolidated tip is the older of the two\n tips that are being consolidated.\n\n If alice tips bob $1, and alice absorbs bob, then alice tips\n bob $0.\n\n If alice tips bob $1, and bob absorbs alice, then alice tips\n bob $0.\n\n - all tips to and from the other participant are set to zero\n - the absorbed username is released for reuse\n - the absorption is recorded in an absorptions table\n\n This is done in one transaction.\n\n \"\"\"\n # Lazy imports to dodge circular imports.\n from gittip.models.participant import reserve_a_random_username\n from gittip.models.participant import gen_random_usernames\n\n platform = account_elsewhere.platform\n user_id = account_elsewhere.user_id\n\n CREATE_TEMP_TABLE_FOR_UNIQUE_TIPS = \"\"\"\n\n CREATE TEMP TABLE __temp_unique_tips ON COMMIT drop AS\n\n -- Get all the latest tips from everyone to everyone.\n\n SELECT DISTINCT ON (tipper, tippee)\n ctime, tipper, tippee, amount\n FROM tips\n ORDER BY tipper, tippee, mtime DESC;\n\n \"\"\"\n\n CONSOLIDATE_TIPS_RECEIVING = \"\"\"\n\n -- Create a new set of tips, one for each current tip *to* either\n -- the dead or the live account. If a user was tipping both the\n -- dead and the live account, then we create one new combined tip\n -- to the live account (via the GROUP BY and sum()).\n\n INSERT INTO tips (ctime, tipper, tippee, amount)\n\n SELECT min(ctime), tipper, %(live)s AS tippee, sum(amount)\n\n FROM __temp_unique_tips\n\n WHERE (tippee = %(dead)s OR tippee = %(live)s)\n -- Include tips *to* either the dead or live account.\n\n AND NOT (tipper = %(dead)s OR tipper = %(live)s)\n -- Don't include tips *from* the dead or live account,\n -- lest we convert cross-tipping to self-tipping.\n\n AND amount > 0\n -- Don't include zeroed out tips, so we avoid a no-op\n -- zero tip entry.\n\n GROUP BY tipper\n\n \"\"\"\n\n CONSOLIDATE_TIPS_GIVING = \"\"\"\n\n -- Create a new set of tips, one for each current tip *from* either\n -- the dead or the live account. 
If both the dead and the live\n -- account were tipping a given user, then we create one new\n -- combined tip from the live account (via the GROUP BY and sum()).\n\n INSERT INTO tips (ctime, tipper, tippee, amount)\n\n SELECT min(ctime), %(live)s AS tipper, tippee, sum(amount)\n\n FROM __temp_unique_tips\n\n WHERE (tipper = %(dead)s OR tipper = %(live)s)\n -- Include tips *from* either the dead or live account.\n\n AND NOT (tippee = %(dead)s OR tippee = %(live)s)\n -- Don't include tips *to* the dead or live account,\n -- lest we convert cross-tipping to self-tipping.\n\n AND amount > 0\n -- Don't include zeroed out tips, so we avoid a no-op\n -- zero tip entry.\n\n GROUP BY tippee\n\n \"\"\"\n\n ZERO_OUT_OLD_TIPS_RECEIVING = \"\"\"\n\n INSERT INTO tips (ctime, tipper, tippee, amount)\n\n SELECT ctime, tipper, tippee, 0 AS amount\n FROM __temp_unique_tips\n WHERE tippee=%s AND amount > 0\n\n \"\"\"\n\n ZERO_OUT_OLD_TIPS_GIVING = \"\"\"\n\n INSERT INTO tips (ctime, tipper, tippee, amount)\n\n SELECT ctime, tipper, tippee, 0 AS amount\n FROM __temp_unique_tips\n WHERE tipper=%s AND amount > 0\n\n \"\"\"\n\n with self.db.get_cursor() as cursor:\n\n # Load the existing connection.\n # =============================\n # Every account elsewhere has at least a stub participant account\n # on Gittip.\n\n rec = cursor.one(\"\"\"\n\n SELECT participant\n , claimed_time IS NULL AS is_stub\n FROM elsewhere\n JOIN participants ON participant=participants.username\n WHERE elsewhere.platform=%s AND elsewhere.user_id=%s\n\n \"\"\", (platform, user_id), default=NotSane)\n\n other_username = rec.participant\n\n\n # Make sure we have user confirmation if needed.\n # ==============================================\n # We need confirmation in whatever combination of the following\n # three cases:\n #\n # - the other participant is not a stub; we are taking the\n # account elsewhere away from another viable Gittip\n # participant\n #\n # - the other participant has no other accounts elsewhere; taking\n # away the account elsewhere will leave the other Gittip\n # participant without any means of logging in, and it will be\n # archived and its tips absorbed by us\n #\n # - we already have an account elsewhere connected from the given\n # platform, and it will be handed off to a new stub\n # participant\n\n # other_is_a_real_participant\n other_is_a_real_participant = not rec.is_stub\n\n # this_is_others_last_account_elsewhere\n nelsewhere = cursor.one( \"SELECT count(*) FROM elsewhere \"\n \"WHERE participant=%s\"\n , (other_username,)\n )\n assert nelsewhere > 0 # sanity check\n this_is_others_last_account_elsewhere = (nelsewhere == 1)\n\n # we_already_have_that_kind_of_account\n nparticipants = cursor.one( \"SELECT count(*) FROM elsewhere \"\n \"WHERE participant=%s AND platform=%s\"\n , (self.username, platform)\n )\n assert nparticipants in (0, 1) # sanity check\n we_already_have_that_kind_of_account = nparticipants == 1\n\n need_confirmation = NeedConfirmation( other_is_a_real_participant\n , this_is_others_last_account_elsewhere\n , we_already_have_that_kind_of_account\n )\n if need_confirmation and not have_confirmation:\n raise need_confirmation\n\n\n # We have user confirmation. Proceed.\n # ===================================\n # There is a race condition here. The last person to call this will\n # win. 
XXX: I'm not sure what will happen to the DB and UI for the\n # loser.\n\n\n # Move any old account out of the way.\n # ====================================\n\n if we_already_have_that_kind_of_account:\n new_stub_username = reserve_a_random_username(cursor)\n cursor.run( \"UPDATE elsewhere SET participant=%s \"\n \"WHERE platform=%s AND participant=%s\"\n , (new_stub_username, platform, self.username)\n )\n\n\n # Do the deal.\n # ============\n # If other_is_not_a_stub, then other will have the account\n # elsewhere taken away from them with this call. If there are other\n # browsing sessions open from that account, they will stay open\n # until they expire (XXX Is that okay?)\n\n cursor.run( \"UPDATE elsewhere SET participant=%s \"\n \"WHERE platform=%s AND user_id=%s\"\n , (self.username, platform, user_id)\n )\n\n\n # Fold the old participant into the new as appropriate.\n # =====================================================\n # We want to do this whether or not other is a stub participant.\n\n if this_is_others_last_account_elsewhere:\n\n # Take over tips.\n # ===============\n\n x, y = self.username, other_username\n cursor.run(CREATE_TEMP_TABLE_FOR_UNIQUE_TIPS)\n cursor.run(CONSOLIDATE_TIPS_RECEIVING, dict(live=x, dead=y))\n cursor.run(CONSOLIDATE_TIPS_GIVING, dict(live=x, dead=y))\n cursor.run(ZERO_OUT_OLD_TIPS_RECEIVING, (other_username,))\n cursor.run(ZERO_OUT_OLD_TIPS_GIVING, (other_username,))\n\n\n # Archive the old participant.\n # ============================\n # We always give them a new, random username. We sign out\n # the old participant.\n\n for archive_username in gen_random_usernames():\n try:\n username = cursor.one(\"\"\"\n\n UPDATE participants\n SET username=%s\n , username_lower=%s\n , session_token=NULL\n , session_expires=now()\n WHERE username=%s\n RETURNING username\n\n \"\"\", ( archive_username\n , archive_username.lower()\n , other_username\n ), default=NotSane)\n except IntegrityError:\n continue # archive_username is already taken;\n # extremely unlikely, but ...\n # XXX But can the UPDATE fail in other ways?\n else:\n assert username == archive_username\n break\n\n\n # Record the absorption.\n # ======================\n # This is for preservation of history.\n\n cursor.run( \"INSERT INTO absorptions \"\n \"(absorbed_was, absorbed_by, archived_as) \"\n \"VALUES (%s, %s, %s)\"\n , ( other_username\n , self.username\n , archive_username\n )\n )\n\n# Utter Hack\n# ==========\n\ndef utter_hack(db, records):\n for rec in records:\n yield UtterHack(db, rec)\n\nclass UtterHack(MixinElsewhere):\n def __init__(self, db, rec):\n self.db = db\n for name in rec._fields:\n setattr(self, name, getattr(rec, name))\n", "path": "gittip/models/_mixin_elsewhere.py" } ]
diff --git a/gittip/models/_mixin_elsewhere.py b/gittip/models/_mixin_elsewhere.py index 2074dc1aa4..7a82337d0b 100644 --- a/gittip/models/_mixin_elsewhere.py +++ b/gittip/models/_mixin_elsewhere.py @@ -413,11 +413,12 @@ def take_over(self, account_elsewhere, have_confirmation=False): # Utter Hack # ========== -def utter_hack(records): +def utter_hack(db, records): for rec in records: - yield UtterHack(rec) + yield UtterHack(db, rec) class UtterHack(MixinElsewhere): - def __init__(self, rec): + def __init__(self, db, rec): + self.db = db for name in rec._fields: setattr(self, name, getattr(rec, name)) diff --git a/www/for/%slug/index.html.spt b/www/for/%slug/index.html.spt index a625948731..3ae6f81110 100644 --- a/www/for/%slug/index.html.spt +++ b/www/for/%slug/index.html.spt @@ -99,7 +99,7 @@ if community.nmembers >= website.NMEMBERS_THRESHOLD: # Run queries for listings. # ========================= - new_participants = utter_hack(query_cache.all(""" + new_participants = utter_hack(website.db, query_cache.all(""" -- new participants on community page SELECT username, claimed_time FROM ( @@ -121,7 +121,7 @@ if community.nmembers >= website.NMEMBERS_THRESHOLD: """, (community.slug, limit, offset))) - givers = utter_hack(query_cache.all(""" + givers = utter_hack(website.db, query_cache.all(""" -- top givers on community page SELECT tipper AS username, anonymous, sum(amount) AS amount @@ -152,7 +152,7 @@ if community.nmembers >= website.NMEMBERS_THRESHOLD: # XXX I'm nearly positive that one or both of givers and receivers can contain # orphan accounts. See https://github.com/gittip/www.gittip.com/issues/650 - receivers = utter_hack(query_cache.all(""" + receivers = utter_hack(website.db, query_cache.all(""" -- top receivers on community page SELECT tippee AS username, claimed_time, sum(amount) AS amount diff --git a/www/index.html.spt b/www/index.html.spt index c3c019796b..ddc914f77f 100644 --- a/www/index.html.spt +++ b/www/index.html.spt @@ -37,7 +37,7 @@ try: except ValueError: raise Response(400) -new_participants = utter_hack(website.db.all(""" +new_participants = utter_hack(website.db, website.db.all(""" -- new participants on homepage SELECT username, claimed_time, (SELECT user_info->'gravatar_id' @@ -58,7 +58,7 @@ new_participants = utter_hack(website.db.all(""" """, (limit, offset))) -givers = utter_hack(website.db.all(""" +givers = utter_hack(website.db, website.db.all(""" -- top givers on homepage SELECT * FROM homepage_top_givers @@ -71,7 +71,7 @@ givers = utter_hack(website.db.all(""" # XXX I'm nearly positive that one or both of givers and receivers can contain # orphan accounts. See https://github.com/gittip/www.gittip.com/issues/650 -receivers = utter_hack(website.db.all(""" +receivers = utter_hack(website.db, website.db.all(""" -- top receivers on homepage SELECT * FROM homepage_top_receivers @@ -101,14 +101,14 @@ receivers = utter_hack(website.db.all(""" <div class="as-content"> <h1>Who inspires you?</h1> <form id="jump"> - <span class="luxury">Enter a </span> + <span class="luxury">Enter a </span> <select> <option value="twitter">Twitter</option> <option value="github">GitHub</option> <option value="bitbucket">Bitbucket</option> - </select> + </select> <span class="luxury"> username: </span> - <input placeholder="username" /> + <input placeholder="username" /> <button type="submit">Go</button> </form> </div>
regression w/ unglobalizing of gittip.db

We fixed a bunch (!m). I'm seeing one more on community pages: https://www.gittip.com/for/python/

```
Traceback (most recent call last):
  File "aspen/website.py", line 81, in handle_safely
    response = self.handle(request)
  File "aspen/website.py", line 114, in handle
    response = request.resource.respond(request)
  File "aspen/resources/dynamic_resource.py", line 68, in respond
    response = self.get_response(context)
  File "aspen/resources/negotiated_resource.py", line 99, in get_response
    response.body = render(context)
  File "aspen/renderers/__init__.py", line 99, in __call__
    return self.render_content(context)
  File "site-packages/aspen_tornado_renderer.py", line 14, in render_content
    return self.compiled.generate(**context)
  File "tornado/template.py", line 129, in generate
    return execute()
  File "/app/www/for/%slug/index.html.spt", line 233, in _execute
    {% if community.nmembers == 0 %}
  File "gittip/models/_mixin_elsewhere.py", line 96, in get_img_src
    self.get_accounts_elsewhere()
  File "gittip/models/_mixin_elsewhere.py", line 60, in get_accounts_elsewhere
    accounts = self.db.all(ACCOUNTS, (self.username,))
AttributeError: 'UtterHack' object has no attribute 'db'
```

https://app.getsentry.com/gittip/gittip/group/11624316/
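The traceback boils down to `MixinElsewhere.get_accounts_elsewhere` expecting a `self.db` attribute that the homepage/community `UtterHack` wrapper never set; the patch above threads the database handle through `utter_hack` and the `UtterHack` constructor. The following is a stripped-down sketch of that failure mode and the fix, with `FakeDB` and a plain dict standing in for the real database wrapper and record row:

```python
class FakeDB:
    """Stand-in for the real database wrapper the site passes around."""
    def all(self, sql, params):
        return []  # pretend the query returned no rows


class MixinElsewhere:
    def get_accounts_elsewhere(self):
        # Raises AttributeError if self.db was never assigned.
        return self.db.all("SELECT * FROM elsewhere WHERE participant=%s",
                           (self.username,))


class UtterHack(MixinElsewhere):
    def __init__(self, db, rec):
        self.db = db  # the fix: accept and store the handle up front
        for name, value in rec.items():  # the real code copies a record's _fields
            setattr(self, name, value)


hack = UtterHack(FakeDB(), {"username": "alice"})
print(hack.get_accounts_elsewhere())  # [] instead of AttributeError: ... no attribute 'db'
```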
cloudtools__troposphere-605
[ { "content": "# Copyright (c) 2012-2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty\nfrom .validators import integer, boolean, status\nfrom .validators import iam_path, iam_role_name, iam_group_name, iam_user_name\n\ntry:\n from awacs.aws import Policy\n policytypes = (dict, Policy)\nexcept ImportError:\n policytypes = dict,\n\n\nActive = \"Active\"\nInactive = \"Inactive\"\n\n\nclass AccessKey(AWSObject):\n resource_type = \"AWS::IAM::AccessKey\"\n\n props = {\n 'Serial': (integer, False),\n 'Status': (status, False),\n 'UserName': (basestring, True),\n }\n\n\nclass PolicyType(AWSObject):\n resource_type = \"AWS::IAM::Policy\"\n\n props = {\n 'Groups': ([basestring], False),\n 'PolicyDocument': (policytypes, True),\n 'PolicyName': (basestring, True),\n 'Roles': ([basestring], False),\n 'Users': ([basestring], False),\n }\n\n\nclass Policy(AWSProperty):\n props = {\n 'PolicyDocument': (policytypes, True),\n 'PolicyName': (basestring, True),\n }\n\nPolicyProperty = Policy\n\n\nclass Group(AWSObject):\n def validate_title(self):\n iam_group_name(self.title)\n\n resource_type = \"AWS::IAM::Group\"\n\n props = {\n 'GroupName': (iam_group_name, False),\n 'ManagedPolicyArns': ([basestring], False),\n 'Path': (iam_path, False),\n 'Policies': ([Policy], False),\n }\n\n\nclass InstanceProfile(AWSObject):\n resource_type = \"AWS::IAM::InstanceProfile\"\n\n props = {\n 'Path': (iam_path, False),\n 'Roles': (list, True),\n }\n\n\nclass Role(AWSObject):\n def validate_title(self):\n iam_role_name(self.title)\n\n resource_type = \"AWS::IAM::Role\"\n\n props = {\n 'AssumeRolePolicyDocument': (policytypes, True),\n 'ManagedPolicyArns': ([basestring], False),\n 'Path': (iam_path, False),\n 'Policies': ([Policy], False),\n 'RoleName': (iam_role_name, False),\n }\n\n\nclass LoginProfile(AWSProperty):\n props = {\n 'Password': (basestring, True),\n 'PasswordResetRequired': (boolean, False),\n }\n\n\nclass User(AWSObject):\n resource_type = \"AWS::IAM::User\"\n\n props = {\n 'Path': (iam_path, False),\n 'Groups': ([basestring], False),\n 'ManagedPolicyArns': ([basestring], False),\n 'LoginProfile': (LoginProfile, False),\n 'Policies': ([Policy], False),\n 'UserName': (iam_user_name, False),\n }\n\n\nclass UserToGroupAddition(AWSObject):\n resource_type = \"AWS::IAM::UserToGroupAddition\"\n\n props = {\n 'GroupName': (basestring, True),\n 'Users': (list, True),\n }\n\n\nclass ManagedPolicy(AWSObject):\n resource_type = \"AWS::IAM::ManagedPolicy\"\n\n props = {\n 'Description': (basestring, False),\n 'Groups': ([basestring], False),\n 'Path': (iam_path, False),\n 'PolicyDocument': (policytypes, True),\n 'Roles': ([basestring], False),\n 'Users': ([basestring], False),\n }\n", "path": "troposphere/iam.py" } ]
[ { "content": "# Copyright (c) 2012-2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty\nfrom .validators import integer, boolean, status\nfrom .validators import iam_path, iam_role_name, iam_group_name, iam_user_name\n\ntry:\n from awacs.aws import Policy\n policytypes = (dict, Policy)\nexcept ImportError:\n policytypes = dict,\n\n\nActive = \"Active\"\nInactive = \"Inactive\"\n\n\nclass AccessKey(AWSObject):\n resource_type = \"AWS::IAM::AccessKey\"\n\n props = {\n 'Serial': (integer, False),\n 'Status': (status, False),\n 'UserName': (basestring, True),\n }\n\n\nclass PolicyType(AWSObject):\n resource_type = \"AWS::IAM::Policy\"\n\n props = {\n 'Groups': ([basestring], False),\n 'PolicyDocument': (policytypes, True),\n 'PolicyName': (basestring, True),\n 'Roles': ([basestring], False),\n 'Users': ([basestring], False),\n }\n\n\nclass Policy(AWSProperty):\n props = {\n 'PolicyDocument': (policytypes, True),\n 'PolicyName': (basestring, True),\n }\n\nPolicyProperty = Policy\n\n\nclass Group(AWSObject):\n resource_type = \"AWS::IAM::Group\"\n\n props = {\n 'GroupName': (iam_group_name, False),\n 'ManagedPolicyArns': ([basestring], False),\n 'Path': (iam_path, False),\n 'Policies': ([Policy], False),\n }\n\n\nclass InstanceProfile(AWSObject):\n resource_type = \"AWS::IAM::InstanceProfile\"\n\n props = {\n 'Path': (iam_path, False),\n 'Roles': (list, True),\n }\n\n\nclass Role(AWSObject):\n resource_type = \"AWS::IAM::Role\"\n\n props = {\n 'AssumeRolePolicyDocument': (policytypes, True),\n 'ManagedPolicyArns': ([basestring], False),\n 'Path': (iam_path, False),\n 'Policies': ([Policy], False),\n 'RoleName': (iam_role_name, False),\n }\n\n\nclass LoginProfile(AWSProperty):\n props = {\n 'Password': (basestring, True),\n 'PasswordResetRequired': (boolean, False),\n }\n\n\nclass User(AWSObject):\n resource_type = \"AWS::IAM::User\"\n\n props = {\n 'Path': (iam_path, False),\n 'Groups': ([basestring], False),\n 'ManagedPolicyArns': ([basestring], False),\n 'LoginProfile': (LoginProfile, False),\n 'Policies': ([Policy], False),\n 'UserName': (iam_user_name, False),\n }\n\n\nclass UserToGroupAddition(AWSObject):\n resource_type = \"AWS::IAM::UserToGroupAddition\"\n\n props = {\n 'GroupName': (basestring, True),\n 'Users': (list, True),\n }\n\n\nclass ManagedPolicy(AWSObject):\n resource_type = \"AWS::IAM::ManagedPolicy\"\n\n props = {\n 'Description': (basestring, False),\n 'Groups': ([basestring], False),\n 'Path': (iam_path, False),\n 'PolicyDocument': (policytypes, True),\n 'Roles': ([basestring], False),\n 'Users': ([basestring], False),\n }\n", "path": "troposphere/iam.py" } ]
diff --git a/troposphere/iam.py b/troposphere/iam.py
index 472fe2a3d..980d6a4b8 100644
--- a/troposphere/iam.py
+++ b/troposphere/iam.py
@@ -50,9 +50,6 @@ class Policy(AWSProperty):
 
 
 class Group(AWSObject):
-    def validate_title(self):
-        iam_group_name(self.title)
-
     resource_type = "AWS::IAM::Group"
 
     props = {
@@ -73,9 +70,6 @@ class InstanceProfile(AWSObject):
 
 
 class Role(AWSObject):
-    def validate_title(self):
-        iam_role_name(self.title)
-
     resource_type = "AWS::IAM::Role"
 
     props = {
BUG: IAM Role title should not be validated The title specified in troposphere is not the actual physical resource name and should be validated as such (https://github.com/cloudtools/troposphere/blob/fe72f7d3f7b0711a22173c1240134173aafef574/troposphere/iam.py#L75-L77). The next snippet was created today using `troposphere==1.5.0`and `boto3==1.2.6`: ``` python >>> import boto3 >>> import troposphere >>> import troposphere.iam >>> import time >>> >>> long_title = 'VeryLongName{}'.format('Z' * 100) >>> print len(long_title) 112 >>> >>> # create a role ... >>> role = troposphere.iam.Role( ... long_title, ... AssumeRolePolicyDocument={ ... "Statement": [{ ... "Action": ["sts:AssumeRole"], ... "Effect": "Allow", ... "Principal": {"Service": ["ec2.amazonaws.com"]} ... }] ... }) Traceback (most recent call last): File "<stdin>", line 8, in <module> File "/Users/hugo/.virtualenvs/tmp-5ce4367de56b6bde/lib/python2.7/site-packages/troposphere/__init__.py", line 44, in __init__ self.validate_title() File "/Users/hugo/.virtualenvs/tmp-5ce4367de56b6bde/lib/python2.7/site-packages/troposphere/iam.py", line 77, in validate_title iam_role_name(self.title) File "/Users/hugo/.virtualenvs/tmp-5ce4367de56b6bde/lib/python2.7/site-packages/troposphere/validators.py", line 98, in iam_role_name raise ValueError('IAM Role Name may not exceed 64 characters') ValueError: IAM Role Name may not exceed 64 characters >>> >>> >>> # delete validator ... >>> del troposphere.iam.Role.validate_title >>> # try again ... >>> role = troposphere.iam.Role( ... long_title, ... AssumeRolePolicyDocument={ ... "Statement": [{ ... "Action": ["sts:AssumeRole"], ... "Effect": "Allow", ... "Principal": {"Service": ["ec2.amazonaws.com"]} ... }] ... }) >>> template = troposphere.Template() >>> template.add_resource(role) <troposphere.iam.Role object at 0x10ee02990> >>> print template.to_json() { "Resources": { "VeryLongNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ": { "Properties": { "AssumeRolePolicyDocument": { "Statement": [ { "Action": [ "sts:AssumeRole" ], "Effect": "Allow", "Principal": { "Service": [ "ec2.amazonaws.com" ] } } ] } }, "Type": "AWS::IAM::Role" } } } >>> client = boto3.client('cloudformation', 'us-east-1') >>> stack = client.create_stack( ... StackName='testTroposphere', ... TemplateBody=template.to_json(), ... Capabilities=['CAPABILITY_IAM']) >>> >>> while client.describe_stacks(StackName=stack['StackId'])['Stacks'][0]['StackStatus'] != 'CREATE_COMPLETE': ... import time ... time.sleep(1) ... >>> resources = client.describe_stack_resources(StackName=stack['StackId']) >>> for r in resources['StackResources']: ... physical_id = r['PhysicalResourceId'] ... print("{} ({} chars)".format(physical_id, len(physical_id))) ... testTroposphere-VeryLongNameZZZZZZZZZZZZZZZZZZZZZZ-PTHEM9FPNX28 (63 chars) ``` The snippet above shows that the physical id was chosen by CloudFormation and isn't just a trimmed version of the title (it includes a random part too).
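In other words, the 64-character limit applies to the physical `RoleName` property (when one is supplied), not to the CloudFormation logical ID that troposphere uses as the resource title, which is why the patch drops `validate_title` while keeping the `iam_role_name` validator on the `RoleName` prop. A minimal standalone sketch of where the check belongs; the validator body mirrors the error message from the traceback but is reproduced here outside troposphere, and the names are illustrative:

```python
def iam_role_name(name):
    """Length check that belongs on the physical RoleName, not on the title."""
    if len(name) > 64:
        raise ValueError("IAM Role Name may not exceed 64 characters")
    return name


logical_id = "VeryLongName" + "Z" * 100  # CloudFormation logical ID; only needs to be alphanumeric
physical_name = "my-app-role"            # the name IAM actually sees and limits to 64 chars

iam_role_name(physical_name)             # fine
try:
    iam_role_name(logical_id)            # would fail, which is why it must not run on the title
except ValueError as err:
    print(err)
```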
mne-tools__mne-bids-pipeline-139
[ { "content": "\"\"\"Set the configuration parameters for the study.\n\nYou need to define an environment variable `BIDS_ROOT` to point to the root\nof your BIDS dataset to be analyzed.\n\n\"\"\"\nimport importlib\nimport functools\nimport os\nfrom collections import defaultdict\nimport copy\nimport coloredlogs\nimport logging\n\nimport numpy as np\nimport mne\nfrom mne_bids.utils import get_entity_vals\n\n# Name, version, and hosting location of the pipeline\nPIPELINE_NAME = 'mne-study-template'\nVERSION = '0.1.dev0'\nCODE_URL = 'https://github.com/mne-tools/mne-study-template'\n\n\n# ``study_name`` : str\n# Specify the name of your study. It will be used to populate filenames for\n# saving the analysis results.\n#\n# Example\n# ~~~~~~~\n# >>> study_name = 'my-study'\n\nstudy_name = ''\n\n# ``bids_root`` : str or None\n# Speficy the BIDS root directory. Pass an empty string or ```None`` to use\n# the value specified in the ``BIDS_ROOT`` environment variable instead.\n# Raises an exception if the BIDS root has not been specified.\n#\n# Example\n# ~~~~~~~\n# >>> bids_root = '/path/to/your/bids_root' # Use this to specify a path here.\n# or\n# >>> bids_root = None # Make use of the ``BIDS_ROOT`` environment variable.\n\nbids_root = None\n\n# ``subjects_dir`` : str or None\n# Path to the directory that contains the MRI data files and their\n# derivativesfor all subjects. Specifically, the ``subjects_dir`` is the\n# $SUBJECTS_DIR used by the Freesurfer software. If ``None``, will use\n# ``'bids_root/derivatives/freesurfer/subjects'``.\n\nsubjects_dir = None\n\n# ``daysback`` : int\n# If not None apply a time shift to dates to adjust for limitateions\n# of fif files\n\ndaysback = None\n\n# ``interactive`` : boolean\n# If True, the scripts will provide some interactive elements, such as\n# figures. If running the scripts from a notebook or Spyder,\n# run %matplotlib qt in the command line to open the figures in a separate\n# window.\n\ninteractive = False\n\n# ``crop`` : tuple or None\n# If tuple, (tmin, tmax) to crop the raw data\n# If None (default), do not crop.\ncrop = None\n\n# BIDS params\n# see: bids-specification.rtfd.io/en/latest/99-appendices/04-entity-table.html\n\n# ``sessions`` : iterable or 'all'\n# The sessions to process.\nsessions = 'all'\n\n# ``task`` : str\n# The task to process.\ntask = ''\n\n# ``runs`` : iterable or 'all'\n# The runs to process.\nruns = 'all'\n\nacq = None\n\nproc = None\n\nrec = None\n\nspace = None\n\n# ``subjects_list`` : 'all' | list of str\n# Subjects to analyze. If ``'all``, include all subjects. To only\n# include a subset of subjects, pass a list of their identifiers. Even\n# if you plan on analyzing only a single subject, pass their identifier\n# as a list.\n#\n# Please note that if you intend to EXCLUDE only a few subjects, you\n# should consider setting ``subjects_list = 'all'`` and adding the\n# identifiers of the excluded subjects to ``exclude_subjects`` (see next\n# section).\n#\n# Example\n# ~~~~~~~\n# >>> subjects_list = 'all' # Include all subjects.\n# >>> subjects_list = ['05'] # Only include subject 05.\n# >>> subjects_list = ['01', '02'] # Only include subjects 01 and 02.\n\nsubjects_list = 'all'\n\n# ``exclude_subjects`` : list of str\n# Specify subjects to exclude from analysis. The MEG empty-room mock-subject\n# is automatically excluded from regular analysis.\n#\n# Good Practice / Advice\n# ~~~~~~~~~~~~~~~~~~~~~~\n# Keep track of the criteria leading you to exclude\n# a participant (e.g. 
too many movements, missing blocks, aborted experiment,\n# did not understand the instructions, etc, ...)\n# The ``emptyroom`` subject will be excluded automatically.\n\nexclude_subjects = []\n\n# ``ch_types`` : list of st\n# The list of channel types to consider.\n#\n# Example\n# ~~~~~~~\n# >>> ch_types = ['meg', 'eeg'] # to use MEG and EEG channels\n# or\n# >>> ch_types = ['meg'] # to use only MEG\n# or\n# >>> ch_types = ['grad'] # to use only gradiometer MEG channels\n\n# Note: If `kind` is 'eeg', EEG ch_types will be used regardless of whether\n# specified here or not\nch_types = []\n\n###############################################################################\n# DEFINE ADDITIONAL CHANNELS\n# --------------------------\n# needed for 01-import_and_maxfilter.py\n\n# ``rename_channels`` : dict rename channels\n# Here you name or replace extra channels that were recorded, for instance\n# EOG, ECG.\n#\n# Example\n# ~~~~~~~\n# Here rename EEG061 to EOG061, EEG062 to EOG062, EEG063 to ECG063:\n# >>> rename_channels = {'EEG061': 'EOG061', 'EEG062': 'EOG062',\n# 'EEG063': 'ECG063'}\n\n# XXX should be done automatically from BIDS ?\nrename_channels = None\n\n# ``set_channel_types``: dict\n# Here you define types of channels to pick later.\n#\n# Example\n# ~~~~~~~\n# >>> set_channel_types = {'EEG061': 'eog', 'EEG062': 'eog',\n# 'EEG063': 'ecg', 'EEG064': 'misc'}\n\n# XXX should not be necessary\nset_channel_types = None\n\n###############################################################################\n# MAXWELL FILTER PARAMETERS\n# -------------------------\n# done in 01-import_and_maxfilter.py\n#\n# Note: For any of this to work, you must set ``mf_ctc_fname`` and\n# ``mf_cal_fname`` above.\n#\n# \"Bad\", i.e. flat and overly noisy channels, can be automatically detected\n# using a procedure inspired by the commercial MaxFilter by Elekta. First,\n# a copy of the data is low-pass filtered at 40 Hz. Then, channels with\n# unusually low variability are flagged as \"flat\", while channels with\n# excessively high variability are flagged as \"noisy\". Flat and noisy channels\n# are marked as \"bad\" and excluded from subsequent analysis. See\n# :func:`mne.preprocssessing.find_bad_channels_maxwell` for more information\n# on this procedure. The list of bad channels detected through this procedure\n# will be merged with the list of bad channels already present in the dataset,\n# if any.\n#\n# ``find_flat_channels_meg`` : bool\n# Auto-detect \"flat\" channels and mark them as bad.\n#\n# ``find_noisy_channels_meg`` : bool\n# Auto-detect \"noisy\" channels and mark them as bad.\n\nfind_flat_channels_meg = False\nfind_noisy_channels_meg = False\n\n# ``use_maxwell_filter`` : bool\n# Use or not maxwell filter to preprocess the data.\n#\n# Warning\n# ~~~~~~~\n# If the data were recorded with internal active compensation (MaxShield),\n# they need to be run through Maxwell filter to avoid distortions.\n# Bad channels need to be set through BIDS channels.tsv and / or via the\n# ``find_flat_channels_meg`` and ``find_noisy_channels_meg`` options above\n# before applying Maxwell filter.\n\nuse_maxwell_filter = False\n\n# There are two kinds of maxfiltering: SSS and tSSS\n# [SSS = signal space separation ; tSSS = temporal signal space separation]\n# (Taulu et al, 2004): http://cds.cern.ch/record/709081/files/0401166.pdf\n#\n# ``mf_st_duration`` : float | None\n# If not None, apply spatiotemporal SSS (tSSS) with specified buffer\n# duration (in seconds). 
MaxFilter™'s default is 10.0 seconds in v2.2.\n# Spatiotemporal SSS acts as implicitly as a high-pass filter where the\n# cut-off frequency is 1/st_dur Hz. For this (and other) reasons, longer\n# buffers are generally better as long as your system can handle the\n# higher memory usage. To ensure that each window is processed\n# identically, choose a buffer length that divides evenly into your data.\n# Any data at the trailing edge that doesn't fit evenly into a whole\n# buffer window will be lumped into the previous buffer.\n#\n# Good Practice / Advice\n# ~~~~~~~~~~~~~~~~~~~~~~\n# If you are interested in low frequency activity (<0.1Hz), avoid using tSSS\n# and set mf_st_duration to None\n#\n# If you are interested in low frequency above 0.1 Hz, you can use the\n# default mf_st_duration to 10 s meaning it acts like a 0.1 Hz highpass filter.\n#\n# Example\n# ~~~~~~~\n# >>> mf_st_duration = None\n# or\n# >>> mf_st_duration = 10. # to apply tSSS with 0.1Hz highpass filter.\n\nmf_st_duration = None\n\n# ``mf_head_origin`` : array-like, shape (3,) | 'auto'\n# Origin of internal and external multipolar moment space in meters.\n# If 'auto', it will be estimated from headshape points.\n# If automatic fitting fails (e.g., due to having too few digitization\n# points), consider separately calling the fitting function with different\n# options or specifying the origin manually.\n#\n# Example\n# ~~~~~~~\n# >>> mf_head_origin = 'auto'\n\nmf_head_origin = 'auto'\n\n# ``cross talk`` : str\n# Path to the cross talk file\n#\n#\n# ``calibration`` : str\n# Path to the calibration file.\n#\n#\n# These 2 files should be downloaded and made available for running\n# maxwell filtering.\n#\n# Example\n# ~~~~~~~\n# >>> cal_files_path = os.path.join(study_path, 'SSS')\n# >>> mf_ctc_fname = os.path.join(cal_files_path, 'ct_sparse_mgh.fif')\n# >>> mf_cal_fname = os.path.join(cal_files_path, 'sss_cal_mgh.dat')\n#\n# Warning\n# ~~~~~~~\n# These 2 files are site and machine specific files that provide information\n# about the environmental noise. For practical purposes, place them in your\n# study folder.\n#\n# At NeuroSpin: ct_sparse and sss_call are on the meg_tmp server\n\n# cal_files_path = os.path.join(study_path, 'SSS')\n# mf_ctc_fname = os.path.join(cal_files_path, 'ct_sparse_mgh.fif')\n# mf_cal_fname = os.path.join(cal_files_path, 'sss_cal_mgh.dat')\n\nmf_ctc_fname = ''\nmf_cal_fname = ''\n\n# Despite all possible care to avoid movements in the MEG, the participant\n# will likely slowly drift down from the Dewar or slightly shift the head\n# around in the course of the recording session. Hence, to take this into\n# account, we are realigning all data to a single position. 
For this, you need\n# to define a reference run (typically the one in the middle of\n# the recording session).\n#\n# ``mf_reference_run`` : int\n# Which run to take as the reference for adjusting the head position of all\n# runs.\n#\n# Example\n# ~~~~~~~\n# >>> mf_reference_run = 0 # to use the first run\n\nmf_reference_run = 0\n\n###############################################################################\n# FREQUENCY FILTERING\n# -------------------\n# done in 02-frequency_filter.py\n\n# Good Practice / Advice\n# ~~~~~~~~~~~~~~~~~~~~~~\n# It is typically better to set your filtering properties on the raw data so\n# as to avoid what we call border (or edge) effects.\n#\n# If you use this pipeline for evoked responses, you could consider\n# a low-pass filter cut-off of h_freq = 40 Hz\n# and possibly a high-pass filter cut-off of l_freq = 1 Hz\n# so you would preserve only the power in the 1Hz to 40 Hz band.\n# Note that highpass filtering is not necessarily recommended as it can\n# distort waveforms of evoked components, or simply wash out any low\n# frequency that can may contain brain signal. It can also act as\n# a replacement for baseline correction in Epochs. See below.\n#\n# If you use this pipeline for time-frequency analysis, a default filtering\n# coult be a high-pass filter cut-off of l_freq = 1 Hz\n# a low-pass filter cut-off of h_freq = 120 Hz\n# so you would preserve only the power in the 1Hz to 120 Hz band.\n#\n# If you need more fancy analysis, you are already likely past this kind\n# of tips! :)\n\n\n# ``l_freq`` : float\n# The low-frequency cut-off in the highpass filtering step.\n# Keep it None if no highpass filtering should be applied.\n\nl_freq = 1.\n\n# ``h_freq`` : float\n# The high-frequency cut-off in the lowpass filtering step.\n# Keep it None if no lowpass filtering should be applied.\n\nh_freq = 40.\n\n###############################################################################\n# RESAMPLING\n# ----------\n#\n# Good Practice / Advice\n# ~~~~~~~~~~~~~~~~~~~~~~\n# If you have acquired data with a very high sampling frequency (e.g. 2 kHz)\n# you will likely want to downsample to lighten up the size of the files you\n# are working with (pragmatics)\n# If you are interested in typical analysis (up to 120 Hz) you can typically\n# resample your data down to 500 Hz without preventing reliable time-frequency\n# exploration of your data\n#\n# ``resample_sfreq`` : float\n# Specifies at which sampling frequency the data should be resampled.\n# If None then no resampling will be done.\n#\n# Example\n# ~~~~~~~\n# >>> resample_sfreq = None # no resampling\n# or\n# >>> resample_sfreq = 500 # resample to 500Hz\n\nresample_sfreq = None\n\n# ``decim`` : int\n# Says how much to decimate data at the epochs level.\n# It is typically an alternative to the `resample_sfreq` parameter that\n# can be used for resampling raw data. 
1 means no decimation.\n#\n# Good Practice / Advice\n# ~~~~~~~~~~~~~~~~~~~~~~\n# Decimation requires to lowpass filtered the data to avoid aliasing.\n# Note that using decimation is much faster than resampling.\n#\n# Example\n# ~~~~~~~\n# >>> decim = 1 # no decimation\n# or\n# >>> decim = 4 # decimate by 4 ie devide sampling frequency by 4\n\ndecim = 1\n\n###############################################################################\n# AUTOMATIC REJECTION OF ARTIFACTS\n# --------------------------------\n#\n# Good Practice / Advice\n# ~~~~~~~~~~~~~~~~~~~~~~\n# Have a look at your raw data and train yourself to detect a blink, a heart\n# beat and an eye movement.\n# You can do a quick average of blink data and check what the amplitude looks\n# like.\n#\n# ``reject`` : dict | None\n# The rejection limits to make some epochs as bads.\n# This allows to remove strong transient artifacts.\n# If you want to reject and retrieve blinks later, e.g. with ICA,\n# don't specify a value for the eog channel (see examples below).\n# Make sure to include values for eeg if you have EEG data\n#\n# Note\n# ~~~~\n# These numbers tend to vary between subjects.. You might want to consider\n# using the autoreject method by Jas et al. 2018.\n# See https://autoreject.github.io\n#\n# Example\n# ~~~~~~~\n# >>> reject = {'grad': 4000e-13, 'mag': 4e-12, 'eog': 150e-6}\n# >>> reject = {'grad': 4000e-13, 'mag': 4e-12, 'eeg': 200e-6}\n# >>> reject = None\n\nreject = {'grad': 4000e-13, 'mag': 4e-12, 'eeg': 150e-6}\n\n\n###############################################################################\n# RENAME EXPERIMENTAL EVENTS\n# --------------------------\n#\n# ``rename_events`` : dict\n# A dictionary specifying which events in the BIDS dataset to rename upon\n# loading, and before processing begins.\n#\n# Pass an empty dictionary to not perform any renaming.\n#\n# Example\n# ~~~~~~~\n# Rename ``audio_left`` in the BIDS dataset to ``audio/left`` in the pipeline:\n# >>> rename_events = {'audio_left': 'audio/left'}\n\nrename_events = dict()\n\n\n###############################################################################\n# EPOCHING\n# --------\n#\n# ``tmin``: float\n# A float in seconds that gives the start time before event of an epoch.\n#\n# Example\n# ~~~~~~~\n# >>> tmin = -0.2 # take 200ms before event onset.\n\ntmin = -0.2\n\n# ``tmax``: float\n# A float in seconds that gives the end time before event of an epoch.\n#\n# Example\n# ~~~~~~~\n# >>> tmax = 0.5 # take 500ms after event onset.\n\ntmax = 0.5\n\n# ``trigger_time_shift`` : float | None\n# If float it specifies the offset for the trigger and the stimulus\n# (in seconds). You need to measure this value for your specific\n# experiment/setup.\n#\n# Example\n# ~~~~~~~\n# >>> trigger_time_shift = 0 # don't apply any offset\n\ntrigger_time_shift = 0.\n\n# ``baseline`` : tuple\n# It specifies how to baseline the epochs; if None, no baseline is applied.\n#\n# Example\n# ~~~~~~~\n# >>> baseline = (None, 0) # baseline between tmin and 0\n\nbaseline = (None, 0)\n\n# `conditions`` : list\n# The condition names to consider. This can either be the keys of\n# ``event_id``, or – if event names were specified with ``/`` for\n# grouping – the name of the *grouped* condition (i.e., the\n# condition name before or after that ``/`` that is shared between the\n# respective conditions you wish to group). 
See the \"Subselecting epochs\"\n# tutorial for more information: https://mne.tools/stable/auto_tutorials/epochs/plot_10_epochs_overview.html#subselecting-epochs # noqa: 501\n#\n# Example\n# ~~~~~~~\n# >>> conditions = ['auditory/left', 'visual/left']\n# or\n# >>> conditions = ['auditory/left', 'auditory/right']\n# or\n# >>> conditions = ['auditory']\n# or\n# >>> conditions = ['auditory', 'visual']\n# or\n# >>> conditions = ['left', 'right']\n\nconditions = ['left', 'right']\n\n###############################################################################\n# ARTIFACT REMOVAL\n# ----------------\n#\n# You can choose between ICA and SSP to remove eye and heart artifacts.\n# SSP: https://mne-tools.github.io/stable/auto_tutorials/plot_artifacts_correction_ssp.html?highlight=ssp # noqa\n# ICA: https://mne-tools.github.io/stable/auto_tutorials/plot_artifacts_correction_ica.html?highlight=ica # noqa\n# if you choose ICA, run scripts 5a and 6a\n# if you choose SSP, run scripts 5b and 6b\n#\n# Currently you cannot use both.\n\n# SSP\n# ~~~\n#\n# ``use_ssp`` : bool\n# If True ICA should be used or not.\n\nuse_ssp = True\n\n# ICA\n# ~~~\n# ``use_ica`` : bool\n# If True ICA should be used or not.\n\nuse_ica = False\n\n# ``ica_algorithm`` : 'picard' | 'fastica' | 'extended_infomax'\n# The ICA algorithm to use.\n\nica_algorithm = 'picard'\n\n# ``ica_max_iterations`` : int\n# Maximum number of iterations to decompose the data into independent\n# components. A low number means to finish earlier, but the consequence is\n# that the algorithm may not have finished converging. To ensure\n# convergence, pick a high number here (e.g. 3000); yet the algorithm will\n# terminate as soon as it determines that is has successfully converged, and\n# not necessarily exhaust the maximum number of iterations. Note that the\n# default of 200 seems to be sufficient for Picard in many datasets, because\n# it converges quicker than the other algorithms; but e.g. for FastICA, this\n# limit may be too low to achieve convergence.\n\nica_max_iterations = 200\n\n# ``ica_decim`` : None | None\n# The decimation parameter to compute ICA. If 5 it means\n# that 1 every 5 sample is used by ICA solver. The higher the faster\n# it is to run but the less data you have to compute a good ICA. Set to\n# ``1`` ``None`` to not perform an decimation.\n\nica_decim = None\n\n\n# ``default_reject_comps_factory`` : callable\n# A factory function that returns a default rejection component dictionary:\n# A dictionary that specifies the indices of the ICA components to reject\n# for each subject. 
For example you can use:\n# rejcomps_man['subject01'] = dict(eeg=[12], meg=[7])\n\ndef default_reject_comps_factory():\n \"\"\"Return the default rejection component dictionary.\"\"\"\n return dict(meg=[], eeg=[])\n\n\nrejcomps_man = defaultdict(default_reject_comps_factory)\n\n# ``ica_ctps_ecg_threshold``: float\n# The threshold parameter passed to `find_bads_ecg` method.\n\nica_ctps_ecg_threshold = 0.1\n\n###############################################################################\n# DECODING\n# --------\n#\n# ``decoding_conditions`` : list\n# List of conditions to be classified.\n#\n# Example\n# ~~~~~~~\n# >>> decoding_conditions = [] # don't do decoding\n# or\n# >>> decoding_conditions = [('auditory', 'visual'), ('left', 'right')]\n\ndecoding_conditions = []\n# decoding_conditions = [('left', 'right')]\n\n###############################################################################\n# GROUP AVERAGE SENSORS\n# ---------------------\n#\n# ``interpolate_bads_grand_average`` : bool\n# Interpolate bad sensors in each dataset before calculating the grand\n# average. This parameter is passed to the `mne.grand_average` function via\n# the keyword argument `interpolate_bads`. It requires to have channel\n# locations set.\n#\n# Example\n# ~~~~~~~\n# >>> interpolate_bads_grand_average = True\n\ninterpolate_bads_grand_average = True\n\n# ``decoding_metric`` : str\n# The metric to use for cross-validation. It can be 'roc_auc' or 'accuracy'\n# or any metric supported by scikit-learn.\n\ndecoding_metric = 'roc_auc'\n\n# ``decoding_n_splits`` : int\n# The number of folds (a.k.a. splits) to use in the cross-validation.\n\ndecoding_n_splits = 5\n\n###############################################################################\n# TIME-FREQUENCY\n# --------------\n#\n# ``time_frequency_conditions`` : list\n# The conditions to compute time-frequency decomposition on.\n\n# time_frequency_conditions = ['left', 'right']\ntime_frequency_conditions = []\n\n###############################################################################\n# SOURCE SPACE PARAMETERS\n# -----------------------\n#\n\n# ``spacing`` : str\n# The spacing to use. Can be ``'ico#'`` for a recursively subdivided\n# icosahedron, ``'oct#'`` for a recursively subdivided octahedron,\n# ``'all'`` for all points, or an integer to use appoximate\n# distance-based spacing (in mm).\n\nspacing = 'oct6'\n\n# ``mindist`` : float\n# Exclude points closer than this distance (mm) to the bounding surface.\n\nmindist = 5\n\n# ``loose`` : float in [0, 1] | 'auto'\n# Value that weights the source variances of the dipole components\n# that are parallel (tangential) to the cortical surface. If loose\n# is 0 then the solution is computed with fixed orientation,\n# and fixed must be True or \"auto\".\n# If loose is 1, it corresponds to free orientations.\n# The default value ('auto') is set to 0.2 for surface-oriented source\n# space and set to 1.0 for volumetric, discrete, or mixed source spaces,\n# unless ``fixed is True`` in which case the value 0. is used.\n\nloose = 0.2\n\n# ``depth`` : None | float | dict\n# If float (default 0.8), it acts as the depth weighting exponent (``exp``)\n# to use (must be between 0 and 1). None is equivalent to 0, meaning no\n# depth weighting is performed. 
Can also be a `dict` containing additional\n# keyword arguments to pass to :func:`mne.forward.compute_depth_prior`\n# (see docstring for details and defaults).\n\ndepth = 0.8\n\n# inverse_method : \"MNE\" | \"dSPM\" | \"sLORETA\" | \"eLORETA\"\n# Use minimum norm, dSPM (default), sLORETA, or eLORETA.\n\ninverse_method = 'dSPM'\n\n# noise_cov : (None, 0) | ‘emptyroom’\n# Specify how to estimate the noise covariance matrix, which is used in\n# inverse modeling.\n#\n# If a tuple, it takes the form ``(tmin, tmax)`` with the time specified in\n# seconds. If the first value of the tuple is ``None``, the considered\n# period starts at the beginning of the epoch. If the second value of the\n# tuple is ``None``, the considered period ends at the end of the epoch.\n# The default, ``(None, 0)``, includes the entire period before the event,\n# which is typically the pre-stimulus period.\n#\n# If ``emptyroom``, the noise covariance matrix will be estimated from an\n# empty-room MEG recording. The empty-room recording will be automatically\n# selected based on recording date and time.\n#\n# Please note that when processing data that contains EEG channels, the noise\n# covariance can ONLY be estimated from the pre-stimulus period.\n#\n# Example\n# ~~~~~~~\n# Use the period from start of the epoch until 100 ms before the experimental\n# event:\n# >>> noise_cov = (None, -0.1)\n#\n# Use the time period from the experimental event until the end of the epoch:\n# >>> noise_cov = (0, None)\n#\n# Use an empty-room recording:\n# >>> noise_cov = 'emptyroom'\n\nnoise_cov = (None, 0)\n\n# smooth : int | None\n# Number of iterations for the smoothing of the surface data.\n# If None, smooth is automatically defined to fill the surface\n# with non-zero values. The default is spacing=None.\n\nsmooth = 10\n\nfsaverage_vertices = [np.arange(10242), np.arange(10242)]\n\n###############################################################################\n# ADVANCED\n# --------\n#\n# ``l_trans_bandwidth`` : float | 'auto'\n# A float that specifies the transition bandwidth of the\n# highpass filter. By default it's `'auto'` and uses default mne\n# parameters.\n\nl_trans_bandwidth = 'auto'\n\n# ``h_trans_bandwidth`` : float | 'auto'\n# A float that specifies the transition bandwidth of the\n# lowpass filter. By default it's `'auto'` and uses default mne\n# parameters.\n\nh_trans_bandwidth = 'auto'\n\n# ``N_JOBS`` : int\n# An integer that specifies how many subjects you want to run in parallel.\n\nN_JOBS = 1\n\n# ``random_state`` : None | int | np.random.RandomState\n# To specify the random generator state. This allows to have\n# the results more reproducible between machines and systems.\n# Some methods like ICA need random values for initialisation.\n\nrandom_state = 42\n\n# ``shortest_event`` : int\n# Minimum number of samples an event must last. 
If the\n# duration is less than this an exception will be raised.\n\nshortest_event = 1\n\n# ``allow_maxshield`` : bool\n# To import data that was recorded with Maxshield on before running\n# maxfilter set this to True.\n\nallow_maxshield = False\n\nlog_level = 'info'\nmne_log_level = 'error'\n\n# ``on_abort`` : 'continue' | 'abort'\n# Whether to abort processing as soon as an error occurs, or whether to\n# continue with all other processing steps for as long as possible.\n\non_error = 'abort'\n\n\n###############################################################################\n# #\n# CUSTOM CONFIGURATION ENDS HERE #\n# #\n###############################################################################\n\n\n###############################################################################\n# Logger\n# ------\n\nlogger = logging.getLogger('mne-study-template')\n\nlog_fmt = '%(asctime)s %(message)s'\nlog_date_fmt = coloredlogs.DEFAULT_DATE_FORMAT = '%H:%M:%S'\ncoloredlogs.install(level=log_level, logger=logger, fmt=log_fmt,\n date_fmt=log_date_fmt)\n\nmne.set_log_level(verbose=mne_log_level.upper())\n\n###############################################################################\n# Retrieve custom configuration options\n# -------------------------------------\n#\n# For testing a specific dataset, create a Python file with a name of your\n# liking (e.g., ``mydataset-template-config.py``), and set an environment\n# variable ``MNE_BIDS_STUDY_CONFIG`` to that file.\n#\n# Example\n# ~~~~~~~\n# ``export MNE_BIDS_STUDY_CONFIG=/data/mystudy/mydataset-template-config.py``\n\nif \"MNE_BIDS_STUDY_CONFIG\" in os.environ:\n cfg_path = os.environ['MNE_BIDS_STUDY_CONFIG']\n\n if os.path.exists(cfg_path):\n msg = f'Using custom configuration: {cfg_path}'\n logger.info(msg)\n else:\n msg = ('The custom configuration file specified in the '\n 'MNE_BIDS_STUDY_CONFIG environment variable could not be '\n 'found: {cfg_path}'.format(cfg_path=cfg_path))\n raise ValueError(msg)\n\n # Import configuration from an arbitrary path without having to fiddle\n # with `sys.path`.\n spec = importlib.util.spec_from_file_location(name='custom_config',\n location=cfg_path)\n custom_cfg = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(custom_cfg)\n del spec, cfg_path\n\n new = None\n for val in dir(custom_cfg):\n if not val.startswith('__'):\n exec(\"new = custom_cfg.%s\" % val)\n logger.debug('Overwriting: %s -> %s' % (val, new))\n exec(\"%s = custom_cfg.%s\" % (val, val))\n\n\n# BIDS_ROOT environment variable takes precedence over any configuration file\n# values.\nif os.getenv('BIDS_ROOT') is not None:\n bids_root = os.getenv('BIDS_ROOT')\n\n# If we don't have a bids_root until now, raise an exeception as we cannot\n# proceed.\nif not bids_root:\n msg = ('You need to specify `bids_root` in your configuration, or '\n 'define an environment variable `BIDS_ROOT` pointing to the '\n 'root folder of your BIDS dataset')\n raise ValueError(msg)\n\n\n###############################################################################\n# Derivates root\n# --------------\nderiv_root = os.path.join(bids_root, 'derivatives', PIPELINE_NAME)\n\n\n###############################################################################\n# CHECKS\n# ------\n\nif (use_maxwell_filter and\n len(set(ch_types).intersection(('meg', 'grad', 'mag'))) == 0):\n raise ValueError('Cannot use maxwell filter without MEG channels.')\n\nif use_ssp and use_ica:\n raise ValueError('Cannot use both SSP and ICA.')\n\nif use_ica and ica_algorithm not in 
('picard', 'fastica', 'extended_infomax'):\n msg = (f\"Invalid ICA algorithm requested. Valid values for ica_algorithm \"\n f\"are: 'picard', 'fastica', and 'extended_infomax', but received \"\n f\"{ica_algorithm}.\")\n raise ValueError(msg)\n\nif not ch_types:\n msg = 'Please specify ch_types in your configuration.'\n raise ValueError(msg)\n\nif ch_types == ['eeg']:\n pass\nelif 'eeg' in ch_types and len(ch_types) > 1: # EEG + some other channel types\n msg = ('EEG data can only be analyzed separately from other channel '\n 'types. Please adjust `ch_types` in your configuration.')\n raise ValueError(msg)\nelif any([ch_type not in ('meg', 'mag', 'grad') for ch_type in ch_types]):\n msg = ('Invalid channel type passed. Please adjust `ch_types` in your '\n 'configuration.')\n raise ValueError(msg)\n\nif 'eeg' in ch_types:\n if use_ssp:\n msg = ('You requested SSP for EEG data via use_ssp=True. However, '\n 'this is not presently supported. Please use ICA instead by '\n 'setting use_ssp=False and use_ica=True.')\n raise ValueError(msg)\n if not use_ica:\n msg = ('You did not request ICA artifact correction for your data. '\n 'To turn it on, set use_ica=True.')\n logger.info(msg)\n\nif on_error not in ('continue', 'abort'):\n msg = (f\"on_error must be one of 'continue' or 'abort', but received \"\n f\"{on_error}.\")\n logger.info(msg)\n\nif isinstance(noise_cov, str) and noise_cov != 'emptyroom':\n msg = (f\"noise_cov must be a tuple or 'emptyroom', but received \"\n f\"{noise_cov}\")\n raise ValueError(msg)\n\nif noise_cov == 'emptyroom' and 'eeg' in ch_types:\n msg = ('You requested to process data that contains EEG channels. In this '\n 'case, noise covariance can only be estimated from the '\n 'experimental data, e.g., the pre-stimulus period. Please set '\n 'noise_cov to (tmin, tmax)')\n raise ValueError(msg)\n\n\n###############################################################################\n# Helper functions\n# ----------------\n\ndef get_sessions():\n sessions_ = copy.deepcopy(sessions) # Avoid clash with global variable.\n\n if sessions_ == 'all':\n sessions_ = get_entity_vals(bids_root, entity_key='ses')\n\n if not sessions_:\n return [None]\n else:\n return sessions_\n\n\ndef get_runs():\n runs_ = copy.deepcopy(runs) # Avoid clash with global variable.\n\n if runs_ == 'all':\n runs_ = get_entity_vals(bids_root, entity_key='run')\n\n if not runs_:\n return [None]\n else:\n return runs_\n\n\ndef get_subjects():\n if subjects_list == 'all':\n s = get_entity_vals(bids_root, entity_key='sub')\n else:\n s = subjects_list\n\n subjects = set(s) - set(exclude_subjects)\n # Drop empty-room subject.\n subjects = subjects - set(['emptyroom'])\n\n return list(subjects)\n\n\ndef get_task():\n if not task:\n tasks = get_entity_vals(bids_root, entity_key='task')\n if not tasks:\n return None\n else:\n return tasks[0]\n else:\n return task\n\n\ndef get_kind():\n # Content of ch_types should be sanitized already, so we don't need any\n # extra sanity checks here.\n if ch_types == ['eeg']:\n return 'eeg'\n else:\n return 'meg'\n\n\ndef get_reject():\n reject_ = reject.copy() # Avoid clash with global variable.\n kind = get_kind()\n\n if kind == 'eeg':\n ch_types_to_remove = ('mag', 'grad')\n else:\n ch_types_to_remove = ('eeg',)\n\n for ch_type in ch_types_to_remove:\n try:\n del reject_[ch_type]\n except KeyError:\n pass\n return reject_\n\n\ndef get_fs_subjects_dir():\n if not subjects_dir:\n return os.path.join(bids_root, 'derivatives', 'freesurfer', 'subjects')\n else:\n return 
subjects_dir\n\n\ndef get_subject_path(subject, session, kind):\n subject_path = f'sub-{subject}'\n if session is not None:\n subject_path = os.path.join(subject_path, f'ses-{session}')\n subject_path = os.path.join(subject_path, kind)\n return subject_path\n\n\ndef get_subject_deriv_path(subject, session, kind):\n subject_path = get_subject_path(subject=subject, session=session,\n kind=kind)\n deriv_path = os.path.join(deriv_root, subject_path)\n return deriv_path\n\n\ndef gen_log_message(message, step=None, subject=None, session=None, run=None):\n if subject is not None:\n subject = f'sub-{subject}'\n if session is not None:\n session = f'ses-{session}'\n if run is not None:\n run = f'run-{run}'\n\n prefix = ', '.join([item for item in [subject, session, run]\n if item is not None])\n if prefix:\n prefix = f'[{prefix}]'\n\n if step is not None:\n prefix = f'[Step-{step:02}]{prefix}'\n\n return prefix + ' ' + message\n\n\ndef failsafe_run(on_error):\n def failsafe_run_decorator(func):\n @functools.wraps(func) # Preserve \"identity\" of original function\n def wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except Exception as e:\n message = 'A critical error occurred.'\n message = gen_log_message(message=message)\n\n if on_error == 'abort':\n logger.critical(message)\n raise(e)\n else:\n message = f'{message} The error message was:\\n{str(e)}'\n logger.critical(message)\n return wrapper\n return failsafe_run_decorator\n", "path": "config.py" } ]
[ { "content": "\"\"\"Set the configuration parameters for the study.\n\nYou need to define an environment variable `BIDS_ROOT` to point to the root\nof your BIDS dataset to be analyzed.\n\n\"\"\"\nimport importlib\nimport functools\nimport os\nfrom collections import defaultdict\nimport copy\nimport coloredlogs\nimport logging\n\nimport numpy as np\nimport mne\nfrom mne_bids.utils import get_entity_vals\n\n# Name, version, and hosting location of the pipeline\nPIPELINE_NAME = 'mne-study-template'\nVERSION = '0.1.dev0'\nCODE_URL = 'https://github.com/mne-tools/mne-study-template'\n\n\n# ``study_name`` : str\n# Specify the name of your study. It will be used to populate filenames for\n# saving the analysis results.\n#\n# Example\n# ~~~~~~~\n# >>> study_name = 'my-study'\n\nstudy_name = ''\n\n# ``bids_root`` : str or None\n# Speficy the BIDS root directory. Pass an empty string or ```None`` to use\n# the value specified in the ``BIDS_ROOT`` environment variable instead.\n# Raises an exception if the BIDS root has not been specified.\n#\n# Example\n# ~~~~~~~\n# >>> bids_root = '/path/to/your/bids_root' # Use this to specify a path here.\n# or\n# >>> bids_root = None # Make use of the ``BIDS_ROOT`` environment variable.\n\nbids_root = None\n\n# ``subjects_dir`` : str or None\n# Path to the directory that contains the MRI data files and their\n# derivativesfor all subjects. Specifically, the ``subjects_dir`` is the\n# $SUBJECTS_DIR used by the Freesurfer software. If ``None``, will use\n# ``'bids_root/derivatives/freesurfer/subjects'``.\n\nsubjects_dir = None\n\n# ``daysback`` : int\n# If not None apply a time shift to dates to adjust for limitateions\n# of fif files\n\ndaysback = None\n\n# ``interactive`` : boolean\n# If True, the scripts will provide some interactive elements, such as\n# figures. If running the scripts from a notebook or Spyder,\n# run %matplotlib qt in the command line to open the figures in a separate\n# window.\n\ninteractive = False\n\n# ``crop`` : tuple or None\n# If tuple, (tmin, tmax) to crop the raw data\n# If None (default), do not crop.\ncrop = None\n\n# BIDS params\n# see: bids-specification.rtfd.io/en/latest/99-appendices/04-entity-table.html\n\n# ``sessions`` : iterable or 'all'\n# The sessions to process.\nsessions = 'all'\n\n# ``task`` : str\n# The task to process.\ntask = ''\n\n# ``runs`` : iterable or 'all'\n# The runs to process.\nruns = 'all'\n\nacq = None\n\nproc = None\n\nrec = None\n\nspace = None\n\n# ``subjects_list`` : 'all' | list of str\n# Subjects to analyze. If ``'all``, include all subjects. To only\n# include a subset of subjects, pass a list of their identifiers. Even\n# if you plan on analyzing only a single subject, pass their identifier\n# as a list.\n#\n# Please note that if you intend to EXCLUDE only a few subjects, you\n# should consider setting ``subjects_list = 'all'`` and adding the\n# identifiers of the excluded subjects to ``exclude_subjects`` (see next\n# section).\n#\n# Example\n# ~~~~~~~\n# >>> subjects_list = 'all' # Include all subjects.\n# >>> subjects_list = ['05'] # Only include subject 05.\n# >>> subjects_list = ['01', '02'] # Only include subjects 01 and 02.\n\nsubjects_list = 'all'\n\n# ``exclude_subjects`` : list of str\n# Specify subjects to exclude from analysis. The MEG empty-room mock-subject\n# is automatically excluded from regular analysis.\n#\n# Good Practice / Advice\n# ~~~~~~~~~~~~~~~~~~~~~~\n# Keep track of the criteria leading you to exclude\n# a participant (e.g. 
too many movements, missing blocks, aborted experiment,\n# did not understand the instructions, etc, ...)\n# The ``emptyroom`` subject will be excluded automatically.\n\nexclude_subjects = []\n\n# ``ch_types`` : list of st\n# The list of channel types to consider.\n#\n# Example\n# ~~~~~~~\n# >>> ch_types = ['meg', 'eeg'] # to use MEG and EEG channels\n# or\n# >>> ch_types = ['meg'] # to use only MEG\n# or\n# >>> ch_types = ['grad'] # to use only gradiometer MEG channels\n\n# Note: If `kind` is 'eeg', EEG ch_types will be used regardless of whether\n# specified here or not\nch_types = []\n\n###############################################################################\n# DEFINE ADDITIONAL CHANNELS\n# --------------------------\n# needed for 01-import_and_maxfilter.py\n\n# ``rename_channels`` : dict rename channels\n# Here you name or replace extra channels that were recorded, for instance\n# EOG, ECG.\n#\n# Example\n# ~~~~~~~\n# Here rename EEG061 to EOG061, EEG062 to EOG062, EEG063 to ECG063:\n# >>> rename_channels = {'EEG061': 'EOG061', 'EEG062': 'EOG062',\n# 'EEG063': 'ECG063'}\n\n# XXX should be done automatically from BIDS ?\nrename_channels = None\n\n# ``set_channel_types``: dict\n# Here you define types of channels to pick later.\n#\n# Example\n# ~~~~~~~\n# >>> set_channel_types = {'EEG061': 'eog', 'EEG062': 'eog',\n# 'EEG063': 'ecg', 'EEG064': 'misc'}\n\n# XXX should not be necessary\nset_channel_types = None\n\n###############################################################################\n# MAXWELL FILTER PARAMETERS\n# -------------------------\n# done in 01-import_and_maxfilter.py\n#\n# Note: For any of this to work, you must set ``mf_ctc_fname`` and\n# ``mf_cal_fname`` above.\n#\n# \"Bad\", i.e. flat and overly noisy channels, can be automatically detected\n# using a procedure inspired by the commercial MaxFilter by Elekta. First,\n# a copy of the data is low-pass filtered at 40 Hz. Then, channels with\n# unusually low variability are flagged as \"flat\", while channels with\n# excessively high variability are flagged as \"noisy\". Flat and noisy channels\n# are marked as \"bad\" and excluded from subsequent analysis. See\n# :func:`mne.preprocssessing.find_bad_channels_maxwell` for more information\n# on this procedure. The list of bad channels detected through this procedure\n# will be merged with the list of bad channels already present in the dataset,\n# if any.\n#\n# ``find_flat_channels_meg`` : bool\n# Auto-detect \"flat\" channels and mark them as bad.\n#\n# ``find_noisy_channels_meg`` : bool\n# Auto-detect \"noisy\" channels and mark them as bad.\n\nfind_flat_channels_meg = False\nfind_noisy_channels_meg = False\n\n# ``use_maxwell_filter`` : bool\n# Use or not maxwell filter to preprocess the data.\n#\n# Warning\n# ~~~~~~~\n# If the data were recorded with internal active compensation (MaxShield),\n# they need to be run through Maxwell filter to avoid distortions.\n# Bad channels need to be set through BIDS channels.tsv and / or via the\n# ``find_flat_channels_meg`` and ``find_noisy_channels_meg`` options above\n# before applying Maxwell filter.\n\nuse_maxwell_filter = False\n\n# There are two kinds of maxfiltering: SSS and tSSS\n# [SSS = signal space separation ; tSSS = temporal signal space separation]\n# (Taulu et al, 2004): http://cds.cern.ch/record/709081/files/0401166.pdf\n#\n# ``mf_st_duration`` : float | None\n# If not None, apply spatiotemporal SSS (tSSS) with specified buffer\n# duration (in seconds). 
MaxFilter™'s default is 10.0 seconds in v2.2.\n# Spatiotemporal SSS acts as implicitly as a high-pass filter where the\n# cut-off frequency is 1/st_dur Hz. For this (and other) reasons, longer\n# buffers are generally better as long as your system can handle the\n# higher memory usage. To ensure that each window is processed\n# identically, choose a buffer length that divides evenly into your data.\n# Any data at the trailing edge that doesn't fit evenly into a whole\n# buffer window will be lumped into the previous buffer.\n#\n# Good Practice / Advice\n# ~~~~~~~~~~~~~~~~~~~~~~\n# If you are interested in low frequency activity (<0.1Hz), avoid using tSSS\n# and set mf_st_duration to None\n#\n# If you are interested in low frequency above 0.1 Hz, you can use the\n# default mf_st_duration to 10 s meaning it acts like a 0.1 Hz highpass filter.\n#\n# Example\n# ~~~~~~~\n# >>> mf_st_duration = None\n# or\n# >>> mf_st_duration = 10. # to apply tSSS with 0.1Hz highpass filter.\n\nmf_st_duration = None\n\n# ``mf_head_origin`` : array-like, shape (3,) | 'auto'\n# Origin of internal and external multipolar moment space in meters.\n# If 'auto', it will be estimated from headshape points.\n# If automatic fitting fails (e.g., due to having too few digitization\n# points), consider separately calling the fitting function with different\n# options or specifying the origin manually.\n#\n# Example\n# ~~~~~~~\n# >>> mf_head_origin = 'auto'\n\nmf_head_origin = 'auto'\n\n# ``cross talk`` : str\n# Path to the cross talk file\n#\n#\n# ``calibration`` : str\n# Path to the calibration file.\n#\n#\n# These 2 files should be downloaded and made available for running\n# maxwell filtering.\n#\n# Example\n# ~~~~~~~\n# >>> cal_files_path = os.path.join(study_path, 'SSS')\n# >>> mf_ctc_fname = os.path.join(cal_files_path, 'ct_sparse_mgh.fif')\n# >>> mf_cal_fname = os.path.join(cal_files_path, 'sss_cal_mgh.dat')\n#\n# Warning\n# ~~~~~~~\n# These 2 files are site and machine specific files that provide information\n# about the environmental noise. For practical purposes, place them in your\n# study folder.\n#\n# At NeuroSpin: ct_sparse and sss_call are on the meg_tmp server\n\n# cal_files_path = os.path.join(study_path, 'SSS')\n# mf_ctc_fname = os.path.join(cal_files_path, 'ct_sparse_mgh.fif')\n# mf_cal_fname = os.path.join(cal_files_path, 'sss_cal_mgh.dat')\n\nmf_ctc_fname = ''\nmf_cal_fname = ''\n\n# Despite all possible care to avoid movements in the MEG, the participant\n# will likely slowly drift down from the Dewar or slightly shift the head\n# around in the course of the recording session. Hence, to take this into\n# account, we are realigning all data to a single position. 
For this, you need\n# to define a reference run (typically the one in the middle of\n# the recording session).\n#\n# ``mf_reference_run`` : int\n# Which run to take as the reference for adjusting the head position of all\n# runs.\n#\n# Example\n# ~~~~~~~\n# >>> mf_reference_run = 0 # to use the first run\n\nmf_reference_run = 0\n\n###############################################################################\n# FREQUENCY FILTERING\n# -------------------\n# done in 02-frequency_filter.py\n\n# Good Practice / Advice\n# ~~~~~~~~~~~~~~~~~~~~~~\n# It is typically better to set your filtering properties on the raw data so\n# as to avoid what we call border (or edge) effects.\n#\n# If you use this pipeline for evoked responses, you could consider\n# a low-pass filter cut-off of h_freq = 40 Hz\n# and possibly a high-pass filter cut-off of l_freq = 1 Hz\n# so you would preserve only the power in the 1Hz to 40 Hz band.\n# Note that highpass filtering is not necessarily recommended as it can\n# distort waveforms of evoked components, or simply wash out any low\n# frequency that can may contain brain signal. It can also act as\n# a replacement for baseline correction in Epochs. See below.\n#\n# If you use this pipeline for time-frequency analysis, a default filtering\n# coult be a high-pass filter cut-off of l_freq = 1 Hz\n# a low-pass filter cut-off of h_freq = 120 Hz\n# so you would preserve only the power in the 1Hz to 120 Hz band.\n#\n# If you need more fancy analysis, you are already likely past this kind\n# of tips! :)\n\n\n# ``l_freq`` : float\n# The low-frequency cut-off in the highpass filtering step.\n# Keep it None if no highpass filtering should be applied.\n\nl_freq = 1.\n\n# ``h_freq`` : float\n# The high-frequency cut-off in the lowpass filtering step.\n# Keep it None if no lowpass filtering should be applied.\n\nh_freq = 40.\n\n###############################################################################\n# RESAMPLING\n# ----------\n#\n# Good Practice / Advice\n# ~~~~~~~~~~~~~~~~~~~~~~\n# If you have acquired data with a very high sampling frequency (e.g. 2 kHz)\n# you will likely want to downsample to lighten up the size of the files you\n# are working with (pragmatics)\n# If you are interested in typical analysis (up to 120 Hz) you can typically\n# resample your data down to 500 Hz without preventing reliable time-frequency\n# exploration of your data\n#\n# ``resample_sfreq`` : float\n# Specifies at which sampling frequency the data should be resampled.\n# If None then no resampling will be done.\n#\n# Example\n# ~~~~~~~\n# >>> resample_sfreq = None # no resampling\n# or\n# >>> resample_sfreq = 500 # resample to 500Hz\n\nresample_sfreq = None\n\n# ``decim`` : int\n# Says how much to decimate data at the epochs level.\n# It is typically an alternative to the `resample_sfreq` parameter that\n# can be used for resampling raw data. 
1 means no decimation.\n#\n# Good Practice / Advice\n# ~~~~~~~~~~~~~~~~~~~~~~\n# Decimation requires to lowpass filtered the data to avoid aliasing.\n# Note that using decimation is much faster than resampling.\n#\n# Example\n# ~~~~~~~\n# >>> decim = 1 # no decimation\n# or\n# >>> decim = 4 # decimate by 4 ie devide sampling frequency by 4\n\ndecim = 1\n\n###############################################################################\n# AUTOMATIC REJECTION OF ARTIFACTS\n# --------------------------------\n#\n# Good Practice / Advice\n# ~~~~~~~~~~~~~~~~~~~~~~\n# Have a look at your raw data and train yourself to detect a blink, a heart\n# beat and an eye movement.\n# You can do a quick average of blink data and check what the amplitude looks\n# like.\n#\n# ``reject`` : dict | None\n# The rejection limits to make some epochs as bads.\n# This allows to remove strong transient artifacts.\n# If you want to reject and retrieve blinks later, e.g. with ICA,\n# don't specify a value for the eog channel (see examples below).\n# Make sure to include values for eeg if you have EEG data\n#\n# Note\n# ~~~~\n# These numbers tend to vary between subjects.. You might want to consider\n# using the autoreject method by Jas et al. 2018.\n# See https://autoreject.github.io\n#\n# Example\n# ~~~~~~~\n# >>> reject = {'grad': 4000e-13, 'mag': 4e-12, 'eog': 150e-6}\n# >>> reject = {'grad': 4000e-13, 'mag': 4e-12, 'eeg': 200e-6}\n# >>> reject = None\n\nreject = {'grad': 4000e-13, 'mag': 4e-12, 'eeg': 150e-6}\n\n\n###############################################################################\n# RENAME EXPERIMENTAL EVENTS\n# --------------------------\n#\n# ``rename_events`` : dict\n# A dictionary specifying which events in the BIDS dataset to rename upon\n# loading, and before processing begins.\n#\n# Pass an empty dictionary to not perform any renaming.\n#\n# Example\n# ~~~~~~~\n# Rename ``audio_left`` in the BIDS dataset to ``audio/left`` in the pipeline:\n# >>> rename_events = {'audio_left': 'audio/left'}\n\nrename_events = dict()\n\n\n###############################################################################\n# EPOCHING\n# --------\n#\n# ``tmin``: float\n# A float in seconds that gives the start time before event of an epoch.\n#\n# Example\n# ~~~~~~~\n# >>> tmin = -0.2 # take 200ms before event onset.\n\ntmin = -0.2\n\n# ``tmax``: float\n# A float in seconds that gives the end time before event of an epoch.\n#\n# Example\n# ~~~~~~~\n# >>> tmax = 0.5 # take 500ms after event onset.\n\ntmax = 0.5\n\n# ``baseline`` : tuple\n# It specifies how to baseline the epochs; if None, no baseline is applied.\n#\n# Example\n# ~~~~~~~\n# >>> baseline = (None, 0) # baseline between tmin and 0\n\nbaseline = (None, 0)\n\n# `conditions`` : list\n# The condition names to consider. This can either be the keys of\n# ``event_id``, or – if event names were specified with ``/`` for\n# grouping – the name of the *grouped* condition (i.e., the\n# condition name before or after that ``/`` that is shared between the\n# respective conditions you wish to group). 
See the \"Subselecting epochs\"\n# tutorial for more information: https://mne.tools/stable/auto_tutorials/epochs/plot_10_epochs_overview.html#subselecting-epochs # noqa: 501\n#\n# Example\n# ~~~~~~~\n# >>> conditions = ['auditory/left', 'visual/left']\n# or\n# >>> conditions = ['auditory/left', 'auditory/right']\n# or\n# >>> conditions = ['auditory']\n# or\n# >>> conditions = ['auditory', 'visual']\n# or\n# >>> conditions = ['left', 'right']\n\nconditions = ['left', 'right']\n\n###############################################################################\n# ARTIFACT REMOVAL\n# ----------------\n#\n# You can choose between ICA and SSP to remove eye and heart artifacts.\n# SSP: https://mne-tools.github.io/stable/auto_tutorials/plot_artifacts_correction_ssp.html?highlight=ssp # noqa\n# ICA: https://mne-tools.github.io/stable/auto_tutorials/plot_artifacts_correction_ica.html?highlight=ica # noqa\n# if you choose ICA, run scripts 5a and 6a\n# if you choose SSP, run scripts 5b and 6b\n#\n# Currently you cannot use both.\n\n# SSP\n# ~~~\n#\n# ``use_ssp`` : bool\n# If True ICA should be used or not.\n\nuse_ssp = True\n\n# ICA\n# ~~~\n# ``use_ica`` : bool\n# If True ICA should be used or not.\n\nuse_ica = False\n\n# ``ica_algorithm`` : 'picard' | 'fastica' | 'extended_infomax'\n# The ICA algorithm to use.\n\nica_algorithm = 'picard'\n\n# ``ica_max_iterations`` : int\n# Maximum number of iterations to decompose the data into independent\n# components. A low number means to finish earlier, but the consequence is\n# that the algorithm may not have finished converging. To ensure\n# convergence, pick a high number here (e.g. 3000); yet the algorithm will\n# terminate as soon as it determines that is has successfully converged, and\n# not necessarily exhaust the maximum number of iterations. Note that the\n# default of 200 seems to be sufficient for Picard in many datasets, because\n# it converges quicker than the other algorithms; but e.g. for FastICA, this\n# limit may be too low to achieve convergence.\n\nica_max_iterations = 200\n\n# ``ica_decim`` : None | None\n# The decimation parameter to compute ICA. If 5 it means\n# that 1 every 5 sample is used by ICA solver. The higher the faster\n# it is to run but the less data you have to compute a good ICA. Set to\n# ``1`` ``None`` to not perform an decimation.\n\nica_decim = None\n\n\n# ``default_reject_comps_factory`` : callable\n# A factory function that returns a default rejection component dictionary:\n# A dictionary that specifies the indices of the ICA components to reject\n# for each subject. 
For example you can use:\n# rejcomps_man['subject01'] = dict(eeg=[12], meg=[7])\n\ndef default_reject_comps_factory():\n \"\"\"Return the default rejection component dictionary.\"\"\"\n return dict(meg=[], eeg=[])\n\n\nrejcomps_man = defaultdict(default_reject_comps_factory)\n\n# ``ica_ctps_ecg_threshold``: float\n# The threshold parameter passed to `find_bads_ecg` method.\n\nica_ctps_ecg_threshold = 0.1\n\n###############################################################################\n# DECODING\n# --------\n#\n# ``decoding_conditions`` : list\n# List of conditions to be classified.\n#\n# Example\n# ~~~~~~~\n# >>> decoding_conditions = [] # don't do decoding\n# or\n# >>> decoding_conditions = [('auditory', 'visual'), ('left', 'right')]\n\ndecoding_conditions = []\n# decoding_conditions = [('left', 'right')]\n\n###############################################################################\n# GROUP AVERAGE SENSORS\n# ---------------------\n#\n# ``interpolate_bads_grand_average`` : bool\n# Interpolate bad sensors in each dataset before calculating the grand\n# average. This parameter is passed to the `mne.grand_average` function via\n# the keyword argument `interpolate_bads`. It requires to have channel\n# locations set.\n#\n# Example\n# ~~~~~~~\n# >>> interpolate_bads_grand_average = True\n\ninterpolate_bads_grand_average = True\n\n# ``decoding_metric`` : str\n# The metric to use for cross-validation. It can be 'roc_auc' or 'accuracy'\n# or any metric supported by scikit-learn.\n\ndecoding_metric = 'roc_auc'\n\n# ``decoding_n_splits`` : int\n# The number of folds (a.k.a. splits) to use in the cross-validation.\n\ndecoding_n_splits = 5\n\n###############################################################################\n# TIME-FREQUENCY\n# --------------\n#\n# ``time_frequency_conditions`` : list\n# The conditions to compute time-frequency decomposition on.\n\n# time_frequency_conditions = ['left', 'right']\ntime_frequency_conditions = []\n\n###############################################################################\n# SOURCE SPACE PARAMETERS\n# -----------------------\n#\n\n# ``spacing`` : str\n# The spacing to use. Can be ``'ico#'`` for a recursively subdivided\n# icosahedron, ``'oct#'`` for a recursively subdivided octahedron,\n# ``'all'`` for all points, or an integer to use appoximate\n# distance-based spacing (in mm).\n\nspacing = 'oct6'\n\n# ``mindist`` : float\n# Exclude points closer than this distance (mm) to the bounding surface.\n\nmindist = 5\n\n# ``loose`` : float in [0, 1] | 'auto'\n# Value that weights the source variances of the dipole components\n# that are parallel (tangential) to the cortical surface. If loose\n# is 0 then the solution is computed with fixed orientation,\n# and fixed must be True or \"auto\".\n# If loose is 1, it corresponds to free orientations.\n# The default value ('auto') is set to 0.2 for surface-oriented source\n# space and set to 1.0 for volumetric, discrete, or mixed source spaces,\n# unless ``fixed is True`` in which case the value 0. is used.\n\nloose = 0.2\n\n# ``depth`` : None | float | dict\n# If float (default 0.8), it acts as the depth weighting exponent (``exp``)\n# to use (must be between 0 and 1). None is equivalent to 0, meaning no\n# depth weighting is performed. 
Can also be a `dict` containing additional\n# keyword arguments to pass to :func:`mne.forward.compute_depth_prior`\n# (see docstring for details and defaults).\n\ndepth = 0.8\n\n# inverse_method : \"MNE\" | \"dSPM\" | \"sLORETA\" | \"eLORETA\"\n# Use minimum norm, dSPM (default), sLORETA, or eLORETA.\n\ninverse_method = 'dSPM'\n\n# noise_cov : (None, 0) | ‘emptyroom’\n# Specify how to estimate the noise covariance matrix, which is used in\n# inverse modeling.\n#\n# If a tuple, it takes the form ``(tmin, tmax)`` with the time specified in\n# seconds. If the first value of the tuple is ``None``, the considered\n# period starts at the beginning of the epoch. If the second value of the\n# tuple is ``None``, the considered period ends at the end of the epoch.\n# The default, ``(None, 0)``, includes the entire period before the event,\n# which is typically the pre-stimulus period.\n#\n# If ``emptyroom``, the noise covariance matrix will be estimated from an\n# empty-room MEG recording. The empty-room recording will be automatically\n# selected based on recording date and time.\n#\n# Please note that when processing data that contains EEG channels, the noise\n# covariance can ONLY be estimated from the pre-stimulus period.\n#\n# Example\n# ~~~~~~~\n# Use the period from start of the epoch until 100 ms before the experimental\n# event:\n# >>> noise_cov = (None, -0.1)\n#\n# Use the time period from the experimental event until the end of the epoch:\n# >>> noise_cov = (0, None)\n#\n# Use an empty-room recording:\n# >>> noise_cov = 'emptyroom'\n\nnoise_cov = (None, 0)\n\n# smooth : int | None\n# Number of iterations for the smoothing of the surface data.\n# If None, smooth is automatically defined to fill the surface\n# with non-zero values. The default is spacing=None.\n\nsmooth = 10\n\nfsaverage_vertices = [np.arange(10242), np.arange(10242)]\n\n###############################################################################\n# ADVANCED\n# --------\n#\n# ``l_trans_bandwidth`` : float | 'auto'\n# A float that specifies the transition bandwidth of the\n# highpass filter. By default it's `'auto'` and uses default mne\n# parameters.\n\nl_trans_bandwidth = 'auto'\n\n# ``h_trans_bandwidth`` : float | 'auto'\n# A float that specifies the transition bandwidth of the\n# lowpass filter. By default it's `'auto'` and uses default mne\n# parameters.\n\nh_trans_bandwidth = 'auto'\n\n# ``N_JOBS`` : int\n# An integer that specifies how many subjects you want to run in parallel.\n\nN_JOBS = 1\n\n# ``random_state`` : None | int | np.random.RandomState\n# To specify the random generator state. This allows to have\n# the results more reproducible between machines and systems.\n# Some methods like ICA need random values for initialisation.\n\nrandom_state = 42\n\n# ``shortest_event`` : int\n# Minimum number of samples an event must last. 
If the\n# duration is less than this an exception will be raised.\n\nshortest_event = 1\n\n# ``allow_maxshield`` : bool\n# To import data that was recorded with Maxshield on before running\n# maxfilter set this to True.\n\nallow_maxshield = False\n\nlog_level = 'info'\nmne_log_level = 'error'\n\n# ``on_abort`` : 'continue' | 'abort'\n# Whether to abort processing as soon as an error occurs, or whether to\n# continue with all other processing steps for as long as possible.\n\non_error = 'abort'\n\n\n###############################################################################\n# #\n# CUSTOM CONFIGURATION ENDS HERE #\n# #\n###############################################################################\n\n\n###############################################################################\n# Logger\n# ------\n\nlogger = logging.getLogger('mne-study-template')\n\nlog_fmt = '%(asctime)s %(message)s'\nlog_date_fmt = coloredlogs.DEFAULT_DATE_FORMAT = '%H:%M:%S'\ncoloredlogs.install(level=log_level, logger=logger, fmt=log_fmt,\n date_fmt=log_date_fmt)\n\nmne.set_log_level(verbose=mne_log_level.upper())\n\n###############################################################################\n# Retrieve custom configuration options\n# -------------------------------------\n#\n# For testing a specific dataset, create a Python file with a name of your\n# liking (e.g., ``mydataset-template-config.py``), and set an environment\n# variable ``MNE_BIDS_STUDY_CONFIG`` to that file.\n#\n# Example\n# ~~~~~~~\n# ``export MNE_BIDS_STUDY_CONFIG=/data/mystudy/mydataset-template-config.py``\n\nif \"MNE_BIDS_STUDY_CONFIG\" in os.environ:\n cfg_path = os.environ['MNE_BIDS_STUDY_CONFIG']\n\n if os.path.exists(cfg_path):\n msg = f'Using custom configuration: {cfg_path}'\n logger.info(msg)\n else:\n msg = ('The custom configuration file specified in the '\n 'MNE_BIDS_STUDY_CONFIG environment variable could not be '\n 'found: {cfg_path}'.format(cfg_path=cfg_path))\n raise ValueError(msg)\n\n # Import configuration from an arbitrary path without having to fiddle\n # with `sys.path`.\n spec = importlib.util.spec_from_file_location(name='custom_config',\n location=cfg_path)\n custom_cfg = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(custom_cfg)\n del spec, cfg_path\n\n new = None\n for val in dir(custom_cfg):\n if not val.startswith('__'):\n exec(\"new = custom_cfg.%s\" % val)\n logger.debug('Overwriting: %s -> %s' % (val, new))\n exec(\"%s = custom_cfg.%s\" % (val, val))\n\n\n# BIDS_ROOT environment variable takes precedence over any configuration file\n# values.\nif os.getenv('BIDS_ROOT') is not None:\n bids_root = os.getenv('BIDS_ROOT')\n\n# If we don't have a bids_root until now, raise an exeception as we cannot\n# proceed.\nif not bids_root:\n msg = ('You need to specify `bids_root` in your configuration, or '\n 'define an environment variable `BIDS_ROOT` pointing to the '\n 'root folder of your BIDS dataset')\n raise ValueError(msg)\n\n\n###############################################################################\n# Derivates root\n# --------------\nderiv_root = os.path.join(bids_root, 'derivatives', PIPELINE_NAME)\n\n\n###############################################################################\n# CHECKS\n# ------\n\nif (use_maxwell_filter and\n len(set(ch_types).intersection(('meg', 'grad', 'mag'))) == 0):\n raise ValueError('Cannot use maxwell filter without MEG channels.')\n\nif use_ssp and use_ica:\n raise ValueError('Cannot use both SSP and ICA.')\n\nif use_ica and ica_algorithm not in 
('picard', 'fastica', 'extended_infomax'):\n msg = (f\"Invalid ICA algorithm requested. Valid values for ica_algorithm \"\n f\"are: 'picard', 'fastica', and 'extended_infomax', but received \"\n f\"{ica_algorithm}.\")\n raise ValueError(msg)\n\nif not ch_types:\n msg = 'Please specify ch_types in your configuration.'\n raise ValueError(msg)\n\nif ch_types == ['eeg']:\n pass\nelif 'eeg' in ch_types and len(ch_types) > 1: # EEG + some other channel types\n msg = ('EEG data can only be analyzed separately from other channel '\n 'types. Please adjust `ch_types` in your configuration.')\n raise ValueError(msg)\nelif any([ch_type not in ('meg', 'mag', 'grad') for ch_type in ch_types]):\n msg = ('Invalid channel type passed. Please adjust `ch_types` in your '\n 'configuration.')\n raise ValueError(msg)\n\nif 'eeg' in ch_types:\n if use_ssp:\n msg = ('You requested SSP for EEG data via use_ssp=True. However, '\n 'this is not presently supported. Please use ICA instead by '\n 'setting use_ssp=False and use_ica=True.')\n raise ValueError(msg)\n if not use_ica:\n msg = ('You did not request ICA artifact correction for your data. '\n 'To turn it on, set use_ica=True.')\n logger.info(msg)\n\nif on_error not in ('continue', 'abort'):\n msg = (f\"on_error must be one of 'continue' or 'abort', but received \"\n f\"{on_error}.\")\n logger.info(msg)\n\nif isinstance(noise_cov, str) and noise_cov != 'emptyroom':\n msg = (f\"noise_cov must be a tuple or 'emptyroom', but received \"\n f\"{noise_cov}\")\n raise ValueError(msg)\n\nif noise_cov == 'emptyroom' and 'eeg' in ch_types:\n msg = ('You requested to process data that contains EEG channels. In this '\n 'case, noise covariance can only be estimated from the '\n 'experimental data, e.g., the pre-stimulus period. Please set '\n 'noise_cov to (tmin, tmax)')\n raise ValueError(msg)\n\n\n###############################################################################\n# Helper functions\n# ----------------\n\ndef get_sessions():\n sessions_ = copy.deepcopy(sessions) # Avoid clash with global variable.\n\n if sessions_ == 'all':\n sessions_ = get_entity_vals(bids_root, entity_key='ses')\n\n if not sessions_:\n return [None]\n else:\n return sessions_\n\n\ndef get_runs():\n runs_ = copy.deepcopy(runs) # Avoid clash with global variable.\n\n if runs_ == 'all':\n runs_ = get_entity_vals(bids_root, entity_key='run')\n\n if not runs_:\n return [None]\n else:\n return runs_\n\n\ndef get_subjects():\n if subjects_list == 'all':\n s = get_entity_vals(bids_root, entity_key='sub')\n else:\n s = subjects_list\n\n subjects = set(s) - set(exclude_subjects)\n # Drop empty-room subject.\n subjects = subjects - set(['emptyroom'])\n\n return list(subjects)\n\n\ndef get_task():\n if not task:\n tasks = get_entity_vals(bids_root, entity_key='task')\n if not tasks:\n return None\n else:\n return tasks[0]\n else:\n return task\n\n\ndef get_kind():\n # Content of ch_types should be sanitized already, so we don't need any\n # extra sanity checks here.\n if ch_types == ['eeg']:\n return 'eeg'\n else:\n return 'meg'\n\n\ndef get_reject():\n reject_ = reject.copy() # Avoid clash with global variable.\n kind = get_kind()\n\n if kind == 'eeg':\n ch_types_to_remove = ('mag', 'grad')\n else:\n ch_types_to_remove = ('eeg',)\n\n for ch_type in ch_types_to_remove:\n try:\n del reject_[ch_type]\n except KeyError:\n pass\n return reject_\n\n\ndef get_fs_subjects_dir():\n if not subjects_dir:\n return os.path.join(bids_root, 'derivatives', 'freesurfer', 'subjects')\n else:\n return 
subjects_dir\n\n\ndef get_subject_path(subject, session, kind):\n subject_path = f'sub-{subject}'\n if session is not None:\n subject_path = os.path.join(subject_path, f'ses-{session}')\n subject_path = os.path.join(subject_path, kind)\n return subject_path\n\n\ndef get_subject_deriv_path(subject, session, kind):\n subject_path = get_subject_path(subject=subject, session=session,\n kind=kind)\n deriv_path = os.path.join(deriv_root, subject_path)\n return deriv_path\n\n\ndef gen_log_message(message, step=None, subject=None, session=None, run=None):\n if subject is not None:\n subject = f'sub-{subject}'\n if session is not None:\n session = f'ses-{session}'\n if run is not None:\n run = f'run-{run}'\n\n prefix = ', '.join([item for item in [subject, session, run]\n if item is not None])\n if prefix:\n prefix = f'[{prefix}]'\n\n if step is not None:\n prefix = f'[Step-{step:02}]{prefix}'\n\n return prefix + ' ' + message\n\n\ndef failsafe_run(on_error):\n def failsafe_run_decorator(func):\n @functools.wraps(func) # Preserve \"identity\" of original function\n def wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except Exception as e:\n message = 'A critical error occurred.'\n message = gen_log_message(message=message)\n\n if on_error == 'abort':\n logger.critical(message)\n raise(e)\n else:\n message = f'{message} The error message was:\\n{str(e)}'\n logger.critical(message)\n return wrapper\n return failsafe_run_decorator\n", "path": "config.py" } ]
diff --git a/config.py b/config.py index 17301ec1b..239d32a44 100644 --- a/config.py +++ b/config.py @@ -461,17 +461,6 @@ tmax = 0.5 -# ``trigger_time_shift`` : float | None -# If float it specifies the offset for the trigger and the stimulus -# (in seconds). You need to measure this value for your specific -# experiment/setup. -# -# Example -# ~~~~~~~ -# >>> trigger_time_shift = 0 # don't apply any offset - -trigger_time_shift = 0. - # ``baseline`` : tuple # It specifies how to baseline the epochs; if None, no baseline is applied. #
config.trigger_time_shift is currently unused, and it also lacks clarification of what exactly it is supposed to do.
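For context on the record above: a trigger-to-stimulus offset, if it had been implemented, would amount to shifting event onsets by a fixed number of samples before epoching. The sketch below is purely illustrative and is not code from the mne-study-template pipeline (which, per the issue and the diff above, never used the option); the helper name shift_events and the example delay and sampling-frequency values are assumptions.

    import numpy as np

    def shift_events(events, trigger_time_shift, sfreq):
        """Shift MNE-style events (shape (n_events, 3)) by trigger_time_shift seconds.

        A positive shift moves event onsets later in time; the shift is
        rounded to the nearest sample at the given sampling frequency.
        """
        shifted = events.copy()
        shifted[:, 0] += int(round(trigger_time_shift * sfreq))
        return shifted

    # Example: compensate for a measured 34 ms trigger-to-stimulus delay
    # before the events are used for epoching.
    events = np.array([[1000, 0, 1], [2500, 0, 2]])
    events = shift_events(events, trigger_time_shift=0.034, sfreq=1000.0)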
plone__Products.CMFPlone-3093
[ { "content": "# -*- coding: utf-8 -*-\nfrom AccessControl import ClassSecurityInfo\nfrom AccessControl.class_init import InitializeClass\nfrom App.special_dtml import DTMLFile\nfrom OFS.Folder import Folder\nfrom OFS.PropertyManager import PropertyManager\nfrom Products.CMFCore.ActionInformation import ActionInformation\nfrom Products.CMFCore.ActionProviderBase import ActionProviderBase\nfrom Products.CMFCore.Expression import Expression, createExprContext\nfrom Products.CMFCore.permissions import ManagePortal, View\nfrom Products.CMFCore.utils import _checkPermission\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFCore.utils import registerToolInterface\nfrom Products.CMFCore.utils import UniqueObject\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom Products.CMFPlone.interfaces import IControlPanel\nfrom Products.CMFPlone.PloneBaseTool import PloneBaseTool\nfrom zope.component.hooks import getSite\nfrom zope.i18n import translate\nfrom zope.i18nmessageid import Message\nfrom zope.interface import implementer\n\nimport six\n\n\nclass PloneConfiglet(ActionInformation):\n\n def __init__(self, appId, **kwargs):\n self.appId = appId\n ActionInformation.__init__(self, **kwargs)\n\n def getAppId(self):\n return self.appId\n\n def getDescription(self):\n return self.description\n\n def clone(self):\n return self.__class__(**self.__dict__)\n\n def getAction(self, ec):\n res = ActionInformation.getAction(self, ec)\n res['description'] = self.getDescription()\n return res\n\n\n@implementer(IControlPanel)\nclass PloneControlPanel(PloneBaseTool, UniqueObject,\n Folder, ActionProviderBase, PropertyManager):\n \"\"\"Weave together the various sources of \"actions\" which\n are apropos to the current user and context.\n \"\"\"\n\n security = ClassSecurityInfo()\n\n id = 'portal_controlpanel'\n title = 'Control Panel'\n toolicon = 'skins/plone_images/site_icon.png'\n meta_type = 'Plone Control Panel Tool'\n _actions_form = DTMLFile('www/editPloneConfiglets', globals())\n\n manage_options = (ActionProviderBase.manage_options +\n PropertyManager.manage_options)\n\n group = dict(\n member=[\n ('Member', _(u'My Preferences')),\n ],\n site=[\n ('plone-general', _(u'General')),\n ('plone-content', _(u'Content')),\n ('plone-users', _(u'Users')),\n ('plone-security', _(u'Security')),\n ('plone-advanced', _(u'Advanced')),\n ('Plone', _(u'Plone Configuration')),\n ('Products', _(u'Add-on Configuration')),\n ]\n )\n\n def __init__(self, **kw):\n if kw:\n self.__dict__.update(**kw)\n\n security.declareProtected(ManagePortal, 'registerConfiglets')\n\n def registerConfiglets(self, configlets):\n for conf in configlets:\n self.registerConfiglet(**conf)\n\n security.declareProtected(ManagePortal, 'getGroupIds')\n\n def getGroupIds(self, category='site'):\n groups = self.group.get(category, [])\n return [g[0] for g in groups if g]\n\n security.declareProtected(View, 'getGroups')\n\n def getGroups(self, category='site'):\n groups = self.group.get(category, [])\n return [{'id': g[0], 'title': g[1]} for g in groups if g]\n\n security.declarePrivate('listActions')\n\n def listActions(self, info=None, object=None):\n # This exists here to shut up a deprecation warning about old-style\n # actions in CMFCore's ActionProviderBase. 
It was decided not to\n # move configlets to be based on action tool categories for Plone 4\n # (see PLIP #8804), but that (or an alternative) will have to happen\n # before CMF 2.4 when support for old-style actions is removed.\n return self._actions or ()\n\n security.declarePublic('maySeeSomeConfiglets')\n\n def maySeeSomeConfiglets(self):\n groups = self.getGroups('site')\n\n all = []\n for group in groups:\n all.extend(self.enumConfiglets(group=group['id']))\n all = [item for item in all if item['visible']]\n return len(all) != 0\n\n security.declarePublic('enumConfiglets')\n\n def enumConfiglets(self, group=None):\n portal = getToolByName(self, 'portal_url').getPortalObject()\n context = createExprContext(self, portal, self)\n res = []\n for a in self.listActions():\n verified = 0\n for permission in a.permissions:\n if _checkPermission(permission, portal):\n verified = 1\n if verified and a.category == group and a.testCondition(context) \\\n and a.visible:\n res.append(a.getAction(context))\n # Translate the title for sorting\n if getattr(self, 'REQUEST', None) is not None:\n for a in res:\n title = a['title']\n if not isinstance(title, Message):\n title = Message(title, domain='plone')\n a['title'] = translate(title,\n context=self.REQUEST)\n\n def _id(v):\n return v['id']\n res.sort(key=_id)\n return res\n\n security.declareProtected(ManagePortal, 'unregisterConfiglet')\n\n def unregisterConfiglet(self, id):\n actids = [o.id for o in self.listActions()]\n selection = [actids.index(a) for a in actids if a == id]\n if not selection:\n return\n self.deleteActions(selection)\n\n security.declareProtected(ManagePortal, 'unregisterApplication')\n\n def unregisterApplication(self, appId):\n acts = list(self.listActions())\n selection = [acts.index(a) for a in acts if a.appId == appId]\n if not selection:\n return\n self.deleteActions(selection)\n\n def _extractAction(self, properties, index):\n # Extract an ActionInformation from the funky form properties.\n id = str(properties.get('id_%d' % index, ''))\n name = str(properties.get('name_%d' % index, ''))\n action = str(properties.get('action_%d' % index, ''))\n condition = str(properties.get('condition_%d' % index, ''))\n category = str(properties.get('category_%d' % index, ''))\n visible = properties.get('visible_%d' % index, 0)\n permissions = properties.get('permission_%d' % index, ())\n appId = properties.get('appId_%d' % index, '')\n description = properties.get('description_%d' % index, '')\n icon_expr = properties.get('icon_expr_%d' % index, '')\n\n if not name:\n raise ValueError('A name is required.')\n\n if action != '':\n action = Expression(text=action)\n\n if condition != '':\n condition = Expression(text=condition)\n\n if category == '':\n category = 'object'\n\n if not isinstance(visible, int):\n try:\n visible = int(visible)\n except ValueError:\n visible = 0\n\n if isinstance(permissions, six.string_types):\n permissions = (permissions, )\n\n return PloneConfiglet(id=id,\n title=name,\n action=action,\n condition=condition,\n permissions=permissions,\n category=category,\n visible=visible,\n appId=appId,\n description=description,\n icon_expr=icon_expr,\n )\n\n security.declareProtected(ManagePortal, 'addAction')\n\n def addAction(self,\n id,\n name,\n action,\n condition='',\n permission='',\n category='Plone',\n visible=1,\n appId=None,\n icon_expr='',\n description='',\n REQUEST=None,\n ):\n # Add an action to our list.\n if not name:\n raise ValueError('A name is required.')\n\n a_expr = action and 
Expression(text=str(action)) or ''\n c_expr = condition and Expression(text=str(condition)) or ''\n\n if not isinstance(permission, tuple):\n permission = permission and (str(permission), ) or ()\n\n new_actions = self._cloneActions()\n\n new_action = PloneConfiglet(id=str(id),\n title=name,\n action=a_expr,\n condition=c_expr,\n permissions=permission,\n category=str(category),\n visible=int(visible),\n appId=appId,\n description=description,\n icon_expr=icon_expr,\n )\n\n new_actions.append(new_action)\n self._actions = tuple(new_actions)\n\n if REQUEST is not None:\n return self.manage_editActionsForm(\n REQUEST, manage_tabs_message='Added.')\n\n security.declareProtected(ManagePortal, 'registerConfiglet')\n registerConfiglet = addAction\n\n security.declareProtected(ManagePortal, 'manage_editActionsForm')\n\n def manage_editActionsForm(self, REQUEST, manage_tabs_message=None):\n \"\"\" Show the 'Actions' management tab.\n \"\"\"\n actions = []\n\n for a in self.listActions():\n\n a1 = {}\n a1['id'] = a.getId()\n a1['name'] = a.Title()\n p = a.getPermissions()\n if p:\n a1['permission'] = p[0]\n else:\n a1['permission'] = ''\n a1['category'] = a.getCategory() or 'object'\n a1['visible'] = a.getVisibility()\n a1['action'] = a.getActionExpression()\n a1['condition'] = a.getCondition()\n a1['appId'] = a.getAppId()\n a1['description'] = a.getDescription()\n a1['icon_expr'] = a.getIconExpression()\n actions.append(a1)\n\n # possible_permissions is in OFS.role.RoleManager.\n pp = self.possible_permissions()\n return self._actions_form(\n self,\n REQUEST,\n actions=actions,\n possible_permissions=pp,\n management_view='Actions',\n manage_tabs_message=manage_tabs_message,\n )\n\n @property\n def site_url(self):\n \"\"\"Return the absolute URL to the current site, which is likely not\n necessarily the portal root.\n Used by ``portlet_prefs`` to construct the URL to\n ``@@overview-controlpanel``.\n \"\"\"\n return getSite().absolute_url()\n\n\nInitializeClass(PloneControlPanel)\nregisterToolInterface('portal_controlpanel', IControlPanel)\n", "path": "Products/CMFPlone/PloneControlPanel.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\nfrom AccessControl import ClassSecurityInfo\nfrom AccessControl.class_init import InitializeClass\nfrom App.special_dtml import DTMLFile\nfrom OFS.Folder import Folder\nfrom OFS.PropertyManager import PropertyManager\nfrom Products.CMFCore.ActionInformation import ActionInformation\nfrom Products.CMFCore.ActionProviderBase import ActionProviderBase\nfrom Products.CMFCore.Expression import Expression, createExprContext\nfrom Products.CMFCore.permissions import ManagePortal, View\nfrom Products.CMFCore.utils import _checkPermission\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFCore.utils import registerToolInterface\nfrom Products.CMFCore.utils import UniqueObject\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom Products.CMFPlone.interfaces import IControlPanel\nfrom Products.CMFPlone.PloneBaseTool import PloneBaseTool\nfrom zope.component.hooks import getSite\nfrom zope.i18n import translate\nfrom zope.i18nmessageid import Message\nfrom zope.interface import implementer\n\nimport six\n\n\nclass PloneConfiglet(ActionInformation):\n\n def __init__(self, appId, **kwargs):\n self.appId = appId\n ActionInformation.__init__(self, **kwargs)\n\n def getAppId(self):\n return self.appId\n\n def getDescription(self):\n return self.description\n\n def clone(self):\n return self.__class__(**self.__dict__)\n\n def getAction(self, ec):\n res = ActionInformation.getAction(self, ec)\n res['description'] = self.getDescription()\n return res\n\n\n@implementer(IControlPanel)\nclass PloneControlPanel(PloneBaseTool, UniqueObject,\n Folder, ActionProviderBase, PropertyManager):\n \"\"\"Weave together the various sources of \"actions\" which\n are apropos to the current user and context.\n \"\"\"\n\n security = ClassSecurityInfo()\n\n id = 'portal_controlpanel'\n title = 'Control Panel'\n toolicon = 'skins/plone_images/site_icon.png'\n meta_type = 'Plone Control Panel Tool'\n _actions_form = DTMLFile('www/editPloneConfiglets', globals())\n\n manage_options = (ActionProviderBase.manage_options +\n PropertyManager.manage_options)\n\n group = dict(\n member=[\n ('Member', _(u'My Preferences')),\n ],\n site=[\n ('plone-general', _(u'General')),\n ('plone-content', _(u'Content')),\n ('plone-users', _(u'Users')),\n ('plone-security', _(u'Security')),\n ('plone-advanced', _(u'Advanced')),\n ('Plone', _(u'Plone Configuration')),\n ('Products', _(u'Add-on Configuration')),\n ]\n )\n\n def __init__(self, **kw):\n if kw:\n self.__dict__.update(**kw)\n\n security.declareProtected(ManagePortal, 'registerConfiglets')\n\n def registerConfiglets(self, configlets):\n for conf in configlets:\n self.registerConfiglet(**conf)\n\n security.declareProtected(ManagePortal, 'getGroupIds')\n\n def getGroupIds(self, category='site'):\n groups = self.group.get(category, [])\n return [g[0] for g in groups if g]\n\n security.declareProtected(View, 'getGroups')\n\n def getGroups(self, category='site'):\n groups = self.group.get(category, [])\n return [{'id': g[0], 'title': g[1]} for g in groups if g]\n\n security.declarePrivate('listActions')\n\n def listActions(self, info=None, object=None):\n # This exists here to shut up a deprecation warning about old-style\n # actions in CMFCore's ActionProviderBase. 
It was decided not to\n # move configlets to be based on action tool categories for Plone 4\n # (see PLIP #8804), but that (or an alternative) will have to happen\n # before CMF 2.4 when support for old-style actions is removed.\n return self._actions or ()\n\n security.declarePublic('maySeeSomeConfiglets')\n\n def maySeeSomeConfiglets(self):\n groups = self.getGroups('site')\n\n all = []\n for group in groups:\n all.extend(self.enumConfiglets(group=group['id']))\n all = [item for item in all if item['visible']]\n return len(all) != 0\n\n security.declarePublic('enumConfiglets')\n\n def enumConfiglets(self, group=None):\n portal = getToolByName(self, 'portal_url').getPortalObject()\n context = createExprContext(self, portal, self)\n res = []\n for a in self.listActions():\n verified = 0\n for permission in a.permissions:\n if _checkPermission(permission, portal):\n verified = 1\n if verified and a.category == group and a.testCondition(context) \\\n and a.visible:\n res.append(a.getAction(context))\n # Translate the title for sorting\n if getattr(self, 'REQUEST', None) is not None:\n for a in res:\n title = a['title']\n if not isinstance(title, Message):\n title = Message(title, domain='plone')\n a['title'] = translate(title,\n context=self.REQUEST)\n\n def _title(v):\n return v['title']\n\n res.sort(key=_title)\n return res\n\n security.declareProtected(ManagePortal, 'unregisterConfiglet')\n\n def unregisterConfiglet(self, id):\n actids = [o.id for o in self.listActions()]\n selection = [actids.index(a) for a in actids if a == id]\n if not selection:\n return\n self.deleteActions(selection)\n\n security.declareProtected(ManagePortal, 'unregisterApplication')\n\n def unregisterApplication(self, appId):\n acts = list(self.listActions())\n selection = [acts.index(a) for a in acts if a.appId == appId]\n if not selection:\n return\n self.deleteActions(selection)\n\n def _extractAction(self, properties, index):\n # Extract an ActionInformation from the funky form properties.\n id = str(properties.get('id_%d' % index, ''))\n name = str(properties.get('name_%d' % index, ''))\n action = str(properties.get('action_%d' % index, ''))\n condition = str(properties.get('condition_%d' % index, ''))\n category = str(properties.get('category_%d' % index, ''))\n visible = properties.get('visible_%d' % index, 0)\n permissions = properties.get('permission_%d' % index, ())\n appId = properties.get('appId_%d' % index, '')\n description = properties.get('description_%d' % index, '')\n icon_expr = properties.get('icon_expr_%d' % index, '')\n\n if not name:\n raise ValueError('A name is required.')\n\n if action != '':\n action = Expression(text=action)\n\n if condition != '':\n condition = Expression(text=condition)\n\n if category == '':\n category = 'object'\n\n if not isinstance(visible, int):\n try:\n visible = int(visible)\n except ValueError:\n visible = 0\n\n if isinstance(permissions, six.string_types):\n permissions = (permissions, )\n\n return PloneConfiglet(id=id,\n title=name,\n action=action,\n condition=condition,\n permissions=permissions,\n category=category,\n visible=visible,\n appId=appId,\n description=description,\n icon_expr=icon_expr,\n )\n\n security.declareProtected(ManagePortal, 'addAction')\n\n def addAction(self,\n id,\n name,\n action,\n condition='',\n permission='',\n category='Plone',\n visible=1,\n appId=None,\n icon_expr='',\n description='',\n REQUEST=None,\n ):\n # Add an action to our list.\n if not name:\n raise ValueError('A name is required.')\n\n a_expr = action and 
Expression(text=str(action)) or ''\n c_expr = condition and Expression(text=str(condition)) or ''\n\n if not isinstance(permission, tuple):\n permission = permission and (str(permission), ) or ()\n\n new_actions = self._cloneActions()\n\n new_action = PloneConfiglet(id=str(id),\n title=name,\n action=a_expr,\n condition=c_expr,\n permissions=permission,\n category=str(category),\n visible=int(visible),\n appId=appId,\n description=description,\n icon_expr=icon_expr,\n )\n\n new_actions.append(new_action)\n self._actions = tuple(new_actions)\n\n if REQUEST is not None:\n return self.manage_editActionsForm(\n REQUEST, manage_tabs_message='Added.')\n\n security.declareProtected(ManagePortal, 'registerConfiglet')\n registerConfiglet = addAction\n\n security.declareProtected(ManagePortal, 'manage_editActionsForm')\n\n def manage_editActionsForm(self, REQUEST, manage_tabs_message=None):\n \"\"\" Show the 'Actions' management tab.\n \"\"\"\n actions = []\n\n for a in self.listActions():\n\n a1 = {}\n a1['id'] = a.getId()\n a1['name'] = a.Title()\n p = a.getPermissions()\n if p:\n a1['permission'] = p[0]\n else:\n a1['permission'] = ''\n a1['category'] = a.getCategory() or 'object'\n a1['visible'] = a.getVisibility()\n a1['action'] = a.getActionExpression()\n a1['condition'] = a.getCondition()\n a1['appId'] = a.getAppId()\n a1['description'] = a.getDescription()\n a1['icon_expr'] = a.getIconExpression()\n actions.append(a1)\n\n # possible_permissions is in OFS.role.RoleManager.\n pp = self.possible_permissions()\n return self._actions_form(\n self,\n REQUEST,\n actions=actions,\n possible_permissions=pp,\n management_view='Actions',\n manage_tabs_message=manage_tabs_message,\n )\n\n @property\n def site_url(self):\n \"\"\"Return the absolute URL to the current site, which is likely not\n necessarily the portal root.\n Used by ``portlet_prefs`` to construct the URL to\n ``@@overview-controlpanel``.\n \"\"\"\n return getSite().absolute_url()\n\n\nInitializeClass(PloneControlPanel)\nregisterToolInterface('portal_controlpanel', IControlPanel)\n", "path": "Products/CMFPlone/PloneControlPanel.py" } ]
diff --git a/Products/CMFPlone/PloneControlPanel.py b/Products/CMFPlone/PloneControlPanel.py index b749d2d51f..9e97d7c58b 100644 --- a/Products/CMFPlone/PloneControlPanel.py +++ b/Products/CMFPlone/PloneControlPanel.py @@ -143,9 +143,10 @@ def enumConfiglets(self, group=None): a['title'] = translate(title, context=self.REQUEST) - def _id(v): - return v['id'] - res.sort(key=_id) + def _title(v): + return v['title'] + + res.sort(key=_title) return res security.declareProtected(ManagePortal, 'unregisterConfiglet') diff --git a/news/721.bugfix b/news/721.bugfix new file mode 100644 index 0000000000..d6b094302b --- /dev/null +++ b/news/721.bugfix @@ -0,0 +1,2 @@ +Change control panel item sorting and sort them by title +[erral]
sorting in control panel The items of the control panel are completely unsorted; they should be sorted in alphabetical order, depending on the current language in Plone. ![screenshot-dev1 veit-schiele de 8080 2015-07-18 13-35-44](https://cloud.githubusercontent.com/assets/594239/8761438/07d76bf4-2d52-11e5-8540-82fd53932690.png)
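The pr_diff for this record replaces the `_id` sort key in `enumConfiglets` with a `_title` key, so configlets are ordered by their already-translated title. Below is a minimal sketch of that behaviour, not the actual Plone tool: the `translate()` stub and the sample catalog stand in for `zope.i18n.translate` and the `plone` translation domain, and the action dicts are illustrative.

```python
# Sketch of the sort-key change: entries are sorted by translated title,
# not by id. All names and data here are illustrative, not Plone APIs.

def translate(title, catalog):
    # Stand-in for zope.i18n.translate(Message(title, domain='plone'), ...)
    return catalog.get(title, title)

def enum_configlets(actions, catalog):
    # Translate titles first, then sort alphabetically by the translated
    # title, mirroring `res.sort(key=_title)` in the patched enumConfiglets().
    items = [dict(a, title=translate(a["title"], catalog)) for a in actions]
    return sorted(items, key=lambda item: item["title"])

actions = [
    {"id": "plone-users", "title": "Users"},
    {"id": "plone-content", "title": "Content"},
    {"id": "plone-security", "title": "Security"},
]
# With a German catalog the title order differs from the id order,
# which is exactly why sorting by id looked "unsorted" to users.
de = {"Users": "Benutzer", "Content": "Inhalte", "Security": "Sicherheit"}
print([c["title"] for c in enum_configlets(actions, catalog=de)])
# ['Benutzer', 'Inhalte', 'Sicherheit']
```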
bridgecrewio__checkov-5936
[ { "content": "from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom checkov.common.models.enums import CheckCategories\n\n\nclass CloudSqlMajorVersion(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure SQL database is using latest Major version\"\n id = \"CKV_GCP_79\"\n supported_resources = ['google_sql_database_instance']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return 'database_version'\n\n def get_expected_values(self):\n return [\"POSTGRES_15\", \"MYSQL_8_0\", \"SQLSERVER_2019_STANDARD\", \"SQLSERVER_2019_WEB\",\n \"SQLSERVER_2019_ENTERPRISE\", \"SQLSERVER_2019_EXPRESS\"]\n\n\ncheck = CloudSqlMajorVersion()\n", "path": "checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py" } ]
[ { "content": "from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom checkov.common.models.enums import CheckCategories\n\n\nclass CloudSqlMajorVersion(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure SQL database is using latest Major version\"\n id = \"CKV_GCP_79\"\n supported_resources = ['google_sql_database_instance']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return 'database_version'\n\n def get_expected_values(self):\n return [\"POSTGRES_15\", \"MYSQL_8_0\", \"SQLSERVER_2022_STANDARD\", \"SQLSERVER_2022_WEB\",\n \"SQLSERVER_2022_ENTERPRISE\", \"SQLSERVER_2022_EXPRESS\"]\n\n\ncheck = CloudSqlMajorVersion()\n", "path": "checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py" } ]
diff --git a/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py b/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py index 96bf503c3e..4df7105ccd 100644 --- a/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py +++ b/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py @@ -14,8 +14,8 @@ def get_inspected_key(self): return 'database_version' def get_expected_values(self): - return ["POSTGRES_15", "MYSQL_8_0", "SQLSERVER_2019_STANDARD", "SQLSERVER_2019_WEB", - "SQLSERVER_2019_ENTERPRISE", "SQLSERVER_2019_EXPRESS"] + return ["POSTGRES_15", "MYSQL_8_0", "SQLSERVER_2022_STANDARD", "SQLSERVER_2022_WEB", + "SQLSERVER_2022_ENTERPRISE", "SQLSERVER_2022_EXPRESS"] check = CloudSqlMajorVersion() diff --git a/tests/terraform/checks/resource/gcp/example_CloudSqlMajorVersion/main.tf b/tests/terraform/checks/resource/gcp/example_CloudSqlMajorVersion/main.tf index 0f8ff31f73..431cf323c6 100644 --- a/tests/terraform/checks/resource/gcp/example_CloudSqlMajorVersion/main.tf +++ b/tests/terraform/checks/resource/gcp/example_CloudSqlMajorVersion/main.tf @@ -154,7 +154,7 @@ resource "google_sql_database_instance" "pass2" { } resource "google_sql_database_instance" "fail3" { - database_version = "SQLSERVER_2017_STANDARD" + database_version = "SQLSERVER_2019_STANDARD" name = "general-sqlserver12" project = "gcp-bridgecrew-deployment" region = "us-central1" @@ -210,7 +210,7 @@ resource "google_sql_database_instance" "fail3" { } resource "google_sql_database_instance" "pass3" { - database_version = "SQLSERVER_2019_STANDARD" + database_version = "SQLSERVER_2022_STANDARD" name = "general-sqlserver12" project = "gcp-bridgecrew-deployment" region = "us-central1"
CKV_GCP_79 SQL Server latest version is 2022 instead of 2019 **Describe the issue** The `CKV_GCP_79` check for SQL Server is pinned at 2019, but 2022 is the latest version: https://learn.microsoft.com/en-us/troubleshoot/sql/releases/download-and-install-latest-updates **Examples** Related to these files: https://github.com/bridgecrewio/checkov/blob/master/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py https://github.com/bridgecrewio/checkov/blob/d07fdc994015772a9fa0dc1a12d1391b5765916c/tests/terraform/checks/resource/gcp/example_CloudSqlMajorVersion/main.tf#L213
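The fix in this record is purely a data change: `get_expected_values()` now lists the SQL Server 2022 editions instead of 2019. A minimal sketch of the underlying pattern follows; it is not the real checkov `BaseResourceValueCheck` machinery, just the idea of comparing the inspected attribute against an allowed-values list (the list itself is taken from the after_files above).

```python
# Sketch of the CKV_GCP_79 value check: a resource passes when its
# database_version is one of the expected (latest major) versions.
# The function name and dict-based resource_conf are illustrative only.

EXPECTED_VERSIONS = [
    "POSTGRES_15", "MYSQL_8_0", "SQLSERVER_2022_STANDARD",
    "SQLSERVER_2022_WEB", "SQLSERVER_2022_ENTERPRISE",
    "SQLSERVER_2022_EXPRESS",
]

def scan_database_version(resource_conf):
    """Return 'PASSED' if database_version is an expected value, else 'FAILED'."""
    version = resource_conf.get("database_version")
    return "PASSED" if version in EXPECTED_VERSIONS else "FAILED"

print(scan_database_version({"database_version": "SQLSERVER_2022_STANDARD"}))  # PASSED
print(scan_database_version({"database_version": "SQLSERVER_2019_STANDARD"}))  # FAILED
```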
freedomofpress__securedrop-3429
[ { "content": "# -*- mode: python; coding: utf-8 -*-\n#\n# Copyright (C) 2013-2018 Freedom of the Press Foundation & al\n# Copyright (C) 2018 Loic Dachary <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\"\"\"\nSecureDrop Admin Toolkit.\n\nFor use by administrators to install, maintain, and manage their SD\ninstances.\n\"\"\"\n\nimport argparse\nimport logging\nimport os\nimport io\nimport re\nimport string\nimport subprocess\nimport sys\nimport types\nimport prompt_toolkit\nfrom prompt_toolkit.validation import Validator, ValidationError\nimport yaml\n\nsdlog = logging.getLogger(__name__)\nRELEASE_KEY = '22245C81E3BAEB4138B36061310F561200F4AD77'\n\n\nclass FingerprintException(Exception):\n pass\n\n\nclass JournalistAlertEmailException(Exception):\n pass\n\n\nclass SiteConfig(object):\n\n class ValidateNotEmpty(Validator):\n def validate(self, document):\n if document.text != '':\n return True\n raise ValidationError(\n message=\"Must not be an empty string\")\n\n class ValidateTime(Validator):\n def validate(self, document):\n if document.text.isdigit() and int(document.text) in range(0, 24):\n return True\n raise ValidationError(\n message=\"Must be an integer between 0 and 23\")\n\n class ValidateUser(Validator):\n def validate(self, document):\n text = document.text\n if text != '' and text != 'root' and text != 'amnesia':\n return True\n raise ValidationError(\n message=\"Must not be root, amnesia or an empty string\")\n\n class ValidateIP(Validator):\n def validate(self, document):\n if re.match('((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\\.|$)){4}$',\n document.text):\n return True\n raise ValidationError(\n message=\"An IP address must be something like 10.240.20.83\")\n\n class ValidateDNS(Validator):\n def validate(self):\n raise Exception() # pragma: no cover\n\n def is_tails(self):\n try:\n id = subprocess.check_output('lsb_release --id --short',\n shell=True).strip()\n except subprocess.CalledProcessError:\n id = None\n return id == 'Tails'\n\n def lookup_fqdn(self, fqdn, dns=None):\n cmd = 'host -W=10 -T -4 ' + fqdn\n if self.is_tails():\n cmd = 'torify ' + cmd\n cmd += ' ' + (dns and dns or '8.8.8.8')\n try:\n result = subprocess.check_output(cmd.split(' '),\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n result = e.output\n sdlog.debug(cmd + ' => ' + result)\n return 'has address' in result\n\n class ValidateDNSServer(ValidateDNS):\n def validate(self, document):\n if self.lookup_fqdn('gnu.org', document.text):\n return True\n raise ValidationError(\n message='Unable to resolve gnu.org using this DNS')\n\n class ValidateFQDN(ValidateDNS):\n def validate(self, document):\n if self.lookup_fqdn(document.text):\n return True\n raise ValidationError(\n message='Unable to resolve ' + document.text)\n\n class ValidatePath(Validator):\n def __init__(self, basedir):\n self.basedir = basedir\n super(SiteConfig.ValidatePath, self).__init__()\n\n 
def validate(self, document):\n if document.text == '':\n raise ValidationError(\n message='an existing file name is required')\n path = os.path.join(self.basedir, document.text)\n if os.path.exists(path):\n return True\n raise ValidationError(\n message=path + ' file does not exist')\n\n class ValidateOptionalPath(ValidatePath):\n def validate(self, document):\n if document.text == '':\n return True\n return super(SiteConfig.ValidateOptionalPath, self).validate(\n document)\n\n class ValidateYesNo(Validator):\n def validate(self, document):\n text = document.text.lower()\n if text == 'yes' or text == 'no':\n return True\n raise ValidationError(message=\"Must be either yes or no\")\n\n class ValidateFingerprint(Validator):\n def validate(self, document):\n text = document.text.replace(' ', '')\n if text == '65A1B5FF195B56353CC63DFFCC40EF1228271441':\n raise ValidationError(\n message='This is the TEST journalist fingerprint')\n if text == '600BC6D5142C68F35DDBCEA87B597104EDDDC102':\n raise ValidationError(\n message='This is the TEST admin fingerprint')\n if not re.match('[a-fA-F0-9]{40}$', text):\n raise ValidationError(\n message='fingerprints must be 40 hexadecimal characters')\n return True\n\n class ValidateOptionalFingerprint(ValidateFingerprint):\n def validate(self, document):\n if document.text == '':\n return True\n return super(SiteConfig.ValidateOptionalFingerprint,\n self).validate(document)\n\n class ValidateInt(Validator):\n def validate(self, document):\n if re.match('\\d+$', document.text):\n return True\n raise ValidationError(message=\"Must be an integer\")\n\n class Locales(object):\n def __init__(self, appdir):\n self.translation_dir = os.path.realpath(\n os.path.join(appdir, 'translations'))\n\n def get_translations(self):\n translations = set(['en_US'])\n for dirname in os.listdir(self.translation_dir):\n if dirname != 'messages.pot':\n translations.add(dirname)\n return translations\n\n class ValidateLocales(Validator):\n def __init__(self, basedir):\n self.basedir = basedir\n super(SiteConfig.ValidateLocales, self).__init__()\n\n def validate(self, document):\n desired = document.text.split()\n existing = SiteConfig.Locales(self.basedir).get_translations()\n missing = set(desired) - set(existing)\n if not missing:\n return True\n raise ValidationError(\n message=\"The following locales do not exist \" + \" \".join(\n missing))\n\n class ValidateOSSECUsername(Validator):\n def validate(self, document):\n text = document.text\n if text and '@' not in text and 'test' != text:\n return True\n raise ValidationError(\n message=\"The SASL username should not include the domain name\")\n\n class ValidateOSSECPassword(Validator):\n def validate(self, document):\n text = document.text\n if len(text) >= 8 and 'password123' != text:\n return True\n raise ValidationError(\n message=\"Password for OSSEC email account must be strong\")\n\n class ValidateEmail(Validator):\n def validate(self, document):\n text = document.text\n if text == '':\n raise ValidationError(\n message=(\"Must not be empty\"))\n if '@' not in text:\n raise ValidationError(\n message=(\"Must contain a @\"))\n return True\n\n class ValidateOSSECEmail(ValidateEmail):\n def validate(self, document):\n super(SiteConfig.ValidateOSSECEmail, self).validate(document)\n text = document.text\n if '[email protected]' != text:\n return True\n raise ValidationError(\n message=(\"Must be set to something other than \"\n \"[email protected]\"))\n\n class ValidateOptionalEmail(ValidateEmail):\n def validate(self, 
document):\n if document.text == '':\n return True\n return super(SiteConfig.ValidateOptionalEmail, self).validate(\n document)\n\n def __init__(self, args):\n self.args = args\n self.config = {}\n translations = SiteConfig.Locales(\n self.args.app_path).get_translations()\n translations = \" \".join(translations)\n self.desc = [\n ['ssh_users', 'sd', str,\n u'Username for SSH access to the servers',\n SiteConfig.ValidateUser(),\n None],\n ['daily_reboot_time', 4, int,\n u'Daily reboot time of the server (24-hour clock)',\n SiteConfig.ValidateTime(),\n int],\n ['app_ip', '10.20.2.2', str,\n u'Local IPv4 address for the Application Server',\n SiteConfig.ValidateIP(),\n None],\n ['monitor_ip', '10.20.3.2', str,\n u'Local IPv4 address for the Monitor Server',\n SiteConfig.ValidateIP(),\n None],\n ['app_hostname', 'app', str,\n u'Hostname for Application Server',\n SiteConfig.ValidateNotEmpty(),\n None],\n ['monitor_hostname', 'mon', str,\n u'Hostname for Monitor Server',\n SiteConfig.ValidateNotEmpty(),\n None],\n ['dns_server', '8.8.8.8', str,\n u'DNS server specified during installation',\n SiteConfig.ValidateNotEmpty(),\n None],\n ['securedrop_app_gpg_public_key', 'SecureDrop.asc', str,\n u'Local filepath to public key for '\n 'SecureDrop Application GPG public key',\n SiteConfig.ValidatePath(self.args.ansible_path),\n None],\n ['securedrop_app_https_on_source_interface', False, bool,\n u'Whether HTTPS should be enabled on '\n 'Source Interface (requires EV cert)',\n SiteConfig.ValidateYesNo(),\n lambda x: x.lower() == 'yes'],\n ['securedrop_app_https_certificate_cert_src', '', str,\n u'Local filepath to HTTPS certificate '\n '(optional, only if using HTTPS on source interface)',\n SiteConfig.ValidateOptionalPath(self.args.ansible_path),\n None],\n ['securedrop_app_https_certificate_key_src', '', str,\n u'Local filepath to HTTPS certificate key '\n '(optional, only if using HTTPS on source interface)',\n SiteConfig.ValidateOptionalPath(self.args.ansible_path),\n None],\n ['securedrop_app_https_certificate_chain_src', '', str,\n u'Local filepath to HTTPS certificate chain file '\n '(optional, only if using HTTPS on source interface)',\n SiteConfig.ValidateOptionalPath(self.args.ansible_path),\n None],\n ['securedrop_app_gpg_fingerprint', '', str,\n u'Full fingerprint for the SecureDrop Application GPG Key',\n SiteConfig.ValidateFingerprint(),\n self.sanitize_fingerprint],\n ['ossec_alert_gpg_public_key', 'ossec.pub', str,\n u'Local filepath to OSSEC alerts GPG public key',\n SiteConfig.ValidatePath(self.args.ansible_path),\n None],\n ['ossec_gpg_fpr', '', str,\n u'Full fingerprint for the OSSEC alerts GPG public key',\n SiteConfig.ValidateFingerprint(),\n self.sanitize_fingerprint],\n ['ossec_alert_email', '', str,\n u'Admin email address for receiving OSSEC alerts',\n SiteConfig.ValidateOSSECEmail(),\n None],\n ['journalist_alert_gpg_public_key', '', str,\n u'Local filepath to journalist alerts GPG public key (optional)',\n SiteConfig.ValidateOptionalPath(self.args.ansible_path),\n None],\n ['journalist_gpg_fpr', '', str,\n u'Full fingerprint for the journalist alerts '\n u'GPG public key (optional)',\n SiteConfig.ValidateOptionalFingerprint(),\n self.sanitize_fingerprint],\n ['journalist_alert_email', '', str,\n u'Email address for receiving journalist alerts (optional)',\n SiteConfig.ValidateOptionalEmail(),\n None],\n ['smtp_relay', \"smtp.gmail.com\", str,\n u'SMTP relay for sending OSSEC alerts',\n SiteConfig.ValidateNotEmpty(),\n None],\n ['smtp_relay_port', 587, int,\n u'SMTP port 
for sending OSSEC alerts',\n SiteConfig.ValidateInt(),\n int],\n ['sasl_domain', \"gmail.com\", str,\n u'SASL domain for sending OSSEC alerts',\n None,\n None],\n ['sasl_username', '', str,\n u'SASL username for sending OSSEC alerts',\n SiteConfig.ValidateOSSECUsername(),\n None],\n ['sasl_password', '', str,\n u'SASL password for sending OSSEC alerts',\n SiteConfig.ValidateOSSECPassword(),\n None],\n ['enable_ssh_over_tor', True, bool,\n u'Enable SSH over Tor (recommended, disables SSH over LAN). '\n u'If you respond no, SSH will be available over LAN only',\n SiteConfig.ValidateYesNo(),\n lambda x: x.lower() == 'yes'],\n ['securedrop_supported_locales', [], types.ListType,\n u'Space separated list of additional locales to support '\n '(' + translations + ')',\n SiteConfig.ValidateLocales(self.args.app_path),\n string.split],\n ]\n\n def load_and_update_config(self):\n if self.exists():\n self.config = self.load()\n\n return self.update_config()\n\n def update_config(self):\n self.config.update(self.user_prompt_config())\n self.save()\n self.validate_gpg_keys()\n self.validate_journalist_alert_email()\n return True\n\n def user_prompt_config(self):\n config = {}\n for desc in self.desc:\n (var, default, type, prompt, validator, transform) = desc\n if var == 'journalist_gpg_fpr':\n if not config.get('journalist_alert_gpg_public_key',\n None):\n config[var] = ''\n continue\n if var == 'journalist_alert_email':\n if not config.get('journalist_alert_gpg_public_key',\n None):\n config[var] = ''\n continue\n config[var] = self.user_prompt_config_one(desc,\n self.config.get(var))\n return config\n\n def user_prompt_config_one(self, desc, from_config):\n (var, default, type, prompt, validator, transform) = desc\n if from_config is not None:\n default = from_config\n prompt += ': '\n return self.validated_input(prompt, default, validator, transform)\n\n def validated_input(self, prompt, default, validator, transform):\n if type(default) is bool:\n default = default and 'yes' or 'no'\n if type(default) is int:\n default = str(default)\n if isinstance(default, types.ListType):\n default = \" \".join(default)\n if type(default) is not str:\n default = str(default)\n kwargs = {}\n if validator:\n kwargs['validator'] = validator\n value = prompt_toolkit.prompt(prompt,\n default=unicode(default, 'utf-8'),\n **kwargs)\n if transform:\n return transform(value)\n else:\n return value\n\n def sanitize_fingerprint(self, value):\n return value.upper().replace(' ', '')\n\n def validate_gpg_keys(self):\n keys = (('securedrop_app_gpg_public_key',\n 'securedrop_app_gpg_fingerprint'),\n\n ('ossec_alert_gpg_public_key',\n 'ossec_gpg_fpr'),\n\n ('journalist_alert_gpg_public_key',\n 'journalist_gpg_fpr'))\n validate = os.path.join(\n os.path.dirname(__file__), '..', 'bin',\n 'validate-gpg-key.sh')\n for (public_key, fingerprint) in keys:\n if (self.config[public_key] == '' and\n self.config[fingerprint] == ''):\n continue\n public_key = os.path.join(self.args.ansible_path,\n self.config[public_key])\n fingerprint = self.config[fingerprint]\n try:\n sdlog.debug(subprocess.check_output(\n [validate, public_key, fingerprint],\n stderr=subprocess.STDOUT))\n except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n raise FingerprintException(\n \"fingerprint {} \".format(fingerprint) +\n \"does not match \" +\n \"the public key {}\".format(public_key))\n return True\n\n def validate_journalist_alert_email(self):\n if (self.config['journalist_alert_gpg_public_key'] == '' and\n self.config['journalist_gpg_fpr'] 
== ''):\n return True\n\n class Document(object):\n def __init__(self, text):\n self.text = text\n\n try:\n SiteConfig.ValidateEmail().validate(Document(\n self.config['journalist_alert_email']))\n except ValidationError as e:\n raise JournalistAlertEmailException(\n \"journalist alerts email: \" + e.message)\n return True\n\n def exists(self):\n return os.path.exists(self.args.site_config)\n\n def save(self):\n with io.open(self.args.site_config, 'w') as site_config_file:\n yaml.safe_dump(self.config,\n site_config_file,\n default_flow_style=False)\n\n def load(self):\n try:\n with io.open(self.args.site_config) as site_config_file:\n return yaml.safe_load(site_config_file)\n except IOError:\n sdlog.error(\"Config file missing, re-run with sdconfig\")\n raise\n except yaml.YAMLError:\n sdlog.error(\"There was an issue processing {}\".format(\n self.args.site_config))\n raise\n\n\ndef setup_logger(verbose=False):\n \"\"\" Configure logging handler \"\"\"\n # Set default level on parent\n sdlog.setLevel(logging.DEBUG)\n level = logging.DEBUG if verbose else logging.INFO\n\n stdout = logging.StreamHandler(sys.stdout)\n stdout.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))\n stdout.setLevel(level)\n sdlog.addHandler(stdout)\n\n\ndef sdconfig(args):\n \"\"\"Configure SD site settings\"\"\"\n SiteConfig(args).load_and_update_config()\n\n\ndef install_securedrop(args):\n \"\"\"Install/Update SecureDrop\"\"\"\n SiteConfig(args).load()\n\n sdlog.info(\"Now installing SecureDrop on remote servers.\")\n sdlog.info(\"You will be prompted for the sudo password on the \"\n \"servers.\")\n sdlog.info(\"The sudo password is only necessary during initial \"\n \"installation.\")\n return subprocess.check_call([os.path.join(args.ansible_path,\n 'securedrop-prod.yml'), '--ask-become-pass'],\n cwd=args.ansible_path)\n\n\ndef backup_securedrop(args):\n \"\"\"Perform backup of the SecureDrop Application Server.\n Creates a tarball of submissions and server config, and fetches\n back to the Admin Workstation. Future `restore` actions can be performed\n with the backup tarball.\"\"\"\n sdlog.info(\"Backing up the SecureDrop Application Server\")\n ansible_cmd = [\n 'ansible-playbook',\n os.path.join(args.ansible_path, 'securedrop-backup.yml'),\n ]\n return subprocess.check_call(ansible_cmd, cwd=args.ansible_path)\n\n\ndef restore_securedrop(args):\n \"\"\"Perform restore of the SecureDrop Application Server.\n Requires a tarball of submissions and server config, created via\n the `backup` action.\"\"\"\n sdlog.info(\"Restoring the SecureDrop Application Server from backup\")\n # Canonicalize filepath to backup tarball, so Ansible sees only the\n # basename. 
The files must live in args.ansible_path,\n # but the securedrop-admin\n # script will be invoked from the repo root, so preceding dirs are likely.\n restore_file_basename = os.path.basename(args.restore_file)\n ansible_cmd = [\n 'ansible-playbook',\n os.path.join(args.ansible_path, 'securedrop-restore.yml'),\n '-e',\n \"restore_file='{}'\".format(restore_file_basename),\n ]\n return subprocess.check_call(ansible_cmd, cwd=args.ansible_path)\n\n\ndef run_tails_config(args):\n \"\"\"Configure Tails environment post SD install\"\"\"\n sdlog.info(\"Configuring Tails workstation environment\")\n sdlog.info((\"You'll be prompted for the temporary Tails admin password,\"\n \" which was set on Tails login screen\"))\n ansible_cmd = [\n os.path.join(args.ansible_path, 'securedrop-tails.yml'),\n \"--ask-become-pass\",\n # Passing an empty inventory file to override the automatic dynamic\n # inventory script, which fails if no site vars are configured.\n '-i', '/dev/null',\n ]\n return subprocess.check_call(ansible_cmd,\n cwd=args.ansible_path)\n\n\ndef check_for_updates_wrapper(args):\n res, tag = check_for_updates(args)\n # Because the command worked properly exit with 0.\n return 0\n\n\ndef check_for_updates(args):\n \"\"\"Check for SecureDrop updates\"\"\"\n sdlog.info(\"Checking for SecureDrop updates...\")\n\n # Determine what branch we are on\n current_tag = subprocess.check_output(['git', 'describe'], cwd=args.root)\n\n # Fetch all branches\n git_fetch_cmd = ['git', 'fetch', '--all']\n subprocess.check_call(git_fetch_cmd, cwd=args.root)\n\n # Get latest tag\n git_all_tags = [\"git\", \"tag\"]\n all_tags = subprocess.check_output(git_all_tags,\n cwd=args.root).rstrip('\\n').split('\\n')\n\n # Do not check out any release candidate tags\n all_prod_tags = [x for x in all_tags if 'rc' not in x]\n\n latest_tag = all_prod_tags[-1]\n\n if current_tag != latest_tag:\n sdlog.info(\"Update needed\")\n return True, latest_tag\n sdlog.info(\"All updates applied\")\n return False, latest_tag\n\n\ndef get_release_key_from_keyserver(args, keyserver=None, timeout=45):\n gpg_recv = ['timeout', str(timeout), 'gpg', '--recv-key']\n release_key = [RELEASE_KEY]\n\n # We construct the gpg --recv-key command based on optional keyserver arg.\n if keyserver:\n get_key_cmd = gpg_recv + ['--keyserver', keyserver] + release_key\n else:\n get_key_cmd = gpg_recv + release_key\n\n subprocess.check_call(get_key_cmd, cwd=args.root)\n\n\ndef update(args):\n \"\"\"Verify, and apply latest SecureDrop workstation update\"\"\"\n sdlog.info(\"Applying SecureDrop updates...\")\n\n update_status, latest_tag = check_for_updates(args)\n\n if not update_status:\n # Exit if we're up to date\n return 0\n\n sdlog.info(\"Verifying signature on latest update...\")\n\n try:\n # First try to get the release key using Tails default keyserver\n get_release_key_from_keyserver(args)\n except subprocess.CalledProcessError:\n # Now try to get the key from a secondary keyserver.\n secondary_keyserver = 'hkps://hkps.pool.sks-keyservers.net'\n get_release_key_from_keyserver(args,\n keyserver=secondary_keyserver)\n\n git_verify_tag_cmd = ['git', 'tag', '-v', latest_tag]\n try:\n sig_result = subprocess.check_output(git_verify_tag_cmd,\n stderr=subprocess.STDOUT,\n cwd=args.root)\n\n good_sig_text = 'Good signature from \"SecureDrop Release Signing Key\"'\n bad_sig_text = 'BAD signature'\n # To ensure that an adversary cannot name a malicious key good_sig_text\n # we check that bad_sig_text does not appear and that the release key\n # appears on the 
second line of the output.\n gpg_lines = sig_result.split('\\n')\n if RELEASE_KEY in gpg_lines[1] and \\\n sig_result.count(good_sig_text) == 1 and \\\n bad_sig_text not in sig_result:\n sdlog.info(\"Signature verification successful.\")\n else: # If anything else happens, fail and exit 1\n sdlog.info(\"Signature verification failed.\")\n return 1\n\n except subprocess.CalledProcessError:\n # If there is no signature, or if the signature does not verify,\n # then git tag -v exits subprocess.check_output will exit 1\n # and subprocess.check_output will throw a CalledProcessError\n sdlog.info(\"Signature verification failed.\")\n return 1\n\n # Only if the proper signature verifies do we check out the latest\n git_checkout_cmd = ['git', 'checkout', latest_tag]\n subprocess.check_call(git_checkout_cmd, cwd=args.root)\n\n sdlog.info(\"Updated to SecureDrop {}.\".format(latest_tag))\n return 0\n\n\ndef get_logs(args):\n \"\"\"Get logs for forensics and debugging purposes\"\"\"\n sdlog.info(\"Gathering logs for forensics and debugging\")\n ansible_cmd = [\n 'ansible-playbook',\n os.path.join(args.ansible_path, 'securedrop-logs.yml'),\n ]\n subprocess.check_call(ansible_cmd, cwd=args.ansible_path)\n sdlog.info(\"Encrypt logs and send to [email protected] or upload \"\n \"to the SecureDrop support portal.\")\n return 0\n\n\ndef set_default_paths(args):\n if not args.ansible_path:\n args.ansible_path = args.root + \"/install_files/ansible-base\"\n args.ansible_path = os.path.realpath(args.ansible_path)\n if not args.site_config:\n args.site_config = args.ansible_path + \"/group_vars/all/site-specific\"\n args.site_config = os.path.realpath(args.site_config)\n if not args.app_path:\n args.app_path = args.root + \"/securedrop\"\n args.app_path = os.path.realpath(args.app_path)\n return args\n\n\ndef parse_argv(argv):\n class ArgParseFormatterCombo(argparse.ArgumentDefaultsHelpFormatter,\n argparse.RawTextHelpFormatter):\n \"\"\"Needed to combine formatting classes for help output\"\"\"\n pass\n\n parser = argparse.ArgumentParser(description=__doc__,\n formatter_class=ArgParseFormatterCombo)\n parser.add_argument('-v', action='store_true', default=False,\n help=\"Increase verbosity on output\")\n parser.add_argument('-d', action='store_true', default=False,\n help=\"Developer mode. 
Not to be used in production.\")\n parser.add_argument('--root', required=True,\n help=\"path to the root of the SecureDrop repository\")\n parser.add_argument('--site-config',\n help=\"path to the YAML site configuration file\")\n parser.add_argument('--ansible-path',\n help=\"path to the Ansible root\")\n parser.add_argument('--app-path',\n help=\"path to the SecureDrop application root\")\n subparsers = parser.add_subparsers()\n\n parse_sdconfig = subparsers.add_parser('sdconfig', help=sdconfig.__doc__)\n parse_sdconfig.set_defaults(func=sdconfig)\n\n parse_install = subparsers.add_parser('install',\n help=install_securedrop.__doc__)\n parse_install.set_defaults(func=install_securedrop)\n\n parse_tailsconfig = subparsers.add_parser('tailsconfig',\n help=run_tails_config.__doc__)\n parse_tailsconfig.set_defaults(func=run_tails_config)\n\n parse_backup = subparsers.add_parser('backup',\n help=backup_securedrop.__doc__)\n parse_backup.set_defaults(func=backup_securedrop)\n\n parse_restore = subparsers.add_parser('restore',\n help=restore_securedrop.__doc__)\n parse_restore.set_defaults(func=restore_securedrop)\n parse_restore.add_argument(\"restore_file\")\n\n parse_update = subparsers.add_parser('update', help=update.__doc__)\n parse_update.set_defaults(func=update)\n\n parse_check_updates = subparsers.add_parser('check_for_updates',\n help=check_for_updates.__doc__)\n parse_check_updates.set_defaults(func=check_for_updates_wrapper)\n\n parse_logs = subparsers.add_parser('logs',\n help=get_logs.__doc__)\n parse_logs.set_defaults(func=get_logs)\n\n return set_default_paths(parser.parse_args(argv))\n\n\ndef main(argv):\n args = parse_argv(argv)\n setup_logger(args.v)\n if args.v:\n return_code = args.func(args)\n sys.exit(return_code)\n else:\n try:\n return_code = args.func(args)\n except KeyboardInterrupt:\n sys.exit(-1)\n except Exception as e:\n raise SystemExit(\n 'ERROR (run with -v for more): {msg}'.format(msg=e))\n else:\n sys.exit(return_code)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n", "path": "admin/securedrop_admin/__init__.py" } ]
[ { "content": "# -*- mode: python; coding: utf-8 -*-\n#\n# Copyright (C) 2013-2018 Freedom of the Press Foundation & al\n# Copyright (C) 2018 Loic Dachary <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\"\"\"\nSecureDrop Admin Toolkit.\n\nFor use by administrators to install, maintain, and manage their SD\ninstances.\n\"\"\"\n\nimport argparse\nimport logging\nimport os\nimport io\nimport re\nimport string\nimport subprocess\nimport sys\nimport types\nimport prompt_toolkit\nfrom prompt_toolkit.validation import Validator, ValidationError\nimport yaml\n\nsdlog = logging.getLogger(__name__)\nRELEASE_KEY = '22245C81E3BAEB4138B36061310F561200F4AD77'\n\n\nclass FingerprintException(Exception):\n pass\n\n\nclass JournalistAlertEmailException(Exception):\n pass\n\n\nclass SiteConfig(object):\n\n class ValidateNotEmpty(Validator):\n def validate(self, document):\n if document.text != '':\n return True\n raise ValidationError(\n message=\"Must not be an empty string\")\n\n class ValidateTime(Validator):\n def validate(self, document):\n if document.text.isdigit() and int(document.text) in range(0, 24):\n return True\n raise ValidationError(\n message=\"Must be an integer between 0 and 23\")\n\n class ValidateUser(Validator):\n def validate(self, document):\n text = document.text\n if text != '' and text != 'root' and text != 'amnesia':\n return True\n raise ValidationError(\n message=\"Must not be root, amnesia or an empty string\")\n\n class ValidateIP(Validator):\n def validate(self, document):\n if re.match('((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\\.|$)){4}$',\n document.text):\n return True\n raise ValidationError(\n message=\"An IP address must be something like 10.240.20.83\")\n\n class ValidateDNS(Validator):\n def validate(self):\n raise Exception() # pragma: no cover\n\n def is_tails(self):\n try:\n id = subprocess.check_output('lsb_release --id --short',\n shell=True).strip()\n except subprocess.CalledProcessError:\n id = None\n return id == 'Tails'\n\n def lookup_fqdn(self, fqdn, dns=None):\n cmd = 'host -W=10 -T -4 ' + fqdn\n if self.is_tails():\n cmd = 'torify ' + cmd\n cmd += ' ' + (dns and dns or '8.8.8.8')\n try:\n result = subprocess.check_output(cmd.split(' '),\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n result = e.output\n sdlog.debug(cmd + ' => ' + result)\n return 'has address' in result\n\n class ValidateDNSServer(ValidateDNS):\n def validate(self, document):\n if self.lookup_fqdn('gnu.org', document.text):\n return True\n raise ValidationError(\n message='Unable to resolve gnu.org using this DNS')\n\n class ValidateFQDN(ValidateDNS):\n def validate(self, document):\n if self.lookup_fqdn(document.text):\n return True\n raise ValidationError(\n message='Unable to resolve ' + document.text)\n\n class ValidatePath(Validator):\n def __init__(self, basedir):\n self.basedir = basedir\n super(SiteConfig.ValidatePath, self).__init__()\n\n 
def validate(self, document):\n if document.text == '':\n raise ValidationError(\n message='an existing file name is required')\n path = os.path.join(self.basedir, document.text)\n if os.path.exists(path):\n return True\n raise ValidationError(\n message=path + ' file does not exist')\n\n class ValidateOptionalPath(ValidatePath):\n def validate(self, document):\n if document.text == '':\n return True\n return super(SiteConfig.ValidateOptionalPath, self).validate(\n document)\n\n class ValidateYesNo(Validator):\n def validate(self, document):\n text = document.text.lower()\n if text == 'yes' or text == 'no':\n return True\n raise ValidationError(message=\"Must be either yes or no\")\n\n class ValidateFingerprint(Validator):\n def validate(self, document):\n text = document.text.replace(' ', '')\n if text == '65A1B5FF195B56353CC63DFFCC40EF1228271441':\n raise ValidationError(\n message='This is the TEST journalist fingerprint')\n if text == '600BC6D5142C68F35DDBCEA87B597104EDDDC102':\n raise ValidationError(\n message='This is the TEST admin fingerprint')\n if not re.match('[a-fA-F0-9]{40}$', text):\n raise ValidationError(\n message='fingerprints must be 40 hexadecimal characters')\n return True\n\n class ValidateOptionalFingerprint(ValidateFingerprint):\n def validate(self, document):\n if document.text == '':\n return True\n return super(SiteConfig.ValidateOptionalFingerprint,\n self).validate(document)\n\n class ValidateInt(Validator):\n def validate(self, document):\n if re.match('\\d+$', document.text):\n return True\n raise ValidationError(message=\"Must be an integer\")\n\n class Locales(object):\n def __init__(self, appdir):\n self.translation_dir = os.path.realpath(\n os.path.join(appdir, 'translations'))\n\n def get_translations(self):\n translations = set(['en_US'])\n for dirname in os.listdir(self.translation_dir):\n if dirname != 'messages.pot':\n translations.add(dirname)\n return translations\n\n class ValidateLocales(Validator):\n def __init__(self, basedir):\n self.basedir = basedir\n super(SiteConfig.ValidateLocales, self).__init__()\n\n def validate(self, document):\n desired = document.text.split()\n existing = SiteConfig.Locales(self.basedir).get_translations()\n missing = set(desired) - set(existing)\n if not missing:\n return True\n raise ValidationError(\n message=\"The following locales do not exist \" + \" \".join(\n missing))\n\n class ValidateOSSECUsername(Validator):\n def validate(self, document):\n text = document.text\n if text and '@' not in text and 'test' != text:\n return True\n raise ValidationError(\n message=\"The SASL username should not include the domain name\")\n\n class ValidateOSSECPassword(Validator):\n def validate(self, document):\n text = document.text\n if len(text) >= 8 and 'password123' != text:\n return True\n raise ValidationError(\n message=\"Password for OSSEC email account must be strong\")\n\n class ValidateEmail(Validator):\n def validate(self, document):\n text = document.text\n if text == '':\n raise ValidationError(\n message=(\"Must not be empty\"))\n if '@' not in text:\n raise ValidationError(\n message=(\"Must contain a @\"))\n return True\n\n class ValidateOSSECEmail(ValidateEmail):\n def validate(self, document):\n super(SiteConfig.ValidateOSSECEmail, self).validate(document)\n text = document.text\n if '[email protected]' != text:\n return True\n raise ValidationError(\n message=(\"Must be set to something other than \"\n \"[email protected]\"))\n\n class ValidateOptionalEmail(ValidateEmail):\n def validate(self, 
document):\n if document.text == '':\n return True\n return super(SiteConfig.ValidateOptionalEmail, self).validate(\n document)\n\n def __init__(self, args):\n self.args = args\n self.config = {}\n translations = SiteConfig.Locales(\n self.args.app_path).get_translations()\n translations = \" \".join(translations)\n self.desc = [\n ['ssh_users', 'sd', str,\n u'Username for SSH access to the servers',\n SiteConfig.ValidateUser(),\n None],\n ['daily_reboot_time', 4, int,\n u'Daily reboot time of the server (24-hour clock)',\n SiteConfig.ValidateTime(),\n int],\n ['app_ip', '10.20.2.2', str,\n u'Local IPv4 address for the Application Server',\n SiteConfig.ValidateIP(),\n None],\n ['monitor_ip', '10.20.3.2', str,\n u'Local IPv4 address for the Monitor Server',\n SiteConfig.ValidateIP(),\n None],\n ['app_hostname', 'app', str,\n u'Hostname for Application Server',\n SiteConfig.ValidateNotEmpty(),\n None],\n ['monitor_hostname', 'mon', str,\n u'Hostname for Monitor Server',\n SiteConfig.ValidateNotEmpty(),\n None],\n ['dns_server', '8.8.8.8', str,\n u'DNS server specified during installation',\n SiteConfig.ValidateNotEmpty(),\n None],\n ['securedrop_app_gpg_public_key', 'SecureDrop.asc', str,\n u'Local filepath to public key for '\n 'SecureDrop Application GPG public key',\n SiteConfig.ValidatePath(self.args.ansible_path),\n None],\n ['securedrop_app_https_on_source_interface', False, bool,\n u'Whether HTTPS should be enabled on '\n 'Source Interface (requires EV cert)',\n SiteConfig.ValidateYesNo(),\n lambda x: x.lower() == 'yes'],\n ['securedrop_app_https_certificate_cert_src', '', str,\n u'Local filepath to HTTPS certificate '\n '(optional, only if using HTTPS on source interface)',\n SiteConfig.ValidateOptionalPath(self.args.ansible_path),\n None],\n ['securedrop_app_https_certificate_key_src', '', str,\n u'Local filepath to HTTPS certificate key '\n '(optional, only if using HTTPS on source interface)',\n SiteConfig.ValidateOptionalPath(self.args.ansible_path),\n None],\n ['securedrop_app_https_certificate_chain_src', '', str,\n u'Local filepath to HTTPS certificate chain file '\n '(optional, only if using HTTPS on source interface)',\n SiteConfig.ValidateOptionalPath(self.args.ansible_path),\n None],\n ['securedrop_app_gpg_fingerprint', '', str,\n u'Full fingerprint for the SecureDrop Application GPG Key',\n SiteConfig.ValidateFingerprint(),\n self.sanitize_fingerprint],\n ['ossec_alert_gpg_public_key', 'ossec.pub', str,\n u'Local filepath to OSSEC alerts GPG public key',\n SiteConfig.ValidatePath(self.args.ansible_path),\n None],\n ['ossec_gpg_fpr', '', str,\n u'Full fingerprint for the OSSEC alerts GPG public key',\n SiteConfig.ValidateFingerprint(),\n self.sanitize_fingerprint],\n ['ossec_alert_email', '', str,\n u'Admin email address for receiving OSSEC alerts',\n SiteConfig.ValidateOSSECEmail(),\n None],\n ['journalist_alert_gpg_public_key', '', str,\n u'Local filepath to journalist alerts GPG public key (optional)',\n SiteConfig.ValidateOptionalPath(self.args.ansible_path),\n None],\n ['journalist_gpg_fpr', '', str,\n u'Full fingerprint for the journalist alerts '\n u'GPG public key (optional)',\n SiteConfig.ValidateOptionalFingerprint(),\n self.sanitize_fingerprint],\n ['journalist_alert_email', '', str,\n u'Email address for receiving journalist alerts (optional)',\n SiteConfig.ValidateOptionalEmail(),\n None],\n ['smtp_relay', \"smtp.gmail.com\", str,\n u'SMTP relay for sending OSSEC alerts',\n SiteConfig.ValidateNotEmpty(),\n None],\n ['smtp_relay_port', 587, int,\n u'SMTP port 
for sending OSSEC alerts',\n SiteConfig.ValidateInt(),\n int],\n ['sasl_domain', \"gmail.com\", str,\n u'SASL domain for sending OSSEC alerts',\n None,\n None],\n ['sasl_username', '', str,\n u'SASL username for sending OSSEC alerts',\n SiteConfig.ValidateOSSECUsername(),\n None],\n ['sasl_password', '', str,\n u'SASL password for sending OSSEC alerts',\n SiteConfig.ValidateOSSECPassword(),\n None],\n ['enable_ssh_over_tor', True, bool,\n u'Enable SSH over Tor (recommended, disables SSH over LAN). '\n u'If you respond no, SSH will be available over LAN only',\n SiteConfig.ValidateYesNo(),\n lambda x: x.lower() == 'yes'],\n ['securedrop_supported_locales', [], types.ListType,\n u'Space separated list of additional locales to support '\n '(' + translations + ')',\n SiteConfig.ValidateLocales(self.args.app_path),\n string.split],\n ]\n\n def load_and_update_config(self):\n if self.exists():\n self.config = self.load()\n\n return self.update_config()\n\n def update_config(self):\n self.config.update(self.user_prompt_config())\n self.save()\n self.validate_gpg_keys()\n self.validate_journalist_alert_email()\n return True\n\n def user_prompt_config(self):\n config = {}\n for desc in self.desc:\n (var, default, type, prompt, validator, transform) = desc\n if var == 'journalist_gpg_fpr':\n if not config.get('journalist_alert_gpg_public_key',\n None):\n config[var] = ''\n continue\n if var == 'journalist_alert_email':\n if not config.get('journalist_alert_gpg_public_key',\n None):\n config[var] = ''\n continue\n config[var] = self.user_prompt_config_one(desc,\n self.config.get(var))\n return config\n\n def user_prompt_config_one(self, desc, from_config):\n (var, default, type, prompt, validator, transform) = desc\n if from_config is not None:\n default = from_config\n prompt += ': '\n return self.validated_input(prompt, default, validator, transform)\n\n def validated_input(self, prompt, default, validator, transform):\n if type(default) is bool:\n default = default and 'yes' or 'no'\n if type(default) is int:\n default = str(default)\n if isinstance(default, types.ListType):\n default = \" \".join(default)\n if type(default) is not str:\n default = str(default)\n kwargs = {}\n if validator:\n kwargs['validator'] = validator\n value = prompt_toolkit.prompt(prompt,\n default=unicode(default, 'utf-8'),\n **kwargs)\n if transform:\n return transform(value)\n else:\n return value\n\n def sanitize_fingerprint(self, value):\n return value.upper().replace(' ', '')\n\n def validate_gpg_keys(self):\n keys = (('securedrop_app_gpg_public_key',\n 'securedrop_app_gpg_fingerprint'),\n\n ('ossec_alert_gpg_public_key',\n 'ossec_gpg_fpr'),\n\n ('journalist_alert_gpg_public_key',\n 'journalist_gpg_fpr'))\n validate = os.path.join(\n os.path.dirname(__file__), '..', 'bin',\n 'validate-gpg-key.sh')\n for (public_key, fingerprint) in keys:\n if (self.config[public_key] == '' and\n self.config[fingerprint] == ''):\n continue\n public_key = os.path.join(self.args.ansible_path,\n self.config[public_key])\n fingerprint = self.config[fingerprint]\n try:\n sdlog.debug(subprocess.check_output(\n [validate, public_key, fingerprint],\n stderr=subprocess.STDOUT))\n except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n raise FingerprintException(\n \"fingerprint {} \".format(fingerprint) +\n \"does not match \" +\n \"the public key {}\".format(public_key))\n return True\n\n def validate_journalist_alert_email(self):\n if (self.config['journalist_alert_gpg_public_key'] == '' and\n self.config['journalist_gpg_fpr'] 
== ''):\n return True\n\n class Document(object):\n def __init__(self, text):\n self.text = text\n\n try:\n SiteConfig.ValidateEmail().validate(Document(\n self.config['journalist_alert_email']))\n except ValidationError as e:\n raise JournalistAlertEmailException(\n \"journalist alerts email: \" + e.message)\n return True\n\n def exists(self):\n return os.path.exists(self.args.site_config)\n\n def save(self):\n with io.open(self.args.site_config, 'w') as site_config_file:\n yaml.safe_dump(self.config,\n site_config_file,\n default_flow_style=False)\n\n def load(self):\n try:\n with io.open(self.args.site_config) as site_config_file:\n return yaml.safe_load(site_config_file)\n except IOError:\n sdlog.error(\"Config file missing, re-run with sdconfig\")\n raise\n except yaml.YAMLError:\n sdlog.error(\"There was an issue processing {}\".format(\n self.args.site_config))\n raise\n\n\ndef setup_logger(verbose=False):\n \"\"\" Configure logging handler \"\"\"\n # Set default level on parent\n sdlog.setLevel(logging.DEBUG)\n level = logging.DEBUG if verbose else logging.INFO\n\n stdout = logging.StreamHandler(sys.stdout)\n stdout.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))\n stdout.setLevel(level)\n sdlog.addHandler(stdout)\n\n\ndef sdconfig(args):\n \"\"\"Configure SD site settings\"\"\"\n SiteConfig(args).load_and_update_config()\n\n\ndef install_securedrop(args):\n \"\"\"Install/Update SecureDrop\"\"\"\n SiteConfig(args).load()\n\n sdlog.info(\"Now installing SecureDrop on remote servers.\")\n sdlog.info(\"You will be prompted for the sudo password on the \"\n \"servers.\")\n sdlog.info(\"The sudo password is only necessary during initial \"\n \"installation.\")\n return subprocess.check_call([os.path.join(args.ansible_path,\n 'securedrop-prod.yml'), '--ask-become-pass'],\n cwd=args.ansible_path)\n\n\ndef backup_securedrop(args):\n \"\"\"Perform backup of the SecureDrop Application Server.\n Creates a tarball of submissions and server config, and fetches\n back to the Admin Workstation. Future `restore` actions can be performed\n with the backup tarball.\"\"\"\n sdlog.info(\"Backing up the SecureDrop Application Server\")\n ansible_cmd = [\n 'ansible-playbook',\n os.path.join(args.ansible_path, 'securedrop-backup.yml'),\n ]\n return subprocess.check_call(ansible_cmd, cwd=args.ansible_path)\n\n\ndef restore_securedrop(args):\n \"\"\"Perform restore of the SecureDrop Application Server.\n Requires a tarball of submissions and server config, created via\n the `backup` action.\"\"\"\n sdlog.info(\"Restoring the SecureDrop Application Server from backup\")\n # Canonicalize filepath to backup tarball, so Ansible sees only the\n # basename. 
The files must live in args.ansible_path,\n # but the securedrop-admin\n # script will be invoked from the repo root, so preceding dirs are likely.\n restore_file_basename = os.path.basename(args.restore_file)\n ansible_cmd = [\n 'ansible-playbook',\n os.path.join(args.ansible_path, 'securedrop-restore.yml'),\n '-e',\n \"restore_file='{}'\".format(restore_file_basename),\n ]\n return subprocess.check_call(ansible_cmd, cwd=args.ansible_path)\n\n\ndef run_tails_config(args):\n \"\"\"Configure Tails environment post SD install\"\"\"\n sdlog.info(\"Configuring Tails workstation environment\")\n sdlog.info((\"You'll be prompted for the temporary Tails admin password,\"\n \" which was set on Tails login screen\"))\n ansible_cmd = [\n os.path.join(args.ansible_path, 'securedrop-tails.yml'),\n \"--ask-become-pass\",\n # Passing an empty inventory file to override the automatic dynamic\n # inventory script, which fails if no site vars are configured.\n '-i', '/dev/null',\n ]\n return subprocess.check_call(ansible_cmd,\n cwd=args.ansible_path)\n\n\ndef check_for_updates_wrapper(args):\n res, tag = check_for_updates(args)\n # Because the command worked properly exit with 0.\n return 0\n\n\ndef check_for_updates(args):\n \"\"\"Check for SecureDrop updates\"\"\"\n sdlog.info(\"Checking for SecureDrop updates...\")\n\n # Determine what branch we are on\n current_tag = subprocess.check_output(['git', 'describe'],\n cwd=args.root).rstrip('\\n')\n\n # Fetch all branches\n git_fetch_cmd = ['git', 'fetch', '--all']\n subprocess.check_call(git_fetch_cmd, cwd=args.root)\n\n # Get latest tag\n git_all_tags = [\"git\", \"tag\"]\n all_tags = subprocess.check_output(git_all_tags,\n cwd=args.root).rstrip('\\n').split('\\n')\n\n # Do not check out any release candidate tags\n all_prod_tags = [x for x in all_tags if 'rc' not in x]\n\n latest_tag = all_prod_tags[-1]\n\n if current_tag != latest_tag:\n sdlog.info(\"Update needed\")\n return True, latest_tag\n sdlog.info(\"All updates applied\")\n return False, latest_tag\n\n\ndef get_release_key_from_keyserver(args, keyserver=None, timeout=45):\n gpg_recv = ['timeout', str(timeout), 'gpg', '--recv-key']\n release_key = [RELEASE_KEY]\n\n # We construct the gpg --recv-key command based on optional keyserver arg.\n if keyserver:\n get_key_cmd = gpg_recv + ['--keyserver', keyserver] + release_key\n else:\n get_key_cmd = gpg_recv + release_key\n\n subprocess.check_call(get_key_cmd, cwd=args.root)\n\n\ndef update(args):\n \"\"\"Verify, and apply latest SecureDrop workstation update\"\"\"\n sdlog.info(\"Applying SecureDrop updates...\")\n\n update_status, latest_tag = check_for_updates(args)\n\n if not update_status:\n # Exit if we're up to date\n return 0\n\n sdlog.info(\"Verifying signature on latest update...\")\n\n try:\n # First try to get the release key using Tails default keyserver\n get_release_key_from_keyserver(args)\n except subprocess.CalledProcessError:\n # Now try to get the key from a secondary keyserver.\n secondary_keyserver = 'hkps://hkps.pool.sks-keyservers.net'\n get_release_key_from_keyserver(args,\n keyserver=secondary_keyserver)\n\n git_verify_tag_cmd = ['git', 'tag', '-v', latest_tag]\n try:\n sig_result = subprocess.check_output(git_verify_tag_cmd,\n stderr=subprocess.STDOUT,\n cwd=args.root)\n\n good_sig_text = 'Good signature from \"SecureDrop Release Signing Key\"'\n bad_sig_text = 'BAD signature'\n # To ensure that an adversary cannot name a malicious key good_sig_text\n # we check that bad_sig_text does not appear and that the release key\n # 
appears on the second line of the output.\n gpg_lines = sig_result.split('\\n')\n if RELEASE_KEY in gpg_lines[1] and \\\n sig_result.count(good_sig_text) == 1 and \\\n bad_sig_text not in sig_result:\n sdlog.info(\"Signature verification successful.\")\n else: # If anything else happens, fail and exit 1\n sdlog.info(\"Signature verification failed.\")\n return 1\n\n except subprocess.CalledProcessError:\n # If there is no signature, or if the signature does not verify,\n # then git tag -v exits subprocess.check_output will exit 1\n # and subprocess.check_output will throw a CalledProcessError\n sdlog.info(\"Signature verification failed.\")\n return 1\n\n # Only if the proper signature verifies do we check out the latest\n git_checkout_cmd = ['git', 'checkout', latest_tag]\n subprocess.check_call(git_checkout_cmd, cwd=args.root)\n\n sdlog.info(\"Updated to SecureDrop {}.\".format(latest_tag))\n return 0\n\n\ndef get_logs(args):\n \"\"\"Get logs for forensics and debugging purposes\"\"\"\n sdlog.info(\"Gathering logs for forensics and debugging\")\n ansible_cmd = [\n 'ansible-playbook',\n os.path.join(args.ansible_path, 'securedrop-logs.yml'),\n ]\n subprocess.check_call(ansible_cmd, cwd=args.ansible_path)\n sdlog.info(\"Encrypt logs and send to [email protected] or upload \"\n \"to the SecureDrop support portal.\")\n return 0\n\n\ndef set_default_paths(args):\n if not args.ansible_path:\n args.ansible_path = args.root + \"/install_files/ansible-base\"\n args.ansible_path = os.path.realpath(args.ansible_path)\n if not args.site_config:\n args.site_config = args.ansible_path + \"/group_vars/all/site-specific\"\n args.site_config = os.path.realpath(args.site_config)\n if not args.app_path:\n args.app_path = args.root + \"/securedrop\"\n args.app_path = os.path.realpath(args.app_path)\n return args\n\n\ndef parse_argv(argv):\n class ArgParseFormatterCombo(argparse.ArgumentDefaultsHelpFormatter,\n argparse.RawTextHelpFormatter):\n \"\"\"Needed to combine formatting classes for help output\"\"\"\n pass\n\n parser = argparse.ArgumentParser(description=__doc__,\n formatter_class=ArgParseFormatterCombo)\n parser.add_argument('-v', action='store_true', default=False,\n help=\"Increase verbosity on output\")\n parser.add_argument('-d', action='store_true', default=False,\n help=\"Developer mode. 
Not to be used in production.\")\n parser.add_argument('--root', required=True,\n help=\"path to the root of the SecureDrop repository\")\n parser.add_argument('--site-config',\n help=\"path to the YAML site configuration file\")\n parser.add_argument('--ansible-path',\n help=\"path to the Ansible root\")\n parser.add_argument('--app-path',\n help=\"path to the SecureDrop application root\")\n subparsers = parser.add_subparsers()\n\n parse_sdconfig = subparsers.add_parser('sdconfig', help=sdconfig.__doc__)\n parse_sdconfig.set_defaults(func=sdconfig)\n\n parse_install = subparsers.add_parser('install',\n help=install_securedrop.__doc__)\n parse_install.set_defaults(func=install_securedrop)\n\n parse_tailsconfig = subparsers.add_parser('tailsconfig',\n help=run_tails_config.__doc__)\n parse_tailsconfig.set_defaults(func=run_tails_config)\n\n parse_backup = subparsers.add_parser('backup',\n help=backup_securedrop.__doc__)\n parse_backup.set_defaults(func=backup_securedrop)\n\n parse_restore = subparsers.add_parser('restore',\n help=restore_securedrop.__doc__)\n parse_restore.set_defaults(func=restore_securedrop)\n parse_restore.add_argument(\"restore_file\")\n\n parse_update = subparsers.add_parser('update', help=update.__doc__)\n parse_update.set_defaults(func=update)\n\n parse_check_updates = subparsers.add_parser('check_for_updates',\n help=check_for_updates.__doc__)\n parse_check_updates.set_defaults(func=check_for_updates_wrapper)\n\n parse_logs = subparsers.add_parser('logs',\n help=get_logs.__doc__)\n parse_logs.set_defaults(func=get_logs)\n\n return set_default_paths(parser.parse_args(argv))\n\n\ndef main(argv):\n args = parse_argv(argv)\n setup_logger(args.v)\n if args.v:\n return_code = args.func(args)\n sys.exit(return_code)\n else:\n try:\n return_code = args.func(args)\n except KeyboardInterrupt:\n sys.exit(-1)\n except Exception as e:\n raise SystemExit(\n 'ERROR (run with -v for more): {msg}'.format(msg=e))\n else:\n sys.exit(return_code)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n", "path": "admin/securedrop_admin/__init__.py" } ]
diff --git a/admin/securedrop_admin/__init__.py b/admin/securedrop_admin/__init__.py index 000b059d0b..5757fa0e93 100755 --- a/admin/securedrop_admin/__init__.py +++ b/admin/securedrop_admin/__init__.py @@ -588,7 +588,8 @@ def check_for_updates(args): sdlog.info("Checking for SecureDrop updates...") # Determine what branch we are on - current_tag = subprocess.check_output(['git', 'describe'], cwd=args.root) + current_tag = subprocess.check_output(['git', 'describe'], + cwd=args.root).rstrip('\n') # Fetch all branches git_fetch_cmd = ['git', 'fetch', '--all'] diff --git a/admin/tests/test_securedrop-admin.py b/admin/tests/test_securedrop-admin.py index b50f3779bd..70ed2eee47 100644 --- a/admin/tests/test_securedrop-admin.py +++ b/admin/tests/test_securedrop-admin.py @@ -66,6 +66,21 @@ def test_check_for_updates_update_needed(self, tmpdir, caplog): assert update_status is True assert tag == '0.6.1' + def test_check_for_updates_ensure_newline_stripped(self, tmpdir, caplog): + """Regression test for #3426""" + git_repo_path = str(tmpdir) + args = argparse.Namespace(root=git_repo_path) + current_tag = "0.6.1\n" + tags_available = "0.6\n0.6-rc1\n0.6.1\n" + + with mock.patch('subprocess.check_call'): + with mock.patch('subprocess.check_output', + side_effect=[current_tag, tags_available]): + update_status, tag = securedrop_admin.check_for_updates(args) + assert "All updates applied" in caplog.text + assert update_status is False + assert tag == '0.6.1' + def test_check_for_updates_update_not_needed(self, tmpdir, caplog): git_repo_path = str(tmpdir) args = argparse.Namespace(root=git_repo_path)
[QA] Tails GUI updater reporting new versions

# Bug

## Description

The 0.7.0 GUI updater reports new versions even when it's running the latest. Just ran a pre-flight check with the 0.7.0 tag, checked out inside a Tails VM. The install portion completed fine. So did `./securedrop-admin tailsconfig`. However, after finishing, it popped up the GUI declaring there were new updates, which there definitely should not be, given that 0.7.0 is the latest release. After a reboot of the Tails VM, the GUI updater displayed again, prompting to install updates. I accepted. Here's the detailed output: https://gist.github.com/conorsch/2e2da8fb909df067b693949474ef945c

## Steps to Reproduce

See above.

## Expected Behavior

0.7.0 is determined to be the latest release; no further prompting.

## Actual Behavior

Prompts for updates even though 0.7.0 is the latest release.

## Comments
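The fix recorded in the diff above boils down to stripping the trailing newline that `git describe` prints before comparing the result against tag names from `git tag`. A minimal sketch of that idea under assumed names (`current_git_tag` is a hypothetical helper for illustration, not the securedrop-admin function itself):

```python
import subprocess


def current_git_tag(repo_root):
    # `git describe` output ends with a trailing newline; without stripping it
    # the tag never compares equal to entries from `git tag`, so the updater
    # always reports "Update needed" even on the latest release.
    raw = subprocess.check_output(['git', 'describe'], cwd=repo_root)
    return raw.decode('utf-8').rstrip('\n')


# The comparison failure mode, with illustrative values:
assert '0.7.0\n' != '0.7.0'                       # unstripped: looks like an update is needed
assert '0.7.0\n'.rstrip('\n') == '0.7.0'          # stripped: matches the latest tag
```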
vyperlang__vyper-2513
[ { "content": "# -*- coding: utf-8 -*-\n\nimport os\nimport subprocess\n\nfrom setuptools import find_packages, setup\n\n__version__ = \"0.3.0\"\n\nextras_require = {\n \"test\": [\n \"pytest>=5.4,<6.0\",\n \"pytest-cov>=2.10,<3.0\",\n \"pytest-instafail>=0.4,<1.0\",\n \"pytest-xdist>=1.32,<2.0\",\n \"eth-tester[py-evm]>=0.5.0b1,<0.6\",\n \"py-evm==0.4.0a4\", # NOTE: temporarily pinned until we have support for py-evm 0.5.0a0+\n \"web3==5.12.3\",\n \"tox>=3.15,<4.0\",\n \"lark-parser==0.10.0\",\n \"hypothesis[lark]>=5.37.1,<6.0\",\n ],\n \"lint\": [\n \"black==21.9b0\",\n \"flake8==3.9.2\",\n \"flake8-bugbear==20.1.4\",\n \"flake8-use-fstring==1.1\",\n \"isort==5.9.3\",\n \"mypy==0.910\",\n ],\n \"docs\": [\"recommonmark\", \"sphinx>=3.0,<4.0\", \"sphinx_rtd_theme>=0.5,<0.6\"],\n \"dev\": [\"ipython\", \"pre-commit\", \"pyinstaller\", \"twine\"],\n}\n\nextras_require[\"dev\"] = (\n extras_require[\"test\"] + extras_require[\"lint\"] + extras_require[\"docs\"] + extras_require[\"dev\"]\n)\n\nhash_file_rel_path = os.path.join(\"vyper\", \"vyper_git_version.txt\")\nhashfile = os.path.relpath(hash_file_rel_path)\n\ntry:\n commithash = subprocess.check_output(\"git rev-parse HEAD\".split())\n commithash_str = commithash.decode(\"utf-8\").strip()\n with open(hashfile, \"w\") as fh:\n fh.write(f\"{__version__}\\n{commithash_str}\")\nexcept subprocess.CalledProcessError:\n pass\n\nwith open(\"README.md\", \"r\") as f:\n long_description = f.read()\n\nsetup(\n name=\"vyper\",\n version=__version__,\n description=\"Vyper: the Pythonic Programming Language for the EVM\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Vyper Team\",\n author_email=\"\",\n url=\"https://github.com/vyperlang/vyper\",\n license=\"Apache License 2.0\",\n keywords=\"ethereum evm smart contract language\",\n include_package_data=True,\n packages=find_packages(exclude=(\"tests\", \"docs\")),\n python_requires=\">=3.7,<3.10\",\n py_modules=[\"vyper\"],\n install_requires=[\n \"asttokens==2.0.4\",\n \"pycryptodome>=3.5.1,<4\",\n \"semantic-version==2.8.5\",\n \"cached-property==1.5.2 ; python_version<'3.8'\",\n ],\n setup_requires=[\"pytest-runner\"],\n tests_require=extras_require[\"test\"],\n extras_require=extras_require,\n entry_points={\n \"console_scripts\": [\n \"vyper=vyper.cli.vyper_compile:_parse_cli_args\",\n \"vyper-serve=vyper.cli.vyper_serve:_parse_cli_args\",\n \"vyper-lll=vyper.cli.vyper_lll:_parse_cli_args\",\n \"vyper-json=vyper.cli.vyper_json:_parse_cli_args\",\n ]\n },\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n data_files=[(\"\", [hash_file_rel_path])],\n)\n", "path": "setup.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\nimport os\nimport subprocess\n\nfrom setuptools import find_packages, setup\n\n__version__ = \"0.3.0\"\n\nextras_require = {\n \"test\": [\n \"pytest>=5.4,<6.0\",\n \"pytest-cov>=2.10,<3.0\",\n \"pytest-instafail>=0.4,<1.0\",\n \"pytest-xdist>=1.32,<2.0\",\n \"eth-tester[py-evm]>=0.5.0b1,<0.6\",\n \"py-evm==0.4.0a4\", # NOTE: temporarily pinned until we have support for py-evm 0.5.0a0+\n \"web3==5.21.0\",\n \"tox>=3.15,<4.0\",\n \"lark-parser==0.10.0\",\n \"hypothesis[lark]>=5.37.1,<6.0\",\n ],\n \"lint\": [\n \"black==21.9b0\",\n \"flake8==3.9.2\",\n \"flake8-bugbear==20.1.4\",\n \"flake8-use-fstring==1.1\",\n \"isort==5.9.3\",\n \"mypy==0.910\",\n ],\n \"docs\": [\"recommonmark\", \"sphinx>=3.0,<4.0\", \"sphinx_rtd_theme>=0.5,<0.6\"],\n \"dev\": [\"ipython\", \"pre-commit\", \"pyinstaller\", \"twine\"],\n}\n\nextras_require[\"dev\"] = (\n extras_require[\"test\"] + extras_require[\"lint\"] + extras_require[\"docs\"] + extras_require[\"dev\"]\n)\n\nhash_file_rel_path = os.path.join(\"vyper\", \"vyper_git_version.txt\")\nhashfile = os.path.relpath(hash_file_rel_path)\n\ntry:\n commithash = subprocess.check_output(\"git rev-parse HEAD\".split())\n commithash_str = commithash.decode(\"utf-8\").strip()\n with open(hashfile, \"w\") as fh:\n fh.write(f\"{__version__}\\n{commithash_str}\")\nexcept subprocess.CalledProcessError:\n pass\n\nwith open(\"README.md\", \"r\") as f:\n long_description = f.read()\n\nsetup(\n name=\"vyper\",\n version=__version__,\n description=\"Vyper: the Pythonic Programming Language for the EVM\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Vyper Team\",\n author_email=\"\",\n url=\"https://github.com/vyperlang/vyper\",\n license=\"Apache License 2.0\",\n keywords=\"ethereum evm smart contract language\",\n include_package_data=True,\n packages=find_packages(exclude=(\"tests\", \"docs\")),\n python_requires=\">=3.7,<3.10\",\n py_modules=[\"vyper\"],\n install_requires=[\n \"asttokens==2.0.4\",\n \"pycryptodome>=3.5.1,<4\",\n \"semantic-version==2.8.5\",\n \"cached-property==1.5.2 ; python_version<'3.8'\",\n ],\n setup_requires=[\"pytest-runner\"],\n tests_require=extras_require[\"test\"],\n extras_require=extras_require,\n entry_points={\n \"console_scripts\": [\n \"vyper=vyper.cli.vyper_compile:_parse_cli_args\",\n \"vyper-serve=vyper.cli.vyper_serve:_parse_cli_args\",\n \"vyper-lll=vyper.cli.vyper_lll:_parse_cli_args\",\n \"vyper-json=vyper.cli.vyper_json:_parse_cli_args\",\n ]\n },\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n data_files=[(\"\", [hash_file_rel_path])],\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 3231e310d5..44f27fa37f 100644 --- a/setup.py +++ b/setup.py @@ -15,7 +15,7 @@ "pytest-xdist>=1.32,<2.0", "eth-tester[py-evm]>=0.5.0b1,<0.6", "py-evm==0.4.0a4", # NOTE: temporarily pinned until we have support for py-evm 0.5.0a0+ - "web3==5.12.3", + "web3==5.21.0", "tox>=3.15,<4.0", "lark-parser==0.10.0", "hypothesis[lark]>=5.37.1,<6.0", diff --git a/tests/parser/features/test_assert.py b/tests/parser/features/test_assert.py index d31edc74a4..de4b7359f7 100644 --- a/tests/parser/features/test_assert.py +++ b/tests/parser/features/test_assert.py @@ -3,6 +3,11 @@ from eth_tester.exceptions import TransactionFailed +# web3 returns f"execution reverted: {err_str}" +def _fixup_err_str(s): + return s.replace("execution reverted: ", "") + + def test_assert_refund(w3, get_contract_with_gas_estimation, assert_tx_failed): code = """ @external @@ -45,21 +50,21 @@ def test3(reason_str: String[32]): with pytest.raises(TransactionFailed) as e_info: c.test(0) - assert e_info.value.args[0] == "larger than one please" + assert _fixup_err_str(e_info.value.args[0]) == "larger than one please" # a = 0, b = 1 with pytest.raises(TransactionFailed) as e_info: c.test2(0, 1, "") - assert e_info.value.args[0] == "a is not large enough" + assert _fixup_err_str(e_info.value.args[0]) == "a is not large enough" # a = 1, b = 0 with pytest.raises(TransactionFailed) as e_info: c.test2(2, 2, " because I said so") - assert e_info.value.args[0] == "b may only be 1" + " because I said so" + assert _fixup_err_str(e_info.value.args[0]) == "b may only be 1" + " because I said so" # return correct value assert c.test2(5, 1, "") == 17 with pytest.raises(TransactionFailed) as e_info: c.test3("An exception") - assert e_info.value.args[0] == "An exception" + assert _fixup_err_str(e_info.value.args[0]) == "An exception" invalid_code = [ @@ -183,7 +188,7 @@ def test(x: uint256[3]) -> bool: assert_tx_failed(lambda: c.test([1, 3, 5])) -def test_assest_reason_revert_length(w3, get_contract, memory_mocker): +def test_assert_reason_revert_length(w3, get_contract, memory_mocker): code = """ @external def test() -> int128: @@ -194,7 +199,7 @@ def test() -> int128: w3.manager.provider.ethereum_tester.backend.is_eip838_error = lambda err: False with pytest.raises(TransactionFailed) as e_info: c.test() - error_bytes = eval(e_info.value.args[0]) + error_bytes = eval(_fixup_err_str(e_info.value.args[0])) assert len(error_bytes) == 100 msg = decode_single("string", error_bytes[36:]) assert msg == "oops"
test fail with web3.py 5.21.0

### Version Information

* vyper Version (output of `vyper --version`): latest master (cff69d63)
* OS: macos
* Python Version (output of `python --version`): 3.9.6

### What's your issue about?

tests fail

tests/cli/vyper_json/test_parse_args_vyperjson.py::test_pretty_json - json.decoder.J...
FAILED tests/cli/vyper_json/test_parse_args_vyperjson.py::test_to_stdout - json.decoder.JSO...
FAILED tests/parser/features/test_assert.py::test_assert_reason[False] - AssertionError: as...
FAILED tests/parser/features/test_assert.py::test_assest_reason_revert misses the string "execution reverted"
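The diff above bumps web3 to 5.21.0 and normalizes revert reasons in the tests, because newer web3 releases surface them as `execution reverted: <reason>` while older ones return the bare reason. A minimal sketch of that normalization under illustrative names (the diff's own helper is `_fixup_err_str`, which uses `str.replace`):

```python
# REVERT_PREFIX and normalize_revert_reason are illustrative names, not part of
# the vyper test suite.
REVERT_PREFIX = "execution reverted: "


def normalize_revert_reason(message: str) -> str:
    # Strip the prefix if present so assertions work against both old and new
    # web3 behavior; leave already-bare reasons untouched.
    if message.startswith(REVERT_PREFIX):
        return message[len(REVERT_PREFIX):]
    return message


# Example values, not taken from a real transaction:
assert normalize_revert_reason("execution reverted: larger than one please") == "larger than one please"
assert normalize_revert_reason("larger than one please") == "larger than one please"
```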
fossasia__open-event-server-5151
[ { "content": "import base64\n\nfrom flask import Blueprint, request, jsonify, abort, make_response\nfrom flask_jwt import current_identity as current_user\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\n\nfrom app import get_settings\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query, get_count\nfrom app.api.helpers.exceptions import ConflictException\nfrom app.api.helpers.exceptions import ForbiddenException\nfrom app.api.helpers.files import create_save_image_sizes, make_frontend_url\nfrom app.api.helpers.mail import send_email_confirmation, send_email_change_user_email, send_email_with_action\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import is_user_itself\nfrom app.api.helpers.utilities import get_serializer, str_generator\nfrom app.api.schema.users import UserSchema, UserSchemaPublic\nfrom app.models import db\nfrom app.models.access_code import AccessCode\nfrom app.models.discount_code import DiscountCode\nfrom app.models.email_notification import EmailNotification\nfrom app.models.event_invoice import EventInvoice\nfrom app.models.feedback import Feedback\nfrom app.models.mail import USER_REGISTER_WITH_PASSWORD\nfrom app.models.notification import Notification\nfrom app.models.session import Session\nfrom app.models.speaker import Speaker\nfrom app.models.ticket_holder import TicketHolder\nfrom app.models.user import User\nfrom app.models.users_events_role import UsersEventsRoles\n\nuser_misc_routes = Blueprint('user_misc', __name__, url_prefix='/v1')\n\n\nclass UserList(ResourceList):\n \"\"\"\n List and create Users\n \"\"\"\n def before_create_object(self, data, view_kwargs):\n \"\"\"\n method to check if there is an existing user with same email which is received in data to create a new user\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if db.session.query(User.id).filter_by(email=data['email'], deleted_at=None).scalar() is not None:\n raise ConflictException({'pointer': '/data/attributes/email'}, \"Email already exists\")\n\n def after_create_object(self, user, data, view_kwargs):\n \"\"\"\n method to send-\n email notification\n mail link for register verification\n add image urls\n :param user:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n s = get_serializer()\n hash = str(base64.b64encode(str(s.dumps([user.email, str_generator()])).encode()), 'utf-8')\n link = make_frontend_url('/email/verify'.format(id=user.id), {'token': hash})\n send_email_with_action(user, USER_REGISTER_WITH_PASSWORD, app_name=get_settings()['app_name'],\n email=user.email)\n send_email_confirmation(user.email, link)\n\n if data.get('original_image_url'):\n uploaded_images = create_save_image_sizes(data['original_image_url'], 'user', user.id)\n uploaded_images['small_image_url'] = uploaded_images['thumbnail_image_url']\n del uploaded_images['large_image_url']\n self.session.query(User).filter_by(id=user.id).update(uploaded_images)\n\n decorators = (api.has_permission('is_admin', methods=\"GET\"),)\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User,\n 'methods': {\n 'before_create_object': before_create_object,\n 'after_create_object': after_create_object\n }}\n\n\nclass UserDetail(ResourceDetail):\n \"\"\"\n User detail by id\n \"\"\"\n def before_get(self, args, kwargs):\n\n if current_user.is_admin or current_user.is_super_admin or current_user:\n self.schema = UserSchema\n else:\n self.schema = UserSchemaPublic\n\n def 
before_get_object(self, view_kwargs):\n \"\"\"\n before get method for user object\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('notification_id') is not None:\n notification = safe_query(self, Notification, 'id', view_kwargs['notification_id'], 'notification_id')\n if notification.user_id is not None:\n view_kwargs['id'] = notification.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('feedback_id') is not None:\n print(view_kwargs['feedback_id'])\n feedback = safe_query(self, Feedback, 'id', view_kwargs['feedback_id'], 'feedback_id')\n if feedback.user_id is not None:\n view_kwargs['id'] = feedback.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('attendee_id') is not None:\n attendee = safe_query(self, TicketHolder, 'id', view_kwargs['attendee_id'], 'attendee_id')\n if attendee.user is not None:\n if (not has_access('is_user_itself',\n user_id=attendee.user.id) or not has_access('is_coorganizer',\n event_id=attendee.event_id)):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n view_kwargs['id'] = attendee.user.id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('event_invoice_id') is not None:\n event_invoice = safe_query(self, EventInvoice, 'id', view_kwargs['event_invoice_id'], 'event_invoice_id')\n if event_invoice.user_id is not None:\n view_kwargs['id'] = event_invoice.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('users_events_role_id') is not None:\n users_events_role = safe_query(self, UsersEventsRoles, 'id', view_kwargs['users_events_role_id'],\n 'users_events_role_id')\n if users_events_role.user_id is not None:\n view_kwargs['id'] = users_events_role.user_id\n\n if view_kwargs.get('speaker_id') is not None:\n speaker = safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id')\n if speaker.user_id is not None:\n view_kwargs['id'] = speaker.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('session_id') is not None:\n session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')\n if session.creator_id is not None:\n view_kwargs['id'] = session.creator_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('access_code_id') is not None:\n access_code = safe_query(self, AccessCode, 'id', view_kwargs['access_code_id'], 'access_code_id')\n if access_code.marketer_id is not None:\n view_kwargs['id'] = access_code.marketer_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('discount_code_id') is not None:\n discount_code = safe_query(self, DiscountCode, 'id', view_kwargs['discount_code_id'], 'discount_code_id')\n if discount_code.marketer_id is not None:\n view_kwargs['id'] = discount_code.marketer_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('email_notification_id') is not None:\n email_notification = safe_query(self, EmailNotification, 'id', view_kwargs['email_notification_id'],\n 'email_notification_id')\n if email_notification.user_id is not None:\n view_kwargs['id'] = email_notification.user_id\n else:\n view_kwargs['id'] = None\n\n def before_update_object(self, user, data, view_kwargs):\n if data.get('original_image_url') and data['original_image_url'] != user.original_image_url:\n uploaded_images = create_save_image_sizes(data['original_image_url'], 'user', user.id)\n data['original_image_url'] = uploaded_images['original_image_url']\n data['small_image_url'] = uploaded_images['thumbnail_image_url']\n data['thumbnail_image_url'] = uploaded_images['thumbnail_image_url']\n data['icon_image_url'] = 
uploaded_images['icon_image_url']\n\n if data.get('email') and data['email'] != user.email:\n view_kwargs['email_changed'] = user.email\n\n if data.get('is_admin') != user.is_admin:\n user.is_admin = not user.is_admin\n\n def after_update_object(self, user, data, view_kwargs):\n \"\"\"\n method to mail user about email change\n :param user:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('email_changed'):\n send_email_change_user_email(user, view_kwargs.get('email_changed'))\n\n decorators = (api.has_permission('is_user_itself', fetch=\"user_id,id\", fetch_as=\"user_id\",\n model=[Notification, Feedback, UsersEventsRoles, Session, EventInvoice, AccessCode,\n DiscountCode, EmailNotification, Speaker, User],\n fetch_key_url=\"notification_id, feedback_id, users_events_role_id, session_id, \\\n event_invoice_id, access_code_id, discount_code_id, email_notification_id, speaker_id, id\",\n leave_if=lambda a: a.get('attendee_id')), )\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User,\n 'methods': {\n 'before_get_object': before_get_object,\n 'before_update_object': before_update_object,\n 'after_update_object': after_update_object\n }}\n\n\nclass UserRelationship(ResourceRelationship):\n \"\"\"\n User Relationship\n \"\"\"\n decorators = (is_user_itself, )\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User}\n\n\n@user_misc_routes.route('/users/checkEmail', methods=['POST'])\ndef is_email_available():\n email = request.json.get('email', None)\n if email:\n if get_count(db.session.query(User).filter_by(email=email)):\n return jsonify(\n result=\"False\"\n )\n else:\n return jsonify(\n result=\"True\"\n )\n else:\n abort(\n make_response(jsonify(error=\"Email field missing\"), 422)\n )\n", "path": "app/api/users.py" } ]
[ { "content": "import base64\n\nfrom flask import Blueprint, request, jsonify, abort, make_response\nfrom flask_jwt import current_identity as current_user\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\n\nfrom app import get_settings\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query, get_count\nfrom app.api.helpers.exceptions import ConflictException\nfrom app.api.helpers.exceptions import ForbiddenException\nfrom app.api.helpers.files import create_save_image_sizes, make_frontend_url\nfrom app.api.helpers.mail import send_email_confirmation, send_email_change_user_email, send_email_with_action\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import is_user_itself\nfrom app.api.helpers.utilities import get_serializer, str_generator\nfrom app.api.schema.users import UserSchema, UserSchemaPublic\nfrom app.models import db\nfrom app.models.access_code import AccessCode\nfrom app.models.discount_code import DiscountCode\nfrom app.models.email_notification import EmailNotification\nfrom app.models.event_invoice import EventInvoice\nfrom app.models.feedback import Feedback\nfrom app.models.mail import USER_REGISTER_WITH_PASSWORD\nfrom app.models.notification import Notification\nfrom app.models.session import Session\nfrom app.models.speaker import Speaker\nfrom app.models.ticket_holder import TicketHolder\nfrom app.models.user import User\nfrom app.models.users_events_role import UsersEventsRoles\n\nuser_misc_routes = Blueprint('user_misc', __name__, url_prefix='/v1')\n\n\nclass UserList(ResourceList):\n \"\"\"\n List and create Users\n \"\"\"\n def before_create_object(self, data, view_kwargs):\n \"\"\"\n method to check if there is an existing user with same email which is received in data to create a new user\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if db.session.query(User.id).filter_by(email=data['email'], deleted_at=None).scalar() is not None:\n raise ConflictException({'pointer': '/data/attributes/email'}, \"Email already exists\")\n\n def after_create_object(self, user, data, view_kwargs):\n \"\"\"\n method to send-\n email notification\n mail link for register verification\n add image urls\n :param user:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n s = get_serializer()\n hash = str(base64.b64encode(str(s.dumps([user.email, str_generator()])).encode()), 'utf-8')\n link = make_frontend_url('/email/verify'.format(id=user.id), {'token': hash})\n send_email_with_action(user, USER_REGISTER_WITH_PASSWORD, app_name=get_settings()['app_name'],\n email=user.email)\n send_email_confirmation(user.email, link)\n\n if data.get('original_image_url'):\n uploaded_images = create_save_image_sizes(data['original_image_url'], 'user', user.id)\n uploaded_images['small_image_url'] = uploaded_images['thumbnail_image_url']\n del uploaded_images['large_image_url']\n self.session.query(User).filter_by(id=user.id).update(uploaded_images)\n\n decorators = (api.has_permission('is_admin', methods=\"GET\"),)\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User,\n 'methods': {\n 'before_create_object': before_create_object,\n 'after_create_object': after_create_object\n }}\n\n\nclass UserDetail(ResourceDetail):\n \"\"\"\n User detail by id\n \"\"\"\n def before_get(self, args, kwargs):\n\n if current_user.is_admin or current_user.is_super_admin or current_user:\n self.schema = UserSchema\n else:\n self.schema = UserSchemaPublic\n\n def 
before_get_object(self, view_kwargs):\n \"\"\"\n before get method for user object\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('notification_id') is not None:\n notification = safe_query(self, Notification, 'id', view_kwargs['notification_id'], 'notification_id')\n if notification.user_id is not None:\n view_kwargs['id'] = notification.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('feedback_id') is not None:\n print(view_kwargs['feedback_id'])\n feedback = safe_query(self, Feedback, 'id', view_kwargs['feedback_id'], 'feedback_id')\n if feedback.user_id is not None:\n view_kwargs['id'] = feedback.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('attendee_id') is not None:\n attendee = safe_query(self, TicketHolder, 'id', view_kwargs['attendee_id'], 'attendee_id')\n if attendee.user is not None:\n if (not has_access('is_user_itself',\n user_id=attendee.user.id) or not has_access('is_coorganizer',\n event_id=attendee.event_id)):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n view_kwargs['id'] = attendee.user.id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('event_invoice_id') is not None:\n event_invoice = safe_query(self, EventInvoice, 'id', view_kwargs['event_invoice_id'], 'event_invoice_id')\n if event_invoice.user_id is not None:\n view_kwargs['id'] = event_invoice.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('users_events_role_id') is not None:\n users_events_role = safe_query(self, UsersEventsRoles, 'id', view_kwargs['users_events_role_id'],\n 'users_events_role_id')\n if users_events_role.user_id is not None:\n view_kwargs['id'] = users_events_role.user_id\n\n if view_kwargs.get('speaker_id') is not None:\n speaker = safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id')\n if speaker.user_id is not None:\n view_kwargs['id'] = speaker.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('session_id') is not None:\n session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')\n if session.creator_id is not None:\n view_kwargs['id'] = session.creator_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('access_code_id') is not None:\n access_code = safe_query(self, AccessCode, 'id', view_kwargs['access_code_id'], 'access_code_id')\n if access_code.marketer_id is not None:\n view_kwargs['id'] = access_code.marketer_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('discount_code_id') is not None:\n discount_code = safe_query(self, DiscountCode, 'id', view_kwargs['discount_code_id'], 'discount_code_id')\n if discount_code.marketer_id is not None:\n view_kwargs['id'] = discount_code.marketer_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('email_notification_id') is not None:\n email_notification = safe_query(self, EmailNotification, 'id', view_kwargs['email_notification_id'],\n 'email_notification_id')\n if email_notification.user_id is not None:\n view_kwargs['id'] = email_notification.user_id\n else:\n view_kwargs['id'] = None\n\n def before_update_object(self, user, data, view_kwargs):\n if data.get('original_image_url') and data['original_image_url'] != user.original_image_url:\n uploaded_images = create_save_image_sizes(data['original_image_url'], 'user', user.id)\n data['original_image_url'] = uploaded_images['original_image_url']\n data['small_image_url'] = uploaded_images['thumbnail_image_url']\n data['thumbnail_image_url'] = uploaded_images['thumbnail_image_url']\n data['icon_image_url'] = 
uploaded_images['icon_image_url']\n\n if data.get('email') and data['email'] != user.email:\n view_kwargs['email_changed'] = user.email\n\n if has_access('is_super_admin') and data.get('is_admin') != user.is_admin:\n user.is_admin = not user.is_admin\n\n def after_update_object(self, user, data, view_kwargs):\n \"\"\"\n method to mail user about email change\n :param user:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('email_changed'):\n send_email_change_user_email(user, view_kwargs.get('email_changed'))\n\n decorators = (api.has_permission('is_user_itself', fetch=\"user_id,id\", fetch_as=\"user_id\",\n model=[Notification, Feedback, UsersEventsRoles, Session, EventInvoice, AccessCode,\n DiscountCode, EmailNotification, Speaker, User],\n fetch_key_url=\"notification_id, feedback_id, users_events_role_id, session_id, \\\n event_invoice_id, access_code_id, discount_code_id, email_notification_id, speaker_id, id\",\n leave_if=lambda a: a.get('attendee_id')), )\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User,\n 'methods': {\n 'before_get_object': before_get_object,\n 'before_update_object': before_update_object,\n 'after_update_object': after_update_object\n }}\n\n\nclass UserRelationship(ResourceRelationship):\n \"\"\"\n User Relationship\n \"\"\"\n decorators = (is_user_itself, )\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User}\n\n\n@user_misc_routes.route('/users/checkEmail', methods=['POST'])\ndef is_email_available():\n email = request.json.get('email', None)\n if email:\n if get_count(db.session.query(User).filter_by(email=email)):\n return jsonify(\n result=\"False\"\n )\n else:\n return jsonify(\n result=\"True\"\n )\n else:\n abort(\n make_response(jsonify(error=\"Email field missing\"), 422)\n )\n", "path": "app/api/users.py" } ]
diff --git a/app/api/users.py b/app/api/users.py
index cba8d5ccc8..3be87cb7d3 100644
--- a/app/api/users.py
+++ b/app/api/users.py
@@ -183,7 +183,7 @@ def before_update_object(self, user, data, view_kwargs):
         if data.get('email') and data['email'] != user.email:
             view_kwargs['email_changed'] = user.email
 
-        if data.get('is_admin') != user.is_admin:
+        if has_access('is_super_admin') and data.get('is_admin') != user.is_admin:
             user.is_admin = not user.is_admin
 
     def after_update_object(self, user, data, view_kwargs):
For the edit action button in admin/users, the super admin should be allowed to make a user an admin.

**Describe the bug**

The PATCH request for users works, but is_admin behaves as a read-only property even for the super user. I sent a PATCH request for a user and changed the value of is_admin from true to false, but the value of is_admin was unchanged after the request.

**To Reproduce**

Steps to reproduce the behavior:
1. Go to v1/users/user_id
2. Send a patch request
3. Change the value of is_admin for some user from false to true.
4. See error

![image](https://user-images.githubusercontent.com/22127980/42907807-8f6542fc-8afc-11e8-84cd-0fe415042fb2.png)

After the request

![image](https://user-images.githubusercontent.com/22127980/42907825-9ecafdd6-8afc-11e8-8372-5cc65888b08b.png)
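The one-line fix above gates the is_admin toggle behind `has_access('is_super_admin')`, so the flag stays read-only for everyone except a super admin. A minimal, self-contained sketch of that guard; `User`, `apply_is_admin_change`, and the `is_super_admin` callable are stand-ins for illustration, not the open-event-server code:

```python
class User:
    def __init__(self, email, is_admin=False):
        self.email = email
        self.is_admin = is_admin


def apply_is_admin_change(target, payload, is_super_admin):
    # Toggle is_admin only when the caller is a super admin and the requested
    # value actually differs; any other caller's PATCH leaves the flag untouched.
    if is_super_admin() and payload.get("is_admin") != target.is_admin:
        target.is_admin = not target.is_admin


user = User("someone@example.org")
apply_is_admin_change(user, {"is_admin": True}, is_super_admin=lambda: False)
assert user.is_admin is False   # non-super-admin request is a no-op
apply_is_admin_change(user, {"is_admin": True}, is_super_admin=lambda: True)
assert user.is_admin is True    # super admin may flip the flag
```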
feast-dev__feast-3588
[ { "content": "import contextlib\nimport os\nimport uuid\nimport warnings\nfrom datetime import datetime\nfrom functools import reduce\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n ContextManager,\n Dict,\n Iterator,\n List,\n Optional,\n Tuple,\n Union,\n cast,\n)\n\nimport numpy as np\nimport pandas as pd\nimport pyarrow\nfrom pydantic import Field, StrictStr\nfrom pydantic.typing import Literal\nfrom pytz import utc\n\nfrom feast import OnDemandFeatureView\nfrom feast.data_source import DataSource\nfrom feast.errors import (\n EntitySQLEmptyResults,\n InvalidEntityType,\n InvalidSparkSessionException,\n)\nfrom feast.feature_logging import LoggingConfig, LoggingSource\nfrom feast.feature_view import DUMMY_ENTITY_ID, DUMMY_ENTITY_VAL, FeatureView\nfrom feast.infra.offline_stores import offline_utils\nfrom feast.infra.offline_stores.offline_store import (\n OfflineStore,\n RetrievalJob,\n RetrievalMetadata,\n)\nfrom feast.infra.offline_stores.snowflake_source import (\n SavedDatasetSnowflakeStorage,\n SnowflakeLoggingDestination,\n SnowflakeSource,\n)\nfrom feast.infra.registry.base_registry import BaseRegistry\nfrom feast.infra.utils.snowflake.snowflake_utils import (\n GetSnowflakeConnection,\n execute_snowflake_statement,\n write_pandas,\n write_parquet,\n)\nfrom feast.repo_config import FeastConfigBaseModel, RepoConfig\nfrom feast.saved_dataset import SavedDatasetStorage\nfrom feast.usage import log_exceptions_and_usage\n\ntry:\n from snowflake.connector import SnowflakeConnection\nexcept ImportError as e:\n from feast.errors import FeastExtrasDependencyImportError\n\n raise FeastExtrasDependencyImportError(\"snowflake\", str(e))\n\nif TYPE_CHECKING:\n from pyspark.sql import DataFrame, SparkSession\n\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n\nclass SnowflakeOfflineStoreConfig(FeastConfigBaseModel):\n \"\"\"Offline store config for Snowflake\"\"\"\n\n type: Literal[\"snowflake.offline\"] = \"snowflake.offline\"\n \"\"\" Offline store type selector \"\"\"\n\n config_path: Optional[str] = os.path.expanduser(\"~/.snowsql/config\")\n \"\"\" Snowflake config path -- absolute path required (Cant use ~)\"\"\"\n\n account: Optional[str] = None\n \"\"\" Snowflake deployment identifier -- drop .snowflakecomputing.com \"\"\"\n\n user: Optional[str] = None\n \"\"\" Snowflake user name \"\"\"\n\n password: Optional[str] = None\n \"\"\" Snowflake password \"\"\"\n\n role: Optional[str] = None\n \"\"\" Snowflake role name \"\"\"\n\n warehouse: Optional[str] = None\n \"\"\" Snowflake warehouse name \"\"\"\n\n authenticator: Optional[str] = None\n \"\"\" Snowflake authenticator name \"\"\"\n\n database: StrictStr\n \"\"\" Snowflake database name \"\"\"\n\n schema_: Optional[str] = Field(\"PUBLIC\", alias=\"schema\")\n \"\"\" Snowflake schema name \"\"\"\n\n storage_integration_name: Optional[str] = None\n \"\"\" Storage integration name in snowflake \"\"\"\n\n blob_export_location: Optional[str] = None\n \"\"\" Location (in S3, Google storage or Azure storage) where data is offloaded \"\"\"\n\n convert_timestamp_columns: Optional[bool] = None\n \"\"\" Convert timestamp columns on export to a Parquet-supported format \"\"\"\n\n class Config:\n allow_population_by_field_name = True\n\n\nclass SnowflakeOfflineStore(OfflineStore):\n @staticmethod\n @log_exceptions_and_usage(offline_store=\"snowflake\")\n def pull_latest_from_table_or_query(\n config: RepoConfig,\n data_source: DataSource,\n join_key_columns: List[str],\n 
feature_name_columns: List[str],\n timestamp_field: str,\n created_timestamp_column: Optional[str],\n start_date: datetime,\n end_date: datetime,\n ) -> RetrievalJob:\n assert isinstance(config.offline_store, SnowflakeOfflineStoreConfig)\n assert isinstance(data_source, SnowflakeSource)\n\n from_expression = data_source.get_table_query_string()\n if not data_source.database and data_source.table:\n from_expression = f'\"{config.offline_store.database}\".\"{config.offline_store.schema_}\".{from_expression}'\n\n if join_key_columns:\n partition_by_join_key_string = '\"' + '\", \"'.join(join_key_columns) + '\"'\n partition_by_join_key_string = (\n \"PARTITION BY \" + partition_by_join_key_string\n )\n else:\n partition_by_join_key_string = \"\"\n\n timestamp_columns = [timestamp_field]\n if created_timestamp_column:\n timestamp_columns.append(created_timestamp_column)\n\n timestamp_desc_string = '\"' + '\" DESC, \"'.join(timestamp_columns) + '\" DESC'\n field_string = (\n '\"'\n + '\", \"'.join(join_key_columns + feature_name_columns + timestamp_columns)\n + '\"'\n )\n\n if config.offline_store.convert_timestamp_columns:\n select_fields = list(\n map(\n lambda field_name: f'\"{field_name}\"',\n join_key_columns + feature_name_columns,\n )\n )\n select_timestamps = list(\n map(\n lambda field_name: f\"to_varchar({field_name}, 'YYYY-MM-DD\\\"T\\\"HH24:MI:SS.FFTZH:TZM') as {field_name}\",\n timestamp_columns,\n )\n )\n inner_field_string = \", \".join(select_fields + select_timestamps)\n else:\n select_fields = list(\n map(\n lambda field_name: f'\"{field_name}\"',\n join_key_columns + feature_name_columns + timestamp_columns,\n )\n )\n inner_field_string = \", \".join(select_fields)\n\n if data_source.snowflake_options.warehouse:\n config.offline_store.warehouse = data_source.snowflake_options.warehouse\n\n with GetSnowflakeConnection(config.offline_store) as conn:\n snowflake_conn = conn\n\n start_date = start_date.astimezone(tz=utc)\n end_date = end_date.astimezone(tz=utc)\n\n query = f\"\"\"\n SELECT\n {field_string}\n {f''', TRIM({repr(DUMMY_ENTITY_VAL)}::VARIANT,'\"') AS \"{DUMMY_ENTITY_ID}\"''' if not join_key_columns else \"\"}\n FROM (\n SELECT {inner_field_string},\n ROW_NUMBER() OVER({partition_by_join_key_string} ORDER BY {timestamp_desc_string}) AS \"_feast_row\"\n FROM {from_expression}\n WHERE \"{timestamp_field}\" BETWEEN TIMESTAMP '{start_date}' AND TIMESTAMP '{end_date}'\n )\n WHERE \"_feast_row\" = 1\n \"\"\"\n\n return SnowflakeRetrievalJob(\n query=query,\n snowflake_conn=snowflake_conn,\n config=config,\n full_feature_names=False,\n on_demand_feature_views=None,\n )\n\n @staticmethod\n @log_exceptions_and_usage(offline_store=\"snowflake\")\n def pull_all_from_table_or_query(\n config: RepoConfig,\n data_source: DataSource,\n join_key_columns: List[str],\n feature_name_columns: List[str],\n timestamp_field: str,\n start_date: datetime,\n end_date: datetime,\n ) -> RetrievalJob:\n assert isinstance(config.offline_store, SnowflakeOfflineStoreConfig)\n assert isinstance(data_source, SnowflakeSource)\n\n from_expression = data_source.get_table_query_string()\n if not data_source.database and data_source.table:\n from_expression = f'\"{config.offline_store.database}\".\"{config.offline_store.schema_}\".{from_expression}'\n\n field_string = (\n '\"'\n + '\", \"'.join(join_key_columns + feature_name_columns + [timestamp_field])\n + '\"'\n )\n\n if data_source.snowflake_options.warehouse:\n config.offline_store.warehouse = data_source.snowflake_options.warehouse\n\n with 
GetSnowflakeConnection(config.offline_store) as conn:\n snowflake_conn = conn\n\n start_date = start_date.astimezone(tz=utc)\n end_date = end_date.astimezone(tz=utc)\n\n query = f\"\"\"\n SELECT {field_string}\n FROM {from_expression}\n WHERE \"{timestamp_field}\" BETWEEN TIMESTAMP '{start_date}' AND TIMESTAMP '{end_date}'\n \"\"\"\n\n return SnowflakeRetrievalJob(\n query=query,\n snowflake_conn=snowflake_conn,\n config=config,\n full_feature_names=False,\n )\n\n @staticmethod\n @log_exceptions_and_usage(offline_store=\"snowflake\")\n def get_historical_features(\n config: RepoConfig,\n feature_views: List[FeatureView],\n feature_refs: List[str],\n entity_df: Union[pd.DataFrame, str],\n registry: BaseRegistry,\n project: str,\n full_feature_names: bool = False,\n ) -> RetrievalJob:\n assert isinstance(config.offline_store, SnowflakeOfflineStoreConfig)\n for fv in feature_views:\n assert isinstance(fv.batch_source, SnowflakeSource)\n\n with GetSnowflakeConnection(config.offline_store) as conn:\n snowflake_conn = conn\n\n entity_schema = _get_entity_schema(entity_df, snowflake_conn, config)\n\n entity_df_event_timestamp_col = (\n offline_utils.infer_event_timestamp_from_entity_df(entity_schema)\n )\n\n entity_df_event_timestamp_range = _get_entity_df_event_timestamp_range(\n entity_df,\n entity_df_event_timestamp_col,\n snowflake_conn,\n )\n\n @contextlib.contextmanager\n def query_generator() -> Iterator[str]:\n\n table_name = offline_utils.get_temp_entity_table_name()\n\n _upload_entity_df(entity_df, snowflake_conn, config, table_name)\n\n expected_join_keys = offline_utils.get_expected_join_keys(\n project, feature_views, registry\n )\n\n offline_utils.assert_expected_columns_in_entity_df(\n entity_schema, expected_join_keys, entity_df_event_timestamp_col\n )\n\n # Build a query context containing all information required to template the Snowflake SQL query\n query_context = offline_utils.get_feature_view_query_context(\n feature_refs,\n feature_views,\n registry,\n project,\n entity_df_event_timestamp_range,\n )\n\n query_context = _fix_entity_selections_identifiers(query_context)\n\n # Generate the Snowflake SQL query from the query context\n query = offline_utils.build_point_in_time_query(\n query_context,\n left_table_query_string=table_name,\n entity_df_event_timestamp_col=entity_df_event_timestamp_col,\n entity_df_columns=entity_schema.keys(),\n query_template=MULTIPLE_FEATURE_VIEW_POINT_IN_TIME_JOIN,\n full_feature_names=full_feature_names,\n )\n\n yield query\n\n return SnowflakeRetrievalJob(\n query=query_generator,\n snowflake_conn=snowflake_conn,\n config=config,\n full_feature_names=full_feature_names,\n on_demand_feature_views=OnDemandFeatureView.get_requested_odfvs(\n feature_refs, project, registry\n ),\n metadata=RetrievalMetadata(\n features=feature_refs,\n keys=list(entity_schema.keys() - {entity_df_event_timestamp_col}),\n min_event_timestamp=entity_df_event_timestamp_range[0],\n max_event_timestamp=entity_df_event_timestamp_range[1],\n ),\n )\n\n @staticmethod\n def write_logged_features(\n config: RepoConfig,\n data: Union[pyarrow.Table, Path],\n source: LoggingSource,\n logging_config: LoggingConfig,\n registry: BaseRegistry,\n ):\n assert isinstance(logging_config.destination, SnowflakeLoggingDestination)\n\n with GetSnowflakeConnection(config.offline_store) as conn:\n snowflake_conn = conn\n\n if isinstance(data, Path):\n write_parquet(\n snowflake_conn,\n data,\n source.get_schema(registry),\n table_name=logging_config.destination.table_name,\n 
auto_create_table=True,\n )\n else:\n write_pandas(\n snowflake_conn,\n data.to_pandas(),\n table_name=logging_config.destination.table_name,\n auto_create_table=True,\n )\n\n @staticmethod\n def offline_write_batch(\n config: RepoConfig,\n feature_view: FeatureView,\n table: pyarrow.Table,\n progress: Optional[Callable[[int], Any]],\n ):\n assert isinstance(config.offline_store, SnowflakeOfflineStoreConfig)\n assert isinstance(feature_view.batch_source, SnowflakeSource)\n\n pa_schema, column_names = offline_utils.get_pyarrow_schema_from_batch_source(\n config, feature_view.batch_source\n )\n if column_names != table.column_names:\n raise ValueError(\n f\"The input pyarrow table has schema {table.schema} with the incorrect columns {table.column_names}. \"\n f\"The schema is expected to be {pa_schema} with the columns (in this exact order) to be {column_names}.\"\n )\n\n if table.schema != pa_schema:\n table = table.cast(pa_schema)\n\n with GetSnowflakeConnection(config.offline_store) as conn:\n snowflake_conn = conn\n\n write_pandas(\n snowflake_conn,\n table.to_pandas(),\n table_name=feature_view.batch_source.table,\n auto_create_table=True,\n )\n\n\nclass SnowflakeRetrievalJob(RetrievalJob):\n def __init__(\n self,\n query: Union[str, Callable[[], ContextManager[str]]],\n snowflake_conn: SnowflakeConnection,\n config: RepoConfig,\n full_feature_names: bool,\n on_demand_feature_views: Optional[List[OnDemandFeatureView]] = None,\n metadata: Optional[RetrievalMetadata] = None,\n ):\n\n if not isinstance(query, str):\n self._query_generator = query\n else:\n\n @contextlib.contextmanager\n def query_generator() -> Iterator[str]:\n assert isinstance(query, str)\n yield query\n\n self._query_generator = query_generator\n\n self.snowflake_conn = snowflake_conn\n self.config = config\n self._full_feature_names = full_feature_names\n self._on_demand_feature_views = on_demand_feature_views or []\n self._metadata = metadata\n self.export_path: Optional[str]\n if self.config.offline_store.blob_export_location:\n self.export_path = f\"{self.config.offline_store.blob_export_location}/{self.config.project}/{uuid.uuid4()}\"\n else:\n self.export_path = None\n\n @property\n def full_feature_names(self) -> bool:\n return self._full_feature_names\n\n @property\n def on_demand_feature_views(self) -> List[OnDemandFeatureView]:\n return self._on_demand_feature_views\n\n def _to_df_internal(self, timeout: Optional[int] = None) -> pd.DataFrame:\n with self._query_generator() as query:\n\n df = execute_snowflake_statement(\n self.snowflake_conn, query\n ).fetch_pandas_all()\n\n return df\n\n def _to_arrow_internal(self, timeout: Optional[int] = None) -> pyarrow.Table:\n with self._query_generator() as query:\n\n pa_table = execute_snowflake_statement(\n self.snowflake_conn, query\n ).fetch_arrow_all()\n\n if pa_table:\n return pa_table\n else:\n empty_result = execute_snowflake_statement(self.snowflake_conn, query)\n\n return pyarrow.Table.from_pandas(\n pd.DataFrame(columns=[md.name for md in empty_result.description])\n )\n\n def to_snowflake(self, table_name: str, temporary=False) -> None:\n \"\"\"Save dataset as a new Snowflake table\"\"\"\n if self.on_demand_feature_views:\n transformed_df = self.to_df()\n\n write_pandas(\n self.snowflake_conn, transformed_df, table_name, auto_create_table=True\n )\n\n return None\n\n with self._query_generator() as query:\n query = f'CREATE {\"TEMPORARY\" if temporary else \"\"} TABLE IF NOT EXISTS \"{table_name}\" AS ({query});\\n'\n\n 
execute_snowflake_statement(self.snowflake_conn, query)\n\n def to_sql(self) -> str:\n \"\"\"\n Returns the SQL query that will be executed in Snowflake to build the historical feature table.\n \"\"\"\n with self._query_generator() as query:\n return query\n\n def to_spark_df(self, spark_session: \"SparkSession\") -> \"DataFrame\":\n \"\"\"\n Method to convert snowflake query results to pyspark data frame.\n\n Args:\n spark_session: spark Session variable of current environment.\n\n Returns:\n spark_df: A pyspark dataframe.\n \"\"\"\n\n try:\n from pyspark.sql import DataFrame, SparkSession\n except ImportError as e:\n from feast.errors import FeastExtrasDependencyImportError\n\n raise FeastExtrasDependencyImportError(\"spark\", str(e))\n\n if isinstance(spark_session, SparkSession):\n with self._query_generator() as query:\n\n arrow_batches = execute_snowflake_statement(\n self.snowflake_conn, query\n ).fetch_arrow_batches()\n\n if arrow_batches:\n spark_df = reduce(\n DataFrame.unionAll,\n [\n spark_session.createDataFrame(batch.to_pandas())\n for batch in arrow_batches\n ],\n )\n\n return spark_df\n\n else:\n raise EntitySQLEmptyResults(query)\n\n else:\n raise InvalidSparkSessionException(spark_session)\n\n def persist(self, storage: SavedDatasetStorage, allow_overwrite: bool = False):\n assert isinstance(storage, SavedDatasetSnowflakeStorage)\n self.to_snowflake(table_name=storage.snowflake_options.table)\n\n @property\n def metadata(self) -> Optional[RetrievalMetadata]:\n return self._metadata\n\n def supports_remote_storage_export(self) -> bool:\n return (\n self.config.offline_store.storage_integration_name\n and self.config.offline_store.blob_export_location\n )\n\n def to_remote_storage(self) -> List[str]:\n if not self.export_path:\n raise ValueError(\n \"to_remote_storage() requires `blob_export_location` to be specified in config\"\n )\n if not self.config.offline_store.storage_integration_name:\n raise ValueError(\n \"to_remote_storage() requires `storage_integration_name` to be specified in config\"\n )\n\n table = f\"temporary_{uuid.uuid4().hex}\"\n self.to_snowflake(table)\n\n query = f\"\"\"\n COPY INTO '{self.export_path}/{table}' FROM \"{self.config.offline_store.database}\".\"{self.config.offline_store.schema_}\".\"{table}\"\\n\n STORAGE_INTEGRATION = {self.config.offline_store.storage_integration_name}\\n\n FILE_FORMAT = (TYPE = PARQUET)\n DETAILED_OUTPUT = TRUE\n HEADER = TRUE\n \"\"\"\n cursor = execute_snowflake_statement(self.snowflake_conn, query)\n\n file_name_column_index = [\n idx for idx, rm in enumerate(cursor.description) if rm.name == \"FILE_NAME\"\n ][0]\n return [\n f\"{self.export_path}/{row[file_name_column_index]}\"\n for row in cursor.fetchall()\n ]\n\n\ndef _get_entity_schema(\n entity_df: Union[pd.DataFrame, str],\n snowflake_conn: SnowflakeConnection,\n config: RepoConfig,\n) -> Dict[str, np.dtype]:\n\n if isinstance(entity_df, pd.DataFrame):\n\n return dict(zip(entity_df.columns, entity_df.dtypes))\n\n else:\n\n query = f\"SELECT * FROM ({entity_df}) LIMIT 1\"\n limited_entity_df = execute_snowflake_statement(\n snowflake_conn, query\n ).fetch_pandas_all()\n\n return dict(zip(limited_entity_df.columns, limited_entity_df.dtypes))\n\n\ndef _upload_entity_df(\n entity_df: Union[pd.DataFrame, str],\n snowflake_conn: SnowflakeConnection,\n config: RepoConfig,\n table_name: str,\n) -> None:\n\n if isinstance(entity_df, pd.DataFrame):\n # Write the data from the DataFrame to the table\n # Known issues with following entity data types: BINARY\n 
write_pandas(\n snowflake_conn,\n entity_df,\n table_name,\n auto_create_table=True,\n create_temp_table=True,\n )\n\n return None\n elif isinstance(entity_df, str):\n # If the entity_df is a string (SQL query), create a Snowflake table out of it,\n query = f'CREATE TEMPORARY TABLE \"{table_name}\" AS ({entity_df})'\n execute_snowflake_statement(snowflake_conn, query)\n\n return None\n else:\n raise InvalidEntityType(type(entity_df))\n\n\ndef _fix_entity_selections_identifiers(query_context) -> list:\n\n for i, qc in enumerate(query_context):\n for j, es in enumerate(qc.entity_selections):\n query_context[i].entity_selections[j] = f'\"{es}\"'.replace(\" AS \", '\" AS \"')\n\n return query_context\n\n\ndef _get_entity_df_event_timestamp_range(\n entity_df: Union[pd.DataFrame, str],\n entity_df_event_timestamp_col: str,\n snowflake_conn: SnowflakeConnection,\n) -> Tuple[datetime, datetime]:\n if isinstance(entity_df, pd.DataFrame):\n entity_df_event_timestamp = entity_df.loc[\n :, entity_df_event_timestamp_col\n ].infer_objects()\n if pd.api.types.is_string_dtype(entity_df_event_timestamp):\n entity_df_event_timestamp = pd.to_datetime(\n entity_df_event_timestamp, utc=True\n )\n entity_df_event_timestamp_range = (\n entity_df_event_timestamp.min().to_pydatetime(),\n entity_df_event_timestamp.max().to_pydatetime(),\n )\n elif isinstance(entity_df, str):\n # If the entity_df is a string (SQL query), determine range\n # from table\n query = f'SELECT MIN(\"{entity_df_event_timestamp_col}\") AS \"min_value\", MAX(\"{entity_df_event_timestamp_col}\") AS \"max_value\" FROM ({entity_df})'\n results = execute_snowflake_statement(snowflake_conn, query).fetchall()\n\n entity_df_event_timestamp_range = cast(Tuple[datetime, datetime], results[0])\n if (\n entity_df_event_timestamp_range[0] is None\n or entity_df_event_timestamp_range[1] is None\n ):\n raise EntitySQLEmptyResults(entity_df)\n else:\n raise InvalidEntityType(type(entity_df))\n\n return entity_df_event_timestamp_range\n\n\nMULTIPLE_FEATURE_VIEW_POINT_IN_TIME_JOIN = \"\"\"\n/*\n Compute a deterministic hash for the `left_table_query_string` that will be used throughout\n all the logic as the field to GROUP BY the data\n*/\nWITH \"entity_dataframe\" AS (\n SELECT *,\n \"{{entity_df_event_timestamp_col}}\" AS \"entity_timestamp\"\n {% for featureview in featureviews %}\n {% if featureview.entities %}\n ,(\n {% for entity in featureview.entities %}\n CAST(\"{{entity}}\" AS VARCHAR) ||\n {% endfor %}\n CAST(\"{{entity_df_event_timestamp_col}}\" AS VARCHAR)\n ) AS \"{{featureview.name}}__entity_row_unique_id\"\n {% else %}\n ,CAST(\"{{entity_df_event_timestamp_col}}\" AS VARCHAR) AS \"{{featureview.name}}__entity_row_unique_id\"\n {% endif %}\n {% endfor %}\n FROM \"{{ left_table_query_string }}\"\n),\n\n{% for featureview in featureviews %}\n\n\"{{ featureview.name }}__entity_dataframe\" AS (\n SELECT\n {{ featureview.entities | map('tojson') | join(', ')}}{% if featureview.entities %},{% else %}{% endif %}\n \"entity_timestamp\",\n \"{{featureview.name}}__entity_row_unique_id\"\n FROM \"entity_dataframe\"\n GROUP BY\n {{ featureview.entities | map('tojson') | join(', ')}}{% if featureview.entities %},{% else %}{% endif %}\n \"entity_timestamp\",\n \"{{featureview.name}}__entity_row_unique_id\"\n),\n\n/*\n This query template performs the point-in-time correctness join for a single feature set table\n to the provided entity table.\n\n 1. 
We first join the current feature_view to the entity dataframe that has been passed.\n This JOIN has the following logic:\n - For each row of the entity dataframe, only keep the rows where the `timestamp_field`\n is less than the one provided in the entity dataframe\n - If there a TTL for the current feature_view, also keep the rows where the `timestamp_field`\n is higher the the one provided minus the TTL\n - For each row, Join on the entity key and retrieve the `entity_row_unique_id` that has been\n computed previously\n\n The output of this CTE will contain all the necessary information and already filtered out most\n of the data that is not relevant.\n*/\n\n\"{{ featureview.name }}__subquery\" AS (\n SELECT\n \"{{ featureview.timestamp_field }}\" as \"event_timestamp\",\n {{'\"' ~ featureview.created_timestamp_column ~ '\" as \"created_timestamp\",' if featureview.created_timestamp_column else '' }}\n {{featureview.entity_selections | join(', ')}}{% if featureview.entity_selections %},{% else %}{% endif %}\n {% for feature in featureview.features %}\n \"{{ feature }}\" as {% if full_feature_names %}\"{{ featureview.name }}__{{featureview.field_mapping.get(feature, feature)}}\"{% else %}\"{{ featureview.field_mapping.get(feature, feature) }}\"{% endif %}{% if loop.last %}{% else %}, {% endif %}\n {% endfor %}\n FROM {{ featureview.table_subquery }}\n WHERE \"{{ featureview.timestamp_field }}\" <= '{{ featureview.max_event_timestamp }}'\n {% if featureview.ttl == 0 %}{% else %}\n AND \"{{ featureview.timestamp_field }}\" >= '{{ featureview.min_event_timestamp }}'\n {% endif %}\n),\n\n\"{{ featureview.name }}__base\" AS (\n SELECT\n \"subquery\".*,\n \"entity_dataframe\".\"entity_timestamp\",\n \"entity_dataframe\".\"{{featureview.name}}__entity_row_unique_id\"\n FROM \"{{ featureview.name }}__subquery\" AS \"subquery\"\n INNER JOIN \"{{ featureview.name }}__entity_dataframe\" AS \"entity_dataframe\"\n ON TRUE\n AND \"subquery\".\"event_timestamp\" <= \"entity_dataframe\".\"entity_timestamp\"\n\n {% if featureview.ttl == 0 %}{% else %}\n AND \"subquery\".\"event_timestamp\" >= TIMESTAMPADD(second,-{{ featureview.ttl }},\"entity_dataframe\".\"entity_timestamp\")\n {% endif %}\n\n {% for entity in featureview.entities %}\n AND \"subquery\".\"{{ entity }}\" = \"entity_dataframe\".\"{{ entity }}\"\n {% endfor %}\n),\n\n/*\n 2. If the `created_timestamp_column` has been set, we need to\n deduplicate the data first. This is done by calculating the\n `MAX(created_at_timestamp)` for each event_timestamp.\n We then join the data on the next CTE\n*/\n{% if featureview.created_timestamp_column %}\n\"{{ featureview.name }}__dedup\" AS (\n SELECT\n \"{{featureview.name}}__entity_row_unique_id\",\n \"event_timestamp\",\n MAX(\"created_timestamp\") AS \"created_timestamp\"\n FROM \"{{ featureview.name }}__base\"\n GROUP BY \"{{featureview.name}}__entity_row_unique_id\", \"event_timestamp\"\n),\n{% endif %}\n\n/*\n 3. 
The data has been filtered during the first CTE \"*__base\"\n Thus we only need to compute the latest timestamp of each feature.\n*/\n\"{{ featureview.name }}__latest\" AS (\n SELECT\n \"event_timestamp\",\n {% if featureview.created_timestamp_column %}\"created_timestamp\",{% endif %}\n \"{{featureview.name}}__entity_row_unique_id\"\n FROM\n (\n SELECT *,\n ROW_NUMBER() OVER(\n PARTITION BY \"{{featureview.name}}__entity_row_unique_id\"\n ORDER BY \"event_timestamp\" DESC{% if featureview.created_timestamp_column %},\"created_timestamp\" DESC{% endif %}\n ) AS \"row_number\"\n FROM \"{{ featureview.name }}__base\"\n {% if featureview.created_timestamp_column %}\n INNER JOIN \"{{ featureview.name }}__dedup\"\n USING (\"{{featureview.name}}__entity_row_unique_id\", \"event_timestamp\", \"created_timestamp\")\n {% endif %}\n )\n WHERE \"row_number\" = 1\n),\n\n/*\n 4. Once we know the latest value of each feature for a given timestamp,\n we can join again the data back to the original \"base\" dataset\n*/\n\"{{ featureview.name }}__cleaned\" AS (\n SELECT \"base\".*\n FROM \"{{ featureview.name }}__base\" AS \"base\"\n INNER JOIN \"{{ featureview.name }}__latest\"\n USING(\n \"{{featureview.name}}__entity_row_unique_id\",\n \"event_timestamp\"\n {% if featureview.created_timestamp_column %}\n ,\"created_timestamp\"\n {% endif %}\n )\n){% if loop.last %}{% else %}, {% endif %}\n\n\n{% endfor %}\n/*\n Joins the outputs of multiple time travel joins to a single table.\n The entity_dataframe dataset being our source of truth here.\n */\n\nSELECT \"{{ final_output_feature_names | join('\", \"')}}\"\nFROM \"entity_dataframe\"\n{% for featureview in featureviews %}\nLEFT JOIN (\n SELECT\n \"{{featureview.name}}__entity_row_unique_id\"\n {% for feature in featureview.features %}\n ,{% if full_feature_names %}\"{{ featureview.name }}__{{featureview.field_mapping.get(feature, feature)}}\"{% else %}\"{{ featureview.field_mapping.get(feature, feature) }}\"{% endif %}\n {% endfor %}\n FROM \"{{ featureview.name }}__cleaned\"\n) \"{{ featureview.name }}__cleaned\" USING (\"{{featureview.name}}__entity_row_unique_id\")\n{% endfor %}\n\"\"\"\n", "path": "sdk/python/feast/infra/offline_stores/snowflake.py" } ]
[ { "content": "import contextlib\nimport os\nimport uuid\nimport warnings\nfrom datetime import datetime\nfrom functools import reduce\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n ContextManager,\n Dict,\n Iterator,\n List,\n Optional,\n Tuple,\n Union,\n cast,\n)\n\nimport numpy as np\nimport pandas as pd\nimport pyarrow\nfrom pydantic import Field, StrictStr\nfrom pydantic.typing import Literal\nfrom pytz import utc\n\nfrom feast import OnDemandFeatureView\nfrom feast.data_source import DataSource\nfrom feast.errors import (\n EntitySQLEmptyResults,\n InvalidEntityType,\n InvalidSparkSessionException,\n)\nfrom feast.feature_logging import LoggingConfig, LoggingSource\nfrom feast.feature_view import DUMMY_ENTITY_ID, DUMMY_ENTITY_VAL, FeatureView\nfrom feast.infra.offline_stores import offline_utils\nfrom feast.infra.offline_stores.offline_store import (\n OfflineStore,\n RetrievalJob,\n RetrievalMetadata,\n)\nfrom feast.infra.offline_stores.snowflake_source import (\n SavedDatasetSnowflakeStorage,\n SnowflakeLoggingDestination,\n SnowflakeSource,\n)\nfrom feast.infra.registry.base_registry import BaseRegistry\nfrom feast.infra.utils.snowflake.snowflake_utils import (\n GetSnowflakeConnection,\n execute_snowflake_statement,\n write_pandas,\n write_parquet,\n)\nfrom feast.repo_config import FeastConfigBaseModel, RepoConfig\nfrom feast.saved_dataset import SavedDatasetStorage\nfrom feast.usage import log_exceptions_and_usage\n\ntry:\n from snowflake.connector import SnowflakeConnection\nexcept ImportError as e:\n from feast.errors import FeastExtrasDependencyImportError\n\n raise FeastExtrasDependencyImportError(\"snowflake\", str(e))\n\nif TYPE_CHECKING:\n from pyspark.sql import DataFrame, SparkSession\n\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n\nclass SnowflakeOfflineStoreConfig(FeastConfigBaseModel):\n \"\"\"Offline store config for Snowflake\"\"\"\n\n type: Literal[\"snowflake.offline\"] = \"snowflake.offline\"\n \"\"\" Offline store type selector \"\"\"\n\n config_path: Optional[str] = os.path.expanduser(\"~/.snowsql/config\")\n \"\"\" Snowflake config path -- absolute path required (Cant use ~)\"\"\"\n\n account: Optional[str] = None\n \"\"\" Snowflake deployment identifier -- drop .snowflakecomputing.com \"\"\"\n\n user: Optional[str] = None\n \"\"\" Snowflake user name \"\"\"\n\n password: Optional[str] = None\n \"\"\" Snowflake password \"\"\"\n\n role: Optional[str] = None\n \"\"\" Snowflake role name \"\"\"\n\n warehouse: Optional[str] = None\n \"\"\" Snowflake warehouse name \"\"\"\n\n authenticator: Optional[str] = None\n \"\"\" Snowflake authenticator name \"\"\"\n\n database: StrictStr\n \"\"\" Snowflake database name \"\"\"\n\n schema_: Optional[str] = Field(\"PUBLIC\", alias=\"schema\")\n \"\"\" Snowflake schema name \"\"\"\n\n storage_integration_name: Optional[str] = None\n \"\"\" Storage integration name in snowflake \"\"\"\n\n blob_export_location: Optional[str] = None\n \"\"\" Location (in S3, Google storage or Azure storage) where data is offloaded \"\"\"\n\n convert_timestamp_columns: Optional[bool] = None\n \"\"\" Convert timestamp columns on export to a Parquet-supported format \"\"\"\n\n class Config:\n allow_population_by_field_name = True\n\n\nclass SnowflakeOfflineStore(OfflineStore):\n @staticmethod\n @log_exceptions_and_usage(offline_store=\"snowflake\")\n def pull_latest_from_table_or_query(\n config: RepoConfig,\n data_source: DataSource,\n join_key_columns: List[str],\n 
feature_name_columns: List[str],\n timestamp_field: str,\n created_timestamp_column: Optional[str],\n start_date: datetime,\n end_date: datetime,\n ) -> RetrievalJob:\n assert isinstance(config.offline_store, SnowflakeOfflineStoreConfig)\n assert isinstance(data_source, SnowflakeSource)\n\n from_expression = data_source.get_table_query_string()\n if not data_source.database and data_source.table:\n from_expression = f'\"{config.offline_store.database}\".\"{config.offline_store.schema_}\".{from_expression}'\n\n if join_key_columns:\n partition_by_join_key_string = '\"' + '\", \"'.join(join_key_columns) + '\"'\n partition_by_join_key_string = (\n \"PARTITION BY \" + partition_by_join_key_string\n )\n else:\n partition_by_join_key_string = \"\"\n\n timestamp_columns = [timestamp_field]\n if created_timestamp_column:\n timestamp_columns.append(created_timestamp_column)\n\n timestamp_desc_string = '\"' + '\" DESC, \"'.join(timestamp_columns) + '\" DESC'\n field_string = (\n '\"'\n + '\", \"'.join(join_key_columns + feature_name_columns + timestamp_columns)\n + '\"'\n )\n\n if config.offline_store.convert_timestamp_columns:\n select_fields = list(\n map(\n lambda field_name: f'\"{field_name}\"',\n join_key_columns + feature_name_columns,\n )\n )\n select_timestamps = list(\n map(\n lambda field_name: f\"to_varchar({field_name}, 'YYYY-MM-DD\\\"T\\\"HH24:MI:SS.FFTZH:TZM') as {field_name}\",\n timestamp_columns,\n )\n )\n inner_field_string = \", \".join(select_fields + select_timestamps)\n else:\n select_fields = list(\n map(\n lambda field_name: f'\"{field_name}\"',\n join_key_columns + feature_name_columns + timestamp_columns,\n )\n )\n inner_field_string = \", \".join(select_fields)\n\n if data_source.snowflake_options.warehouse:\n config.offline_store.warehouse = data_source.snowflake_options.warehouse\n\n with GetSnowflakeConnection(config.offline_store) as conn:\n snowflake_conn = conn\n\n start_date = start_date.astimezone(tz=utc)\n end_date = end_date.astimezone(tz=utc)\n\n query = f\"\"\"\n SELECT\n {field_string}\n {f''', TRIM({repr(DUMMY_ENTITY_VAL)}::VARIANT,'\"') AS \"{DUMMY_ENTITY_ID}\"''' if not join_key_columns else \"\"}\n FROM (\n SELECT {inner_field_string},\n ROW_NUMBER() OVER({partition_by_join_key_string} ORDER BY {timestamp_desc_string}) AS \"_feast_row\"\n FROM {from_expression}\n WHERE \"{timestamp_field}\" BETWEEN TIMESTAMP '{start_date}' AND TIMESTAMP '{end_date}'\n )\n WHERE \"_feast_row\" = 1\n \"\"\"\n\n return SnowflakeRetrievalJob(\n query=query,\n snowflake_conn=snowflake_conn,\n config=config,\n full_feature_names=False,\n on_demand_feature_views=None,\n )\n\n @staticmethod\n @log_exceptions_and_usage(offline_store=\"snowflake\")\n def pull_all_from_table_or_query(\n config: RepoConfig,\n data_source: DataSource,\n join_key_columns: List[str],\n feature_name_columns: List[str],\n timestamp_field: str,\n start_date: datetime,\n end_date: datetime,\n ) -> RetrievalJob:\n assert isinstance(config.offline_store, SnowflakeOfflineStoreConfig)\n assert isinstance(data_source, SnowflakeSource)\n\n from_expression = data_source.get_table_query_string()\n if not data_source.database and data_source.table:\n from_expression = f'\"{config.offline_store.database}\".\"{config.offline_store.schema_}\".{from_expression}'\n\n field_string = (\n '\"'\n + '\", \"'.join(join_key_columns + feature_name_columns + [timestamp_field])\n + '\"'\n )\n\n if data_source.snowflake_options.warehouse:\n config.offline_store.warehouse = data_source.snowflake_options.warehouse\n\n with 
GetSnowflakeConnection(config.offline_store) as conn:\n snowflake_conn = conn\n\n start_date = start_date.astimezone(tz=utc)\n end_date = end_date.astimezone(tz=utc)\n\n query = f\"\"\"\n SELECT {field_string}\n FROM {from_expression}\n WHERE \"{timestamp_field}\" BETWEEN TIMESTAMP '{start_date}' AND TIMESTAMP '{end_date}'\n \"\"\"\n\n return SnowflakeRetrievalJob(\n query=query,\n snowflake_conn=snowflake_conn,\n config=config,\n full_feature_names=False,\n )\n\n @staticmethod\n @log_exceptions_and_usage(offline_store=\"snowflake\")\n def get_historical_features(\n config: RepoConfig,\n feature_views: List[FeatureView],\n feature_refs: List[str],\n entity_df: Union[pd.DataFrame, str],\n registry: BaseRegistry,\n project: str,\n full_feature_names: bool = False,\n ) -> RetrievalJob:\n assert isinstance(config.offline_store, SnowflakeOfflineStoreConfig)\n for fv in feature_views:\n assert isinstance(fv.batch_source, SnowflakeSource)\n\n with GetSnowflakeConnection(config.offline_store) as conn:\n snowflake_conn = conn\n\n entity_schema = _get_entity_schema(entity_df, snowflake_conn, config)\n\n entity_df_event_timestamp_col = (\n offline_utils.infer_event_timestamp_from_entity_df(entity_schema)\n )\n\n entity_df_event_timestamp_range = _get_entity_df_event_timestamp_range(\n entity_df,\n entity_df_event_timestamp_col,\n snowflake_conn,\n )\n\n @contextlib.contextmanager\n def query_generator() -> Iterator[str]:\n\n table_name = offline_utils.get_temp_entity_table_name()\n\n _upload_entity_df(entity_df, snowflake_conn, config, table_name)\n\n expected_join_keys = offline_utils.get_expected_join_keys(\n project, feature_views, registry\n )\n\n offline_utils.assert_expected_columns_in_entity_df(\n entity_schema, expected_join_keys, entity_df_event_timestamp_col\n )\n\n # Build a query context containing all information required to template the Snowflake SQL query\n query_context = offline_utils.get_feature_view_query_context(\n feature_refs,\n feature_views,\n registry,\n project,\n entity_df_event_timestamp_range,\n )\n\n query_context = _fix_entity_selections_identifiers(query_context)\n\n # Generate the Snowflake SQL query from the query context\n query = offline_utils.build_point_in_time_query(\n query_context,\n left_table_query_string=table_name,\n entity_df_event_timestamp_col=entity_df_event_timestamp_col,\n entity_df_columns=entity_schema.keys(),\n query_template=MULTIPLE_FEATURE_VIEW_POINT_IN_TIME_JOIN,\n full_feature_names=full_feature_names,\n )\n\n yield query\n\n return SnowflakeRetrievalJob(\n query=query_generator,\n snowflake_conn=snowflake_conn,\n config=config,\n full_feature_names=full_feature_names,\n on_demand_feature_views=OnDemandFeatureView.get_requested_odfvs(\n feature_refs, project, registry\n ),\n metadata=RetrievalMetadata(\n features=feature_refs,\n keys=list(entity_schema.keys() - {entity_df_event_timestamp_col}),\n min_event_timestamp=entity_df_event_timestamp_range[0],\n max_event_timestamp=entity_df_event_timestamp_range[1],\n ),\n )\n\n @staticmethod\n def write_logged_features(\n config: RepoConfig,\n data: Union[pyarrow.Table, Path],\n source: LoggingSource,\n logging_config: LoggingConfig,\n registry: BaseRegistry,\n ):\n assert isinstance(logging_config.destination, SnowflakeLoggingDestination)\n\n with GetSnowflakeConnection(config.offline_store) as conn:\n snowflake_conn = conn\n\n if isinstance(data, Path):\n write_parquet(\n snowflake_conn,\n data,\n source.get_schema(registry),\n table_name=logging_config.destination.table_name,\n 
auto_create_table=True,\n )\n else:\n write_pandas(\n snowflake_conn,\n data.to_pandas(),\n table_name=logging_config.destination.table_name,\n auto_create_table=True,\n )\n\n @staticmethod\n def offline_write_batch(\n config: RepoConfig,\n feature_view: FeatureView,\n table: pyarrow.Table,\n progress: Optional[Callable[[int], Any]],\n ):\n assert isinstance(config.offline_store, SnowflakeOfflineStoreConfig)\n assert isinstance(feature_view.batch_source, SnowflakeSource)\n\n pa_schema, column_names = offline_utils.get_pyarrow_schema_from_batch_source(\n config, feature_view.batch_source\n )\n if column_names != table.column_names:\n raise ValueError(\n f\"The input pyarrow table has schema {table.schema} with the incorrect columns {table.column_names}. \"\n f\"The schema is expected to be {pa_schema} with the columns (in this exact order) to be {column_names}.\"\n )\n\n if table.schema != pa_schema:\n table = table.cast(pa_schema)\n\n with GetSnowflakeConnection(config.offline_store) as conn:\n snowflake_conn = conn\n\n write_pandas(\n snowflake_conn,\n table.to_pandas(),\n table_name=feature_view.batch_source.table,\n auto_create_table=True,\n )\n\n\nclass SnowflakeRetrievalJob(RetrievalJob):\n def __init__(\n self,\n query: Union[str, Callable[[], ContextManager[str]]],\n snowflake_conn: SnowflakeConnection,\n config: RepoConfig,\n full_feature_names: bool,\n on_demand_feature_views: Optional[List[OnDemandFeatureView]] = None,\n metadata: Optional[RetrievalMetadata] = None,\n ):\n\n if not isinstance(query, str):\n self._query_generator = query\n else:\n\n @contextlib.contextmanager\n def query_generator() -> Iterator[str]:\n assert isinstance(query, str)\n yield query\n\n self._query_generator = query_generator\n\n self.snowflake_conn = snowflake_conn\n self.config = config\n self._full_feature_names = full_feature_names\n self._on_demand_feature_views = on_demand_feature_views or []\n self._metadata = metadata\n self.export_path: Optional[str]\n if self.config.offline_store.blob_export_location:\n self.export_path = f\"{self.config.offline_store.blob_export_location}/{self.config.project}/{uuid.uuid4()}\"\n else:\n self.export_path = None\n\n @property\n def full_feature_names(self) -> bool:\n return self._full_feature_names\n\n @property\n def on_demand_feature_views(self) -> List[OnDemandFeatureView]:\n return self._on_demand_feature_views\n\n def _to_df_internal(self, timeout: Optional[int] = None) -> pd.DataFrame:\n with self._query_generator() as query:\n\n df = execute_snowflake_statement(\n self.snowflake_conn, query\n ).fetch_pandas_all()\n\n return df\n\n def _to_arrow_internal(self, timeout: Optional[int] = None) -> pyarrow.Table:\n with self._query_generator() as query:\n\n pa_table = execute_snowflake_statement(\n self.snowflake_conn, query\n ).fetch_arrow_all()\n\n if pa_table:\n return pa_table\n else:\n empty_result = execute_snowflake_statement(self.snowflake_conn, query)\n\n return pyarrow.Table.from_pandas(\n pd.DataFrame(columns=[md.name for md in empty_result.description])\n )\n\n def to_snowflake(self, table_name: str, temporary=False) -> None:\n \"\"\"Save dataset as a new Snowflake table\"\"\"\n if self.on_demand_feature_views:\n transformed_df = self.to_df()\n\n write_pandas(\n self.snowflake_conn, transformed_df, table_name, auto_create_table=True\n )\n\n return None\n\n with self._query_generator() as query:\n query = f'CREATE {\"TEMPORARY\" if temporary else \"\"} TABLE IF NOT EXISTS \"{table_name}\" AS ({query});\\n'\n\n 
execute_snowflake_statement(self.snowflake_conn, query)\n\n def to_sql(self) -> str:\n \"\"\"\n Returns the SQL query that will be executed in Snowflake to build the historical feature table.\n \"\"\"\n with self._query_generator() as query:\n return query\n\n def to_spark_df(self, spark_session: \"SparkSession\") -> \"DataFrame\":\n \"\"\"\n Method to convert snowflake query results to pyspark data frame.\n\n Args:\n spark_session: spark Session variable of current environment.\n\n Returns:\n spark_df: A pyspark dataframe.\n \"\"\"\n\n try:\n from pyspark.sql import DataFrame, SparkSession\n except ImportError as e:\n from feast.errors import FeastExtrasDependencyImportError\n\n raise FeastExtrasDependencyImportError(\"spark\", str(e))\n\n if isinstance(spark_session, SparkSession):\n with self._query_generator() as query:\n\n arrow_batches = execute_snowflake_statement(\n self.snowflake_conn, query\n ).fetch_arrow_batches()\n\n if arrow_batches:\n spark_df = reduce(\n DataFrame.unionAll,\n [\n spark_session.createDataFrame(batch.to_pandas())\n for batch in arrow_batches\n ],\n )\n\n return spark_df\n\n else:\n raise EntitySQLEmptyResults(query)\n\n else:\n raise InvalidSparkSessionException(spark_session)\n\n def persist(self, storage: SavedDatasetStorage, allow_overwrite: bool = False):\n assert isinstance(storage, SavedDatasetSnowflakeStorage)\n self.to_snowflake(table_name=storage.snowflake_options.table)\n\n @property\n def metadata(self) -> Optional[RetrievalMetadata]:\n return self._metadata\n\n def supports_remote_storage_export(self) -> bool:\n return (\n self.config.offline_store.storage_integration_name\n and self.config.offline_store.blob_export_location\n )\n\n def to_remote_storage(self) -> List[str]:\n if not self.export_path:\n raise ValueError(\n \"to_remote_storage() requires `blob_export_location` to be specified in config\"\n )\n if not self.config.offline_store.storage_integration_name:\n raise ValueError(\n \"to_remote_storage() requires `storage_integration_name` to be specified in config\"\n )\n\n table = f\"temporary_{uuid.uuid4().hex}\"\n self.to_snowflake(table, temporary=True)\n\n query = f\"\"\"\n COPY INTO '{self.export_path}/{table}' FROM \"{self.config.offline_store.database}\".\"{self.config.offline_store.schema_}\".\"{table}\"\\n\n STORAGE_INTEGRATION = {self.config.offline_store.storage_integration_name}\\n\n FILE_FORMAT = (TYPE = PARQUET)\n DETAILED_OUTPUT = TRUE\n HEADER = TRUE\n \"\"\"\n cursor = execute_snowflake_statement(self.snowflake_conn, query)\n\n file_name_column_index = [\n idx for idx, rm in enumerate(cursor.description) if rm.name == \"FILE_NAME\"\n ][0]\n return [\n f\"{self.export_path}/{row[file_name_column_index]}\"\n for row in cursor.fetchall()\n ]\n\n\ndef _get_entity_schema(\n entity_df: Union[pd.DataFrame, str],\n snowflake_conn: SnowflakeConnection,\n config: RepoConfig,\n) -> Dict[str, np.dtype]:\n\n if isinstance(entity_df, pd.DataFrame):\n\n return dict(zip(entity_df.columns, entity_df.dtypes))\n\n else:\n\n query = f\"SELECT * FROM ({entity_df}) LIMIT 1\"\n limited_entity_df = execute_snowflake_statement(\n snowflake_conn, query\n ).fetch_pandas_all()\n\n return dict(zip(limited_entity_df.columns, limited_entity_df.dtypes))\n\n\ndef _upload_entity_df(\n entity_df: Union[pd.DataFrame, str],\n snowflake_conn: SnowflakeConnection,\n config: RepoConfig,\n table_name: str,\n) -> None:\n\n if isinstance(entity_df, pd.DataFrame):\n # Write the data from the DataFrame to the table\n # Known issues with following entity data types: 
BINARY\n write_pandas(\n snowflake_conn,\n entity_df,\n table_name,\n auto_create_table=True,\n create_temp_table=True,\n )\n\n return None\n elif isinstance(entity_df, str):\n # If the entity_df is a string (SQL query), create a Snowflake table out of it,\n query = f'CREATE TEMPORARY TABLE \"{table_name}\" AS ({entity_df})'\n execute_snowflake_statement(snowflake_conn, query)\n\n return None\n else:\n raise InvalidEntityType(type(entity_df))\n\n\ndef _fix_entity_selections_identifiers(query_context) -> list:\n\n for i, qc in enumerate(query_context):\n for j, es in enumerate(qc.entity_selections):\n query_context[i].entity_selections[j] = f'\"{es}\"'.replace(\" AS \", '\" AS \"')\n\n return query_context\n\n\ndef _get_entity_df_event_timestamp_range(\n entity_df: Union[pd.DataFrame, str],\n entity_df_event_timestamp_col: str,\n snowflake_conn: SnowflakeConnection,\n) -> Tuple[datetime, datetime]:\n if isinstance(entity_df, pd.DataFrame):\n entity_df_event_timestamp = entity_df.loc[\n :, entity_df_event_timestamp_col\n ].infer_objects()\n if pd.api.types.is_string_dtype(entity_df_event_timestamp):\n entity_df_event_timestamp = pd.to_datetime(\n entity_df_event_timestamp, utc=True\n )\n entity_df_event_timestamp_range = (\n entity_df_event_timestamp.min().to_pydatetime(),\n entity_df_event_timestamp.max().to_pydatetime(),\n )\n elif isinstance(entity_df, str):\n # If the entity_df is a string (SQL query), determine range\n # from table\n query = f'SELECT MIN(\"{entity_df_event_timestamp_col}\") AS \"min_value\", MAX(\"{entity_df_event_timestamp_col}\") AS \"max_value\" FROM ({entity_df})'\n results = execute_snowflake_statement(snowflake_conn, query).fetchall()\n\n entity_df_event_timestamp_range = cast(Tuple[datetime, datetime], results[0])\n if (\n entity_df_event_timestamp_range[0] is None\n or entity_df_event_timestamp_range[1] is None\n ):\n raise EntitySQLEmptyResults(entity_df)\n else:\n raise InvalidEntityType(type(entity_df))\n\n return entity_df_event_timestamp_range\n\n\nMULTIPLE_FEATURE_VIEW_POINT_IN_TIME_JOIN = \"\"\"\n/*\n Compute a deterministic hash for the `left_table_query_string` that will be used throughout\n all the logic as the field to GROUP BY the data\n*/\nWITH \"entity_dataframe\" AS (\n SELECT *,\n \"{{entity_df_event_timestamp_col}}\" AS \"entity_timestamp\"\n {% for featureview in featureviews %}\n {% if featureview.entities %}\n ,(\n {% for entity in featureview.entities %}\n CAST(\"{{entity}}\" AS VARCHAR) ||\n {% endfor %}\n CAST(\"{{entity_df_event_timestamp_col}}\" AS VARCHAR)\n ) AS \"{{featureview.name}}__entity_row_unique_id\"\n {% else %}\n ,CAST(\"{{entity_df_event_timestamp_col}}\" AS VARCHAR) AS \"{{featureview.name}}__entity_row_unique_id\"\n {% endif %}\n {% endfor %}\n FROM \"{{ left_table_query_string }}\"\n),\n\n{% for featureview in featureviews %}\n\n\"{{ featureview.name }}__entity_dataframe\" AS (\n SELECT\n {{ featureview.entities | map('tojson') | join(', ')}}{% if featureview.entities %},{% else %}{% endif %}\n \"entity_timestamp\",\n \"{{featureview.name}}__entity_row_unique_id\"\n FROM \"entity_dataframe\"\n GROUP BY\n {{ featureview.entities | map('tojson') | join(', ')}}{% if featureview.entities %},{% else %}{% endif %}\n \"entity_timestamp\",\n \"{{featureview.name}}__entity_row_unique_id\"\n),\n\n/*\n This query template performs the point-in-time correctness join for a single feature set table\n to the provided entity table.\n\n 1. 
We first join the current feature_view to the entity dataframe that has been passed.\n This JOIN has the following logic:\n - For each row of the entity dataframe, only keep the rows where the `timestamp_field`\n is less than the one provided in the entity dataframe\n - If there a TTL for the current feature_view, also keep the rows where the `timestamp_field`\n is higher the the one provided minus the TTL\n - For each row, Join on the entity key and retrieve the `entity_row_unique_id` that has been\n computed previously\n\n The output of this CTE will contain all the necessary information and already filtered out most\n of the data that is not relevant.\n*/\n\n\"{{ featureview.name }}__subquery\" AS (\n SELECT\n \"{{ featureview.timestamp_field }}\" as \"event_timestamp\",\n {{'\"' ~ featureview.created_timestamp_column ~ '\" as \"created_timestamp\",' if featureview.created_timestamp_column else '' }}\n {{featureview.entity_selections | join(', ')}}{% if featureview.entity_selections %},{% else %}{% endif %}\n {% for feature in featureview.features %}\n \"{{ feature }}\" as {% if full_feature_names %}\"{{ featureview.name }}__{{featureview.field_mapping.get(feature, feature)}}\"{% else %}\"{{ featureview.field_mapping.get(feature, feature) }}\"{% endif %}{% if loop.last %}{% else %}, {% endif %}\n {% endfor %}\n FROM {{ featureview.table_subquery }}\n WHERE \"{{ featureview.timestamp_field }}\" <= '{{ featureview.max_event_timestamp }}'\n {% if featureview.ttl == 0 %}{% else %}\n AND \"{{ featureview.timestamp_field }}\" >= '{{ featureview.min_event_timestamp }}'\n {% endif %}\n),\n\n\"{{ featureview.name }}__base\" AS (\n SELECT\n \"subquery\".*,\n \"entity_dataframe\".\"entity_timestamp\",\n \"entity_dataframe\".\"{{featureview.name}}__entity_row_unique_id\"\n FROM \"{{ featureview.name }}__subquery\" AS \"subquery\"\n INNER JOIN \"{{ featureview.name }}__entity_dataframe\" AS \"entity_dataframe\"\n ON TRUE\n AND \"subquery\".\"event_timestamp\" <= \"entity_dataframe\".\"entity_timestamp\"\n\n {% if featureview.ttl == 0 %}{% else %}\n AND \"subquery\".\"event_timestamp\" >= TIMESTAMPADD(second,-{{ featureview.ttl }},\"entity_dataframe\".\"entity_timestamp\")\n {% endif %}\n\n {% for entity in featureview.entities %}\n AND \"subquery\".\"{{ entity }}\" = \"entity_dataframe\".\"{{ entity }}\"\n {% endfor %}\n),\n\n/*\n 2. If the `created_timestamp_column` has been set, we need to\n deduplicate the data first. This is done by calculating the\n `MAX(created_at_timestamp)` for each event_timestamp.\n We then join the data on the next CTE\n*/\n{% if featureview.created_timestamp_column %}\n\"{{ featureview.name }}__dedup\" AS (\n SELECT\n \"{{featureview.name}}__entity_row_unique_id\",\n \"event_timestamp\",\n MAX(\"created_timestamp\") AS \"created_timestamp\"\n FROM \"{{ featureview.name }}__base\"\n GROUP BY \"{{featureview.name}}__entity_row_unique_id\", \"event_timestamp\"\n),\n{% endif %}\n\n/*\n 3. 
The data has been filtered during the first CTE \"*__base\"\n Thus we only need to compute the latest timestamp of each feature.\n*/\n\"{{ featureview.name }}__latest\" AS (\n SELECT\n \"event_timestamp\",\n {% if featureview.created_timestamp_column %}\"created_timestamp\",{% endif %}\n \"{{featureview.name}}__entity_row_unique_id\"\n FROM\n (\n SELECT *,\n ROW_NUMBER() OVER(\n PARTITION BY \"{{featureview.name}}__entity_row_unique_id\"\n ORDER BY \"event_timestamp\" DESC{% if featureview.created_timestamp_column %},\"created_timestamp\" DESC{% endif %}\n ) AS \"row_number\"\n FROM \"{{ featureview.name }}__base\"\n {% if featureview.created_timestamp_column %}\n INNER JOIN \"{{ featureview.name }}__dedup\"\n USING (\"{{featureview.name}}__entity_row_unique_id\", \"event_timestamp\", \"created_timestamp\")\n {% endif %}\n )\n WHERE \"row_number\" = 1\n),\n\n/*\n 4. Once we know the latest value of each feature for a given timestamp,\n we can join again the data back to the original \"base\" dataset\n*/\n\"{{ featureview.name }}__cleaned\" AS (\n SELECT \"base\".*\n FROM \"{{ featureview.name }}__base\" AS \"base\"\n INNER JOIN \"{{ featureview.name }}__latest\"\n USING(\n \"{{featureview.name}}__entity_row_unique_id\",\n \"event_timestamp\"\n {% if featureview.created_timestamp_column %}\n ,\"created_timestamp\"\n {% endif %}\n )\n){% if loop.last %}{% else %}, {% endif %}\n\n\n{% endfor %}\n/*\n Joins the outputs of multiple time travel joins to a single table.\n The entity_dataframe dataset being our source of truth here.\n */\n\nSELECT \"{{ final_output_feature_names | join('\", \"')}}\"\nFROM \"entity_dataframe\"\n{% for featureview in featureviews %}\nLEFT JOIN (\n SELECT\n \"{{featureview.name}}__entity_row_unique_id\"\n {% for feature in featureview.features %}\n ,{% if full_feature_names %}\"{{ featureview.name }}__{{featureview.field_mapping.get(feature, feature)}}\"{% else %}\"{{ featureview.field_mapping.get(feature, feature) }}\"{% endif %}\n {% endfor %}\n FROM \"{{ featureview.name }}__cleaned\"\n) \"{{ featureview.name }}__cleaned\" USING (\"{{featureview.name}}__entity_row_unique_id\")\n{% endfor %}\n\"\"\"\n", "path": "sdk/python/feast/infra/offline_stores/snowflake.py" } ]
diff --git a/sdk/python/feast/infra/offline_stores/snowflake.py b/sdk/python/feast/infra/offline_stores/snowflake.py index 1dc18256fa4..847e0733810 100644 --- a/sdk/python/feast/infra/offline_stores/snowflake.py +++ b/sdk/python/feast/infra/offline_stores/snowflake.py @@ -556,7 +556,7 @@ def to_remote_storage(self) -> List[str]: ) table = f"temporary_{uuid.uuid4().hex}" - self.to_snowflake(table) + self.to_snowflake(table, temporary=True) query = f""" COPY INTO '{self.export_path}/{table}' FROM "{self.config.offline_store.database}"."{self.config.offline_store.schema_}"."{table}"\n
to_remote_storage() resulting in undeleted temporary tables in Snowflake

## Expected Behavior
When calling get_historical_features.to_remote_storage(), any temporary tables created in Snowflake are deleted after the Snowflake session ends.

## Current Behavior
When calling get_historical_features.to_remote_storage(), the temporary tables created during the join process are not deleted after the Snowflake session ends. These tables are set to a retention time of 1 day, but they are not deleted and still exist after 24 hours. I tested this with `to_df()` and the above-described issue does not occur. I also tried explicitly ending the session to make sure that wasn't the issue, but even after confirming the session was ended, the issue still persists.

## Steps to reproduce
1. For the FeatureStore object, set the RepoConfig offline store config to specify the following:
```json
{
    "blob_export_location": <s3_staging_url>,
    "storage_integration_name": <storage_integration>,
    "role": <stage_role>,
    "schema_": <stage_schema>,
}
```
2. Call `get_historical_features(entity_df=entity, features=features, full_feature_names=True).to_remote_storage()`
3. Check Snowflake stage tables and look for tables created at the time of running that start with `temporary_`

### Specifications
- Version: 0.30.0
- Platform: Ubuntu
- Subsystem:

## Possible Solution
No possible solution known at the time of reporting
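The fix in the diff above boils down to creating the intermediate export table as a session-scoped TEMPORARY table, so Snowflake drops it automatically when the session ends. The minimal sketch below illustrates that idea; it is not Feast's implementation. `export_to_remote_storage`, `conn`, `export_path` and `storage_integration` are illustrative stand-ins for an open `snowflake.connector` connection and the `blob_export_location`/`storage_integration_name` options described in the issue, and the COPY INTO options mirror the ones used in `to_remote_storage()` above.

```python
# Minimal sketch (not Feast's code): materialise the query into a TEMPORARY table so
# Snowflake drops it when the session ends, then COPY it out to blob storage.
import uuid


def export_to_remote_storage(conn, query: str, export_path: str, storage_integration: str) -> str:
    table = f"temporary_{uuid.uuid4().hex}"
    cur = conn.cursor()
    # TEMPORARY scopes the table to this session; nothing is left behind afterwards.
    cur.execute(f'CREATE TEMPORARY TABLE "{table}" AS ({query})')
    cur.execute(
        f"COPY INTO '{export_path}/{table}' FROM \"{table}\" "
        f"STORAGE_INTEGRATION = {storage_integration} "
        "FILE_FORMAT = (TYPE = PARQUET) HEADER = TRUE"
    )
    return f"{export_path}/{table}"
```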
kubeflow__pipelines-4187
[ { "content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport kfp.dsl as dsl\nimport kfp\n\n\[email protected]_component_from_func\ndef print_op(s: str):\n print(s)\n\[email protected](name='my-pipeline')\ndef pipeline2(my_pipe_param=10):\n loop_args = [{'A_a': 1, 'B_b': 2}, {'A_a': 10, 'B_b': 20}]\n with dsl.ParallelFor(loop_args, parallelism=1) as item:\n print_op(item)\n print_op(item.A_a)\n print_op(item.B_b)\n\n\nif __name__ == '__main__':\n kfp.compiler.Compiler().compile(pipeline, __file__ + '.yaml')\n", "path": "samples/core/loop_parallelism/loop_parallelism.py" } ]
[ { "content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport kfp.dsl as dsl\nimport kfp\n\n\[email protected]_component_from_func\ndef print_op(s: str):\n print(s)\n\[email protected](name='my-pipeline')\ndef pipeline():\n loop_args = [{'A_a': 1, 'B_b': 2}, {'A_a': 10, 'B_b': 20}]\n with dsl.ParallelFor(loop_args, parallelism=10) as item:\n print_op(item)\n print_op(item.A_a)\n print_op(item.B_b)\n\n\nif __name__ == '__main__':\n kfp.compiler.Compiler().compile(pipeline, __file__ + '.yaml')\n", "path": "samples/core/loop_parallelism/loop_parallelism.py" } ]
diff --git a/samples/core/loop_parallelism/loop_parallelism.py b/samples/core/loop_parallelism/loop_parallelism.py index 5a594face2e..995e31e9c84 100644 --- a/samples/core/loop_parallelism/loop_parallelism.py +++ b/samples/core/loop_parallelism/loop_parallelism.py @@ -21,9 +21,9 @@ def print_op(s: str): print(s) @dsl.pipeline(name='my-pipeline') -def pipeline2(my_pipe_param=10): +def pipeline(): loop_args = [{'A_a': 1, 'B_b': 2}, {'A_a': 10, 'B_b': 20}] - with dsl.ParallelFor(loop_args, parallelism=1) as item: + with dsl.ParallelFor(loop_args, parallelism=10) as item: print_op(item) print_op(item.A_a) print_op(item.B_b)
allow output artifact store configuration (vs hard coded)

It seems like the output artifacts are always stored in a specific minio service, port, namespace, bucket, secrets, etc. (`minio-service.kubeflow:9000`). See: https://github.com/kubeflow/pipelines/blob/f40a22a3f4a8e06d20cf3e3f425b5058d5c87e0b/sdk/python/kfp/compiler/_op_to_template.py#L148

It would be great to make this flexible, e.g. allow using S3, or change namespace or bucket names. I suggest making it configurable; I can do such a PR if we agree it's needed.

flexible pipeline service (host) path in client SDK

When creating an SDK `Client()`, the path to the `ml-pipeline` API service is loaded from a hard-coded value (`ml-pipeline.kubeflow.svc.cluster.local:8888`), which indicates a specific k8s namespace. It could be valuable to load that default value from an env variable, i.e. changing the line in `_client.py` from:

`config.host = host if host else Client.IN_CLUSTER_DNS_NAME`

to:

`config.host = host or os.environ.get('ML_PIPELINE_DNS_NAME', Client.IN_CLUSTER_DNS_NAME)`

Also note that when a user provides the `host` parameter, the ipython output points to the API server and not to the UI service (see the logic in `_get_url_prefix()`), which seems like a potential bug. If it's acceptable, I can submit a PR for the line change above.
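The second request above already spells out the proposed change; the sketch below just isolates that fallback chain. `resolve_host` is a hypothetical helper (not the actual `kfp` client code), `ML_PIPELINE_DNS_NAME` is the variable name suggested in the issue, and the default value is the in-cluster DNS name quoted there.

```python
# Sketch of the host-resolution order proposed in the issue: explicit argument first,
# then an environment variable, then the hard-coded in-cluster default.
import os

IN_CLUSTER_DNS_NAME = "ml-pipeline.kubeflow.svc.cluster.local:8888"


def resolve_host(host=None):
    # Explicit argument wins, then the environment, then the in-cluster default.
    return host or os.environ.get("ML_PIPELINE_DNS_NAME", IN_CLUSTER_DNS_NAME)


print(resolve_host("my-pipelines.example.com:8888"))  # explicit host wins
print(resolve_host())  # env var if set, otherwise the hard-coded default
```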
mitmproxy__mitmproxy-6866
[ { "content": "from __future__ import annotations\n\nimport binascii\nimport weakref\nfrom abc import ABC\nfrom abc import abstractmethod\nfrom collections.abc import MutableMapping\nfrom typing import Optional\n\nimport ldap3\nimport passlib.apache\n\nfrom mitmproxy import connection\nfrom mitmproxy import ctx\nfrom mitmproxy import exceptions\nfrom mitmproxy import http\nfrom mitmproxy.net.http import status_codes\nfrom mitmproxy.proxy import mode_specs\nfrom mitmproxy.proxy.layers import modes\n\nREALM = \"mitmproxy\"\n\n\nclass ProxyAuth:\n validator: Validator | None = None\n\n def __init__(self) -> None:\n self.authenticated: MutableMapping[connection.Client, tuple[str, str]] = (\n weakref.WeakKeyDictionary()\n )\n \"\"\"Contains all connections that are permanently authenticated after an HTTP CONNECT\"\"\"\n\n def load(self, loader):\n loader.add_option(\n \"proxyauth\",\n Optional[str],\n None,\n \"\"\"\n Require proxy authentication. Format:\n \"username:pass\",\n \"any\" to accept any user/pass combination,\n \"@path\" to use an Apache htpasswd file,\n or \"ldap[s]:url_server_ldap[:port]:dn_auth:password:dn_subtree[?search_filter_key=...]\" for LDAP authentication.\n \"\"\",\n )\n\n def configure(self, updated):\n if \"proxyauth\" in updated:\n auth = ctx.options.proxyauth\n if auth:\n if auth == \"any\":\n self.validator = AcceptAll()\n elif auth.startswith(\"@\"):\n self.validator = Htpasswd(auth)\n elif ctx.options.proxyauth.startswith(\"ldap\"):\n self.validator = Ldap(auth)\n elif \":\" in ctx.options.proxyauth:\n self.validator = SingleUser(auth)\n else:\n raise exceptions.OptionsError(\"Invalid proxyauth specification.\")\n else:\n self.validator = None\n\n def socks5_auth(self, data: modes.Socks5AuthData) -> None:\n if self.validator and self.validator(data.username, data.password):\n data.valid = True\n self.authenticated[data.client_conn] = data.username, data.password\n\n def http_connect(self, f: http.HTTPFlow) -> None:\n if self.validator and self.authenticate_http(f):\n # Make a note that all further requests over this connection are ok.\n self.authenticated[f.client_conn] = f.metadata[\"proxyauth\"]\n\n def requestheaders(self, f: http.HTTPFlow) -> None:\n if self.validator:\n # Is this connection authenticated by a previous HTTP CONNECT?\n if f.client_conn in self.authenticated:\n f.metadata[\"proxyauth\"] = self.authenticated[f.client_conn]\n else:\n self.authenticate_http(f)\n\n def authenticate_http(self, f: http.HTTPFlow) -> bool:\n \"\"\"\n Authenticate an HTTP request, returns if authentication was successful.\n\n If valid credentials are found, the matching authentication header is removed.\n In no or invalid credentials are found, flow.response is set to an error page.\n \"\"\"\n assert self.validator\n username = None\n password = None\n is_valid = False\n\n is_proxy = is_http_proxy(f)\n auth_header = http_auth_header(is_proxy)\n try:\n auth_value = f.request.headers.get(auth_header, \"\")\n scheme, username, password = parse_http_basic_auth(auth_value)\n is_valid = self.validator(username, password)\n except Exception:\n pass\n\n if is_valid:\n f.metadata[\"proxyauth\"] = (username, password)\n del f.request.headers[auth_header]\n return True\n else:\n f.response = make_auth_required_response(is_proxy)\n return False\n\n\ndef make_auth_required_response(is_proxy: bool) -> http.Response:\n if is_proxy:\n status_code = status_codes.PROXY_AUTH_REQUIRED\n headers = {\"Proxy-Authenticate\": f'Basic realm=\"{REALM}\"'}\n else:\n status_code = 
status_codes.UNAUTHORIZED\n headers = {\"WWW-Authenticate\": f'Basic realm=\"{REALM}\"'}\n\n reason = http.status_codes.RESPONSES[status_code]\n return http.Response.make(\n status_code,\n (\n f\"<html>\"\n f\"<head><title>{status_code} {reason}</title></head>\"\n f\"<body><h1>{status_code} {reason}</h1></body>\"\n f\"</html>\"\n ),\n headers,\n )\n\n\ndef http_auth_header(is_proxy: bool) -> str:\n if is_proxy:\n return \"Proxy-Authorization\"\n else:\n return \"Authorization\"\n\n\ndef is_http_proxy(f: http.HTTPFlow) -> bool:\n \"\"\"\n Returns:\n - True, if authentication is done as if mitmproxy is a proxy\n - False, if authentication is done as if mitmproxy is an HTTP server\n \"\"\"\n return isinstance(\n f.client_conn.proxy_mode, (mode_specs.RegularMode, mode_specs.UpstreamMode)\n )\n\n\ndef mkauth(username: str, password: str, scheme: str = \"basic\") -> str:\n \"\"\"\n Craft a basic auth string\n \"\"\"\n v = binascii.b2a_base64((username + \":\" + password).encode(\"utf8\")).decode(\"ascii\")\n return scheme + \" \" + v\n\n\ndef parse_http_basic_auth(s: str) -> tuple[str, str, str]:\n \"\"\"\n Parse a basic auth header.\n Raises a ValueError if the input is invalid.\n \"\"\"\n scheme, authinfo = s.split()\n if scheme.lower() != \"basic\":\n raise ValueError(\"Unknown scheme\")\n try:\n user, password = (\n binascii.a2b_base64(authinfo.encode()).decode(\"utf8\", \"replace\").split(\":\")\n )\n except binascii.Error as e:\n raise ValueError(str(e))\n return scheme, user, password\n\n\nclass Validator(ABC):\n \"\"\"Base class for all username/password validators.\"\"\"\n\n @abstractmethod\n def __call__(self, username: str, password: str) -> bool:\n raise NotImplementedError\n\n\nclass AcceptAll(Validator):\n def __call__(self, username: str, password: str) -> bool:\n return True\n\n\nclass SingleUser(Validator):\n def __init__(self, proxyauth: str):\n try:\n self.username, self.password = proxyauth.split(\":\")\n except ValueError:\n raise exceptions.OptionsError(\"Invalid single-user auth specification.\")\n\n def __call__(self, username: str, password: str) -> bool:\n return self.username == username and self.password == password\n\n\nclass Htpasswd(Validator):\n def __init__(self, proxyauth: str):\n path = proxyauth[1:]\n try:\n self.htpasswd = passlib.apache.HtpasswdFile(path)\n except (ValueError, OSError):\n raise exceptions.OptionsError(f\"Could not open htpasswd file: {path}\")\n\n def __call__(self, username: str, password: str) -> bool:\n return self.htpasswd.check_password(username, password)\n\n\nclass Ldap(Validator):\n conn: ldap3.Connection\n server: ldap3.Server\n dn_subtree: str\n filter_key: str\n\n def __init__(self, proxyauth: str):\n (\n use_ssl,\n url,\n port,\n ldap_user,\n ldap_pass,\n self.dn_subtree,\n self.filter_key,\n ) = self.parse_spec(proxyauth)\n server = ldap3.Server(url, port=port, use_ssl=use_ssl)\n conn = ldap3.Connection(server, ldap_user, ldap_pass, auto_bind=True)\n self.conn = conn\n self.server = server\n\n @staticmethod\n def parse_spec(spec: str) -> tuple[bool, str, int | None, str, str, str, str]:\n try:\n if spec.count(\":\") > 4:\n (\n security,\n url,\n port_str,\n ldap_user,\n ldap_pass,\n dn_subtree,\n ) = spec.split(\":\")\n port = int(port_str)\n else:\n security, url, ldap_user, ldap_pass, dn_subtree = spec.split(\":\")\n port = None\n\n if \"?\" in dn_subtree:\n dn_subtree, search_str = dn_subtree.split(\"?\")\n key, value = search_str.split(\"=\")\n if key == \"search_filter_key\":\n search_filter_key = value\n else:\n raise 
ValueError\n else:\n search_filter_key = \"cn\"\n\n if security == \"ldaps\":\n use_ssl = True\n elif security == \"ldap\":\n use_ssl = False\n else:\n raise ValueError\n\n return (\n use_ssl,\n url,\n port,\n ldap_user,\n ldap_pass,\n dn_subtree,\n search_filter_key,\n )\n except ValueError:\n raise exceptions.OptionsError(f\"Invalid LDAP specification: {spec}\")\n\n def __call__(self, username: str, password: str) -> bool:\n if not username or not password:\n return False\n self.conn.search(self.dn_subtree, f\"({self.filter_key}={username})\")\n if self.conn.response:\n c = ldap3.Connection(\n self.server, self.conn.response[0][\"dn\"], password, auto_bind=True\n )\n if c:\n return True\n return False\n", "path": "mitmproxy/addons/proxyauth.py" } ]
[ { "content": "from __future__ import annotations\n\nimport binascii\nimport weakref\nfrom abc import ABC\nfrom abc import abstractmethod\nfrom collections.abc import MutableMapping\nfrom typing import Optional\n\nimport ldap3\nimport passlib.apache\n\nfrom mitmproxy import connection\nfrom mitmproxy import ctx\nfrom mitmproxy import exceptions\nfrom mitmproxy import http\nfrom mitmproxy.net.http import status_codes\nfrom mitmproxy.proxy import mode_specs\nfrom mitmproxy.proxy.layers import modes\n\nREALM = \"mitmproxy\"\n\n\nclass ProxyAuth:\n validator: Validator | None = None\n\n def __init__(self) -> None:\n self.authenticated: MutableMapping[connection.Client, tuple[str, str]] = (\n weakref.WeakKeyDictionary()\n )\n \"\"\"Contains all connections that are permanently authenticated after an HTTP CONNECT\"\"\"\n\n def load(self, loader):\n loader.add_option(\n \"proxyauth\",\n Optional[str],\n None,\n \"\"\"\n Require proxy authentication. Format:\n \"username:pass\",\n \"any\" to accept any user/pass combination,\n \"@path\" to use an Apache htpasswd file,\n or \"ldap[s]:url_server_ldap[:port]:dn_auth:password:dn_subtree[?search_filter_key=...]\" for LDAP authentication.\n \"\"\",\n )\n\n def configure(self, updated):\n if \"proxyauth\" in updated:\n auth = ctx.options.proxyauth\n if auth:\n if auth == \"any\":\n self.validator = AcceptAll()\n elif auth.startswith(\"@\"):\n self.validator = Htpasswd(auth)\n elif ctx.options.proxyauth.startswith(\"ldap\"):\n self.validator = Ldap(auth)\n elif \":\" in ctx.options.proxyauth:\n self.validator = SingleUser(auth)\n else:\n raise exceptions.OptionsError(\"Invalid proxyauth specification.\")\n else:\n self.validator = None\n\n def socks5_auth(self, data: modes.Socks5AuthData) -> None:\n if self.validator and self.validator(data.username, data.password):\n data.valid = True\n self.authenticated[data.client_conn] = data.username, data.password\n\n def http_connect(self, f: http.HTTPFlow) -> None:\n if self.validator and self.authenticate_http(f):\n # Make a note that all further requests over this connection are ok.\n self.authenticated[f.client_conn] = f.metadata[\"proxyauth\"]\n\n def requestheaders(self, f: http.HTTPFlow) -> None:\n if self.validator:\n # Is this connection authenticated by a previous HTTP CONNECT?\n if f.client_conn in self.authenticated:\n f.metadata[\"proxyauth\"] = self.authenticated[f.client_conn]\n elif f.is_replay:\n pass\n else:\n self.authenticate_http(f)\n\n def authenticate_http(self, f: http.HTTPFlow) -> bool:\n \"\"\"\n Authenticate an HTTP request, returns if authentication was successful.\n\n If valid credentials are found, the matching authentication header is removed.\n In no or invalid credentials are found, flow.response is set to an error page.\n \"\"\"\n assert self.validator\n username = None\n password = None\n is_valid = False\n\n is_proxy = is_http_proxy(f)\n auth_header = http_auth_header(is_proxy)\n try:\n auth_value = f.request.headers.get(auth_header, \"\")\n scheme, username, password = parse_http_basic_auth(auth_value)\n is_valid = self.validator(username, password)\n except Exception:\n pass\n\n if is_valid:\n f.metadata[\"proxyauth\"] = (username, password)\n del f.request.headers[auth_header]\n return True\n else:\n f.response = make_auth_required_response(is_proxy)\n return False\n\n\ndef make_auth_required_response(is_proxy: bool) -> http.Response:\n if is_proxy:\n status_code = status_codes.PROXY_AUTH_REQUIRED\n headers = {\"Proxy-Authenticate\": f'Basic realm=\"{REALM}\"'}\n else:\n 
status_code = status_codes.UNAUTHORIZED\n headers = {\"WWW-Authenticate\": f'Basic realm=\"{REALM}\"'}\n\n reason = http.status_codes.RESPONSES[status_code]\n return http.Response.make(\n status_code,\n (\n f\"<html>\"\n f\"<head><title>{status_code} {reason}</title></head>\"\n f\"<body><h1>{status_code} {reason}</h1></body>\"\n f\"</html>\"\n ),\n headers,\n )\n\n\ndef http_auth_header(is_proxy: bool) -> str:\n if is_proxy:\n return \"Proxy-Authorization\"\n else:\n return \"Authorization\"\n\n\ndef is_http_proxy(f: http.HTTPFlow) -> bool:\n \"\"\"\n Returns:\n - True, if authentication is done as if mitmproxy is a proxy\n - False, if authentication is done as if mitmproxy is an HTTP server\n \"\"\"\n return isinstance(\n f.client_conn.proxy_mode, (mode_specs.RegularMode, mode_specs.UpstreamMode)\n )\n\n\ndef mkauth(username: str, password: str, scheme: str = \"basic\") -> str:\n \"\"\"\n Craft a basic auth string\n \"\"\"\n v = binascii.b2a_base64((username + \":\" + password).encode(\"utf8\")).decode(\"ascii\")\n return scheme + \" \" + v\n\n\ndef parse_http_basic_auth(s: str) -> tuple[str, str, str]:\n \"\"\"\n Parse a basic auth header.\n Raises a ValueError if the input is invalid.\n \"\"\"\n scheme, authinfo = s.split()\n if scheme.lower() != \"basic\":\n raise ValueError(\"Unknown scheme\")\n try:\n user, password = (\n binascii.a2b_base64(authinfo.encode()).decode(\"utf8\", \"replace\").split(\":\")\n )\n except binascii.Error as e:\n raise ValueError(str(e))\n return scheme, user, password\n\n\nclass Validator(ABC):\n \"\"\"Base class for all username/password validators.\"\"\"\n\n @abstractmethod\n def __call__(self, username: str, password: str) -> bool:\n raise NotImplementedError\n\n\nclass AcceptAll(Validator):\n def __call__(self, username: str, password: str) -> bool:\n return True\n\n\nclass SingleUser(Validator):\n def __init__(self, proxyauth: str):\n try:\n self.username, self.password = proxyauth.split(\":\")\n except ValueError:\n raise exceptions.OptionsError(\"Invalid single-user auth specification.\")\n\n def __call__(self, username: str, password: str) -> bool:\n return self.username == username and self.password == password\n\n\nclass Htpasswd(Validator):\n def __init__(self, proxyauth: str):\n path = proxyauth[1:]\n try:\n self.htpasswd = passlib.apache.HtpasswdFile(path)\n except (ValueError, OSError):\n raise exceptions.OptionsError(f\"Could not open htpasswd file: {path}\")\n\n def __call__(self, username: str, password: str) -> bool:\n return self.htpasswd.check_password(username, password)\n\n\nclass Ldap(Validator):\n conn: ldap3.Connection\n server: ldap3.Server\n dn_subtree: str\n filter_key: str\n\n def __init__(self, proxyauth: str):\n (\n use_ssl,\n url,\n port,\n ldap_user,\n ldap_pass,\n self.dn_subtree,\n self.filter_key,\n ) = self.parse_spec(proxyauth)\n server = ldap3.Server(url, port=port, use_ssl=use_ssl)\n conn = ldap3.Connection(server, ldap_user, ldap_pass, auto_bind=True)\n self.conn = conn\n self.server = server\n\n @staticmethod\n def parse_spec(spec: str) -> tuple[bool, str, int | None, str, str, str, str]:\n try:\n if spec.count(\":\") > 4:\n (\n security,\n url,\n port_str,\n ldap_user,\n ldap_pass,\n dn_subtree,\n ) = spec.split(\":\")\n port = int(port_str)\n else:\n security, url, ldap_user, ldap_pass, dn_subtree = spec.split(\":\")\n port = None\n\n if \"?\" in dn_subtree:\n dn_subtree, search_str = dn_subtree.split(\"?\")\n key, value = search_str.split(\"=\")\n if key == \"search_filter_key\":\n search_filter_key = value\n 
else:\n raise ValueError\n else:\n search_filter_key = \"cn\"\n\n if security == \"ldaps\":\n use_ssl = True\n elif security == \"ldap\":\n use_ssl = False\n else:\n raise ValueError\n\n return (\n use_ssl,\n url,\n port,\n ldap_user,\n ldap_pass,\n dn_subtree,\n search_filter_key,\n )\n except ValueError:\n raise exceptions.OptionsError(f\"Invalid LDAP specification: {spec}\")\n\n def __call__(self, username: str, password: str) -> bool:\n if not username or not password:\n return False\n self.conn.search(self.dn_subtree, f\"({self.filter_key}={username})\")\n if self.conn.response:\n c = ldap3.Connection(\n self.server, self.conn.response[0][\"dn\"], password, auto_bind=True\n )\n if c:\n return True\n return False\n", "path": "mitmproxy/addons/proxyauth.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index 830fc1d7da..4f662f2deb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,8 @@ ([#6819](https://github.com/mitmproxy/mitmproxy/pull/6819), @mhils) * Set the `unbuffered` (stdout/stderr) flag for the `mitmdump` PyInstaller build. ([#6821](https://github.com/mitmproxy/mitmproxy/pull/6821), @Prinzhorn) +* Fix a bug where client replay would not work with proxyauth. + ([#6866](https://github.com/mitmproxy/mitmproxy/pull/6866), @mhils) ## 17 April 2024: mitmproxy 10.3.0 diff --git a/mitmproxy/addons/proxyauth.py b/mitmproxy/addons/proxyauth.py index 13e1ef02e2..9250251da3 100644 --- a/mitmproxy/addons/proxyauth.py +++ b/mitmproxy/addons/proxyauth.py @@ -76,6 +76,8 @@ def requestheaders(self, f: http.HTTPFlow) -> None: # Is this connection authenticated by a previous HTTP CONNECT? if f.client_conn in self.authenticated: f.metadata["proxyauth"] = self.authenticated[f.client_conn] + elif f.is_replay: + pass else: self.authenticate_http(f) diff --git a/test/mitmproxy/addons/test_proxyauth.py b/test/mitmproxy/addons/test_proxyauth.py index 743bc635bf..e715e9d11d 100644 --- a/test/mitmproxy/addons/test_proxyauth.py +++ b/test/mitmproxy/addons/test_proxyauth.py @@ -239,6 +239,11 @@ def test_handlers(self): assert not f2.response assert f2.metadata["proxyauth"] == ("test", "test") + f3 = tflow.tflow() + f3.is_replay = True + up.requestheaders(f3) + assert not f2.response + @pytest.mark.parametrize( "spec",
httpauth is not attached to replay request

#### Problem Description
I set mitmproxy to run in reverse mode as a proxy to a real server, and then protect mitmproxy with a user:pass pair in the proxyauth option. A regular request would go through, but a replay of that same request would return 401 Unauthorized.

#### Steps to reproduce the behavior:
1. Turn on reverse mode in mitmweb.
2. Set basic auth in proxyauth in 'username:pass' format.
3. Initiate a successful request.
4. Replay the request.

#### System Information
Mitmproxy: 10.1.5
Python: 3.11.6
OpenSSL: OpenSSL 3.1.4 24 Oct 2023
Platform: Linux-4.14.276-211.499.amzn2.x86_64-x86_64-with-glibc2.31
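The diff above addresses this by exempting replayed flows from the proxy-auth challenge: a replay is initiated by mitmproxy itself and never carries the client's `Proxy-Authorization` header, so challenging it can only produce a 401/407. Below is a simplified, self-contained sketch of the changed hook; it mirrors the shape of the fix but is not a drop-in replacement for the addon.

```python
class ProxyAuthSketch:
    """Simplified stand-in for the requestheaders hook after the fix; not the real addon."""

    def __init__(self, validator):
        self.validator = validator   # callable(username, password) -> bool
        self.authenticated = {}      # client_conn -> (username, password)

    def authenticate_http(self, f) -> bool:
        # Omitted here: the real addon parses the (Proxy-)Authorization header and
        # sets a 401/407 response when credentials are missing or invalid.
        ...

    def requestheaders(self, f):
        if self.validator:
            if f.client_conn in self.authenticated:
                # Connection was already authenticated via a previous HTTP CONNECT.
                f.metadata["proxyauth"] = self.authenticated[f.client_conn]
            elif f.is_replay:
                # Replayed flows originate from mitmproxy itself and never carry the
                # client's Proxy-Authorization header, so they skip the challenge.
                pass
            else:
                self.authenticate_http(f)
```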
bridgecrewio__checkov-2740
[ { "content": "from checkov.common.models.consts import ANY_VALUE\nfrom checkov.common.models.enums import CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass AzureFrontDoorEnablesWAF(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that Azure Front Door enables WAF\"\n id = \"CKV_AZURE_121\"\n supported_resources = ['azurerm_frontdoor']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"web_application_firewall_policy_link_id\"\n\n def get_expected_value(self):\n return ANY_VALUE\n\n\ncheck = AzureFrontDoorEnablesWAF()\n", "path": "checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py" } ]
[ { "content": "from checkov.common.models.consts import ANY_VALUE\nfrom checkov.common.models.enums import CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass AzureFrontDoorEnablesWAF(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that Azure Front Door enables WAF\"\n id = \"CKV_AZURE_121\"\n supported_resources = ['azurerm_frontdoor']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"frontend_endpoint/[0]/web_application_firewall_policy_link_id\"\n\n def get_expected_value(self):\n return ANY_VALUE\n\n\ncheck = AzureFrontDoorEnablesWAF()\n", "path": "checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py" } ]
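The updated check above works because `hcl2` parses nested Terraform blocks into lists of dictionaries, so the WAF policy link is only reachable via `frontend_endpoint/[0]/...` rather than at the top level of the resource config, which is exactly what the new inspected key in the diff that follows expresses. The toy walker below is illustrative only, not checkov internals; `conf` and `walk` are made up for this example.

```python
# Illustrative only: an hcl2-style parse puts nested blocks in lists of dicts, so the
# old top-level key never finds the value while the nested path does.
conf = {
    "name": ["example-FrontDoor"],
    "frontend_endpoint": [
        {
            "name": ["exampleFrontendEndpoint1"],
            "web_application_firewall_policy_link_id": ["policy-id"],
        }
    ],
}


def walk(node, path):
    for part in path.split("/"):
        if part.startswith("[") and part.endswith("]"):
            node = node[int(part[1:-1])]      # list index segment, e.g. "[0]"
        elif isinstance(node, dict):
            node = node.get(part)             # dict key segment
        else:
            return None
        if node is None:
            return None
    return node


print(walk(conf, "web_application_firewall_policy_link_id"))                        # None
print(walk(conf, "frontend_endpoint/[0]/web_application_firewall_policy_link_id"))  # ['policy-id']
```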
diff --git a/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py b/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py index 435d45350b..f94cc72870 100644 --- a/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py +++ b/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py @@ -12,7 +12,7 @@ def __init__(self): super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) def get_inspected_key(self): - return "web_application_firewall_policy_link_id" + return "frontend_endpoint/[0]/web_application_firewall_policy_link_id" def get_expected_value(self): return ANY_VALUE diff --git a/tests/terraform/checks/resource/azure/example_AzureFrontDoorEnablesWAF/main.tf b/tests/terraform/checks/resource/azure/example_AzureFrontDoorEnablesWAF/main.tf new file mode 100644 index 0000000000..9548ee074d --- /dev/null +++ b/tests/terraform/checks/resource/azure/example_AzureFrontDoorEnablesWAF/main.tf @@ -0,0 +1,84 @@ +resource "azurerm_frontdoor" "pass" { + name = "example-FrontDoor" + resource_group_name = azurerm_resource_group.example.name + + routing_rule { + name = "exampleRoutingRule1" + accepted_protocols = ["Http", "Https"] + patterns_to_match = ["/*"] + frontend_endpoints = ["exampleFrontendEndpoint1"] + forwarding_configuration { + forwarding_protocol = "MatchRequest" + backend_pool_name = "exampleBackendBing" + } + } + + backend_pool_load_balancing { + name = "exampleLoadBalancingSettings1" + } + + backend_pool_health_probe { + name = "exampleHealthProbeSetting1" + } + + backend_pool { + name = "exampleBackendBing" + backend { + host_header = "www.bing.com" + address = "www.bing.com" + http_port = 80 + https_port = 443 + } + + load_balancing_name = "exampleLoadBalancingSettings1" + health_probe_name = "exampleHealthProbeSetting1" + } + + frontend_endpoint { + name = "exampleFrontendEndpoint1" + host_name = "example-FrontDoor.azurefd.net" + web_application_firewall_policy_link_id = azurerm_frontdoor_firewall_policy.test.id + } +} + +resource "azurerm_frontdoor" "fail" { + name = "example-FrontDoor" + resource_group_name = azurerm_resource_group.example.name + + routing_rule { + name = "exampleRoutingRule1" + accepted_protocols = ["Http", "Https"] + patterns_to_match = ["/*"] + frontend_endpoints = ["exampleFrontendEndpoint1"] + forwarding_configuration { + forwarding_protocol = "MatchRequest" + backend_pool_name = "exampleBackendBing" + } + } + + backend_pool_load_balancing { + name = "exampleLoadBalancingSettings1" + } + + backend_pool_health_probe { + name = "exampleHealthProbeSetting1" + } + + backend_pool { + name = "exampleBackendBing" + backend { + host_header = "www.bing.com" + address = "www.bing.com" + http_port = 80 + https_port = 443 + } + + load_balancing_name = "exampleLoadBalancingSettings1" + health_probe_name = "exampleHealthProbeSetting1" + } + + frontend_endpoint { + name = "exampleFrontendEndpoint1" + host_name = "example-FrontDoor.azurefd.net" + } +} \ No newline at end of file diff --git a/tests/terraform/checks/resource/azure/test_AzureFrontDoorEnablesWAF.py b/tests/terraform/checks/resource/azure/test_AzureFrontDoorEnablesWAF.py index ef4e9fc07b..7301fcdb72 100644 --- a/tests/terraform/checks/resource/azure/test_AzureFrontDoorEnablesWAF.py +++ b/tests/terraform/checks/resource/azure/test_AzureFrontDoorEnablesWAF.py @@ -1,116 +1,42 @@ +import os import unittest -import hcl2 - +from checkov.runner_filter import RunnerFilter +from checkov.terraform.runner import Runner from 
checkov.terraform.checks.resource.azure.AzureFrontDoorEnablesWAF import check -from checkov.common.models.enums import CheckResult class TestAzureFrontDoorEnablesWAF(unittest.TestCase): - def test_failure(self): - hcl_res = hcl2.loads(""" - resource "azurerm_frontdoor" "example" { - name = "example-FrontDoor" - location = "EastUS2" - resource_group_name = azurerm_resource_group.example.name - enforce_backend_pools_certificate_name_check = false - - routing_rule { - name = "exampleRoutingRule1" - accepted_protocols = ["Http", "Https"] - patterns_to_match = ["/*"] - frontend_endpoints = ["exampleFrontendEndpoint1"] - forwarding_configuration { - forwarding_protocol = "MatchRequest" - backend_pool_name = "exampleBackendBing" - } - } - - backend_pool_load_balancing { - name = "exampleLoadBalancingSettings1" - } - - backend_pool_health_probe { - name = "exampleHealthProbeSetting1" - } - - backend_pool { - name = "exampleBackendBing" - backend { - host_header = "www.bing.com" - address = "www.bing.com" - http_port = 80 - https_port = 443 - } - - load_balancing_name = "exampleLoadBalancingSettings1" - health_probe_name = "exampleHealthProbeSetting1" - } - - frontend_endpoint { - name = "exampleFrontendEndpoint1" - host_name = "example-FrontDoor.azurefd.net" - custom_https_provisioning_enabled = false - } - } - """) - resource_conf = hcl_res['resource'][0]['azurerm_frontdoor']['example'] - scan_result = check.scan_resource_conf(conf=resource_conf) - self.assertEqual(CheckResult.FAILED, scan_result) + def test(self): + runner = Runner() + current_dir = os.path.dirname(os.path.realpath(__file__)) + + test_files_dir = os.path.join(current_dir, "example_AzureFrontDoorEnablesWAF") + report = runner.run(root_folder=test_files_dir, + runner_filter=RunnerFilter(checks=[check.id])) + summary = report.get_summary() + + passing_resources = { + 'azurerm_frontdoor.pass', + } + + failing_resources = { + 'azurerm_frontdoor.fail', + } + skipped_resources = {} + + passed_check_resources = set([c.resource for c in report.passed_checks]) + failed_check_resources = set([c.resource for c in report.failed_checks]) - def test_success(self): - hcl_res = hcl2.loads(""" - resource "azurerm_frontdoor" "example" { - name = "example-FrontDoor" - location = "EastUS2" - resource_group_name = azurerm_resource_group.example.name - enforce_backend_pools_certificate_name_check = false - web_application_firewall_policy_link_id = "this_is_id" + self.assertEqual(summary['passed'], len(passing_resources)) + self.assertEqual(summary['failed'], len(failing_resources)) + self.assertEqual(summary['skipped'], len(skipped_resources)) + self.assertEqual(summary['parsing_errors'], 0) - routing_rule { - name = "exampleRoutingRule1" - accepted_protocols = ["Http", "Https"] - patterns_to_match = ["/*"] - frontend_endpoints = ["exampleFrontendEndpoint1"] - forwarding_configuration { - forwarding_protocol = "MatchRequest" - backend_pool_name = "exampleBackendBing" - } - } - - backend_pool_load_balancing { - name = "exampleLoadBalancingSettings1" - } - - backend_pool_health_probe { - name = "exampleHealthProbeSetting1" - } - - backend_pool { - name = "exampleBackendBing" - backend { - host_header = "www.bing.com" - address = "www.bing.com" - http_port = 80 - https_port = 443 - } - - load_balancing_name = "exampleLoadBalancingSettings1" - health_probe_name = "exampleHealthProbeSetting1" - } - - frontend_endpoint { - name = "exampleFrontendEndpoint1" - host_name = "example-FrontDoor.azurefd.net" - custom_https_provisioning_enabled = false - } - } - 
""") - resource_conf = hcl_res['resource'][0]['azurerm_frontdoor']['example'] - scan_result = check.scan_resource_conf(conf=resource_conf) - self.assertEqual(CheckResult.PASSED, scan_result) + self.assertEqual(passing_resources, passed_check_resources) + self.assertEqual(failing_resources, failed_check_resources) if __name__ == '__main__': - unittest.main() + unittest.main() \ No newline at end of file
Check Azure Front Door WAF enabled fails even when a WAF is correctly assigned **Describe the issue** [`CKV_AZURE_121`](https://github.com/bridgecrewio/checkov/blob/master/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py) fails despite a Web Application Firewall policy being correctly applied. WAF policies are applied by specifying a value for `web_application_firewall_policy_link_id` inside a `frontend_endpoint` block within the `azurerm_frontdoor` resource itself. The [documentation](https://docs.bridgecrew.io/docs/ensure-that-azure-front-door-enables-waf) seems to expect that the `web_application_firewall_policy_link_id` attribute is defined in the resource block itself, rather than in a sub-block (`frontend_endpoint`). - [`azurerm_frontdoor` resource documentation reference](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/frontdoor#web_application_firewall_policy_link_id) **Examples** ```terraform resource "azurerm_frontdoor" "test" { name = "test-front-door" resource_group_name = var.resource_group_name enforce_backend_pools_certificate_name_check = false tags = var.tags frontend_endpoint { name = "DefaultFrontend" host_name = "test-front-door.azurefd.net" web_application_firewall_policy_link_id = azurerm_frontdoor_firewall_policy.test.id } # ... ``` **Version (please complete the following information):** - Checkov Version: 2.0.930 **Additional context**
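For the CKV_AZURE_121 record above, the fix changes the inspected key from a top-level attribute to the nested path "frontend_endpoint/[0]/web_application_firewall_policy_link_id". The following is a minimal standalone sketch of how such a slash-delimited path can be resolved against a parsed resource configuration; the resolve_inspected_key helper and the parsed_resource shape are illustrative assumptions, not Checkov's actual traversal code.

```python
# Sketch (not Checkov internals): resolve a slash-delimited inspected key
# against a parsed resource in which nested blocks appear as lists of dicts.
# Segments like "[0]" are treated as list indices, everything else as dict keys.

def resolve_inspected_key(conf, key_path):
    node = conf
    for segment in key_path.split("/"):
        if segment.startswith("[") and segment.endswith("]"):
            index = int(segment[1:-1])
            if not isinstance(node, list) or index >= len(node):
                return None
            node = node[index]
        else:
            if not isinstance(node, dict) or segment not in node:
                return None
            node = node[segment]
    return node


# Shape loosely mirrors an hcl2-parsed azurerm_frontdoor resource; the values
# are placeholders for illustration only.
parsed_resource = {
    "name": ["example-FrontDoor"],
    "frontend_endpoint": [
        {
            "name": ["exampleFrontendEndpoint1"],
            "web_application_firewall_policy_link_id": ["policy-id"],
        }
    ],
}

# Nested path finds the attribute inside the frontend_endpoint block -> passes.
print(resolve_inspected_key(
    parsed_resource,
    "frontend_endpoint/[0]/web_application_firewall_policy_link_id",
))
# Old top-level key never matches, which is why the check used to fail.
print(resolve_inspected_key(parsed_resource, "web_application_firewall_policy_link_id"))
```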
data-for-change__anyway-1244
[ { "content": "# -*- coding: utf-8 -*-\nimport logging\nimport datetime\nimport json\nimport pandas as pd\nfrom collections import defaultdict\nfrom sqlalchemy import func\nfrom sqlalchemy import cast, Numeric\nfrom sqlalchemy import desc\nfrom flask import Response\nfrom .constants import CONST\nfrom .models import (NewsFlash, AccidentMarkerView, InvolvedMarkerView, RoadSegments)\nfrom .parsers import resolution_dict\nfrom .app_and_db import db\n\n'''\n Widget structure:\n {\n 'name': str,\n 'rank': int (Integer),\n 'data': {\n 'items': list (Array) | dictionary (Object),\n 'text': dictionary (Object) - can be empty\n }\n 'meta': dictionary (Object) - can be empty\n }\n'''\nclass Widget():\n def __init__(self, name, rank, items, text=None, meta=None):\n self.name = name\n self.rank = rank\n self.items = items\n self.text = text\n self.meta = meta\n\n def serialize(self):\n output = {}\n output['name'] = self.name\n output['rank'] = self.rank\n output['data'] = {}\n output['data']['items'] = self.items\n if self.text:\n output['data']['text'] = self.text\n if self.meta:\n output['meta'] = self.meta\n return output\n\n\ndef extract_news_flash_location(news_flash_id):\n news_flash_obj = db.session.query(NewsFlash).filter(\n NewsFlash.id == news_flash_id).first()\n if not news_flash_obj:\n logging.warn('could not find news flash id {}'.format(news_flash_id))\n return None\n resolution = news_flash_obj.resolution if news_flash_obj.resolution else None\n if not news_flash_obj or not resolution or resolution not in resolution_dict:\n logging.warn(\n 'could not find valid resolution for news flash id {}'.format(news_flash_id))\n return None\n data = {'resolution': resolution}\n for field in resolution_dict[resolution]:\n curr_field = getattr(news_flash_obj, field)\n if curr_field is not None:\n data[field] = curr_field\n gps = {}\n for field in ['lon', 'lat']:\n gps[field] = getattr(news_flash_obj, field)\n return {'name': 'location', 'data': data, 'gps': gps}\n\n\ndef get_query(table_obj, filters, start_time, end_time):\n query = db.session.query(table_obj)\n if start_time:\n query = query.filter(\n getattr(table_obj, 'accident_timestamp') >= start_time)\n if end_time:\n query = query.filter(\n getattr(table_obj, 'accident_timestamp') <= end_time)\n if filters:\n for field_name, value in filters.items():\n if isinstance(value, list):\n values = value\n else:\n values = [value]\n query = query.filter((getattr(table_obj, field_name)).in_(values))\n return query\n\ndef get_accident_count_by_accident_type(location_info, start_time, end_time):\n all_accident_type_count = get_accidents_stats(table_obj=AccidentMarkerView,\n filters=location_info,\n group_by='accident_type_hebrew',\n count='accident_type_hebrew',\n start_time=start_time,\n end_time=end_time)\n merged_accident_type_count = [{'accident_type': 'התנגשות', 'count': 0}]\n for item in all_accident_type_count:\n if 'התנגשות' in item['accident_type']:\n merged_accident_type_count[0]['count'] += item['count']\n else:\n merged_accident_type_count.append(item)\n return merged_accident_type_count\n\ndef get_top_road_segments_accidents_per_km(resolution, location_info, start_time=None, end_time=None, limit=5):\n if resolution != 'כביש בינעירוני': # relevent for non urban roads only\n return {}\n\n query = get_query(table_obj=AccidentMarkerView, filters=None,\n start_time=start_time, end_time=end_time)\n\n query = query.with_entities(\n AccidentMarkerView.road_segment_name,\n func.count(AccidentMarkerView.road_segment_name).label(\n 
'total_accidents'),\n (RoadSegments.to_km - RoadSegments.from_km).label('segment_length'),\n cast((func.count(AccidentMarkerView.road_segment_name) / (RoadSegments.to_km - RoadSegments.from_km)),\n Numeric(10, 4)).label(\n 'accidents_per_km')) \\\n .filter(AccidentMarkerView.road1 == RoadSegments.road) \\\n .filter(AccidentMarkerView.road_segment_number == RoadSegments.segment) \\\n .filter(AccidentMarkerView.road1 == location_info['road1']) \\\n .filter(AccidentMarkerView.road_segment_name is not None) \\\n .group_by(AccidentMarkerView.road_segment_name, RoadSegments.from_km, RoadSegments.to_km) \\\n .order_by(desc('accidents_per_km')) \\\n .limit(limit)\n\n result = pd.read_sql_query(query.statement, query.session.bind)\n return result.to_dict(orient='records') # pylint: disable=no-member\n\n\ndef get_accidents_stats(table_obj, filters=None, group_by=None, count=None, start_time=None, end_time=None):\n filters = filters or {}\n filters['provider_code'] = [\n CONST.CBS_ACCIDENT_TYPE_1_CODE, CONST.CBS_ACCIDENT_TYPE_3_CODE]\n # get stats\n query = get_query(table_obj, filters, start_time, end_time)\n if group_by:\n query = query.group_by(group_by)\n query = query.with_entities(group_by, func.count(count))\n df = pd.read_sql_query(query.statement, query.session.bind)\n df.rename(columns={'count_1': 'count'}, inplace=True) # pylint: disable=no-member\n df.columns = [c.replace('_hebrew', '') for c in df.columns]\n return df.to_dict(orient='records') if group_by or count else df.to_dict() # pylint: disable=no-member\n\n\ndef get_injured_filters(location_info):\n new_filters = {}\n for curr_filter, curr_values in location_info.items():\n if curr_filter in ['region_hebrew', 'district_hebrew', 'district_hebrew', 'yishuv_name']:\n new_filter_name = 'accident_' + curr_filter\n new_filters[new_filter_name] = curr_values\n else:\n new_filters[curr_filter] = curr_values\n new_filters['injury_severity'] = [1, 2, 3, 4, 5]\n return new_filters\n\n\ndef get_most_severe_accidents_with_entities(table_obj, filters, entities, start_time, end_time, limit=10):\n filters = filters or {}\n filters['provider_code'] = [\n CONST.CBS_ACCIDENT_TYPE_1_CODE, CONST.CBS_ACCIDENT_TYPE_3_CODE]\n query = get_query(table_obj, filters, start_time, end_time)\n query = query.with_entities(*entities)\n query = query.order_by(getattr(table_obj, \"accident_severity\"), getattr(\n table_obj, \"accident_timestamp\").desc())\n query = query.limit(limit)\n df = pd.read_sql_query(query.statement, query.session.bind)\n df.columns = [c.replace('_hebrew', '') for c in df.columns]\n return df.to_dict(orient='records') # pylint: disable=no-member\n\n\ndef get_most_severe_accidents(table_obj, filters, start_time, end_time, limit=10):\n entities = 'longitude', 'latitude', 'accident_severity_hebrew', 'accident_timestamp', 'accident_type_hebrew'\n return get_most_severe_accidents_with_entities(table_obj, filters, entities, start_time, end_time, limit)\n\n\ndef get_accidents_heat_map(table_obj, filters, start_time, end_time):\n filters = filters or {}\n filters['provider_code'] = [\n CONST.CBS_ACCIDENT_TYPE_1_CODE, CONST.CBS_ACCIDENT_TYPE_3_CODE]\n query = get_query(table_obj, filters, start_time, end_time)\n query = query.with_entities('longitude', 'latitude')\n df = pd.read_sql_query(query.statement, query.session.bind)\n return df.to_dict(orient='records') # pylint: disable=no-member\n\n\ndef filter_and_group_injured_count_per_age_group(data_of_ages):\n import re\n range_dict = {0: 14, 15: 24, 25: 64, 65: 200}\n 
return_dict_by_required_age_group = defaultdict(int)\n\n for age_range_and_count in data_of_ages:\n age_range = age_range_and_count['age_group']\n count = age_range_and_count['count']\n\n # Parse the db age range\n match_parsing = re.match(\"([0-9]{2})\\\\-([0-9]{2})\", age_range)\n if match_parsing:\n regex_age_matches = match_parsing.groups()\n if len(regex_age_matches) != 2:\n return_dict_by_required_age_group[\"unknown\"] += count\n continue\n min_age_raw, max_age_raw = regex_age_matches\n else:\n match_parsing = re.match(\"([0-9]{2})\\\\+\", age_range) # e.g 85+\n if match_parsing:\n # We assume that no body live beyond age 200\n min_age_raw, max_age_raw = match_parsing.group(1), 200\n else:\n return_dict_by_required_age_group[\"unknown\"] += count\n continue\n\n # Find to what \"bucket\" to aggregate the data\n min_age = int(min_age_raw)\n max_age = int(max_age_raw)\n for item in range_dict.items():\n item_min_range, item_max_range = item\n if item_min_range <= min_age <= item_max_range and item_min_range <= max_age <= item_max_range:\n string_age_range = f'{item_min_range:02}-{item_max_range:02}'\n return_dict_by_required_age_group[string_age_range] += count\n break\n\n # Rename the last key\n return_dict_by_required_age_group[\"65+\"] = return_dict_by_required_age_group[\"65-200\"]\n del return_dict_by_required_age_group[\"65-200\"]\n\n return return_dict_by_required_age_group\n\n\ndef get_most_severe_accidents_table_title(location_text):\n return 'תאונות חמורות ב' + location_text\n\n\ndef get_accident_count_by_severity(location_info, location_text, start_time, end_time):\n count_by_severity = get_accidents_stats(table_obj=AccidentMarkerView,\n filters=location_info,\n group_by='accident_severity_hebrew',\n count='accident_severity_hebrew',\n start_time=start_time,\n end_time=end_time)\n severity_dict = {'קטלנית': 'fatal',\n 'קשה': 'severe',\n 'קלה': 'light'}\n items = {}\n total_accidents_count = 0\n start_year = start_time.year\n end_year = end_time.year\n for severity_and_count in count_by_severity:\n accident_severity_hebrew = severity_and_count['accident_severity']\n severity_english = severity_dict[accident_severity_hebrew]\n severity_count_text = 'severity_{}_count'.format(severity_english)\n items[severity_count_text] = severity_and_count['count']\n total_accidents_count += severity_and_count['count']\n items['start_year'] = start_year\n items['end_year'] = end_year\n items['total_accidents_count'] = total_accidents_count\n return items\n\n\ndef get_most_severe_accidents_table(location_info, start_time, end_time):\n entities = 'id', 'provider_code', 'accident_timestamp', 'accident_type_hebrew', 'accident_year'\n accidents = get_most_severe_accidents_with_entities(\n table_obj=AccidentMarkerView,\n filters=location_info,\n entities=entities,\n start_time=start_time,\n end_time=end_time)\n # Add casualties\n for accident in accidents:\n accident['type'] = accident['accident_type']\n dt = accident['accident_timestamp'].to_pydatetime()\n accident['date'] = dt.strftime(\"%d/%m/%y\")\n accident['hour'] = dt.strftime(\"%H:%M\")\n num = get_casualties_count_in_accident(\n accident['id'], accident['provider_code'], 1, accident['accident_year'])\n accident['killed_count'] = num\n num = get_casualties_count_in_accident(\n accident['id'], accident['provider_code'], [2, 3], accident['accident_year'])\n accident['injured_count'] = num\n del accident['accident_timestamp'], accident['accident_type'], accident['id'], accident['provider_code']\n return accidents\n\n\n# count of dead and 
severely injured\ndef get_casualties_count_in_accident(accident_id, provider_code, injury_severity, accident_year):\n filters = {'accident_id': accident_id,\n 'provider_code': provider_code,\n 'injury_severity': injury_severity,\n 'accident_year': accident_year}\n casualties = get_accidents_stats(table_obj=InvolvedMarkerView, filters=filters,\n group_by='injury_severity', count='injury_severity')\n res = 0\n for ca in casualties:\n res += ca['count']\n return res\n\n\n# generate text describing location or road segment of news flash\n# to be used by most severe accidents additional info widget\ndef get_news_flash_location_text(news_flash_id):\n news_flash_item = db.session.query(NewsFlash).filter(\n NewsFlash.id == news_flash_id).first()\n nf = news_flash_item.serialize()\n resolution = nf['resolution'] if nf['resolution'] else ''\n yishuv_name = nf['yishuv_name'] if nf['yishuv_name'] else ''\n road1 = str(int(nf['road1'])) if nf['road1'] else ''\n road2 = str(int(nf['road2'])) if nf['road2'] else ''\n street1_hebrew = nf['street1_hebrew'] if nf['street1_hebrew'] else ''\n road_segment_name = nf['road_segment_name'] if nf['road_segment_name'] else ''\n if resolution == 'כביש בינעירוני' and road1 and road_segment_name:\n res = 'כביש ' + road1 + ' במקטע ' + road_segment_name\n elif resolution == 'עיר' and not yishuv_name:\n res = nf['location']\n elif resolution == 'עיר' and yishuv_name:\n res = nf['yishuv_name']\n elif resolution == 'צומת בינעירוני' and road1 and road2:\n res = 'צומת כביש ' + road1 + ' עם כביש ' + road2\n elif resolution == 'צומת בינעירוני' and road1 and road_segment_name:\n res = 'כביש ' + road1 + ' במקטע ' + road_segment_name\n elif resolution == 'רחוב' and yishuv_name and street1_hebrew:\n res = ' רחוב ' + street1_hebrew + ' ב' + yishuv_name\n else:\n logging.warning(\n \"Did not found quality resolution. Using location field. 
News Flash id:{}\".format(nf['id']))\n res = nf['location']\n return res\n\n\ndef extract_news_flash_obj(news_flash_id):\n news_flash_obj = db.session.query(NewsFlash).filter(\n NewsFlash.id == news_flash_id).first()\n\n if not news_flash_obj:\n logging.warning('Could not find news flash id {}'.format(news_flash_id))\n return None\n\n return news_flash_obj\n\n\ndef sum_road_accidents_by_specific_type(road_data, field_name):\n dict_merge = defaultdict(int)\n for accident_data in road_data:\n if accident_data['accident_type'] == field_name:\n dict_merge[field_name] += accident_data['count']\n else:\n dict_merge['תאונות אחרות'] += accident_data['count']\n return dict_merge\n\n\ndef convert_roads_fatal_accidents_to_frontend_view(data_dict):\n data_list = []\n for key, value in data_dict.items():\n data_list.append({'desc': key, 'count': value})\n return data_list\n\n\ndef get_head_to_head_stat(news_flash_id, start_time, end_time):\n news_flash_obj = extract_news_flash_obj(news_flash_id)\n road_data = {}\n filter_dict = {'road_type': CONST.ROAD_TYPE_NOT_IN_CITY_NOT_IN_INTERSECTION,\n 'accident_severity': CONST.ACCIDENT_SEVERITY_DEADLY}\n all_roads_data = get_accidents_stats(table_obj=AccidentMarkerView,\n filters=filter_dict,\n group_by='accident_type_hebrew', count='accident_type_hebrew',\n start_time=start_time, end_time=end_time)\n\n if news_flash_obj.road1 and news_flash_obj.road_segment_name:\n filter_dict.update({'road1': news_flash_obj.road1, 'road_segment_name': news_flash_obj.road_segment_name})\n road_data = get_accidents_stats(table_obj=AccidentMarkerView,\n filters=filter_dict,\n group_by='accident_type_hebrew', count='accident_type_hebrew',\n start_time=start_time, end_time=end_time)\n\n road_data_dict = sum_road_accidents_by_specific_type(road_data, 'התנגשות חזית בחזית')\n all_roads_data_dict = sum_road_accidents_by_specific_type(all_roads_data, 'התנגשות חזית בחזית')\n\n return {'specific_road_segment_fatal_accidents': convert_roads_fatal_accidents_to_frontend_view(road_data_dict),\n 'all_roads_fatal_accidents': convert_roads_fatal_accidents_to_frontend_view(all_roads_data_dict)}\n\n#gets the latest date an accident has occured\ndef get_latest_accident_date(table_obj, filters):\n filters= filters or {}\n filters['provider_code'] = [CONST.CBS_ACCIDENT_TYPE_1_CODE, CONST.CBS_ACCIDENT_TYPE_3_CODE]\n query = db.session.query(func.max(table_obj.accident_timestamp))\n df = pd.read_sql_query(query.statement, query.session.bind)\n return (df.to_dict(orient='records'))[0].get(\"max_1\") # pylint: disable=no-member\n\ndef create_infographics_data(news_flash_id, number_of_years_ago):\n output = {}\n try:\n number_of_years_ago = int(number_of_years_ago)\n except ValueError:\n return Response({})\n if number_of_years_ago < 0 or number_of_years_ago > 100:\n return Response({})\n location_info = extract_news_flash_location(news_flash_id)\n if location_info is None:\n return Response({})\n logging.debug('location_info:{}'.format(location_info))\n location_text = get_news_flash_location_text(news_flash_id)\n logging.debug('location_text:{}'.format(location_text))\n gps = location_info['gps']\n location_info = location_info['data']\n output['meta'] = {'location_info': location_info.copy(),\n 'location_text': location_text}\n output['widgets'] = []\n resolution = location_info.pop('resolution')\n if resolution is None:\n return Response({})\n\n if all(value is None for value in location_info.values()):\n return Response({})\n\n 
last_accident_date=get_latest_accident_date(table_obj=AccidentMarkerView, filters=None)\n #converting to datetime object to get the date\n end_time=last_accident_date.to_pydatetime().date()\n\n start_time = datetime.date(\n end_time.year + 1 - number_of_years_ago, 1, 1)\n\n #accident_severity count\n items = get_accident_count_by_severity(location_info=location_info,\n location_text=location_text,\n start_time=start_time,\n end_time=end_time)\n\n accident_count_by_severity = Widget(name='accident_count_by_severity',\n rank=1,\n items=items)\n output['widgets'].append(accident_count_by_severity.serialize())\n\n # most severe accidents table\n most_severe_accidents_table = Widget(name='most_severe_accidents_table',\n rank=2,\n items=get_most_severe_accidents_table(location_info, start_time, end_time),\n text={'title':get_most_severe_accidents_table_title(location_text)})\n output['widgets'].append(most_severe_accidents_table.serialize())\n\n # most severe accidents\n most_severe_accidents = Widget(name='most_severe_accidents',\n rank=3,\n items=get_most_severe_accidents(table_obj=AccidentMarkerView,\n filters=location_info,\n start_time=start_time,\n end_time=end_time))\n output['widgets'].append(most_severe_accidents.serialize())\n\n # street view\n street_view = Widget(name='street_view',\n rank=4,\n items={'longitude': gps['lon'],\n 'latitude': gps['lat']})\n output['widgets'].append(street_view.serialize())\n\n # head to head accidents\n head_on_collisions_comparison = Widget(name='head_on_collisions_comparison',\n rank=5,\n items=get_head_to_head_stat(news_flash_id=news_flash_id,\n start_time=start_time,\n end_time=end_time))\n output['widgets'].append(head_on_collisions_comparison.serialize())\n\n # accident_type count\n accident_count_by_accident_type = Widget(name='accident_count_by_accident_type',\n rank=6,\n items=get_accident_count_by_accident_type(location_info=location_info,\n start_time=start_time,\n end_time=end_time))\n output['widgets'].append(accident_count_by_accident_type.serialize())\n\n # accidents heat map\n accidents_heat_map = Widget(name='accidents_heat_map',\n rank=7,\n items=get_accidents_heat_map(table_obj=AccidentMarkerView,\n filters=location_info,\n start_time=start_time,\n end_time=end_time))\n output['widgets'].append(accidents_heat_map.serialize())\n\n # accident count by accident year\n accident_count_by_accident_year = Widget(name='accident_count_by_accident_year',\n rank=8,\n items=get_accidents_stats(table_obj=AccidentMarkerView,\n filters=location_info,\n group_by='accident_year',\n count='accident_year',\n start_time=start_time,\n end_time=end_time),\n text={'title':'כמות תאונות'})\n output['widgets'].append(accident_count_by_accident_year.serialize())\n\n # injured count by accident year\n injured_count_by_accident_year = Widget(name='injured_count_by_accident_year',\n rank=9,\n items=get_accidents_stats(table_obj=InvolvedMarkerView,\n filters=get_injured_filters(location_info),\n group_by='accident_year',\n count='accident_year',\n start_time=start_time,\n end_time=end_time),\n text={'title':'כמות פצועים'})\n output['widgets'].append(injured_count_by_accident_year.serialize())\n\n # accident count on day light\n accident_count_by_day_night = Widget(name='accident_count_by_day_night',\n rank=10,\n items=get_accidents_stats(table_obj=AccidentMarkerView,\n filters=location_info,\n group_by='day_night_hebrew',\n count='day_night_hebrew',\n start_time=start_time,\n end_time=end_time),\n text={'title':'כמות תאונות ביום ובלילה'})\n 
output['widgets'].append(accident_count_by_day_night.serialize())\n\n # accidents distribution count by hour\n accidents_count_by_hour = Widget(name='accidents_count_by_hour',\n rank=11,\n items=get_accidents_stats(table_obj=AccidentMarkerView,\n filters=location_info,\n group_by='accident_hour',\n count='accident_hour',\n start_time=start_time,\n end_time=end_time),\n text={'title':'כמות תאונות לפי שעה'})\n output['widgets'].append(accidents_count_by_hour.serialize())\n\n # accident count by road_light\n accident_count_by_road_light = Widget(name='accident_count_by_road_light',\n rank=12,\n items=get_accidents_stats(table_obj=AccidentMarkerView,\n filters=location_info,\n group_by='road_light_hebrew',\n count='road_light_hebrew',\n start_time=start_time,\n end_time=end_time),\n text={'title':'כמות תאונות לפי תאורה'})\n output['widgets'].append(accident_count_by_road_light.serialize())\n\n # accident count by road_segment\n top_road_segments_accidents_per_km = Widget(name='top_road_segments_accidents_per_km',\n rank=13,\n items=get_top_road_segments_accidents_per_km(resolution=resolution,\n location_info=location_info,\n start_time=start_time,\n end_time=end_time))\n output['widgets'].append(top_road_segments_accidents_per_km.serialize())\n\n # injured count per age group\n data_of_injured_count_per_age_group_raw = get_accidents_stats(table_obj=InvolvedMarkerView,\n filters=get_injured_filters(location_info),\n group_by='age_group_hebrew',\n count='age_group_hebrew',\n start_time=start_time,\n end_time=end_time)\n data_of_injured_count_per_age_group = filter_and_group_injured_count_per_age_group(data_of_injured_count_per_age_group_raw)\n injured_count_per_age_group = Widget(name='injured_count_per_age_group',\n rank=14,\n items=data_of_injured_count_per_age_group)\n output['widgets'].append(injured_count_per_age_group.serialize())\n\n # vision zero\n vision_zero = Widget(name='vision_zero',\n rank=15,\n items=['vision_zero_2_plus_1'])\n output['widgets'].append(vision_zero.serialize())\n\n return Response(json.dumps(output, default=str), mimetype=\"application/json\")\n", "path": "anyway/infographics_utils.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\nimport logging\nimport datetime\nimport json\nimport pandas as pd\nfrom collections import defaultdict\nfrom sqlalchemy import func\nfrom sqlalchemy import cast, Numeric\nfrom sqlalchemy import desc\nfrom flask import Response\nfrom .constants import CONST\nfrom .models import (NewsFlash, AccidentMarkerView, InvolvedMarkerView, RoadSegments)\nfrom .parsers import resolution_dict\nfrom .app_and_db import db\n\n'''\n Widget structure:\n {\n 'name': str,\n 'rank': int (Integer),\n 'data': {\n 'items': list (Array) | dictionary (Object),\n 'text': dictionary (Object) - can be empty\n }\n 'meta': dictionary (Object) - can be empty\n }\n'''\nclass Widget():\n def __init__(self, name, rank, items, text=None, meta=None):\n self.name = name\n self.rank = rank\n self.items = items\n self.text = text\n self.meta = meta\n\n def serialize(self):\n output = {}\n output['name'] = self.name\n output['rank'] = self.rank\n output['data'] = {}\n output['data']['items'] = self.items\n if self.text:\n output['data']['text'] = self.text\n if self.meta:\n output['meta'] = self.meta\n return output\n\n\ndef extract_news_flash_location(news_flash_id):\n news_flash_obj = db.session.query(NewsFlash).filter(\n NewsFlash.id == news_flash_id).first()\n if not news_flash_obj:\n logging.warn('could not find news flash id {}'.format(news_flash_id))\n return None\n resolution = news_flash_obj.resolution if news_flash_obj.resolution else None\n if not news_flash_obj or not resolution or resolution not in resolution_dict:\n logging.warn(\n 'could not find valid resolution for news flash id {}'.format(news_flash_id))\n return None\n data = {'resolution': resolution}\n for field in resolution_dict[resolution]:\n curr_field = getattr(news_flash_obj, field)\n if curr_field is not None:\n data[field] = curr_field\n gps = {}\n for field in ['lon', 'lat']:\n gps[field] = getattr(news_flash_obj, field)\n return {'name': 'location', 'data': data, 'gps': gps}\n\n\ndef get_query(table_obj, filters, start_time, end_time):\n query = db.session.query(table_obj)\n if start_time:\n query = query.filter(\n getattr(table_obj, 'accident_timestamp') >= start_time)\n if end_time:\n query = query.filter(\n getattr(table_obj, 'accident_timestamp') <= end_time)\n if filters:\n for field_name, value in filters.items():\n if isinstance(value, list):\n values = value\n else:\n values = [value]\n query = query.filter((getattr(table_obj, field_name)).in_(values))\n return query\n\ndef get_accident_count_by_accident_type(location_info, start_time, end_time):\n all_accident_type_count = get_accidents_stats(table_obj=AccidentMarkerView,\n filters=location_info,\n group_by='accident_type_hebrew',\n count='accident_type_hebrew',\n start_time=start_time,\n end_time=end_time)\n merged_accident_type_count = [{'accident_type': 'התנגשות', 'count': 0}]\n for item in all_accident_type_count:\n if 'התנגשות' in item['accident_type']:\n merged_accident_type_count[0]['count'] += item['count']\n else:\n merged_accident_type_count.append(item)\n return merged_accident_type_count\n\ndef get_top_road_segments_accidents_per_km(resolution, location_info, start_time=None, end_time=None, limit=5):\n if resolution != 'כביש בינעירוני': # relevent for non urban roads only\n return {}\n\n query = get_query(table_obj=AccidentMarkerView, filters=None,\n start_time=start_time, end_time=end_time)\n\n query = query.with_entities(\n AccidentMarkerView.road_segment_name,\n func.count(AccidentMarkerView.road_segment_name).label(\n 
'total_accidents'),\n (RoadSegments.to_km - RoadSegments.from_km).label('segment_length'),\n cast((func.count(AccidentMarkerView.road_segment_name) / (RoadSegments.to_km - RoadSegments.from_km)),\n Numeric(10, 4)).label(\n 'accidents_per_km')) \\\n .filter(AccidentMarkerView.road1 == RoadSegments.road) \\\n .filter(AccidentMarkerView.road_segment_number == RoadSegments.segment) \\\n .filter(AccidentMarkerView.road1 == location_info['road1']) \\\n .filter(AccidentMarkerView.road_segment_name is not None) \\\n .group_by(AccidentMarkerView.road_segment_name, RoadSegments.from_km, RoadSegments.to_km) \\\n .order_by(desc('accidents_per_km')) \\\n .limit(limit)\n\n result = pd.read_sql_query(query.statement, query.session.bind)\n return result.to_dict(orient='records') # pylint: disable=no-member\n\n\ndef get_accidents_stats(table_obj, filters=None, group_by=None, count=None, start_time=None, end_time=None):\n filters = filters or {}\n filters['provider_code'] = [\n CONST.CBS_ACCIDENT_TYPE_1_CODE, CONST.CBS_ACCIDENT_TYPE_3_CODE]\n # get stats\n query = get_query(table_obj, filters, start_time, end_time)\n if group_by:\n query = query.group_by(group_by)\n query = query.with_entities(group_by, func.count(count))\n df = pd.read_sql_query(query.statement, query.session.bind)\n df.rename(columns={'count_1': 'count'}, inplace=True) # pylint: disable=no-member\n df.columns = [c.replace('_hebrew', '') for c in df.columns]\n return df.to_dict(orient='records') if group_by or count else df.to_dict() # pylint: disable=no-member\n\n\ndef get_injured_filters(location_info):\n new_filters = {}\n for curr_filter, curr_values in location_info.items():\n if curr_filter in ['region_hebrew', 'district_hebrew', 'district_hebrew', 'yishuv_name']:\n new_filter_name = 'accident_' + curr_filter\n new_filters[new_filter_name] = curr_values\n else:\n new_filters[curr_filter] = curr_values\n new_filters['injury_severity'] = [1, 2, 3, 4, 5]\n return new_filters\n\n\ndef get_most_severe_accidents_with_entities(table_obj, filters, entities, start_time, end_time, limit=10):\n filters = filters or {}\n filters['provider_code'] = [\n CONST.CBS_ACCIDENT_TYPE_1_CODE, CONST.CBS_ACCIDENT_TYPE_3_CODE]\n query = get_query(table_obj, filters, start_time, end_time)\n query = query.with_entities(*entities)\n query = query.order_by(getattr(table_obj, \"accident_severity\"), getattr(\n table_obj, \"accident_timestamp\").desc())\n query = query.limit(limit)\n df = pd.read_sql_query(query.statement, query.session.bind)\n df.columns = [c.replace('_hebrew', '') for c in df.columns]\n return df.to_dict(orient='records') # pylint: disable=no-member\n\n\ndef get_most_severe_accidents(table_obj, filters, start_time, end_time, limit=10):\n entities = 'longitude', 'latitude', 'accident_severity_hebrew', 'accident_timestamp', 'accident_type_hebrew'\n return get_most_severe_accidents_with_entities(table_obj, filters, entities, start_time, end_time, limit)\n\n\ndef get_accidents_heat_map(table_obj, filters, start_time, end_time):\n filters = filters or {}\n filters['provider_code'] = [\n CONST.CBS_ACCIDENT_TYPE_1_CODE, CONST.CBS_ACCIDENT_TYPE_3_CODE]\n query = get_query(table_obj, filters, start_time, end_time)\n query = query.with_entities('longitude', 'latitude')\n df = pd.read_sql_query(query.statement, query.session.bind)\n return df.to_dict(orient='records') # pylint: disable=no-member\n\n\ndef filter_and_group_injured_count_per_age_group(data_of_ages):\n import re\n range_dict = {0: 14, 15: 24, 25: 64, 65: 200}\n 
return_dict_by_required_age_group = defaultdict(int)\n\n for age_range_and_count in data_of_ages:\n age_range = age_range_and_count['age_group']\n count = age_range_and_count['count']\n\n # Parse the db age range\n match_parsing = re.match(\"([0-9]{2})\\\\-([0-9]{2})\", age_range)\n if match_parsing:\n regex_age_matches = match_parsing.groups()\n if len(regex_age_matches) != 2:\n return_dict_by_required_age_group[\"unknown\"] += count\n continue\n min_age_raw, max_age_raw = regex_age_matches\n else:\n match_parsing = re.match(\"([0-9]{2})\\\\+\", age_range) # e.g 85+\n if match_parsing:\n # We assume that no body live beyond age 200\n min_age_raw, max_age_raw = match_parsing.group(1), 200\n else:\n return_dict_by_required_age_group[\"unknown\"] += count\n continue\n\n # Find to what \"bucket\" to aggregate the data\n min_age = int(min_age_raw)\n max_age = int(max_age_raw)\n for item in range_dict.items():\n item_min_range, item_max_range = item\n if item_min_range <= min_age <= item_max_range and item_min_range <= max_age <= item_max_range:\n string_age_range = f'{item_min_range:02}-{item_max_range:02}'\n return_dict_by_required_age_group[string_age_range] += count\n break\n\n # Rename the last key\n return_dict_by_required_age_group[\"65+\"] = return_dict_by_required_age_group[\"65-200\"]\n del return_dict_by_required_age_group[\"65-200\"]\n\n return return_dict_by_required_age_group\n\n\ndef get_most_severe_accidents_table_title(location_text):\n return 'תאונות חמורות ב' + location_text\n\n\ndef get_accident_count_by_severity(location_info, location_text, start_time, end_time):\n count_by_severity = get_accidents_stats(table_obj=AccidentMarkerView,\n filters=location_info,\n group_by='accident_severity_hebrew',\n count='accident_severity_hebrew',\n start_time=start_time,\n end_time=end_time)\n severity_dict = {'קטלנית': 'fatal',\n 'קשה': 'severe',\n 'קלה': 'light'}\n items = {}\n total_accidents_count = 0\n start_year = start_time.year\n end_year = end_time.year\n for severity_and_count in count_by_severity:\n accident_severity_hebrew = severity_and_count['accident_severity']\n severity_english = severity_dict[accident_severity_hebrew]\n severity_count_text = 'severity_{}_count'.format(severity_english)\n items[severity_count_text] = severity_and_count['count']\n total_accidents_count += severity_and_count['count']\n items['start_year'] = start_year\n items['end_year'] = end_year\n items['total_accidents_count'] = total_accidents_count\n return items\n\n\ndef get_most_severe_accidents_table(location_info, start_time, end_time):\n entities = 'id', 'provider_code', 'accident_timestamp', 'accident_type_hebrew', 'accident_year'\n accidents = get_most_severe_accidents_with_entities(\n table_obj=AccidentMarkerView,\n filters=location_info,\n entities=entities,\n start_time=start_time,\n end_time=end_time)\n # Add casualties\n for accident in accidents:\n accident['type'] = accident['accident_type']\n dt = accident['accident_timestamp'].to_pydatetime()\n accident['date'] = dt.strftime(\"%d/%m/%y\")\n accident['hour'] = dt.strftime(\"%H:%M\")\n num = get_casualties_count_in_accident(\n accident['id'], accident['provider_code'], 1, accident['accident_year'])\n accident['killed_count'] = num\n num = get_casualties_count_in_accident(\n accident['id'], accident['provider_code'], [2, 3], accident['accident_year'])\n accident['injured_count'] = num\n del accident['accident_timestamp'], accident['accident_type'], accident['id'], accident['provider_code']\n return accidents\n\n\n# count of dead and 
severely injured\ndef get_casualties_count_in_accident(accident_id, provider_code, injury_severity, accident_year):\n filters = {'accident_id': accident_id,\n 'provider_code': provider_code,\n 'injury_severity': injury_severity,\n 'accident_year': accident_year}\n casualties = get_accidents_stats(table_obj=InvolvedMarkerView, filters=filters,\n group_by='injury_severity', count='injury_severity')\n res = 0\n for ca in casualties:\n res += ca['count']\n return res\n\n\n# generate text describing location or road segment of news flash\n# to be used by most severe accidents additional info widget\ndef get_news_flash_location_text(news_flash_id):\n news_flash_item = db.session.query(NewsFlash).filter(\n NewsFlash.id == news_flash_id).first()\n nf = news_flash_item.serialize()\n resolution = nf['resolution'] if nf['resolution'] else ''\n yishuv_name = nf['yishuv_name'] if nf['yishuv_name'] else ''\n road1 = str(int(nf['road1'])) if nf['road1'] else ''\n road2 = str(int(nf['road2'])) if nf['road2'] else ''\n street1_hebrew = nf['street1_hebrew'] if nf['street1_hebrew'] else ''\n road_segment_name = nf['road_segment_name'] if nf['road_segment_name'] else ''\n if resolution == 'כביש בינעירוני' and road1 and road_segment_name:\n res = 'כביש ' + road1 + ' במקטע ' + road_segment_name\n elif resolution == 'עיר' and not yishuv_name:\n res = nf['location']\n elif resolution == 'עיר' and yishuv_name:\n res = nf['yishuv_name']\n elif resolution == 'צומת בינעירוני' and road1 and road2:\n res = 'צומת כביש ' + road1 + ' עם כביש ' + road2\n elif resolution == 'צומת בינעירוני' and road1 and road_segment_name:\n res = 'כביש ' + road1 + ' במקטע ' + road_segment_name\n elif resolution == 'רחוב' and yishuv_name and street1_hebrew:\n res = ' רחוב ' + street1_hebrew + ' ב' + yishuv_name\n else:\n logging.warning(\n \"Did not found quality resolution. Using location field. 
News Flash id:{}\".format(nf['id']))\n res = nf['location']\n return res\n\n\ndef extract_news_flash_obj(news_flash_id):\n news_flash_obj = db.session.query(NewsFlash).filter(\n NewsFlash.id == news_flash_id).first()\n\n if not news_flash_obj:\n logging.warning('Could not find news flash id {}'.format(news_flash_id))\n return None\n\n return news_flash_obj\n\n\ndef sum_road_accidents_by_specific_type(road_data, field_name):\n dict_merge = defaultdict(int)\n dict_merge[field_name] = 0\n dict_merge['תאונות אחרות'] = 0\n\n for accident_data in road_data:\n if accident_data['accident_type'] == field_name:\n dict_merge[field_name] += accident_data['count']\n else:\n dict_merge['תאונות אחרות'] += accident_data['count']\n return dict_merge\n\n\ndef convert_roads_fatal_accidents_to_frontend_view(data_dict):\n data_list = []\n for key, value in data_dict.items():\n data_list.append({'desc': key, 'count': value})\n return data_list\n\n\ndef get_head_to_head_stat(news_flash_id, start_time, end_time):\n news_flash_obj = extract_news_flash_obj(news_flash_id)\n road_data = {}\n filter_dict = {'road_type': CONST.ROAD_TYPE_NOT_IN_CITY_NOT_IN_INTERSECTION,\n 'accident_severity': CONST.ACCIDENT_SEVERITY_DEADLY}\n all_roads_data = get_accidents_stats(table_obj=AccidentMarkerView,\n filters=filter_dict,\n group_by='accident_type_hebrew', count='accident_type_hebrew',\n start_time=start_time, end_time=end_time)\n\n if news_flash_obj.road1 and news_flash_obj.road_segment_name:\n filter_dict.update({'road1': news_flash_obj.road1, 'road_segment_name': news_flash_obj.road_segment_name})\n road_data = get_accidents_stats(table_obj=AccidentMarkerView,\n filters=filter_dict,\n group_by='accident_type_hebrew', count='accident_type_hebrew',\n start_time=start_time, end_time=end_time)\n\n road_data_dict = sum_road_accidents_by_specific_type(road_data, 'התנגשות חזית בחזית')\n all_roads_data_dict = sum_road_accidents_by_specific_type(all_roads_data, 'התנגשות חזית בחזית')\n\n return {'specific_road_segment_fatal_accidents': convert_roads_fatal_accidents_to_frontend_view(road_data_dict),\n 'all_roads_fatal_accidents': convert_roads_fatal_accidents_to_frontend_view(all_roads_data_dict)}\n\n#gets the latest date an accident has occured\ndef get_latest_accident_date(table_obj, filters):\n filters= filters or {}\n filters['provider_code'] = [CONST.CBS_ACCIDENT_TYPE_1_CODE, CONST.CBS_ACCIDENT_TYPE_3_CODE]\n query = db.session.query(func.max(table_obj.accident_timestamp))\n df = pd.read_sql_query(query.statement, query.session.bind)\n return (df.to_dict(orient='records'))[0].get(\"max_1\") # pylint: disable=no-member\n\ndef create_infographics_data(news_flash_id, number_of_years_ago):\n output = {}\n try:\n number_of_years_ago = int(number_of_years_ago)\n except ValueError:\n return Response({})\n if number_of_years_ago < 0 or number_of_years_ago > 100:\n return Response({})\n location_info = extract_news_flash_location(news_flash_id)\n if location_info is None:\n return Response({})\n logging.debug('location_info:{}'.format(location_info))\n location_text = get_news_flash_location_text(news_flash_id)\n logging.debug('location_text:{}'.format(location_text))\n gps = location_info['gps']\n location_info = location_info['data']\n output['meta'] = {'location_info': location_info.copy(),\n 'location_text': location_text}\n output['widgets'] = []\n resolution = location_info.pop('resolution')\n if resolution is None:\n return Response({})\n\n if all(value is None for value in location_info.values()):\n return Response({})\n\n 
last_accident_date=get_latest_accident_date(table_obj=AccidentMarkerView, filters=None)\n #converting to datetime object to get the date\n end_time=last_accident_date.to_pydatetime().date()\n\n start_time = datetime.date(\n end_time.year + 1 - number_of_years_ago, 1, 1)\n\n #accident_severity count\n items = get_accident_count_by_severity(location_info=location_info,\n location_text=location_text,\n start_time=start_time,\n end_time=end_time)\n\n accident_count_by_severity = Widget(name='accident_count_by_severity',\n rank=1,\n items=items)\n output['widgets'].append(accident_count_by_severity.serialize())\n\n # most severe accidents table\n most_severe_accidents_table = Widget(name='most_severe_accidents_table',\n rank=2,\n items=get_most_severe_accidents_table(location_info, start_time, end_time),\n text={'title':get_most_severe_accidents_table_title(location_text)})\n output['widgets'].append(most_severe_accidents_table.serialize())\n\n # most severe accidents\n most_severe_accidents = Widget(name='most_severe_accidents',\n rank=3,\n items=get_most_severe_accidents(table_obj=AccidentMarkerView,\n filters=location_info,\n start_time=start_time,\n end_time=end_time))\n output['widgets'].append(most_severe_accidents.serialize())\n\n # street view\n street_view = Widget(name='street_view',\n rank=4,\n items={'longitude': gps['lon'],\n 'latitude': gps['lat']})\n output['widgets'].append(street_view.serialize())\n\n # head to head accidents\n head_on_collisions_comparison = Widget(name='head_on_collisions_comparison',\n rank=5,\n items=get_head_to_head_stat(news_flash_id=news_flash_id,\n start_time=start_time,\n end_time=end_time))\n output['widgets'].append(head_on_collisions_comparison.serialize())\n\n # accident_type count\n accident_count_by_accident_type = Widget(name='accident_count_by_accident_type',\n rank=6,\n items=get_accident_count_by_accident_type(location_info=location_info,\n start_time=start_time,\n end_time=end_time))\n output['widgets'].append(accident_count_by_accident_type.serialize())\n\n # accidents heat map\n accidents_heat_map = Widget(name='accidents_heat_map',\n rank=7,\n items=get_accidents_heat_map(table_obj=AccidentMarkerView,\n filters=location_info,\n start_time=start_time,\n end_time=end_time))\n output['widgets'].append(accidents_heat_map.serialize())\n\n # accident count by accident year\n accident_count_by_accident_year = Widget(name='accident_count_by_accident_year',\n rank=8,\n items=get_accidents_stats(table_obj=AccidentMarkerView,\n filters=location_info,\n group_by='accident_year',\n count='accident_year',\n start_time=start_time,\n end_time=end_time),\n text={'title':'כמות תאונות'})\n output['widgets'].append(accident_count_by_accident_year.serialize())\n\n # injured count by accident year\n injured_count_by_accident_year = Widget(name='injured_count_by_accident_year',\n rank=9,\n items=get_accidents_stats(table_obj=InvolvedMarkerView,\n filters=get_injured_filters(location_info),\n group_by='accident_year',\n count='accident_year',\n start_time=start_time,\n end_time=end_time),\n text={'title':'כמות פצועים'})\n output['widgets'].append(injured_count_by_accident_year.serialize())\n\n # accident count on day light\n accident_count_by_day_night = Widget(name='accident_count_by_day_night',\n rank=10,\n items=get_accidents_stats(table_obj=AccidentMarkerView,\n filters=location_info,\n group_by='day_night_hebrew',\n count='day_night_hebrew',\n start_time=start_time,\n end_time=end_time),\n text={'title':'כמות תאונות ביום ובלילה'})\n 
output['widgets'].append(accident_count_by_day_night.serialize())\n\n # accidents distribution count by hour\n accidents_count_by_hour = Widget(name='accidents_count_by_hour',\n rank=11,\n items=get_accidents_stats(table_obj=AccidentMarkerView,\n filters=location_info,\n group_by='accident_hour',\n count='accident_hour',\n start_time=start_time,\n end_time=end_time),\n text={'title':'כמות תאונות לפי שעה'})\n output['widgets'].append(accidents_count_by_hour.serialize())\n\n # accident count by road_light\n accident_count_by_road_light = Widget(name='accident_count_by_road_light',\n rank=12,\n items=get_accidents_stats(table_obj=AccidentMarkerView,\n filters=location_info,\n group_by='road_light_hebrew',\n count='road_light_hebrew',\n start_time=start_time,\n end_time=end_time),\n text={'title':'כמות תאונות לפי תאורה'})\n output['widgets'].append(accident_count_by_road_light.serialize())\n\n # accident count by road_segment\n top_road_segments_accidents_per_km = Widget(name='top_road_segments_accidents_per_km',\n rank=13,\n items=get_top_road_segments_accidents_per_km(resolution=resolution,\n location_info=location_info,\n start_time=start_time,\n end_time=end_time))\n output['widgets'].append(top_road_segments_accidents_per_km.serialize())\n\n # injured count per age group\n data_of_injured_count_per_age_group_raw = get_accidents_stats(table_obj=InvolvedMarkerView,\n filters=get_injured_filters(location_info),\n group_by='age_group_hebrew',\n count='age_group_hebrew',\n start_time=start_time,\n end_time=end_time)\n data_of_injured_count_per_age_group = filter_and_group_injured_count_per_age_group(data_of_injured_count_per_age_group_raw)\n injured_count_per_age_group = Widget(name='injured_count_per_age_group',\n rank=14,\n items=data_of_injured_count_per_age_group)\n output['widgets'].append(injured_count_per_age_group.serialize())\n\n # vision zero\n vision_zero = Widget(name='vision_zero',\n rank=15,\n items=['vision_zero_2_plus_1'])\n output['widgets'].append(vision_zero.serialize())\n\n return Response(json.dumps(output, default=str), mimetype=\"application/json\")\n", "path": "anyway/infographics_utils.py" } ]
diff --git a/anyway/infographics_utils.py b/anyway/infographics_utils.py index 672a3170e..62280da41 100644 --- a/anyway/infographics_utils.py +++ b/anyway/infographics_utils.py @@ -338,6 +338,9 @@ def extract_news_flash_obj(news_flash_id): def sum_road_accidents_by_specific_type(road_data, field_name): dict_merge = defaultdict(int) + dict_merge[field_name] = 0 + dict_merge['תאונות אחרות'] = 0 + for accident_data in road_data: if accident_data['accident_type'] == field_name: dict_merge[field_name] += accident_data['count']
Set default values of 0 to elements in head_on_collisions_comparison Example for the element that is missing ![image](https://user-images.githubusercontent.com/58947331/81738603-9f830c80-94a2-11ea-8dd3-798dc5356a1a.png)
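A minimal sketch of why the patch in this record pre-seeds the two dictionary keys: a defaultdict(int) only creates a key on first access, so when a road segment has no matching accidents the resulting dict (and the frontend list derived from it) is missing the zero-valued entries. The helper names and sample data below are illustrative, not the project's real query output.

```python
from collections import defaultdict

FIELD = "התנגשות חזית בחזית"  # head-on collision
OTHER = "תאונות אחרות"        # other accidents


def sum_without_defaults(road_data):
    merged = defaultdict(int)
    for row in road_data:
        key = FIELD if row["accident_type"] == FIELD else OTHER
        merged[key] += row["count"]
    return merged


def sum_with_defaults(road_data):
    merged = defaultdict(int)
    merged[FIELD] = 0   # ensure both entries always exist,
    merged[OTHER] = 0   # even when road_data is empty
    for row in road_data:
        key = FIELD if row["accident_type"] == FIELD else OTHER
        merged[key] += row["count"]
    return merged


empty_segment = []  # a segment with no fatal accidents in the period
print(dict(sum_without_defaults(empty_segment)))  # {} -> element missing in widget
print(dict(sum_with_defaults(empty_segment)))     # both keys present with count 0
```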
pyca__cryptography-7406
[ { "content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nimport getpass\nimport glob\nimport io\nimport os\nimport subprocess\nimport time\nimport zipfile\n\nimport click\n\nimport requests\n\n\ndef run(*args, **kwargs):\n print(\"[running] {0}\".format(list(args)))\n subprocess.check_call(list(args), **kwargs)\n\n\ndef wait_for_build_complete_github_actions(session, token, run_url):\n while True:\n response = session.get(\n run_url,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n response.raise_for_status()\n if response.json()[\"conclusion\"] is not None:\n break\n time.sleep(3)\n\n\ndef download_artifacts_github_actions(session, token, run_url):\n response = session.get(\n run_url,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n response.raise_for_status()\n\n response = session.get(\n response.json()[\"artifacts_url\"],\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n response.raise_for_status()\n paths = []\n for artifact in response.json()[\"artifacts\"]:\n response = session.get(\n artifact[\"archive_download_url\"],\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n with zipfile.ZipFile(io.BytesIO(response.content)) as z:\n for name in z.namelist():\n if not name.endswith(\".whl\"):\n continue\n p = z.open(name)\n out_path = os.path.join(\n os.path.dirname(__file__),\n \"dist\",\n os.path.basename(name),\n )\n with open(out_path, \"wb\") as f:\n f.write(p.read())\n paths.append(out_path)\n return paths\n\n\ndef fetch_github_actions_wheels(token, version):\n session = requests.Session()\n\n response = session.get(\n (\n \"https://api.github.com/repos/pyca/cryptography/actions/workflows/\"\n \"wheel-builder.yml/runs?event=push\"\n ),\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n response.raise_for_status()\n run_url = response.json()[\"workflow_runs\"][0][\"url\"]\n wait_for_build_complete_github_actions(session, token, run_url)\n return download_artifacts_github_actions(session, token, run_url)\n\n\[email protected]()\[email protected](\"version\")\ndef release(version):\n \"\"\"\n ``version`` should be a string like '0.4' or '1.0'.\n \"\"\"\n github_token = getpass.getpass(\"Github person access token: \")\n\n # Tag and push the tag (this will trigger the wheel builder in Actions)\n run(\"git\", \"tag\", \"-s\", version, \"-m\", \"{0} release\".format(version))\n run(\"git\", \"push\", \"--tags\")\n\n # Generate and upload vector packages\n run(\"python\", \"setup.py\", \"sdist\", \"bdist_wheel\", cwd=\"vectors/\")\n packages = glob.glob(\n \"vectors/dist/cryptography_vectors-{0}*\".format(version)\n )\n run(\"twine\", \"upload\", \"-s\", *packages)\n\n # Generate sdist for upload\n run(\"python\", \"setup.py\", \"sdist\")\n sdist = glob.glob(\"dist/cryptography-{0}*\".format(version))\n\n # Wait for Actions to complete and download the wheels\n github_actions_wheel_paths = fetch_github_actions_wheels(\n github_token, version\n )\n\n # Upload sdist and wheels\n run(\"twine\", \"upload\", \"-s\", *sdist)\n run(\"twine\", \"upload\", *github_actions_wheel_paths)\n\n\nif __name__ == \"__main__\":\n release()\n", "path": 
"release.py" } ]
[ { "content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nimport getpass\nimport glob\nimport io\nimport os\nimport subprocess\nimport time\nimport zipfile\n\nimport click\n\nimport requests\n\n\ndef run(*args, **kwargs):\n print(\"[running] {0}\".format(list(args)))\n subprocess.check_call(list(args), **kwargs)\n\n\ndef wait_for_build_complete_github_actions(session, token, run_url):\n while True:\n response = session.get(\n run_url,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n response.raise_for_status()\n if response.json()[\"conclusion\"] is not None:\n break\n time.sleep(3)\n\n\ndef download_artifacts_github_actions(session, token, run_url):\n response = session.get(\n run_url,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n response.raise_for_status()\n\n response = session.get(\n response.json()[\"artifacts_url\"],\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n response.raise_for_status()\n paths = []\n for artifact in response.json()[\"artifacts\"]:\n response = session.get(\n artifact[\"archive_download_url\"],\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n with zipfile.ZipFile(io.BytesIO(response.content)) as z:\n for name in z.namelist():\n if not name.endswith(\".whl\"):\n continue\n p = z.open(name)\n out_path = os.path.join(\n os.path.dirname(__file__),\n \"dist\",\n os.path.basename(name),\n )\n with open(out_path, \"wb\") as f:\n f.write(p.read())\n paths.append(out_path)\n return paths\n\n\ndef fetch_github_actions_wheels(token, version):\n session = requests.Session()\n\n response = session.get(\n (\n \"https://api.github.com/repos/pyca/cryptography/actions/workflows/\"\n \"wheel-builder.yml/runs?event=push\"\n ),\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n response.raise_for_status()\n run_url = response.json()[\"workflow_runs\"][0][\"url\"]\n wait_for_build_complete_github_actions(session, token, run_url)\n return download_artifacts_github_actions(session, token, run_url)\n\n\[email protected]()\[email protected](\"version\")\ndef release(version):\n \"\"\"\n ``version`` should be a string like '0.4' or '1.0'.\n \"\"\"\n print(\n f\"Create a new GH PAT at: \"\n f\"https://github.com/settings/tokens/new?\"\n f\"description={version}&scopes=repo\"\n )\n github_token = getpass.getpass(\"Github person access token: \")\n\n # Tag and push the tag (this will trigger the wheel builder in Actions)\n run(\"git\", \"tag\", \"-s\", version, \"-m\", \"{0} release\".format(version))\n run(\"git\", \"push\", \"--tags\")\n\n # Generate and upload vector packages\n run(\"python\", \"setup.py\", \"sdist\", \"bdist_wheel\", cwd=\"vectors/\")\n packages = glob.glob(\n \"vectors/dist/cryptography_vectors-{0}*\".format(version)\n )\n run(\"twine\", \"upload\", \"-s\", *packages)\n\n # Generate sdist for upload\n run(\"python\", \"setup.py\", \"sdist\")\n sdist = glob.glob(\"dist/cryptography-{0}*\".format(version))\n\n # Wait for Actions to complete and download the wheels\n github_actions_wheel_paths = fetch_github_actions_wheels(\n github_token, version\n )\n\n # Upload sdist and wheels\n run(\"twine\", \"upload\", 
\"-s\", *sdist)\n run(\"twine\", \"upload\", *github_actions_wheel_paths)\n\n\nif __name__ == \"__main__\":\n release()\n", "path": "release.py" } ]
diff --git a/release.py b/release.py index 5bc996aab02e..70b4e106051f 100644 --- a/release.py +++ b/release.py @@ -103,6 +103,11 @@ def release(version): """ ``version`` should be a string like '0.4' or '1.0'. """ + print( + f"Create a new GH PAT at: " + f"https://github.com/settings/tokens/new?" + f"description={version}&scopes=repo" + ) github_token = getpass.getpass("Github person access token: ") # Tag and push the tag (this will trigger the wheel builder in Actions)
release.py should link to GH create PAT page

We can pre-fill what permissions are needed to improve the UX of doing a release. Example URL: https://github.com/settings/tokens/new?description=foo&scopes=repo,workflow

@reaperhulk do you know what scopes are required?
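For context on the issue above, building the pre-filled token URL is straightforward; a hypothetical helper (not part of the repository) that produces either the `repo`-only URL used in the merged change or the broader `repo,workflow` example from the issue could look like this::

    from urllib.parse import quote


    def new_pat_url(description: str, scopes: tuple = ("repo",)) -> str:
        """Return a github.com/settings/tokens/new URL with description and scopes pre-filled."""
        return (
            "https://github.com/settings/tokens/new"
            f"?description={quote(description)}&scopes={','.join(scopes)}"
        )


    print(new_pat_url("1.0"))                        # ...&scopes=repo
    print(new_pat_url("1.0", ("repo", "workflow")))  # ...&scopes=repo,workflow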
Project-MONAI__MONAI-2375
[ { "content": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport collections.abc\nimport math\nimport pickle\nimport shutil\nimport sys\nimport tempfile\nimport threading\nimport time\nimport warnings\nfrom copy import deepcopy\nfrom multiprocessing.pool import ThreadPool\nfrom pathlib import Path\nfrom typing import IO, TYPE_CHECKING, Any, Callable, Dict, List, Optional, Sequence, Union\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset as _TorchDataset\nfrom torch.utils.data import Subset\n\nfrom monai.data.utils import first, pickle_hashing\nfrom monai.transforms import Compose, Randomizable, ThreadUnsafe, Transform, apply_transform\nfrom monai.utils import MAX_SEED, get_seed, min_version, optional_import\n\nif TYPE_CHECKING:\n from tqdm import tqdm\n\n has_tqdm = True\nelse:\n tqdm, has_tqdm = optional_import(\"tqdm\", \"4.47.0\", min_version, \"tqdm\")\n\nlmdb, _ = optional_import(\"lmdb\")\n\n\nclass Dataset(_TorchDataset):\n \"\"\"\n A generic dataset with a length property and an optional callable data transform\n when fetching a data sample.\n If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,\n for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset\n\n For example, typical input data can be a list of dictionaries::\n\n [{ { {\n 'img': 'image1.nii.gz', 'img': 'image2.nii.gz', 'img': 'image3.nii.gz',\n 'seg': 'label1.nii.gz', 'seg': 'label2.nii.gz', 'seg': 'label3.nii.gz',\n 'extra': 123 'extra': 456 'extra': 789\n }, }, }]\n \"\"\"\n\n def __init__(self, data: Sequence, transform: Optional[Callable] = None) -> None:\n \"\"\"\n Args:\n data: input data to load and transform to generate dataset for model.\n transform: a callable data transform on input data.\n\n \"\"\"\n self.data = data\n self.transform = transform\n\n def __len__(self) -> int:\n return len(self.data)\n\n def _transform(self, index: int):\n \"\"\"\n Fetch single data item from `self.data`.\n \"\"\"\n data_i = self.data[index]\n return apply_transform(self.transform, data_i) if self.transform is not None else data_i\n\n def __getitem__(self, index: Union[int, slice, Sequence[int]]):\n \"\"\"\n Returns a `Subset` if `index` is a slice or Sequence, a data item otherwise.\n \"\"\"\n if isinstance(index, slice):\n # dataset[:42]\n start, stop, step = index.indices(len(self))\n indices = range(start, stop, step)\n return Subset(dataset=self, indices=indices)\n if isinstance(index, collections.abc.Sequence):\n # dataset[[1, 3, 4]]\n return Subset(dataset=self, indices=index)\n return self._transform(index)\n\n\nclass PersistentDataset(Dataset):\n \"\"\"\n Persistent storage of pre-computed values to efficiently manage larger than memory dictionary format data,\n it can operate transforms for specific fields. 
Results from the non-random transform components are computed\n when first used, and stored in the `cache_dir` for rapid retrieval on subsequent uses.\n If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,\n for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset\n\n For example, typical input data can be a list of dictionaries::\n\n [{ { {\n 'img': 'image1.nii.gz', 'img': 'image2.nii.gz', 'img': 'image3.nii.gz',\n 'seg': 'label1.nii.gz', 'seg': 'label2.nii.gz', 'seg': 'label3.nii.gz',\n 'extra': 123 'extra': 456 'extra': 789\n }, }, }]\n\n For a composite transform like\n\n .. code-block:: python\n\n [ LoadImaged(keys=['image', 'label']),\n Orientationd(keys=['image', 'label'], axcodes='RAS'),\n ScaleIntensityRanged(keys=['image'], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),\n RandCropByPosNegLabeld(keys=['image', 'label'], label_key='label', spatial_size=(96, 96, 96),\n pos=1, neg=1, num_samples=4, image_key='image', image_threshold=0),\n ToTensord(keys=['image', 'label'])]\n\n Upon first use a filename based dataset will be processed by the transform for the\n [LoadImaged, Orientationd, ScaleIntensityRanged] and the resulting tensor written to\n the `cache_dir` before applying the remaining random dependant transforms\n [RandCropByPosNegLabeld, ToTensord] elements for use in the analysis.\n\n Subsequent uses of a dataset directly read pre-processed results from `cache_dir`\n followed by applying the random dependant parts of transform processing.\n\n Note:\n The input data must be a list of file paths and will hash them as cache keys.\n\n When loading persistent cache content, it can't guarantee the cached data matches current\n transform chain, so please make sure to use exactly the same non-random transforms and the\n args as the cache content, otherwise, it may cause unexpected errors.\n\n \"\"\"\n\n def __init__(\n self,\n data: Sequence,\n transform: Union[Sequence[Callable], Callable],\n cache_dir: Optional[Union[Path, str]],\n hash_func: Callable[..., bytes] = pickle_hashing,\n ) -> None:\n \"\"\"\n Args:\n data: input data file paths to load and transform to generate dataset for model.\n `PersistentDataset` expects input data to be a list of serializable\n and hashes them as cache keys using `hash_func`.\n transform: transforms to execute operations on input data.\n cache_dir: If specified, this is the location for persistent storage\n of pre-computed transformed data tensors. The cache_dir is computed once, and\n persists on disk until explicitly removed. 
Different runs, programs, experiments\n may share a common cache dir provided that the transforms pre-processing is consistent.\n If `cache_dir` doesn't exist, will automatically create it.\n If `cache_dir` is `None`, there is effectively no caching.\n hash_func: a callable to compute hash from data items to be cached.\n defaults to `monai.data.utils.pickle_hashing`.\n\n \"\"\"\n if not isinstance(transform, Compose):\n transform = Compose(transform)\n super().__init__(data=data, transform=transform)\n self.cache_dir = Path(cache_dir) if cache_dir is not None else None\n self.hash_func = hash_func\n if self.cache_dir is not None:\n if not self.cache_dir.exists():\n self.cache_dir.mkdir(parents=True)\n if not self.cache_dir.is_dir():\n raise ValueError(\"cache_dir must be a directory.\")\n\n def _pre_transform(self, item_transformed):\n \"\"\"\n Process the data from original state up to the first random element.\n\n Args:\n item_transformed: The data to be transformed\n\n Returns:\n the transformed element up to the first identified\n random transform object\n\n \"\"\"\n for _transform in self.transform.transforms: # type:ignore\n # execute all the deterministic transforms\n if isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):\n break\n # this is to be consistent with CacheDataset even though it's not in a multi-thread situation.\n _xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform\n item_transformed = apply_transform(_xform, item_transformed)\n return item_transformed\n\n def _post_transform(self, item_transformed):\n \"\"\"\n Process the data from before the first random transform to the final state ready for evaluation.\n\n Args:\n item_transformed: The data to be transformed (already processed up to the first random transform)\n\n Returns:\n the transformed element through the random transforms\n\n \"\"\"\n if not isinstance(self.transform, Compose):\n raise ValueError(\"transform must be an instance of monai.transforms.Compose.\")\n start_post_randomize_run = False\n for _transform in self.transform.transforms:\n if (\n start_post_randomize_run\n or isinstance(_transform, Randomizable)\n or not isinstance(_transform, Transform)\n ):\n start_post_randomize_run = True\n item_transformed = apply_transform(_transform, item_transformed)\n return item_transformed\n\n def _cachecheck(self, item_transformed):\n \"\"\"\n A function to cache the expensive input data transform operations\n so that huge data sets (larger than computer memory) can be processed\n on the fly as needed, and intermediate results written to disk for\n future use.\n\n Args:\n item_transformed: The current data element to be mutated into transformed representation\n\n Returns:\n The transformed data_element, either from cache, or explicitly computing it.\n\n Warning:\n The current implementation does not encode transform information as part of the\n hashing mechanism used for generating cache names. If the transforms applied are\n changed in any way, the objects in the cache dir will be invalid. 
The hash for the\n cache is ONLY dependant on the input filename paths.\n\n \"\"\"\n hashfile = None\n if self.cache_dir is not None:\n data_item_md5 = self.hash_func(item_transformed).decode(\"utf-8\")\n hashfile = self.cache_dir / f\"{data_item_md5}.pt\"\n\n if hashfile is not None and hashfile.is_file(): # cache hit\n return torch.load(hashfile)\n\n _item_transformed = self._pre_transform(deepcopy(item_transformed)) # keep the original hashed\n if hashfile is not None:\n # NOTE: Writing to a temporary directory and then using a nearly atomic rename operation\n # to make the cache more robust to manual killing of parent process\n # which may leave partially written cache files in an incomplete state\n with tempfile.TemporaryDirectory() as tmpdirname:\n temp_hash_file = Path(tmpdirname) / hashfile.name\n torch.save(_item_transformed, temp_hash_file)\n if temp_hash_file.is_file() and not hashfile.is_file():\n # On Unix, if target exists and is a file, it will be replaced silently if the user has permission.\n # for more details: https://docs.python.org/3/library/shutil.html#shutil.move.\n try:\n shutil.move(temp_hash_file, hashfile)\n except FileExistsError:\n pass\n return _item_transformed\n\n def _transform(self, index: int):\n pre_random_item = self._cachecheck(self.data[index])\n return self._post_transform(pre_random_item)\n\n\nclass CacheNTransDataset(PersistentDataset):\n \"\"\"\n Extension of `PersistentDataset`, tt can also cache the result of first N transforms, no matter it's random or not.\n\n \"\"\"\n\n def __init__(\n self,\n data: Sequence,\n transform: Union[Sequence[Callable], Callable],\n cache_n_trans: int,\n cache_dir: Optional[Union[Path, str]],\n hash_func: Callable[..., bytes] = pickle_hashing,\n ) -> None:\n \"\"\"\n Args:\n data: input data file paths to load and transform to generate dataset for model.\n `PersistentDataset` expects input data to be a list of serializable\n and hashes them as cache keys using `hash_func`.\n transform: transforms to execute operations on input data.\n cache_n_trans: cache the result of first N transforms.\n cache_dir: If specified, this is the location for persistent storage\n of pre-computed transformed data tensors. The cache_dir is computed once, and\n persists on disk until explicitly removed. 
Different runs, programs, experiments\n may share a common cache dir provided that the transforms pre-processing is consistent.\n If `cache_dir` doesn't exist, will automatically create it.\n If `cache_dir` is `None`, there is effectively no caching.\n hash_func: a callable to compute hash from data items to be cached.\n defaults to `monai.data.utils.pickle_hashing`.\n\n \"\"\"\n super().__init__(data=data, transform=transform, cache_dir=cache_dir, hash_func=hash_func)\n self.cache_n_trans = cache_n_trans\n\n def _pre_transform(self, item_transformed):\n \"\"\"\n Process the data from original state up to the N element.\n\n Args:\n item_transformed: The data to be transformed\n\n Returns:\n the transformed element up to the N transform object\n \"\"\"\n if not isinstance(self.transform, Compose):\n raise ValueError(\"transform must be an instance of monai.transforms.Compose.\")\n for i, _transform in enumerate(self.transform.transforms):\n if i == self.cache_n_trans:\n break\n _xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform\n item_transformed = apply_transform(_xform, item_transformed)\n return item_transformed\n\n def _post_transform(self, item_transformed):\n \"\"\"\n Process the data from before the N + 1 transform to the final state ready for evaluation.\n\n Args:\n item_transformed: The data to be transformed (already processed up to the first N transform)\n\n Returns:\n the final transformed result\n \"\"\"\n if not isinstance(self.transform, Compose):\n raise ValueError(\"transform must be an instance of monai.transforms.Compose.\")\n for i, _transform in enumerate(self.transform.transforms):\n if i >= self.cache_n_trans:\n item_transformed = apply_transform(_transform, item_transformed)\n return item_transformed\n\n\nclass LMDBDataset(PersistentDataset):\n \"\"\"\n Extension of `PersistentDataset` using LMDB as the backend.\n\n See Also:\n :py:class:`monai.data.PersistentDataset`\n\n Examples:\n\n >>> items = [{\"data\": i} for i in range(5)]\n # [{'data': 0}, {'data': 1}, {'data': 2}, {'data': 3}, {'data': 4}]\n >>> lmdb_ds = monai.data.LMDBDataset(items, transform=monai.transforms.SimulateDelayd(\"data\", delay_time=1))\n >>> print(list(lmdb_ds)) # using the cached results\n\n \"\"\"\n\n def __init__(\n self,\n data: Sequence,\n transform: Union[Sequence[Callable], Callable],\n cache_dir: Union[Path, str] = \"cache\",\n hash_func: Callable[..., bytes] = pickle_hashing,\n db_name: str = \"monai_cache\",\n progress: bool = True,\n pickle_protocol=pickle.HIGHEST_PROTOCOL,\n lmdb_kwargs: Optional[dict] = None,\n ) -> None:\n \"\"\"\n Args:\n data: input data file paths to load and transform to generate dataset for model.\n `LMDBDataset` expects input data to be a list of serializable\n and hashes them as cache keys using `hash_func`.\n transform: transforms to execute operations on input data.\n cache_dir: if specified, this is the location for persistent storage\n of pre-computed transformed data tensors. The cache_dir is computed once, and\n persists on disk until explicitly removed. Different runs, programs, experiments\n may share a common cache dir provided that the transforms pre-processing is consistent.\n If the cache_dir doesn't exist, will automatically create it. Defaults to \"./cache\".\n hash_func: a callable to compute hash from data items to be cached.\n defaults to `monai.data.utils.pickle_hashing`.\n db_name: lmdb database file name. 
Defaults to \"monai_cache\".\n progress: whether to display a progress bar.\n pickle_protocol: pickle protocol version. Defaults to pickle.HIGHEST_PROTOCOL.\n https://docs.python.org/3/library/pickle.html#pickle-protocols\n lmdb_kwargs: additional keyword arguments to the lmdb environment.\n for more details please visit: https://lmdb.readthedocs.io/en/release/#environment-class\n \"\"\"\n super().__init__(data=data, transform=transform, cache_dir=cache_dir, hash_func=hash_func)\n self.progress = progress\n if not self.cache_dir:\n raise ValueError(\"cache_dir must be specified.\")\n self.db_file = self.cache_dir / f\"{db_name}.lmdb\"\n self.pickle_protocol = pickle_protocol\n self.lmdb_kwargs = lmdb_kwargs or {}\n if not self.lmdb_kwargs.get(\"map_size\", 0):\n self.lmdb_kwargs[\"map_size\"] = 1024 ** 4 # default map_size\n self._read_env = None\n print(f\"Accessing lmdb file: {self.db_file.absolute()}.\")\n\n def _fill_cache_start_reader(self):\n # create cache\n self.lmdb_kwargs[\"readonly\"] = False\n env = lmdb.open(path=f\"{self.db_file}\", subdir=False, **self.lmdb_kwargs)\n if self.progress and not has_tqdm:\n warnings.warn(\"LMDBDataset: tqdm is not installed. not displaying the caching progress.\")\n for item in tqdm(self.data) if has_tqdm and self.progress else self.data:\n key = self.hash_func(item)\n done, retry, val = False, 5, None\n while not done and retry > 0:\n try:\n with env.begin(write=True) as txn:\n with txn.cursor() as cursor:\n done = cursor.set_key(key)\n if done:\n continue\n if val is None:\n val = self._pre_transform(deepcopy(item)) # keep the original hashed\n val = pickle.dumps(val, protocol=self.pickle_protocol)\n txn.put(key, val)\n done = True\n except lmdb.MapFullError:\n done, retry = False, retry - 1\n size = env.info()[\"map_size\"]\n new_size = size * 2\n warnings.warn(f\"Resizing the cache database from {int(size) >> 20}MB to {int(new_size) >> 20}MB.\")\n env.set_mapsize(new_size)\n except lmdb.MapResizedError:\n # the mapsize is increased by another process\n # set_mapsize with a size of 0 to adopt the new size,\n env.set_mapsize(0)\n if not done: # still has the map full error\n size = env.info()[\"map_size\"]\n env.close()\n raise ValueError(f\"LMDB map size reached, increase size above current size of {size}.\")\n size = env.info()[\"map_size\"]\n env.close()\n # read-only database env\n self.lmdb_kwargs[\"readonly\"] = True\n self.lmdb_kwargs[\"map_size\"] = size\n if self.lmdb_kwargs.get(\"lock\", None) is None:\n self.lmdb_kwargs[\"lock\"] = False\n if self.lmdb_kwargs.get(\"readahead\", None) is None:\n self.lmdb_kwargs[\"readahead\"] = False\n return lmdb.open(path=f\"{self.db_file}\", subdir=False, **self.lmdb_kwargs)\n\n def _cachecheck(self, item_transformed):\n \"\"\"\n if the item is not found in the lmdb file, resolves to the persistent cache default behaviour.\n\n \"\"\"\n if self._read_env is None:\n self._read_env = self._fill_cache_start_reader()\n with self._read_env.begin(write=False) as txn:\n data = txn.get(self.hash_func(item_transformed))\n if data is None:\n warnings.warn(\"LMDBDataset: cache key not found, running fallback caching.\")\n return super()._cachecheck(item_transformed)\n try:\n return pickle.loads(data)\n except Exception as err:\n raise RuntimeError(\"Invalid cache value, corrupted lmdb file?\") from err\n\n def info(self):\n \"\"\"\n Returns: dataset info dictionary.\n\n \"\"\"\n if self._read_env is None:\n self._read_env = self._fill_cache_start_reader()\n out = dict(self._read_env.info())\n out[\"size\"] = 
len(self.data)\n out[\"filename\"] = f\"{self.db_file.absolute()}\"\n return out\n\n\nclass CacheDataset(Dataset):\n \"\"\"\n Dataset with cache mechanism that can load data and cache deterministic transforms' result during training.\n\n By caching the results of non-random preprocessing transforms, it accelerates the training data pipeline.\n If the requested data is not in the cache, all transforms will run normally\n (see also :py:class:`monai.data.dataset.Dataset`).\n\n Users can set the cache rate or number of items to cache.\n It is recommended to experiment with different `cache_num` or `cache_rate` to identify the best training speed.\n\n To improve the caching efficiency, please always put as many as possible non-random transforms\n before the randomized ones when composing the chain of transforms.\n If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,\n for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset\n\n For example, if the transform is a `Compose` of::\n\n transforms = Compose([\n LoadImaged(),\n AddChanneld(),\n Spacingd(),\n Orientationd(),\n ScaleIntensityRanged(),\n RandCropByPosNegLabeld(),\n ToTensord()\n ])\n\n when `transforms` is used in a multi-epoch training pipeline, before the first training epoch,\n this dataset will cache the results up to ``ScaleIntensityRanged``, as\n all non-random transforms `LoadImaged`, `AddChanneld`, `Spacingd`, `Orientationd`, `ScaleIntensityRanged`\n can be cached. During training, the dataset will load the cached results and run\n ``RandCropByPosNegLabeld`` and ``ToTensord``, as ``RandCropByPosNegLabeld`` is a randomized transform\n and the outcome not cached.\n\n Note:\n `CacheDataset` executes non-random transforms and prepares cache content in the main process before\n the first epoch, then all the subprocesses of DataLoader will read the same cache content in the main process\n during training. it may take a long time to prepare cache content according to the size of expected cache data.\n So to debug or verify the program before real training, users can set `cache_rate=0.0` or `cache_num=0` to\n temporarily skip caching.\n\n \"\"\"\n\n def __init__(\n self,\n data: Sequence,\n transform: Union[Sequence[Callable], Callable],\n cache_num: int = sys.maxsize,\n cache_rate: float = 1.0,\n num_workers: Optional[int] = None,\n progress: bool = True,\n ) -> None:\n \"\"\"\n Args:\n data: input data to load and transform to generate dataset for model.\n transform: transforms to execute operations on input data.\n cache_num: number of items to be cached. 
Default is `sys.maxsize`.\n will take the minimum of (cache_num, data_length x cache_rate, data_length).\n cache_rate: percentage of cached data in total, default is 1.0 (cache all).\n will take the minimum of (cache_num, data_length x cache_rate, data_length).\n num_workers: the number of worker processes to use.\n If num_workers is None then the number returned by os.cpu_count() is used.\n progress: whether to display a progress bar.\n \"\"\"\n if not isinstance(transform, Compose):\n transform = Compose(transform)\n super().__init__(data=data, transform=transform)\n self.progress = progress\n self.cache_num = min(int(cache_num), int(len(data) * cache_rate), len(data))\n self.num_workers = num_workers\n if self.num_workers is not None:\n self.num_workers = max(int(self.num_workers), 1)\n self._cache: List = self._fill_cache()\n\n def _fill_cache(self) -> List:\n if self.cache_num <= 0:\n return []\n if self.progress and not has_tqdm:\n warnings.warn(\"tqdm is not installed, will not show the caching progress bar.\")\n with ThreadPool(self.num_workers) as p:\n if self.progress and has_tqdm:\n return list(\n tqdm(\n p.imap(self._load_cache_item, range(self.cache_num)),\n total=self.cache_num,\n desc=\"Loading dataset\",\n )\n )\n return list(p.imap(self._load_cache_item, range(self.cache_num)))\n\n def _load_cache_item(self, idx: int):\n \"\"\"\n Args:\n idx: the index of the input data sequence.\n \"\"\"\n item = self.data[idx]\n for _transform in self.transform.transforms: # type:ignore\n # execute all the deterministic transforms\n if isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):\n break\n _xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform\n item = apply_transform(_xform, item)\n return item\n\n def _transform(self, index: int):\n if index % len(self) >= self.cache_num: # support negative index\n # no cache for this index, execute all the transforms directly\n return super()._transform(index)\n # load data from cache and execute from the first random transform\n start_run = False\n if self._cache is None:\n self._cache = self._fill_cache()\n data = self._cache[index]\n if not isinstance(self.transform, Compose):\n raise ValueError(\"transform must be an instance of monai.transforms.Compose.\")\n for _transform in self.transform.transforms:\n if start_run or isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):\n # only need to deep copy data on first non-deterministic transform\n if not start_run:\n start_run = True\n data = deepcopy(data)\n data = apply_transform(_transform, data)\n return data\n\n\nclass SmartCacheDataset(Randomizable, CacheDataset):\n \"\"\"\n Re-implementation of the SmartCache mechanism in NVIDIA Clara-train SDK.\n At any time, the cache pool only keeps a subset of the whole dataset. In each epoch, only the items\n in the cache are used for training. This ensures that data needed for training is readily available,\n keeping GPU resources busy. Note that cached items may still have to go through a non-deterministic\n transform sequence before being fed to GPU. At the same time, another thread is preparing replacement\n items by applying the transform sequence to items not in cache. 
Once one epoch is completed, Smart\n Cache replaces the same number of items with replacement items.\n Smart Cache uses a simple `running window` algorithm to determine the cache content and replacement items.\n Let N be the configured number of objects in cache; and R be the number of replacement objects (R = ceil(N * r),\n where r is the configured replace rate).\n For more details, please refer to:\n https://docs.nvidia.com/clara/tlt-mi/clara-train-sdk-v3.0/nvmidl/additional_features/smart_cache.html#smart-cache\n If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,\n for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset\n\n For example, if we have 5 images: `[image1, image2, image3, image4, image5]`, and `cache_num=4`, `replace_rate=0.25`.\n so the actual training images cached and replaced for every epoch are as below::\n\n epoch 1: [image1, image2, image3, image4]\n epoch 2: [image2, image3, image4, image5]\n epoch 3: [image3, image4, image5, image1]\n epoch 3: [image4, image5, image1, image2]\n epoch N: [image[N % 5] ...]\n\n The usage of `SmartCacheDataset` contains 4 steps:\n\n 1. Initialize `SmartCacheDataset` object and cache for the first epoch.\n 2. Call `start()` to run replacement thread in background.\n 3. Call `update_cache()` before every epoch to replace training items.\n 4. Call `shutdown()` when training ends.\n\n Note:\n This replacement will not work for below cases:\n 1. Set the `multiprocessing_context` of DataLoader to `spawn`.\n 2. Run on windows(the default multiprocessing method is `spawn`) with `num_workers` greater than 0.\n 3. Set the `persistent_workers` of DataLoader to `True` with `num_workers` greater than 0.\n\n If using MONAI workflows, please add `SmartCacheHandler` to the handler list of trainer,\n otherwise, please make sure to call `start()`, `update_cache()`, `shutdown()` during training.\n\n Args:\n data: input data to load and transform to generate dataset for model.\n transform: transforms to execute operations on input data.\n replace_rate: percentage of the cached items to be replaced in every epoch.\n cache_num: number of items to be cached. 
Default is `sys.maxsize`.\n will take the minimum of (cache_num, data_length x cache_rate, data_length).\n cache_rate: percentage of cached data in total, default is 1.0 (cache all).\n will take the minimum of (cache_num, data_length x cache_rate, data_length).\n num_init_workers: the number of worker threads to initialize the cache for first epoch.\n If num_init_workers is None then the number returned by os.cpu_count() is used.\n num_replace_workers: the number of worker threads to prepare the replacement cache for every epoch.\n If num_replace_workers is None then the number returned by os.cpu_count() is used.\n progress: whether to display a progress bar when caching for the first epoch.\n shuffle: whether to shuffle the whole data list before preparing the cache content for first epoch.\n seed: random seed if shuffle is `True`, default to `0`.\n \"\"\"\n\n def __init__(\n self,\n data: Sequence,\n transform: Union[Sequence[Callable], Callable],\n replace_rate: float,\n cache_num: int = sys.maxsize,\n cache_rate: float = 1.0,\n num_init_workers: Optional[int] = None,\n num_replace_workers: Optional[int] = None,\n progress: bool = True,\n shuffle: bool = True,\n seed: int = 0,\n ) -> None:\n if shuffle:\n self.set_random_state(seed=seed)\n self.randomize(data)\n\n super().__init__(data, transform, cache_num, cache_rate, num_init_workers, progress)\n if self._cache is None:\n self._cache = self._fill_cache()\n if self.cache_num >= len(data):\n warnings.warn(\n \"cache_num is greater or equal than dataset length, fall back to regular monai.data.CacheDataset.\"\n )\n if replace_rate <= 0:\n raise ValueError(\"replace_rate must be greater than 0, otherwise, please use monai.data.CacheDataset.\")\n\n self.num_replace_workers: Optional[int] = num_replace_workers\n if self.num_replace_workers is not None:\n self.num_replace_workers = max(int(self.num_replace_workers), 1)\n\n self._total_num: int = len(data)\n self._replace_num: int = min(math.ceil(self.cache_num * replace_rate), len(data) - self.cache_num)\n self._replacements: List[Any] = [None for _ in range(self._replace_num)]\n self._replace_data_idx: List[int] = list(range(self._replace_num))\n\n self._start_pos: int = 0\n self._update_lock: threading.Lock = threading.Lock()\n self._round: int = 1\n self._replace_done: bool = False\n self._replace_mgr: Optional[threading.Thread] = None\n\n self._compute_data_idx()\n\n def randomize(self, data: Sequence) -> None:\n try:\n self.R.shuffle(data)\n except TypeError as e:\n warnings.warn(f\"input data can't be shuffled in SmartCacheDataset with numpy.random.shuffle(): {e}.\")\n\n def _compute_data_idx(self):\n \"\"\"\n Update the replacement data position in the total data.\n\n \"\"\"\n for i in range(self._replace_num):\n pos: int = self._start_pos + self.cache_num + i\n if pos >= self._total_num:\n pos -= self._total_num\n self._replace_data_idx[i] = pos\n\n def is_started(self):\n \"\"\"\n Check whether the replacement thread is already started.\n\n \"\"\"\n if self._replace_mgr is None:\n return False\n return self._replace_mgr.is_alive()\n\n def start(self):\n \"\"\"\n Start the background thread to replace training items for every epoch.\n\n \"\"\"\n if self._replace_mgr is None or not self.is_started():\n self._restart()\n\n def _restart(self):\n \"\"\"\n Restart background thread if killed for some reason.\n\n \"\"\"\n self._round = 1\n self._replace_mgr = threading.Thread(target=self.manage_replacement, daemon=True)\n self._replace_mgr.start()\n\n def _try_update_cache(self):\n 
\"\"\"\n Update the cache items with new replacement for current epoch.\n\n \"\"\"\n with self._update_lock:\n if not self._replace_done:\n return False\n\n del self._cache[: self._replace_num]\n self._cache.extend(self._replacements)\n\n self._start_pos += self._replace_num\n if self._start_pos >= self._total_num:\n self._start_pos -= self._total_num\n\n self._compute_data_idx()\n\n # ready for next round\n self._round += 1\n self._replace_done = False\n return True\n\n def update_cache(self):\n \"\"\"\n Update cache items for current epoch, need to call this function before every epoch.\n If the cache has been shutdown before, need to restart the `_replace_mgr` thread.\n\n \"\"\"\n if not self._replace_mgr.is_alive():\n self._restart()\n\n # make sure update is done\n while not self._try_update_cache():\n time.sleep(0.01)\n\n def _try_shutdown(self):\n \"\"\"\n Wait for thread lock to shut down the background thread.\n\n \"\"\"\n with self._update_lock:\n if self._replace_done:\n self._round = 0\n self._replace_done = False\n return True\n return False\n\n def shutdown(self):\n \"\"\"\n Shut down the background thread for replacement.\n\n \"\"\"\n if not self.is_started():\n return\n\n # wait until replace mgr is done the current round\n while not self._try_shutdown():\n time.sleep(0.01)\n self._replace_mgr.join()\n\n def _replace_cache_thread(self, index: int):\n \"\"\"\n Execute deterministic transforms on the new data for replacement.\n\n \"\"\"\n pos: int = self._replace_data_idx[index]\n self._replacements[index] = self._load_cache_item(pos)\n\n def _compute_replacements(self):\n \"\"\"\n Compute expected items for the replacement of next epoch, execute deterministic transforms.\n It can support multi-threads to accelerate the computation progress.\n\n \"\"\"\n with ThreadPool(self.num_replace_workers) as p:\n p.map(self._replace_cache_thread, list(range(self._replace_num)))\n\n self._replace_done = True\n\n def _try_manage_replacement(self, check_round):\n \"\"\"\n Wait thread lock and replace training items in the background thread.\n\n \"\"\"\n with self._update_lock:\n if self._round <= 0:\n # shutdown replacement\n self._replace_done = True\n return True, -1\n\n if self._round != check_round:\n self._compute_replacements()\n return False, self._round\n\n def manage_replacement(self):\n \"\"\"\n Background thread for replacement.\n\n \"\"\"\n check_round: int = -1\n done = False\n while not done:\n done, check_round = self._try_manage_replacement(check_round)\n time.sleep(0.01)\n\n def __len__(self):\n \"\"\"\n The dataset length is given by cache_num instead of len(data).\n\n \"\"\"\n return self.cache_num\n\n\nclass ZipDataset(Dataset):\n \"\"\"\n Zip several PyTorch datasets and output data(with the same index) together in a tuple.\n If the output of single dataset is already a tuple, flatten it and extend to the result.\n For example: if datasetA returns (img, imgmeta), datasetB returns (seg, segmeta),\n finally return (img, imgmeta, seg, segmeta).\n And if the datasets don't have same length, use the minimum length of them as the length\n of ZipDataset.\n If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,\n for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset\n\n Examples::\n\n >>> zip_data = ZipDataset([[1, 2, 3], [4, 5]])\n >>> print(len(zip_data))\n 2\n >>> for item in zip_data:\n >>> print(item)\n [1, 4]\n [2, 5]\n\n \"\"\"\n\n def __init__(self, datasets: Sequence, 
transform: Optional[Callable] = None) -> None:\n \"\"\"\n Args:\n datasets: list of datasets to zip together.\n transform: a callable data transform operates on the zipped item from `datasets`.\n \"\"\"\n super().__init__(list(datasets), transform=transform)\n\n def __len__(self) -> int:\n return min((len(dataset) for dataset in self.data))\n\n def _transform(self, index: int):\n def to_list(x):\n return list(x) if isinstance(x, (tuple, list)) else [x]\n\n data = []\n for dataset in self.data:\n data.extend(to_list(dataset[index]))\n if self.transform is not None:\n data = apply_transform(self.transform, data, map_items=False) # transform the list data\n # use tuple instead of list as the default collate_fn callback of MONAI DataLoader flattens nested lists\n return tuple(data)\n\n\nclass ArrayDataset(Randomizable, _TorchDataset):\n \"\"\"\n Dataset for segmentation and classification tasks based on array format input data and transforms.\n It ensures the same random seeds in the randomized transforms defined for image, segmentation and label.\n The `transform` can be :py:class:`monai.transforms.Compose` or any other callable object.\n For example:\n If train based on Nifti format images without metadata, all transforms can be composed::\n\n img_transform = Compose(\n [\n LoadImage(image_only=True),\n AddChannel(),\n RandAdjustContrast()\n ]\n )\n ArrayDataset(img_file_list, img_transform=img_transform)\n\n If training based on images and the metadata, the array transforms can not be composed\n because several transforms receives multiple parameters or return multiple values. Then Users need\n to define their own callable method to parse metadata from `LoadImage` or set `affine` matrix\n to `Spacing` transform::\n\n class TestCompose(Compose):\n def __call__(self, input_):\n img, metadata = self.transforms[0](input_)\n img = self.transforms[1](img)\n img, _, _ = self.transforms[2](img, metadata[\"affine\"])\n return self.transforms[3](img), metadata\n img_transform = TestCompose(\n [\n LoadImage(image_only=False),\n AddChannel(),\n Spacing(pixdim=(1.5, 1.5, 3.0)),\n RandAdjustContrast()\n ]\n )\n ArrayDataset(img_file_list, img_transform=img_transform)\n\n Examples::\n\n >>> ds = ArrayDataset([1, 2, 3, 4], lambda x: x + 0.1)\n >>> print(ds[0])\n 1.1\n\n >>> ds = ArrayDataset(img=[1, 2, 3, 4], seg=[5, 6, 7, 8])\n >>> print(ds[0])\n [1, 5]\n\n \"\"\"\n\n def __init__(\n self,\n img: Sequence,\n img_transform: Optional[Callable] = None,\n seg: Optional[Sequence] = None,\n seg_transform: Optional[Callable] = None,\n labels: Optional[Sequence] = None,\n label_transform: Optional[Callable] = None,\n ) -> None:\n \"\"\"\n Initializes the dataset with the filename lists. 
The transform `img_transform` is applied\n to the images and `seg_transform` to the segmentations.\n\n Args:\n img: sequence of images.\n img_transform: transform to apply to each element in `img`.\n seg: sequence of segmentations.\n seg_transform: transform to apply to each element in `seg`.\n labels: sequence of labels.\n label_transform: transform to apply to each element in `labels`.\n\n \"\"\"\n items = [(img, img_transform), (seg, seg_transform), (labels, label_transform)]\n self.set_random_state(seed=get_seed())\n datasets = [Dataset(x[0], x[1]) for x in items if x[0] is not None]\n self.dataset = datasets[0] if len(datasets) == 1 else ZipDataset(datasets)\n\n self._seed = 0 # transform synchronization seed\n\n def __len__(self) -> int:\n return len(self.dataset)\n\n def randomize(self, data: Optional[Any] = None) -> None:\n self._seed = self.R.randint(MAX_SEED, dtype=\"uint32\")\n\n def __getitem__(self, index: int):\n self.randomize()\n if isinstance(self.dataset, ZipDataset):\n # set transforms of each zip component\n for dataset in self.dataset.data:\n transform = getattr(dataset, \"transform\", None)\n if isinstance(transform, Randomizable):\n transform.set_random_state(seed=self._seed)\n transform = getattr(self.dataset, \"transform\", None)\n if isinstance(transform, Randomizable):\n transform.set_random_state(seed=self._seed)\n return self.dataset[index]\n\n\nclass NPZDictItemDataset(Dataset):\n \"\"\"\n Represents a dataset from a loaded NPZ file. The members of the file to load are named in the keys of `keys` and\n stored under the keyed name. All loaded arrays must have the same 0-dimension (batch) size. Items are always dicts\n mapping names to an item extracted from the loaded arrays.\n If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,\n for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset\n\n Args:\n npzfile: Path to .npz file or stream containing .npz file data\n keys: Maps keys to load from file to name to store in dataset\n transform: Transform to apply to batch dict\n other_keys: secondary data to load from file and store in dict `other_keys`, not returned by __getitem__\n \"\"\"\n\n def __init__(\n self,\n npzfile: Union[str, IO],\n keys: Dict[str, str],\n transform: Optional[Callable] = None,\n other_keys: Optional[Sequence[str]] = (),\n ):\n self.npzfile: Union[str, IO] = npzfile if isinstance(npzfile, str) else \"STREAM\"\n self.keys: Dict[str, str] = dict(keys)\n dat = np.load(npzfile)\n\n self.arrays = {storedk: dat[datak] for datak, storedk in self.keys.items()}\n self.length = self.arrays[first(self.keys.values())].shape[0]\n\n self.other_keys = {} if other_keys is None else {k: dat[k] for k in other_keys}\n\n for k, v in self.arrays.items():\n if v.shape[0] != self.length:\n raise ValueError(\n \"All loaded arrays must have the same first dimension \"\n f\"size {self.length}, array `{k}` has size {v.shape[0]}\"\n )\n\n super().__init__([], transform)\n\n def __len__(self):\n return self.length\n\n def _transform(self, index: int):\n data = {k: v[index] for k, v in self.arrays.items()}\n\n if self.transform is not None:\n data = apply_transform(self.transform, data)\n\n return data\n", "path": "monai/data/dataset.py" } ]
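Two details of the `PersistentDataset` code in the record above are worth isolating: the cache directory is created with `mkdir(parents=True)` (the after_files record that follows adds `exist_ok=True`, so concurrent workers do not race on creation), and cache entries are written to a temporary directory and then moved into place with `shutil.move`, so a killed process never leaves a half-written `.pt` file behind. A stripped-down, MONAI-independent sketch of that write pattern, with hypothetical names, is::

    import shutil
    import tempfile
    from pathlib import Path

    import torch


    def atomic_torch_save(obj, target: Path) -> None:
        """Serialize `obj` to `target` so readers never observe a partially written file."""
        # Creating the parent with exist_ok=True avoids a race between concurrent workers.
        target.parent.mkdir(parents=True, exist_ok=True)
        with tempfile.TemporaryDirectory(dir=target.parent) as tmpdir:
            tmp_path = Path(tmpdir) / target.name
            torch.save(obj, tmp_path)
            try:
                # On POSIX, moving within one filesystem is a rename and replaces the target atomically.
                shutil.move(str(tmp_path), str(target))
            except FileExistsError:
                pass  # another worker finished first; keep its copy


    if __name__ == "__main__":
        atomic_torch_save({"x": torch.ones(3)}, Path("cache") / "example.pt")
        print(torch.load(Path("cache") / "example.pt"))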
[ { "content": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport collections.abc\nimport math\nimport pickle\nimport shutil\nimport sys\nimport tempfile\nimport threading\nimport time\nimport warnings\nfrom copy import deepcopy\nfrom multiprocessing.pool import ThreadPool\nfrom pathlib import Path\nfrom typing import IO, TYPE_CHECKING, Any, Callable, Dict, List, Optional, Sequence, Union\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset as _TorchDataset\nfrom torch.utils.data import Subset\n\nfrom monai.data.utils import first, pickle_hashing\nfrom monai.transforms import Compose, Randomizable, ThreadUnsafe, Transform, apply_transform\nfrom monai.utils import MAX_SEED, get_seed, min_version, optional_import\n\nif TYPE_CHECKING:\n from tqdm import tqdm\n\n has_tqdm = True\nelse:\n tqdm, has_tqdm = optional_import(\"tqdm\", \"4.47.0\", min_version, \"tqdm\")\n\nlmdb, _ = optional_import(\"lmdb\")\n\n\nclass Dataset(_TorchDataset):\n \"\"\"\n A generic dataset with a length property and an optional callable data transform\n when fetching a data sample.\n If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,\n for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset\n\n For example, typical input data can be a list of dictionaries::\n\n [{ { {\n 'img': 'image1.nii.gz', 'img': 'image2.nii.gz', 'img': 'image3.nii.gz',\n 'seg': 'label1.nii.gz', 'seg': 'label2.nii.gz', 'seg': 'label3.nii.gz',\n 'extra': 123 'extra': 456 'extra': 789\n }, }, }]\n \"\"\"\n\n def __init__(self, data: Sequence, transform: Optional[Callable] = None) -> None:\n \"\"\"\n Args:\n data: input data to load and transform to generate dataset for model.\n transform: a callable data transform on input data.\n\n \"\"\"\n self.data = data\n self.transform = transform\n\n def __len__(self) -> int:\n return len(self.data)\n\n def _transform(self, index: int):\n \"\"\"\n Fetch single data item from `self.data`.\n \"\"\"\n data_i = self.data[index]\n return apply_transform(self.transform, data_i) if self.transform is not None else data_i\n\n def __getitem__(self, index: Union[int, slice, Sequence[int]]):\n \"\"\"\n Returns a `Subset` if `index` is a slice or Sequence, a data item otherwise.\n \"\"\"\n if isinstance(index, slice):\n # dataset[:42]\n start, stop, step = index.indices(len(self))\n indices = range(start, stop, step)\n return Subset(dataset=self, indices=indices)\n if isinstance(index, collections.abc.Sequence):\n # dataset[[1, 3, 4]]\n return Subset(dataset=self, indices=index)\n return self._transform(index)\n\n\nclass PersistentDataset(Dataset):\n \"\"\"\n Persistent storage of pre-computed values to efficiently manage larger than memory dictionary format data,\n it can operate transforms for specific fields. 
Results from the non-random transform components are computed\n when first used, and stored in the `cache_dir` for rapid retrieval on subsequent uses.\n If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,\n for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset\n\n For example, typical input data can be a list of dictionaries::\n\n [{ { {\n 'img': 'image1.nii.gz', 'img': 'image2.nii.gz', 'img': 'image3.nii.gz',\n 'seg': 'label1.nii.gz', 'seg': 'label2.nii.gz', 'seg': 'label3.nii.gz',\n 'extra': 123 'extra': 456 'extra': 789\n }, }, }]\n\n For a composite transform like\n\n .. code-block:: python\n\n [ LoadImaged(keys=['image', 'label']),\n Orientationd(keys=['image', 'label'], axcodes='RAS'),\n ScaleIntensityRanged(keys=['image'], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),\n RandCropByPosNegLabeld(keys=['image', 'label'], label_key='label', spatial_size=(96, 96, 96),\n pos=1, neg=1, num_samples=4, image_key='image', image_threshold=0),\n ToTensord(keys=['image', 'label'])]\n\n Upon first use a filename based dataset will be processed by the transform for the\n [LoadImaged, Orientationd, ScaleIntensityRanged] and the resulting tensor written to\n the `cache_dir` before applying the remaining random dependant transforms\n [RandCropByPosNegLabeld, ToTensord] elements for use in the analysis.\n\n Subsequent uses of a dataset directly read pre-processed results from `cache_dir`\n followed by applying the random dependant parts of transform processing.\n\n Note:\n The input data must be a list of file paths and will hash them as cache keys.\n\n When loading persistent cache content, it can't guarantee the cached data matches current\n transform chain, so please make sure to use exactly the same non-random transforms and the\n args as the cache content, otherwise, it may cause unexpected errors.\n\n \"\"\"\n\n def __init__(\n self,\n data: Sequence,\n transform: Union[Sequence[Callable], Callable],\n cache_dir: Optional[Union[Path, str]],\n hash_func: Callable[..., bytes] = pickle_hashing,\n ) -> None:\n \"\"\"\n Args:\n data: input data file paths to load and transform to generate dataset for model.\n `PersistentDataset` expects input data to be a list of serializable\n and hashes them as cache keys using `hash_func`.\n transform: transforms to execute operations on input data.\n cache_dir: If specified, this is the location for persistent storage\n of pre-computed transformed data tensors. The cache_dir is computed once, and\n persists on disk until explicitly removed. 
Different runs, programs, experiments\n may share a common cache dir provided that the transforms pre-processing is consistent.\n If `cache_dir` doesn't exist, will automatically create it.\n If `cache_dir` is `None`, there is effectively no caching.\n hash_func: a callable to compute hash from data items to be cached.\n defaults to `monai.data.utils.pickle_hashing`.\n\n \"\"\"\n if not isinstance(transform, Compose):\n transform = Compose(transform)\n super().__init__(data=data, transform=transform)\n self.cache_dir = Path(cache_dir) if cache_dir is not None else None\n self.hash_func = hash_func\n if self.cache_dir is not None:\n if not self.cache_dir.exists():\n self.cache_dir.mkdir(parents=True, exist_ok=True)\n if not self.cache_dir.is_dir():\n raise ValueError(\"cache_dir must be a directory.\")\n\n def _pre_transform(self, item_transformed):\n \"\"\"\n Process the data from original state up to the first random element.\n\n Args:\n item_transformed: The data to be transformed\n\n Returns:\n the transformed element up to the first identified\n random transform object\n\n \"\"\"\n for _transform in self.transform.transforms: # type:ignore\n # execute all the deterministic transforms\n if isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):\n break\n # this is to be consistent with CacheDataset even though it's not in a multi-thread situation.\n _xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform\n item_transformed = apply_transform(_xform, item_transformed)\n return item_transformed\n\n def _post_transform(self, item_transformed):\n \"\"\"\n Process the data from before the first random transform to the final state ready for evaluation.\n\n Args:\n item_transformed: The data to be transformed (already processed up to the first random transform)\n\n Returns:\n the transformed element through the random transforms\n\n \"\"\"\n if not isinstance(self.transform, Compose):\n raise ValueError(\"transform must be an instance of monai.transforms.Compose.\")\n start_post_randomize_run = False\n for _transform in self.transform.transforms:\n if (\n start_post_randomize_run\n or isinstance(_transform, Randomizable)\n or not isinstance(_transform, Transform)\n ):\n start_post_randomize_run = True\n item_transformed = apply_transform(_transform, item_transformed)\n return item_transformed\n\n def _cachecheck(self, item_transformed):\n \"\"\"\n A function to cache the expensive input data transform operations\n so that huge data sets (larger than computer memory) can be processed\n on the fly as needed, and intermediate results written to disk for\n future use.\n\n Args:\n item_transformed: The current data element to be mutated into transformed representation\n\n Returns:\n The transformed data_element, either from cache, or explicitly computing it.\n\n Warning:\n The current implementation does not encode transform information as part of the\n hashing mechanism used for generating cache names. If the transforms applied are\n changed in any way, the objects in the cache dir will be invalid. 
The hash for the\n cache is ONLY dependant on the input filename paths.\n\n \"\"\"\n hashfile = None\n if self.cache_dir is not None:\n data_item_md5 = self.hash_func(item_transformed).decode(\"utf-8\")\n hashfile = self.cache_dir / f\"{data_item_md5}.pt\"\n\n if hashfile is not None and hashfile.is_file(): # cache hit\n return torch.load(hashfile)\n\n _item_transformed = self._pre_transform(deepcopy(item_transformed)) # keep the original hashed\n if hashfile is not None:\n # NOTE: Writing to a temporary directory and then using a nearly atomic rename operation\n # to make the cache more robust to manual killing of parent process\n # which may leave partially written cache files in an incomplete state\n with tempfile.TemporaryDirectory() as tmpdirname:\n temp_hash_file = Path(tmpdirname) / hashfile.name\n torch.save(_item_transformed, temp_hash_file)\n if temp_hash_file.is_file() and not hashfile.is_file():\n # On Unix, if target exists and is a file, it will be replaced silently if the user has permission.\n # for more details: https://docs.python.org/3/library/shutil.html#shutil.move.\n try:\n shutil.move(temp_hash_file, hashfile)\n except FileExistsError:\n pass\n return _item_transformed\n\n def _transform(self, index: int):\n pre_random_item = self._cachecheck(self.data[index])\n return self._post_transform(pre_random_item)\n\n\nclass CacheNTransDataset(PersistentDataset):\n \"\"\"\n Extension of `PersistentDataset`, tt can also cache the result of first N transforms, no matter it's random or not.\n\n \"\"\"\n\n def __init__(\n self,\n data: Sequence,\n transform: Union[Sequence[Callable], Callable],\n cache_n_trans: int,\n cache_dir: Optional[Union[Path, str]],\n hash_func: Callable[..., bytes] = pickle_hashing,\n ) -> None:\n \"\"\"\n Args:\n data: input data file paths to load and transform to generate dataset for model.\n `PersistentDataset` expects input data to be a list of serializable\n and hashes them as cache keys using `hash_func`.\n transform: transforms to execute operations on input data.\n cache_n_trans: cache the result of first N transforms.\n cache_dir: If specified, this is the location for persistent storage\n of pre-computed transformed data tensors. The cache_dir is computed once, and\n persists on disk until explicitly removed. 
Different runs, programs, experiments\n may share a common cache dir provided that the transforms pre-processing is consistent.\n If `cache_dir` doesn't exist, will automatically create it.\n If `cache_dir` is `None`, there is effectively no caching.\n hash_func: a callable to compute hash from data items to be cached.\n defaults to `monai.data.utils.pickle_hashing`.\n\n \"\"\"\n super().__init__(data=data, transform=transform, cache_dir=cache_dir, hash_func=hash_func)\n self.cache_n_trans = cache_n_trans\n\n def _pre_transform(self, item_transformed):\n \"\"\"\n Process the data from original state up to the N element.\n\n Args:\n item_transformed: The data to be transformed\n\n Returns:\n the transformed element up to the N transform object\n \"\"\"\n if not isinstance(self.transform, Compose):\n raise ValueError(\"transform must be an instance of monai.transforms.Compose.\")\n for i, _transform in enumerate(self.transform.transforms):\n if i == self.cache_n_trans:\n break\n _xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform\n item_transformed = apply_transform(_xform, item_transformed)\n return item_transformed\n\n def _post_transform(self, item_transformed):\n \"\"\"\n Process the data from before the N + 1 transform to the final state ready for evaluation.\n\n Args:\n item_transformed: The data to be transformed (already processed up to the first N transform)\n\n Returns:\n the final transformed result\n \"\"\"\n if not isinstance(self.transform, Compose):\n raise ValueError(\"transform must be an instance of monai.transforms.Compose.\")\n for i, _transform in enumerate(self.transform.transforms):\n if i >= self.cache_n_trans:\n item_transformed = apply_transform(_transform, item_transformed)\n return item_transformed\n\n\nclass LMDBDataset(PersistentDataset):\n \"\"\"\n Extension of `PersistentDataset` using LMDB as the backend.\n\n See Also:\n :py:class:`monai.data.PersistentDataset`\n\n Examples:\n\n >>> items = [{\"data\": i} for i in range(5)]\n # [{'data': 0}, {'data': 1}, {'data': 2}, {'data': 3}, {'data': 4}]\n >>> lmdb_ds = monai.data.LMDBDataset(items, transform=monai.transforms.SimulateDelayd(\"data\", delay_time=1))\n >>> print(list(lmdb_ds)) # using the cached results\n\n \"\"\"\n\n def __init__(\n self,\n data: Sequence,\n transform: Union[Sequence[Callable], Callable],\n cache_dir: Union[Path, str] = \"cache\",\n hash_func: Callable[..., bytes] = pickle_hashing,\n db_name: str = \"monai_cache\",\n progress: bool = True,\n pickle_protocol=pickle.HIGHEST_PROTOCOL,\n lmdb_kwargs: Optional[dict] = None,\n ) -> None:\n \"\"\"\n Args:\n data: input data file paths to load and transform to generate dataset for model.\n `LMDBDataset` expects input data to be a list of serializable\n and hashes them as cache keys using `hash_func`.\n transform: transforms to execute operations on input data.\n cache_dir: if specified, this is the location for persistent storage\n of pre-computed transformed data tensors. The cache_dir is computed once, and\n persists on disk until explicitly removed. Different runs, programs, experiments\n may share a common cache dir provided that the transforms pre-processing is consistent.\n If the cache_dir doesn't exist, will automatically create it. Defaults to \"./cache\".\n hash_func: a callable to compute hash from data items to be cached.\n defaults to `monai.data.utils.pickle_hashing`.\n db_name: lmdb database file name. 
Defaults to \"monai_cache\".\n progress: whether to display a progress bar.\n pickle_protocol: pickle protocol version. Defaults to pickle.HIGHEST_PROTOCOL.\n https://docs.python.org/3/library/pickle.html#pickle-protocols\n lmdb_kwargs: additional keyword arguments to the lmdb environment.\n for more details please visit: https://lmdb.readthedocs.io/en/release/#environment-class\n \"\"\"\n super().__init__(data=data, transform=transform, cache_dir=cache_dir, hash_func=hash_func)\n self.progress = progress\n if not self.cache_dir:\n raise ValueError(\"cache_dir must be specified.\")\n self.db_file = self.cache_dir / f\"{db_name}.lmdb\"\n self.pickle_protocol = pickle_protocol\n self.lmdb_kwargs = lmdb_kwargs or {}\n if not self.lmdb_kwargs.get(\"map_size\", 0):\n self.lmdb_kwargs[\"map_size\"] = 1024 ** 4 # default map_size\n self._read_env = None\n print(f\"Accessing lmdb file: {self.db_file.absolute()}.\")\n\n def _fill_cache_start_reader(self):\n # create cache\n self.lmdb_kwargs[\"readonly\"] = False\n env = lmdb.open(path=f\"{self.db_file}\", subdir=False, **self.lmdb_kwargs)\n if self.progress and not has_tqdm:\n warnings.warn(\"LMDBDataset: tqdm is not installed. not displaying the caching progress.\")\n for item in tqdm(self.data) if has_tqdm and self.progress else self.data:\n key = self.hash_func(item)\n done, retry, val = False, 5, None\n while not done and retry > 0:\n try:\n with env.begin(write=True) as txn:\n with txn.cursor() as cursor:\n done = cursor.set_key(key)\n if done:\n continue\n if val is None:\n val = self._pre_transform(deepcopy(item)) # keep the original hashed\n val = pickle.dumps(val, protocol=self.pickle_protocol)\n txn.put(key, val)\n done = True\n except lmdb.MapFullError:\n done, retry = False, retry - 1\n size = env.info()[\"map_size\"]\n new_size = size * 2\n warnings.warn(f\"Resizing the cache database from {int(size) >> 20}MB to {int(new_size) >> 20}MB.\")\n env.set_mapsize(new_size)\n except lmdb.MapResizedError:\n # the mapsize is increased by another process\n # set_mapsize with a size of 0 to adopt the new size,\n env.set_mapsize(0)\n if not done: # still has the map full error\n size = env.info()[\"map_size\"]\n env.close()\n raise ValueError(f\"LMDB map size reached, increase size above current size of {size}.\")\n size = env.info()[\"map_size\"]\n env.close()\n # read-only database env\n self.lmdb_kwargs[\"readonly\"] = True\n self.lmdb_kwargs[\"map_size\"] = size\n if self.lmdb_kwargs.get(\"lock\", None) is None:\n self.lmdb_kwargs[\"lock\"] = False\n if self.lmdb_kwargs.get(\"readahead\", None) is None:\n self.lmdb_kwargs[\"readahead\"] = False\n return lmdb.open(path=f\"{self.db_file}\", subdir=False, **self.lmdb_kwargs)\n\n def _cachecheck(self, item_transformed):\n \"\"\"\n if the item is not found in the lmdb file, resolves to the persistent cache default behaviour.\n\n \"\"\"\n if self._read_env is None:\n self._read_env = self._fill_cache_start_reader()\n with self._read_env.begin(write=False) as txn:\n data = txn.get(self.hash_func(item_transformed))\n if data is None:\n warnings.warn(\"LMDBDataset: cache key not found, running fallback caching.\")\n return super()._cachecheck(item_transformed)\n try:\n return pickle.loads(data)\n except Exception as err:\n raise RuntimeError(\"Invalid cache value, corrupted lmdb file?\") from err\n\n def info(self):\n \"\"\"\n Returns: dataset info dictionary.\n\n \"\"\"\n if self._read_env is None:\n self._read_env = self._fill_cache_start_reader()\n out = dict(self._read_env.info())\n out[\"size\"] = 
len(self.data)\n out[\"filename\"] = f\"{self.db_file.absolute()}\"\n return out\n\n\nclass CacheDataset(Dataset):\n \"\"\"\n Dataset with cache mechanism that can load data and cache deterministic transforms' result during training.\n\n By caching the results of non-random preprocessing transforms, it accelerates the training data pipeline.\n If the requested data is not in the cache, all transforms will run normally\n (see also :py:class:`monai.data.dataset.Dataset`).\n\n Users can set the cache rate or number of items to cache.\n It is recommended to experiment with different `cache_num` or `cache_rate` to identify the best training speed.\n\n To improve the caching efficiency, please always put as many as possible non-random transforms\n before the randomized ones when composing the chain of transforms.\n If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,\n for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset\n\n For example, if the transform is a `Compose` of::\n\n transforms = Compose([\n LoadImaged(),\n AddChanneld(),\n Spacingd(),\n Orientationd(),\n ScaleIntensityRanged(),\n RandCropByPosNegLabeld(),\n ToTensord()\n ])\n\n when `transforms` is used in a multi-epoch training pipeline, before the first training epoch,\n this dataset will cache the results up to ``ScaleIntensityRanged``, as\n all non-random transforms `LoadImaged`, `AddChanneld`, `Spacingd`, `Orientationd`, `ScaleIntensityRanged`\n can be cached. During training, the dataset will load the cached results and run\n ``RandCropByPosNegLabeld`` and ``ToTensord``, as ``RandCropByPosNegLabeld`` is a randomized transform\n and the outcome not cached.\n\n Note:\n `CacheDataset` executes non-random transforms and prepares cache content in the main process before\n the first epoch, then all the subprocesses of DataLoader will read the same cache content in the main process\n during training. it may take a long time to prepare cache content according to the size of expected cache data.\n So to debug or verify the program before real training, users can set `cache_rate=0.0` or `cache_num=0` to\n temporarily skip caching.\n\n \"\"\"\n\n def __init__(\n self,\n data: Sequence,\n transform: Union[Sequence[Callable], Callable],\n cache_num: int = sys.maxsize,\n cache_rate: float = 1.0,\n num_workers: Optional[int] = None,\n progress: bool = True,\n ) -> None:\n \"\"\"\n Args:\n data: input data to load and transform to generate dataset for model.\n transform: transforms to execute operations on input data.\n cache_num: number of items to be cached. 
Default is `sys.maxsize`.\n will take the minimum of (cache_num, data_length x cache_rate, data_length).\n cache_rate: percentage of cached data in total, default is 1.0 (cache all).\n will take the minimum of (cache_num, data_length x cache_rate, data_length).\n num_workers: the number of worker processes to use.\n If num_workers is None then the number returned by os.cpu_count() is used.\n progress: whether to display a progress bar.\n \"\"\"\n if not isinstance(transform, Compose):\n transform = Compose(transform)\n super().__init__(data=data, transform=transform)\n self.progress = progress\n self.cache_num = min(int(cache_num), int(len(data) * cache_rate), len(data))\n self.num_workers = num_workers\n if self.num_workers is not None:\n self.num_workers = max(int(self.num_workers), 1)\n self._cache: List = self._fill_cache()\n\n def _fill_cache(self) -> List:\n if self.cache_num <= 0:\n return []\n if self.progress and not has_tqdm:\n warnings.warn(\"tqdm is not installed, will not show the caching progress bar.\")\n with ThreadPool(self.num_workers) as p:\n if self.progress and has_tqdm:\n return list(\n tqdm(\n p.imap(self._load_cache_item, range(self.cache_num)),\n total=self.cache_num,\n desc=\"Loading dataset\",\n )\n )\n return list(p.imap(self._load_cache_item, range(self.cache_num)))\n\n def _load_cache_item(self, idx: int):\n \"\"\"\n Args:\n idx: the index of the input data sequence.\n \"\"\"\n item = self.data[idx]\n for _transform in self.transform.transforms: # type:ignore\n # execute all the deterministic transforms\n if isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):\n break\n _xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform\n item = apply_transform(_xform, item)\n return item\n\n def _transform(self, index: int):\n if index % len(self) >= self.cache_num: # support negative index\n # no cache for this index, execute all the transforms directly\n return super()._transform(index)\n # load data from cache and execute from the first random transform\n start_run = False\n if self._cache is None:\n self._cache = self._fill_cache()\n data = self._cache[index]\n if not isinstance(self.transform, Compose):\n raise ValueError(\"transform must be an instance of monai.transforms.Compose.\")\n for _transform in self.transform.transforms:\n if start_run or isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):\n # only need to deep copy data on first non-deterministic transform\n if not start_run:\n start_run = True\n data = deepcopy(data)\n data = apply_transform(_transform, data)\n return data\n\n\nclass SmartCacheDataset(Randomizable, CacheDataset):\n \"\"\"\n Re-implementation of the SmartCache mechanism in NVIDIA Clara-train SDK.\n At any time, the cache pool only keeps a subset of the whole dataset. In each epoch, only the items\n in the cache are used for training. This ensures that data needed for training is readily available,\n keeping GPU resources busy. Note that cached items may still have to go through a non-deterministic\n transform sequence before being fed to GPU. At the same time, another thread is preparing replacement\n items by applying the transform sequence to items not in cache. 
Once one epoch is completed, Smart\n Cache replaces the same number of items with replacement items.\n Smart Cache uses a simple `running window` algorithm to determine the cache content and replacement items.\n Let N be the configured number of objects in cache; and R be the number of replacement objects (R = ceil(N * r),\n where r is the configured replace rate).\n For more details, please refer to:\n https://docs.nvidia.com/clara/tlt-mi/clara-train-sdk-v3.0/nvmidl/additional_features/smart_cache.html#smart-cache\n If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,\n for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset\n\n For example, if we have 5 images: `[image1, image2, image3, image4, image5]`, and `cache_num=4`, `replace_rate=0.25`.\n so the actual training images cached and replaced for every epoch are as below::\n\n epoch 1: [image1, image2, image3, image4]\n epoch 2: [image2, image3, image4, image5]\n epoch 3: [image3, image4, image5, image1]\n epoch 3: [image4, image5, image1, image2]\n epoch N: [image[N % 5] ...]\n\n The usage of `SmartCacheDataset` contains 4 steps:\n\n 1. Initialize `SmartCacheDataset` object and cache for the first epoch.\n 2. Call `start()` to run replacement thread in background.\n 3. Call `update_cache()` before every epoch to replace training items.\n 4. Call `shutdown()` when training ends.\n\n Note:\n This replacement will not work for below cases:\n 1. Set the `multiprocessing_context` of DataLoader to `spawn`.\n 2. Run on windows(the default multiprocessing method is `spawn`) with `num_workers` greater than 0.\n 3. Set the `persistent_workers` of DataLoader to `True` with `num_workers` greater than 0.\n\n If using MONAI workflows, please add `SmartCacheHandler` to the handler list of trainer,\n otherwise, please make sure to call `start()`, `update_cache()`, `shutdown()` during training.\n\n Args:\n data: input data to load and transform to generate dataset for model.\n transform: transforms to execute operations on input data.\n replace_rate: percentage of the cached items to be replaced in every epoch.\n cache_num: number of items to be cached. 
Default is `sys.maxsize`.\n will take the minimum of (cache_num, data_length x cache_rate, data_length).\n cache_rate: percentage of cached data in total, default is 1.0 (cache all).\n will take the minimum of (cache_num, data_length x cache_rate, data_length).\n num_init_workers: the number of worker threads to initialize the cache for first epoch.\n If num_init_workers is None then the number returned by os.cpu_count() is used.\n num_replace_workers: the number of worker threads to prepare the replacement cache for every epoch.\n If num_replace_workers is None then the number returned by os.cpu_count() is used.\n progress: whether to display a progress bar when caching for the first epoch.\n shuffle: whether to shuffle the whole data list before preparing the cache content for first epoch.\n seed: random seed if shuffle is `True`, default to `0`.\n \"\"\"\n\n def __init__(\n self,\n data: Sequence,\n transform: Union[Sequence[Callable], Callable],\n replace_rate: float,\n cache_num: int = sys.maxsize,\n cache_rate: float = 1.0,\n num_init_workers: Optional[int] = None,\n num_replace_workers: Optional[int] = None,\n progress: bool = True,\n shuffle: bool = True,\n seed: int = 0,\n ) -> None:\n if shuffle:\n self.set_random_state(seed=seed)\n self.randomize(data)\n\n super().__init__(data, transform, cache_num, cache_rate, num_init_workers, progress)\n if self._cache is None:\n self._cache = self._fill_cache()\n if self.cache_num >= len(data):\n warnings.warn(\n \"cache_num is greater or equal than dataset length, fall back to regular monai.data.CacheDataset.\"\n )\n if replace_rate <= 0:\n raise ValueError(\"replace_rate must be greater than 0, otherwise, please use monai.data.CacheDataset.\")\n\n self.num_replace_workers: Optional[int] = num_replace_workers\n if self.num_replace_workers is not None:\n self.num_replace_workers = max(int(self.num_replace_workers), 1)\n\n self._total_num: int = len(data)\n self._replace_num: int = min(math.ceil(self.cache_num * replace_rate), len(data) - self.cache_num)\n self._replacements: List[Any] = [None for _ in range(self._replace_num)]\n self._replace_data_idx: List[int] = list(range(self._replace_num))\n\n self._start_pos: int = 0\n self._update_lock: threading.Lock = threading.Lock()\n self._round: int = 1\n self._replace_done: bool = False\n self._replace_mgr: Optional[threading.Thread] = None\n\n self._compute_data_idx()\n\n def randomize(self, data: Sequence) -> None:\n try:\n self.R.shuffle(data)\n except TypeError as e:\n warnings.warn(f\"input data can't be shuffled in SmartCacheDataset with numpy.random.shuffle(): {e}.\")\n\n def _compute_data_idx(self):\n \"\"\"\n Update the replacement data position in the total data.\n\n \"\"\"\n for i in range(self._replace_num):\n pos: int = self._start_pos + self.cache_num + i\n if pos >= self._total_num:\n pos -= self._total_num\n self._replace_data_idx[i] = pos\n\n def is_started(self):\n \"\"\"\n Check whether the replacement thread is already started.\n\n \"\"\"\n if self._replace_mgr is None:\n return False\n return self._replace_mgr.is_alive()\n\n def start(self):\n \"\"\"\n Start the background thread to replace training items for every epoch.\n\n \"\"\"\n if self._replace_mgr is None or not self.is_started():\n self._restart()\n\n def _restart(self):\n \"\"\"\n Restart background thread if killed for some reason.\n\n \"\"\"\n self._round = 1\n self._replace_mgr = threading.Thread(target=self.manage_replacement, daemon=True)\n self._replace_mgr.start()\n\n def _try_update_cache(self):\n 
\"\"\"\n Update the cache items with new replacement for current epoch.\n\n \"\"\"\n with self._update_lock:\n if not self._replace_done:\n return False\n\n del self._cache[: self._replace_num]\n self._cache.extend(self._replacements)\n\n self._start_pos += self._replace_num\n if self._start_pos >= self._total_num:\n self._start_pos -= self._total_num\n\n self._compute_data_idx()\n\n # ready for next round\n self._round += 1\n self._replace_done = False\n return True\n\n def update_cache(self):\n \"\"\"\n Update cache items for current epoch, need to call this function before every epoch.\n If the cache has been shutdown before, need to restart the `_replace_mgr` thread.\n\n \"\"\"\n if not self._replace_mgr.is_alive():\n self._restart()\n\n # make sure update is done\n while not self._try_update_cache():\n time.sleep(0.01)\n\n def _try_shutdown(self):\n \"\"\"\n Wait for thread lock to shut down the background thread.\n\n \"\"\"\n with self._update_lock:\n if self._replace_done:\n self._round = 0\n self._replace_done = False\n return True\n return False\n\n def shutdown(self):\n \"\"\"\n Shut down the background thread for replacement.\n\n \"\"\"\n if not self.is_started():\n return\n\n # wait until replace mgr is done the current round\n while not self._try_shutdown():\n time.sleep(0.01)\n self._replace_mgr.join()\n\n def _replace_cache_thread(self, index: int):\n \"\"\"\n Execute deterministic transforms on the new data for replacement.\n\n \"\"\"\n pos: int = self._replace_data_idx[index]\n self._replacements[index] = self._load_cache_item(pos)\n\n def _compute_replacements(self):\n \"\"\"\n Compute expected items for the replacement of next epoch, execute deterministic transforms.\n It can support multi-threads to accelerate the computation progress.\n\n \"\"\"\n with ThreadPool(self.num_replace_workers) as p:\n p.map(self._replace_cache_thread, list(range(self._replace_num)))\n\n self._replace_done = True\n\n def _try_manage_replacement(self, check_round):\n \"\"\"\n Wait thread lock and replace training items in the background thread.\n\n \"\"\"\n with self._update_lock:\n if self._round <= 0:\n # shutdown replacement\n self._replace_done = True\n return True, -1\n\n if self._round != check_round:\n self._compute_replacements()\n return False, self._round\n\n def manage_replacement(self):\n \"\"\"\n Background thread for replacement.\n\n \"\"\"\n check_round: int = -1\n done = False\n while not done:\n done, check_round = self._try_manage_replacement(check_round)\n time.sleep(0.01)\n\n def __len__(self):\n \"\"\"\n The dataset length is given by cache_num instead of len(data).\n\n \"\"\"\n return self.cache_num\n\n\nclass ZipDataset(Dataset):\n \"\"\"\n Zip several PyTorch datasets and output data(with the same index) together in a tuple.\n If the output of single dataset is already a tuple, flatten it and extend to the result.\n For example: if datasetA returns (img, imgmeta), datasetB returns (seg, segmeta),\n finally return (img, imgmeta, seg, segmeta).\n And if the datasets don't have same length, use the minimum length of them as the length\n of ZipDataset.\n If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,\n for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset\n\n Examples::\n\n >>> zip_data = ZipDataset([[1, 2, 3], [4, 5]])\n >>> print(len(zip_data))\n 2\n >>> for item in zip_data:\n >>> print(item)\n [1, 4]\n [2, 5]\n\n \"\"\"\n\n def __init__(self, datasets: Sequence, 
transform: Optional[Callable] = None) -> None:\n \"\"\"\n Args:\n datasets: list of datasets to zip together.\n transform: a callable data transform operates on the zipped item from `datasets`.\n \"\"\"\n super().__init__(list(datasets), transform=transform)\n\n def __len__(self) -> int:\n return min((len(dataset) for dataset in self.data))\n\n def _transform(self, index: int):\n def to_list(x):\n return list(x) if isinstance(x, (tuple, list)) else [x]\n\n data = []\n for dataset in self.data:\n data.extend(to_list(dataset[index]))\n if self.transform is not None:\n data = apply_transform(self.transform, data, map_items=False) # transform the list data\n # use tuple instead of list as the default collate_fn callback of MONAI DataLoader flattens nested lists\n return tuple(data)\n\n\nclass ArrayDataset(Randomizable, _TorchDataset):\n \"\"\"\n Dataset for segmentation and classification tasks based on array format input data and transforms.\n It ensures the same random seeds in the randomized transforms defined for image, segmentation and label.\n The `transform` can be :py:class:`monai.transforms.Compose` or any other callable object.\n For example:\n If train based on Nifti format images without metadata, all transforms can be composed::\n\n img_transform = Compose(\n [\n LoadImage(image_only=True),\n AddChannel(),\n RandAdjustContrast()\n ]\n )\n ArrayDataset(img_file_list, img_transform=img_transform)\n\n If training based on images and the metadata, the array transforms can not be composed\n because several transforms receives multiple parameters or return multiple values. Then Users need\n to define their own callable method to parse metadata from `LoadImage` or set `affine` matrix\n to `Spacing` transform::\n\n class TestCompose(Compose):\n def __call__(self, input_):\n img, metadata = self.transforms[0](input_)\n img = self.transforms[1](img)\n img, _, _ = self.transforms[2](img, metadata[\"affine\"])\n return self.transforms[3](img), metadata\n img_transform = TestCompose(\n [\n LoadImage(image_only=False),\n AddChannel(),\n Spacing(pixdim=(1.5, 1.5, 3.0)),\n RandAdjustContrast()\n ]\n )\n ArrayDataset(img_file_list, img_transform=img_transform)\n\n Examples::\n\n >>> ds = ArrayDataset([1, 2, 3, 4], lambda x: x + 0.1)\n >>> print(ds[0])\n 1.1\n\n >>> ds = ArrayDataset(img=[1, 2, 3, 4], seg=[5, 6, 7, 8])\n >>> print(ds[0])\n [1, 5]\n\n \"\"\"\n\n def __init__(\n self,\n img: Sequence,\n img_transform: Optional[Callable] = None,\n seg: Optional[Sequence] = None,\n seg_transform: Optional[Callable] = None,\n labels: Optional[Sequence] = None,\n label_transform: Optional[Callable] = None,\n ) -> None:\n \"\"\"\n Initializes the dataset with the filename lists. 
The transform `img_transform` is applied\n to the images and `seg_transform` to the segmentations.\n\n Args:\n img: sequence of images.\n img_transform: transform to apply to each element in `img`.\n seg: sequence of segmentations.\n seg_transform: transform to apply to each element in `seg`.\n labels: sequence of labels.\n label_transform: transform to apply to each element in `labels`.\n\n \"\"\"\n items = [(img, img_transform), (seg, seg_transform), (labels, label_transform)]\n self.set_random_state(seed=get_seed())\n datasets = [Dataset(x[0], x[1]) for x in items if x[0] is not None]\n self.dataset = datasets[0] if len(datasets) == 1 else ZipDataset(datasets)\n\n self._seed = 0 # transform synchronization seed\n\n def __len__(self) -> int:\n return len(self.dataset)\n\n def randomize(self, data: Optional[Any] = None) -> None:\n self._seed = self.R.randint(MAX_SEED, dtype=\"uint32\")\n\n def __getitem__(self, index: int):\n self.randomize()\n if isinstance(self.dataset, ZipDataset):\n # set transforms of each zip component\n for dataset in self.dataset.data:\n transform = getattr(dataset, \"transform\", None)\n if isinstance(transform, Randomizable):\n transform.set_random_state(seed=self._seed)\n transform = getattr(self.dataset, \"transform\", None)\n if isinstance(transform, Randomizable):\n transform.set_random_state(seed=self._seed)\n return self.dataset[index]\n\n\nclass NPZDictItemDataset(Dataset):\n \"\"\"\n Represents a dataset from a loaded NPZ file. The members of the file to load are named in the keys of `keys` and\n stored under the keyed name. All loaded arrays must have the same 0-dimension (batch) size. Items are always dicts\n mapping names to an item extracted from the loaded arrays.\n If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,\n for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset\n\n Args:\n npzfile: Path to .npz file or stream containing .npz file data\n keys: Maps keys to load from file to name to store in dataset\n transform: Transform to apply to batch dict\n other_keys: secondary data to load from file and store in dict `other_keys`, not returned by __getitem__\n \"\"\"\n\n def __init__(\n self,\n npzfile: Union[str, IO],\n keys: Dict[str, str],\n transform: Optional[Callable] = None,\n other_keys: Optional[Sequence[str]] = (),\n ):\n self.npzfile: Union[str, IO] = npzfile if isinstance(npzfile, str) else \"STREAM\"\n self.keys: Dict[str, str] = dict(keys)\n dat = np.load(npzfile)\n\n self.arrays = {storedk: dat[datak] for datak, storedk in self.keys.items()}\n self.length = self.arrays[first(self.keys.values())].shape[0]\n\n self.other_keys = {} if other_keys is None else {k: dat[k] for k in other_keys}\n\n for k, v in self.arrays.items():\n if v.shape[0] != self.length:\n raise ValueError(\n \"All loaded arrays must have the same first dimension \"\n f\"size {self.length}, array `{k}` has size {v.shape[0]}\"\n )\n\n super().__init__([], transform)\n\n def __len__(self):\n return self.length\n\n def _transform(self, index: int):\n data = {k: v[index] for k, v in self.arrays.items()}\n\n if self.transform is not None:\n data = apply_transform(self.transform, data)\n\n return data\n", "path": "monai/data/dataset.py" } ]
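The `CacheDataset` code in the snapshot above splits a transform chain at the first random transform: `_load_cache_item` applies everything before that point once and stores the result, and `_transform` resumes from that point on a deep copy of the cached value for every request. The following is a minimal standalone sketch of that split, using stand-in transform classes with a hypothetical `is_random` flag rather than MONAI's real `Compose`/`Randomizable` types, just to make the control flow easier to follow.

```python
import random
from copy import deepcopy


class Deterministic:
    """Stand-in for a cacheable, non-random transform."""

    def __call__(self, x):
        return x + 1


class RandomShift:
    """Stand-in for a random transform; marks where caching must stop."""

    is_random = True

    def __call__(self, x):
        return x + random.random()


def load_cache_item(item, transforms):
    # apply transforms up to (not including) the first random one
    for t in transforms:
        if getattr(t, "is_random", False):
            break
        item = t(item)
    return item


def fetch(cached, transforms):
    # resume from the first random transform on a copy of the cached value
    data, started = cached, False
    for t in transforms:
        if started or getattr(t, "is_random", False):
            if not started:
                started = True
                data = deepcopy(data)  # protect the cache from in-place edits
            data = t(data)
    return data


transforms = [Deterministic(), Deterministic(), RandomShift()]
cache = [load_cache_item(i, transforms) for i in range(4)]  # computed once
print(fetch(cache[0], transforms))  # only the random tail re-runs per call
```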
diff --git a/monai/data/dataset.py b/monai/data/dataset.py index fef086f320..76c879e3a6 100644 --- a/monai/data/dataset.py +++ b/monai/data/dataset.py @@ -168,7 +168,7 @@ def __init__( self.hash_func = hash_func if self.cache_dir is not None: if not self.cache_dir.exists(): - self.cache_dir.mkdir(parents=True) + self.cache_dir.mkdir(parents=True, exist_ok=True) if not self.cache_dir.is_dir(): raise ValueError("cache_dir must be a directory.") diff --git a/tests/test_persistentdataset.py b/tests/test_persistentdataset.py index 3bd52d0caa..09488b1214 100644 --- a/tests/test_persistentdataset.py +++ b/tests/test_persistentdataset.py @@ -10,18 +10,15 @@ # limitations under the License. import os -import shutil import tempfile import unittest import nibabel as nib import numpy as np -import torch.distributed as dist from parameterized import parameterized from monai.data import PersistentDataset, json_hashing from monai.transforms import Compose, LoadImaged, SimulateDelayd, Transform -from tests.utils import DistCall, DistTestCase TEST_CASE_1 = [ Compose( @@ -127,29 +124,5 @@ def test_shape(self, transform, expected_shape): self.assertTupleEqual(d["image"].shape, expected_shape) -class TestDistDataset(DistTestCase): - def setUp(self): - self.tempdir = tempfile.mkdtemp() - - def tearDown(self): - shutil.rmtree(self.tempdir) - - @DistCall(nnodes=1, nproc_per_node=2) - def test_mp_dataset(self): - print("persistent", dist.get_rank()) - items = [[list(range(i))] for i in range(5)] - ds = PersistentDataset(items, transform=_InplaceXform(), cache_dir=self.tempdir) - self.assertEqual(items, [[[]], [[0]], [[0, 1]], [[0, 1, 2]], [[0, 1, 2, 3]]]) - ds1 = PersistentDataset(items, transform=_InplaceXform(), cache_dir=self.tempdir) - self.assertEqual(list(ds1), list(ds)) - self.assertEqual(items, [[[]], [[0]], [[0, 1]], [[0, 1, 2]], [[0, 1, 2, 3]]]) - - ds = PersistentDataset(items, transform=_InplaceXform(), cache_dir=self.tempdir, hash_func=json_hashing) - self.assertEqual(items, [[[]], [[0]], [[0, 1]], [[0, 1, 2]], [[0, 1, 2, 3]]]) - ds1 = PersistentDataset(items, transform=_InplaceXform(), cache_dir=self.tempdir, hash_func=json_hashing) - self.assertEqual(list(ds1), list(ds)) - self.assertEqual(items, [[[]], [[0]], [[0, 1]], [[0, 1, 2]], [[0, 1, 2, 3]]]) - - if __name__ == "__main__": unittest.main() diff --git a/tests/test_persistentdataset_dist.py b/tests/test_persistentdataset_dist.py new file mode 100644 index 0000000000..d45bba03e5 --- /dev/null +++ b/tests/test_persistentdataset_dist.py @@ -0,0 +1,78 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import shutil +import tempfile +import unittest + +import numpy as np +import torch.distributed as dist + +from monai.data import PersistentDataset, json_hashing +from monai.transforms import Transform +from tests.utils import DistCall, DistTestCase + + +class _InplaceXform(Transform): + def __call__(self, data): + if data: + data[0] = data[0] + np.pi + else: + data.append(1) + return data + + +class TestDistDataset(DistTestCase): + def setUp(self): + self.tempdir = tempfile.mkdtemp() + + def tearDown(self): + shutil.rmtree(self.tempdir) + + @DistCall(nnodes=1, nproc_per_node=2) + def test_mp_dataset(self): + print("persistent", dist.get_rank()) + items = [[list(range(i))] for i in range(5)] + ds = PersistentDataset(items, transform=_InplaceXform(), cache_dir=self.tempdir) + self.assertEqual(items, [[[]], [[0]], [[0, 1]], [[0, 1, 2]], [[0, 1, 2, 3]]]) + ds1 = PersistentDataset(items, transform=_InplaceXform(), cache_dir=self.tempdir) + self.assertEqual(list(ds1), list(ds)) + self.assertEqual(items, [[[]], [[0]], [[0, 1]], [[0, 1, 2]], [[0, 1, 2, 3]]]) + + ds = PersistentDataset(items, transform=_InplaceXform(), cache_dir=self.tempdir, hash_func=json_hashing) + self.assertEqual(items, [[[]], [[0]], [[0, 1]], [[0, 1, 2]], [[0, 1, 2, 3]]]) + ds1 = PersistentDataset(items, transform=_InplaceXform(), cache_dir=self.tempdir, hash_func=json_hashing) + self.assertEqual(list(ds1), list(ds)) + self.assertEqual(items, [[[]], [[0]], [[0, 1]], [[0, 1, 2]], [[0, 1, 2, 3]]]) + + +class TestDistCreateDataset(DistTestCase): + def setUp(self): + self.tempdir = tempfile.mkdtemp() + + def tearDown(self): + shutil.rmtree(self.tempdir) + + @DistCall(nnodes=1, nproc_per_node=2) + def test_mp_dataset(self): + print("persistent", dist.get_rank()) + items = [[list(range(i))] for i in range(5)] + cache_dir = os.path.join(self.tempdir, "test") + ds = PersistentDataset(items, transform=_InplaceXform(), cache_dir=cache_dir) + self.assertEqual(items, [[[]], [[0]], [[0, 1]], [[0, 1, 2]], [[0, 1, 2, 3]]]) + ds1 = PersistentDataset(items, transform=_InplaceXform(), cache_dir=cache_dir) + self.assertEqual(list(ds1), list(ds)) + self.assertEqual(items, [[[]], [[0]], [[0, 1]], [[0, 1, 2]], [[0, 1, 2, 3]]]) + + +if __name__ == "__main__": + unittest.main()
error in multi-process accessing of the same non-exist persistent cache **Is your feature request related to a problem? Please describe.** the persistent dataset will first check the existence of a cache directory and the create a new one if needed: https://github.com/Project-MONAI/MONAI/blob/feb3a334b7bbf302b13a6da80e0b022a4cf75a4e/monai/data/dataset.py#L163-L165 these steps may run into a race condition in a multiprocess context. ```py python -m tests.test_persistentdataset persistent 1 persistent 0 create /var/folders/6f/fdkl7m0x7sz3nj_t7p3ccgz00000gp/T/tmpu578spse/test create /var/folders/6f/fdkl7m0x7sz3nj_t7p3ccgz00000gp/T/tmpu578spse/test Process Process-2: Traceback (most recent call last): File "/usr/local/anaconda3/envs/py37/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap self.run() File "/usr/local/anaconda3/envs/py37/lib/python3.7/multiprocessing/process.py", line 99, in run self._target(*self._args, **self._kwargs) File "MONAI/tests/utils.py", line 296, in run_process raise e File "MONAI/tests/utils.py", line 287, in run_process func(*args, **kwargs) File "MONAI/tests/utils.py", line 471, in _call_original_func return f(*args, **kwargs) File "MONAI/tests/test_persistentdataset.py", line 166, in test_mp_dataset ds = PersistentDataset(items, transform=_InplaceXform(), cache_dir=cache_dir) File "MONAI/monai/data/dataset.py", line 172, in __init__ self.cache_dir.mkdir(parents=True) File "/usr/local/anaconda3/envs/py37/lib/python3.7/pathlib.py", line 1273, in mkdir self._accessor.mkdir(self, mode) FileExistsError: [Errno 17] File exists: '/var/folders/6f/fdkl7m0x7sz3nj_t7p3ccgz00000gp/T/tmpu578spse/test' F ====================================================================== FAIL: test_mp_dataset (__main__.TestDistCreateDataset) ---------------------------------------------------------------------- Traceback (most recent call last): File MONAI/tests/utils.py", line 343, in _wrapper assert results.get(), "Distributed call failed." AssertionError: Distributed call failed. ---------------------------------------------------------------------- ```
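The one-line change in the diff above targets exactly the race shown in this traceback: with several worker processes, checking `exists()` and then calling `mkdir(parents=True)` is not atomic, so whichever process loses the race hits `FileExistsError`. A minimal illustration of the failure-prone pattern and the idempotent one is sketched below; it uses plain `pathlib` with a hypothetical cache path, independent of MONAI's classes.

```python
from pathlib import Path

cache_dir = Path("/tmp/monai_cache_example")  # hypothetical path, for illustration only

# racy pattern: another process may create the directory between the check and the mkdir
if not cache_dir.exists():
    try:
        cache_dir.mkdir(parents=True)  # raises FileExistsError if this process loses the race
    except FileExistsError:
        pass  # a sibling process created it first

# race-free pattern: exist_ok=True makes the call idempotent, as in the patch above
cache_dir.mkdir(parents=True, exist_ok=True)
if not cache_dir.is_dir():
    raise ValueError("cache_dir must be a directory.")
```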
nonebot__nonebot2-61
[ { "content": "\"\"\"\n事件处理\n========\n\nNoneBot 内部处理并按优先级分发事件给所有事件响应器,提供了多个插槽以进行事件的预处理等。\n\"\"\"\n\nimport asyncio\nfrom datetime import datetime\n\nfrom nonebot.log import logger\nfrom nonebot.rule import TrieRule\nfrom nonebot.utils import escape_tag\nfrom nonebot.matcher import matchers, Matcher\nfrom nonebot.typing import Set, Type, Union, Optional, Iterable, NoReturn, Bot, Event\nfrom nonebot.exception import IgnoredException, StopPropagation\nfrom nonebot.typing import EventPreProcessor, RunPreProcessor, EventPostProcessor, RunPostProcessor\n\n_event_preprocessors: Set[EventPreProcessor] = set()\n_event_postprocessors: Set[EventPostProcessor] = set()\n_run_preprocessors: Set[RunPreProcessor] = set()\n_run_postprocessors: Set[RunPostProcessor] = set()\n\n\ndef event_preprocessor(func: EventPreProcessor) -> EventPreProcessor:\n \"\"\"\n :说明:\n 事件预处理。装饰一个函数,使它在每次接收到事件并分发给各响应器之前执行。\n :参数:\n 事件预处理函数接收三个参数。\n\n * ``bot: Bot``: Bot 对象\n * ``event: Event``: Event 对象\n * ``state: dict``: 当前 State\n \"\"\"\n _event_preprocessors.add(func)\n return func\n\n\ndef event_postprocessor(func: EventPostProcessor) -> EventPostProcessor:\n \"\"\"\n :说明:\n 事件后处理。装饰一个函数,使它在每次接收到事件并分发给各响应器之后执行。\n :参数:\n 事件后处理函数接收三个参数。\n\n * ``bot: Bot``: Bot 对象\n * ``event: Event``: Event 对象\n * ``state: dict``: 当前事件运行前 State\n \"\"\"\n _event_postprocessors.add(func)\n return func\n\n\ndef run_preprocessor(func: RunPreProcessor) -> RunPreProcessor:\n \"\"\"\n :说明:\n 运行预处理。装饰一个函数,使它在每次事件响应器运行前执行。\n :参数:\n 运行预处理函数接收四个参数。\n\n * ``matcher: Matcher``: 当前要运行的事件响应器\n * ``bot: Bot``: Bot 对象\n * ``event: Event``: Event 对象\n * ``state: dict``: 当前 State\n \"\"\"\n _run_preprocessors.add(func)\n return func\n\n\ndef run_postprocessor(func: RunPostProcessor) -> RunPostProcessor:\n \"\"\"\n :说明:\n 运行后处理。装饰一个函数,使它在每次事件响应器运行后执行。\n :参数:\n 运行后处理函数接收五个参数。\n\n * ``matcher: Matcher``: 运行完毕的事件响应器\n * ``exception: Optional[Exception]``: 事件响应器运行错误(如果存在)\n * ``bot: Bot``: Bot 对象\n * ``event: Event``: Event 对象\n * ``state: dict``: 当前 State\n \"\"\"\n _run_postprocessors.add(func)\n return func\n\n\nasync def _check_matcher(priority: int, bot: Bot, event: Event,\n state: dict) -> Iterable[Type[Matcher]]:\n current_matchers = matchers[priority].copy()\n\n async def _check(Matcher: Type[Matcher], bot: Bot, event: Event,\n state: dict) -> Optional[Type[Matcher]]:\n try:\n if await Matcher.check_perm(\n bot, event) and await Matcher.check_rule(bot, event, state):\n return Matcher\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n f\"<r><bg #f8bbd0>Rule check failed for {Matcher}.</bg #f8bbd0></r>\"\n )\n return None\n\n async def _check_expire(Matcher: Type[Matcher]) -> Optional[Type[Matcher]]:\n if Matcher.temp or (Matcher.expire_time and\n datetime.now() > Matcher.expire_time):\n return Matcher\n return None\n\n checking_tasks = [\n _check(Matcher, bot, event, state) for Matcher in current_matchers\n ]\n checking_expire_tasks = [\n _check_expire(Matcher) for Matcher in current_matchers\n ]\n results = await asyncio.gather(*checking_tasks, return_exceptions=True)\n expired = await asyncio.gather(*checking_expire_tasks)\n for expired_matcher in filter(lambda x: x and x in results, expired):\n try:\n matchers[priority].remove(expired_matcher)\n except Exception:\n pass\n return filter(lambda x: x, results)\n\n\nasync def _run_matcher(Matcher: Type[Matcher], bot: Bot, event: Event,\n state: dict) -> Union[None, NoReturn]:\n logger.info(f\"Event will be handled by {Matcher}\")\n\n matcher = Matcher()\n\n coros = list(\n map(lambda 
x: x(matcher, bot, event, state), _run_preprocessors))\n if coros:\n try:\n await asyncio.gather(*coros)\n except IgnoredException:\n logger.opt(colors=True).info(\n f\"Matcher {matcher} running is <b>cancelled</b>\")\n return\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running RunPreProcessors. \"\n \"Running cancelled!</bg #f8bbd0></r>\")\n return\n\n exception = None\n\n try:\n logger.debug(f\"Running matcher {matcher}\")\n await matcher.run(bot, event, state)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n f\"<r><bg #f8bbd0>Running matcher {matcher} failed.</bg #f8bbd0></r>\"\n )\n exception = e\n\n coros = list(\n map(lambda x: x(matcher, exception, bot, event, state),\n _run_postprocessors))\n if coros:\n try:\n await asyncio.gather(*coros)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running RunPostProcessors</bg #f8bbd0></r>\"\n )\n\n if matcher.block:\n raise StopPropagation\n\n\nasync def handle_event(bot: Bot, event: Event):\n \"\"\"\n :说明:\n 处理一个事件。调用该函数以实现分发事件。\n :参数:\n * ``bot: Bot``: Bot 对象\n * ``event: Event``: Event 对象\n :示例:\n\n .. code-block:: python\n\n import asyncio\n asyncio.create_task(handle_event(bot, event))\n \"\"\"\n show_log = True\n log_msg = f\"<m>{bot.type.upper()} </m>| {event.self_id} [{event.name}]: \"\n if event.type == \"message\":\n log_msg += f\"Message {event.id} from \"\n log_msg += str(event.user_id)\n if event.detail_type == \"group\":\n log_msg += f\"@[群:{event.group_id}]:\"\n\n log_msg += ' \"' + \"\".join(\n map(\n lambda x: escape_tag(str(x))\n if x.type == \"text\" else f\"<le>{escape_tag(str(x))}</le>\",\n event.message)) + '\"' # type: ignore\n elif event.type == \"notice\":\n log_msg += f\"Notice {event.raw_event}\"\n elif event.type == \"request\":\n log_msg += f\"Request {event.raw_event}\"\n elif event.type == \"meta_event\":\n # log_msg += f\"MetaEvent {event.detail_type}\"\n show_log = False\n if show_log:\n logger.opt(colors=True).info(log_msg)\n\n state = {}\n coros = list(map(lambda x: x(bot, event, state), _event_preprocessors))\n if coros:\n try:\n logger.debug(\"Running PreProcessors...\")\n await asyncio.gather(*coros)\n except IgnoredException:\n logger.opt(\n colors=True).info(f\"Event {event.name} is <b>ignored</b>\")\n return\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running EventPreProcessors. \"\n \"Event ignored!</bg #f8bbd0></r>\")\n return\n\n # Trie Match\n _, _ = TrieRule.get_value(bot, event, state)\n\n break_flag = False\n for priority in sorted(matchers.keys()):\n if break_flag:\n break\n\n if show_log:\n logger.debug(f\"Checking for matchers in priority {priority}...\")\n\n run_matchers = await _check_matcher(priority, bot, event, state)\n\n pending_tasks = [\n _run_matcher(matcher, bot, event, state.copy())\n for matcher in run_matchers\n ]\n\n results = await asyncio.gather(*pending_tasks, return_exceptions=True)\n\n for result in results:\n if result is StopPropagation:\n if not break_flag:\n break_flag = True\n logger.debug(\"Stop event propagation\")\n\n coros = list(map(lambda x: x(bot, event, state), _event_postprocessors))\n if coros:\n try:\n logger.debug(\"Running PostProcessors...\")\n await asyncio.gather(*coros)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running EventPostProcessors</bg #f8bbd0></r>\"\n )\n", "path": "nonebot/message.py" } ]
[ { "content": "\"\"\"\n事件处理\n========\n\nNoneBot 内部处理并按优先级分发事件给所有事件响应器,提供了多个插槽以进行事件的预处理等。\n\"\"\"\n\nimport asyncio\nfrom datetime import datetime\n\nfrom nonebot.log import logger\nfrom nonebot.rule import TrieRule\nfrom nonebot.utils import escape_tag\nfrom nonebot.matcher import matchers, Matcher\nfrom nonebot.typing import Set, Type, Union, Optional, Iterable, NoReturn, Bot, Event\nfrom nonebot.exception import IgnoredException, StopPropagation\nfrom nonebot.typing import EventPreProcessor, RunPreProcessor, EventPostProcessor, RunPostProcessor\n\n_event_preprocessors: Set[EventPreProcessor] = set()\n_event_postprocessors: Set[EventPostProcessor] = set()\n_run_preprocessors: Set[RunPreProcessor] = set()\n_run_postprocessors: Set[RunPostProcessor] = set()\n\n\ndef event_preprocessor(func: EventPreProcessor) -> EventPreProcessor:\n \"\"\"\n :说明:\n 事件预处理。装饰一个函数,使它在每次接收到事件并分发给各响应器之前执行。\n :参数:\n 事件预处理函数接收三个参数。\n\n * ``bot: Bot``: Bot 对象\n * ``event: Event``: Event 对象\n * ``state: dict``: 当前 State\n \"\"\"\n _event_preprocessors.add(func)\n return func\n\n\ndef event_postprocessor(func: EventPostProcessor) -> EventPostProcessor:\n \"\"\"\n :说明:\n 事件后处理。装饰一个函数,使它在每次接收到事件并分发给各响应器之后执行。\n :参数:\n 事件后处理函数接收三个参数。\n\n * ``bot: Bot``: Bot 对象\n * ``event: Event``: Event 对象\n * ``state: dict``: 当前事件运行前 State\n \"\"\"\n _event_postprocessors.add(func)\n return func\n\n\ndef run_preprocessor(func: RunPreProcessor) -> RunPreProcessor:\n \"\"\"\n :说明:\n 运行预处理。装饰一个函数,使它在每次事件响应器运行前执行。\n :参数:\n 运行预处理函数接收四个参数。\n\n * ``matcher: Matcher``: 当前要运行的事件响应器\n * ``bot: Bot``: Bot 对象\n * ``event: Event``: Event 对象\n * ``state: dict``: 当前 State\n \"\"\"\n _run_preprocessors.add(func)\n return func\n\n\ndef run_postprocessor(func: RunPostProcessor) -> RunPostProcessor:\n \"\"\"\n :说明:\n 运行后处理。装饰一个函数,使它在每次事件响应器运行后执行。\n :参数:\n 运行后处理函数接收五个参数。\n\n * ``matcher: Matcher``: 运行完毕的事件响应器\n * ``exception: Optional[Exception]``: 事件响应器运行错误(如果存在)\n * ``bot: Bot``: Bot 对象\n * ``event: Event``: Event 对象\n * ``state: dict``: 当前 State\n \"\"\"\n _run_postprocessors.add(func)\n return func\n\n\nasync def _check_matcher(priority: int, bot: Bot, event: Event,\n state: dict) -> Iterable[Type[Matcher]]:\n current_matchers = matchers[priority].copy()\n\n async def _check(Matcher: Type[Matcher], bot: Bot, event: Event,\n state: dict) -> Optional[Type[Matcher]]:\n try:\n if await Matcher.check_perm(\n bot, event) and await Matcher.check_rule(bot, event, state):\n return Matcher\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n f\"<r><bg #f8bbd0>Rule check failed for {Matcher}.</bg #f8bbd0></r>\"\n )\n return None\n\n async def _check_expire(Matcher: Type[Matcher]) -> Optional[Type[Matcher]]:\n if Matcher.temp or (Matcher.expire_time and\n datetime.now() > Matcher.expire_time):\n return Matcher\n return None\n\n checking_tasks = [\n _check(Matcher, bot, event, state) for Matcher in current_matchers\n ]\n checking_expire_tasks = [\n _check_expire(Matcher) for Matcher in current_matchers\n ]\n results = await asyncio.gather(*checking_tasks, return_exceptions=True)\n expired = await asyncio.gather(*checking_expire_tasks)\n for expired_matcher in filter(lambda x: x and x in results, expired):\n try:\n matchers[priority].remove(expired_matcher)\n except Exception:\n pass\n return filter(lambda x: x, results)\n\n\nasync def _run_matcher(Matcher: Type[Matcher], bot: Bot, event: Event,\n state: dict) -> Union[None, NoReturn]:\n logger.info(f\"Event will be handled by {Matcher}\")\n\n matcher = Matcher()\n\n coros = list(\n map(lambda 
x: x(matcher, bot, event, state), _run_preprocessors))\n if coros:\n try:\n await asyncio.gather(*coros)\n except IgnoredException:\n logger.opt(colors=True).info(\n f\"Matcher {matcher} running is <b>cancelled</b>\")\n return\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running RunPreProcessors. \"\n \"Running cancelled!</bg #f8bbd0></r>\")\n return\n\n exception = None\n\n try:\n logger.debug(f\"Running matcher {matcher}\")\n await matcher.run(bot, event, state)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n f\"<r><bg #f8bbd0>Running matcher {matcher} failed.</bg #f8bbd0></r>\"\n )\n exception = e\n\n coros = list(\n map(lambda x: x(matcher, exception, bot, event, state),\n _run_postprocessors))\n if coros:\n try:\n await asyncio.gather(*coros)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running RunPostProcessors</bg #f8bbd0></r>\"\n )\n\n if matcher.block:\n raise StopPropagation\n\n\nasync def handle_event(bot: Bot, event: Event):\n \"\"\"\n :说明:\n 处理一个事件。调用该函数以实现分发事件。\n :参数:\n * ``bot: Bot``: Bot 对象\n * ``event: Event``: Event 对象\n :示例:\n\n .. code-block:: python\n\n import asyncio\n asyncio.create_task(handle_event(bot, event))\n \"\"\"\n show_log = True\n log_msg = f\"<m>{bot.type.upper()} </m>| {event.self_id} [{event.name}]: \"\n if event.type == \"message\":\n log_msg += f\"Message {event.id} from \"\n log_msg += str(event.user_id)\n if event.detail_type == \"group\":\n log_msg += f\"@[群:{event.group_id}]:\"\n\n log_msg += ' \"' + \"\".join(\n map(\n lambda x: escape_tag(str(x))\n if x.type == \"text\" else f\"<le>{escape_tag(str(x))}</le>\",\n event.message)) + '\"' # type: ignore\n elif event.type == \"notice\":\n log_msg += f\"Notice {event.raw_event}\"\n elif event.type == \"request\":\n log_msg += f\"Request {event.raw_event}\"\n elif event.type == \"meta_event\":\n # log_msg += f\"MetaEvent {event.detail_type}\"\n show_log = False\n if show_log:\n logger.opt(colors=True).info(log_msg)\n\n state = {}\n coros = list(map(lambda x: x(bot, event, state), _event_preprocessors))\n if coros:\n try:\n logger.debug(\"Running PreProcessors...\")\n await asyncio.gather(*coros)\n except IgnoredException:\n logger.opt(\n colors=True).info(f\"Event {event.name} is <b>ignored</b>\")\n return\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running EventPreProcessors. \"\n \"Event ignored!</bg #f8bbd0></r>\")\n return\n\n # Trie Match\n _, _ = TrieRule.get_value(bot, event, state)\n\n break_flag = False\n for priority in sorted(matchers.keys()):\n if break_flag:\n break\n\n if show_log:\n logger.debug(f\"Checking for matchers in priority {priority}...\")\n\n run_matchers = await _check_matcher(priority, bot, event, state)\n\n pending_tasks = [\n _run_matcher(matcher, bot, event, state.copy())\n for matcher in run_matchers\n ]\n\n results = await asyncio.gather(*pending_tasks, return_exceptions=True)\n\n for result in results:\n if isinstance(result, StopPropagation):\n if not break_flag:\n break_flag = True\n logger.debug(\"Stop event propagation\")\n\n coros = list(map(lambda x: x(bot, event, state), _event_postprocessors))\n if coros:\n try:\n logger.debug(\"Running PostProcessors...\")\n await asyncio.gather(*coros)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running EventPostProcessors</bg #f8bbd0></r>\"\n )\n", "path": "nonebot/message.py" } ]
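The only functional change between the two `nonebot/message.py` snapshots above is `result is StopPropagation` becoming `isinstance(result, StopPropagation)`. The reason is that `asyncio.gather(..., return_exceptions=True)` puts raised exception *instances* into the result list, so an identity check against the exception *class* can never match and the `block` flag was silently ignored. A small self-contained demonstration follows, using a stand-in exception class rather than nonebot's actual `StopPropagation`.

```python
import asyncio


class StopPropagation(Exception):
    """Stand-in for nonebot.exception.StopPropagation."""


async def blocking_matcher():
    raise StopPropagation  # simulates a matcher that sets block=True


async def main():
    results = await asyncio.gather(blocking_matcher(), return_exceptions=True)
    result = results[0]
    print(result is StopPropagation)            # False: gather returns an *instance*
    print(isinstance(result, StopPropagation))  # True: the check used in the fix


asyncio.run(main())
```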
diff --git a/archive/2.0.0a5/README.md b/archive/2.0.0a6/README.md similarity index 100% rename from archive/2.0.0a5/README.md rename to archive/2.0.0a6/README.md diff --git a/archive/2.0.0a6/advanced/README.md b/archive/2.0.0a6/advanced/README.md new file mode 100644 index 000000000000..92c6af3eb19f --- /dev/null +++ b/archive/2.0.0a6/advanced/README.md @@ -0,0 +1,7 @@ +# 深入 + +## 它如何工作? + +<!-- TODO: how to work --> + +~~未填坑~~ diff --git a/archive/2.0.0a6/advanced/permission.md b/archive/2.0.0a6/advanced/permission.md new file mode 100644 index 000000000000..30e200024508 --- /dev/null +++ b/archive/2.0.0a6/advanced/permission.md @@ -0,0 +1 @@ +# 权限控制 \ No newline at end of file diff --git a/archive/2.0.0a6/advanced/runtime-hook.md b/archive/2.0.0a6/advanced/runtime-hook.md new file mode 100644 index 000000000000..ed3968a48a9d --- /dev/null +++ b/archive/2.0.0a6/advanced/runtime-hook.md @@ -0,0 +1 @@ +# 运行时插槽 \ No newline at end of file diff --git a/archive/2.0.0a6/advanced/scheduler.md b/archive/2.0.0a6/advanced/scheduler.md new file mode 100644 index 000000000000..99d906d34a61 --- /dev/null +++ b/archive/2.0.0a6/advanced/scheduler.md @@ -0,0 +1 @@ +# 定时任务 \ No newline at end of file diff --git a/archive/2.0.0a5/api/README.md b/archive/2.0.0a6/api/README.md similarity index 100% rename from archive/2.0.0a5/api/README.md rename to archive/2.0.0a6/api/README.md diff --git a/archive/2.0.0a5/api/adapters/README.md b/archive/2.0.0a6/api/adapters/README.md similarity index 100% rename from archive/2.0.0a5/api/adapters/README.md rename to archive/2.0.0a6/api/adapters/README.md diff --git a/archive/2.0.0a5/api/adapters/cqhttp.md b/archive/2.0.0a6/api/adapters/cqhttp.md similarity index 100% rename from archive/2.0.0a5/api/adapters/cqhttp.md rename to archive/2.0.0a6/api/adapters/cqhttp.md diff --git a/archive/2.0.0a5/api/config.md b/archive/2.0.0a6/api/config.md similarity index 100% rename from archive/2.0.0a5/api/config.md rename to archive/2.0.0a6/api/config.md diff --git a/archive/2.0.0a5/api/drivers/README.md b/archive/2.0.0a6/api/drivers/README.md similarity index 100% rename from archive/2.0.0a5/api/drivers/README.md rename to archive/2.0.0a6/api/drivers/README.md diff --git a/archive/2.0.0a5/api/drivers/fastapi.md b/archive/2.0.0a6/api/drivers/fastapi.md similarity index 100% rename from archive/2.0.0a5/api/drivers/fastapi.md rename to archive/2.0.0a6/api/drivers/fastapi.md diff --git a/archive/2.0.0a5/api/exception.md b/archive/2.0.0a6/api/exception.md similarity index 100% rename from archive/2.0.0a5/api/exception.md rename to archive/2.0.0a6/api/exception.md diff --git a/archive/2.0.0a5/api/log.md b/archive/2.0.0a6/api/log.md similarity index 100% rename from archive/2.0.0a5/api/log.md rename to archive/2.0.0a6/api/log.md diff --git a/archive/2.0.0a5/api/matcher.md b/archive/2.0.0a6/api/matcher.md similarity index 100% rename from archive/2.0.0a5/api/matcher.md rename to archive/2.0.0a6/api/matcher.md diff --git a/archive/2.0.0a5/api/message.md b/archive/2.0.0a6/api/message.md similarity index 100% rename from archive/2.0.0a5/api/message.md rename to archive/2.0.0a6/api/message.md diff --git a/archive/2.0.0a5/api/nonebot.md b/archive/2.0.0a6/api/nonebot.md similarity index 100% rename from archive/2.0.0a5/api/nonebot.md rename to archive/2.0.0a6/api/nonebot.md diff --git a/archive/2.0.0a5/api/permission.md b/archive/2.0.0a6/api/permission.md similarity index 100% rename from archive/2.0.0a5/api/permission.md rename to archive/2.0.0a6/api/permission.md diff --git 
a/archive/2.0.0a5/api/plugin.md b/archive/2.0.0a6/api/plugin.md similarity index 100% rename from archive/2.0.0a5/api/plugin.md rename to archive/2.0.0a6/api/plugin.md diff --git a/archive/2.0.0a5/api/rule.md b/archive/2.0.0a6/api/rule.md similarity index 100% rename from archive/2.0.0a5/api/rule.md rename to archive/2.0.0a6/api/rule.md diff --git a/archive/2.0.0a5/api/sched.md b/archive/2.0.0a6/api/sched.md similarity index 100% rename from archive/2.0.0a5/api/sched.md rename to archive/2.0.0a6/api/sched.md diff --git a/archive/2.0.0a5/api/typing.md b/archive/2.0.0a6/api/typing.md similarity index 100% rename from archive/2.0.0a5/api/typing.md rename to archive/2.0.0a6/api/typing.md diff --git a/archive/2.0.0a5/api/utils.md b/archive/2.0.0a6/api/utils.md similarity index 100% rename from archive/2.0.0a5/api/utils.md rename to archive/2.0.0a6/api/utils.md diff --git a/archive/2.0.0a5/guide/README.md b/archive/2.0.0a6/guide/README.md similarity index 96% rename from archive/2.0.0a5/guide/README.md rename to archive/2.0.0a6/guide/README.md index 43b7f32557ec..cacb58ac1986 100644 --- a/archive/2.0.0a5/guide/README.md +++ b/archive/2.0.0a6/guide/README.md @@ -18,12 +18,6 @@ NoneBot2 是一个可扩展的 Python 异步机器人框架,它会对机器人 需要注意的是,NoneBot 仅支持 Python 3.7+ 及 CQHTTP(OneBot) 插件 v11+。 -## 它如何工作? - -<!-- TODO: how to work --> - -~~未填坑~~ - ## 特色 - 提供直观的测试前端 diff --git a/archive/2.0.0a5/guide/basic-configuration.md b/archive/2.0.0a6/guide/basic-configuration.md similarity index 100% rename from archive/2.0.0a5/guide/basic-configuration.md rename to archive/2.0.0a6/guide/basic-configuration.md diff --git a/archive/2.0.0a5/guide/creating-a-handler.md b/archive/2.0.0a6/guide/creating-a-handler.md similarity index 100% rename from archive/2.0.0a5/guide/creating-a-handler.md rename to archive/2.0.0a6/guide/creating-a-handler.md diff --git a/archive/2.0.0a5/guide/creating-a-matcher.md b/archive/2.0.0a6/guide/creating-a-matcher.md similarity index 100% rename from archive/2.0.0a5/guide/creating-a-matcher.md rename to archive/2.0.0a6/guide/creating-a-matcher.md diff --git a/archive/2.0.0a5/guide/creating-a-plugin.md b/archive/2.0.0a6/guide/creating-a-plugin.md similarity index 100% rename from archive/2.0.0a5/guide/creating-a-plugin.md rename to archive/2.0.0a6/guide/creating-a-plugin.md diff --git a/archive/2.0.0a5/guide/creating-a-project.md b/archive/2.0.0a6/guide/creating-a-project.md similarity index 100% rename from archive/2.0.0a5/guide/creating-a-project.md rename to archive/2.0.0a6/guide/creating-a-project.md diff --git a/archive/2.0.0a5/guide/end-or-start.md b/archive/2.0.0a6/guide/end-or-start.md similarity index 79% rename from archive/2.0.0a5/guide/end-or-start.md rename to archive/2.0.0a6/guide/end-or-start.md index f1b0baa98ee1..117801148485 100644 --- a/archive/2.0.0a5/guide/end-or-start.md +++ b/archive/2.0.0a6/guide/end-or-start.md @@ -5,3 +5,5 @@ - 请千万注意事件处理器的优先级设定 - 在匹配规则中请勿使用耗时极长的函数 - 同一个用户可以**跨群**(**私聊**)继续他的事件处理(除非做出权限限制,将在后续介绍) + +如果你还不能满足,前往 [进阶](../advanced/README.md) 获得更多的功能信息。 diff --git a/archive/2.0.0a5/guide/getting-started.md b/archive/2.0.0a6/guide/getting-started.md similarity index 100% rename from archive/2.0.0a5/guide/getting-started.md rename to archive/2.0.0a6/guide/getting-started.md diff --git a/archive/2.0.0a5/guide/installation.md b/archive/2.0.0a6/guide/installation.md similarity index 100% rename from archive/2.0.0a5/guide/installation.md rename to archive/2.0.0a6/guide/installation.md diff --git a/archive/2.0.0a5/guide/loading-a-plugin.md 
b/archive/2.0.0a6/guide/loading-a-plugin.md similarity index 76% rename from archive/2.0.0a5/guide/loading-a-plugin.md rename to archive/2.0.0a6/guide/loading-a-plugin.md index bb7db02df95e..b648b4b6295a 100644 --- a/archive/2.0.0a5/guide/loading-a-plugin.md +++ b/archive/2.0.0a6/guide/loading-a-plugin.md @@ -83,7 +83,39 @@ if __name__ == "__main__": <!-- TODO: 子插件 --> -~~待填坑~~ +在插件中同样可以加载子插件,例如如下插件目录结构: + +<!-- prettier-ignore-start --> +:::vue +foo_plugin +├── `plugins` +│ ├── `sub_plugin1` +│ │ └── \_\_init\_\_.py +│ └── `sub_plugin2.py` +├── `__init__.py` +└── config.py +::: +<!-- prettier-ignore-end --> + +在插件目录下的 `__init__.py` 中添加如下代码: + +```python +from pathlib import Path + +import nonebot + +# store all subplugins +_sub_plugins = set() +# load sub plugins +_sub_plugins |= nonebot.load_plugins( + str((Path(__file__).parent / "plugins").resolve())) +``` + +插件将会被加载并存储于 `_sub_plugins` 中。 + +:::tip 提示 +如果在父插件中需要定义事件响应器,应在**子插件被加载后**进行定义 +::: ## 运行结果 diff --git a/archive/2.0.0a5/sidebar.config.json b/archive/2.0.0a6/sidebar.config.json similarity index 87% rename from archive/2.0.0a5/sidebar.config.json rename to archive/2.0.0a6/sidebar.config.json index 94ec57abf3e0..bee7b291fe88 100644 --- a/archive/2.0.0a5/sidebar.config.json +++ b/archive/2.0.0a6/sidebar.config.json @@ -15,6 +15,10 @@ "text": "指南", "link": "/guide/" }, + { + "text": "进阶", + "link": "/advanced/" + }, { "text": "API", "link": "/api/" @@ -22,6 +26,10 @@ { "text": "插件广场", "link": "/plugin-store" + }, + { + "text": "更新日志", + "link": "/changelog" } ], "sidebarDepth": 2, @@ -52,6 +60,19 @@ ] } ], + "/advanced/": [ + { + "title": "进阶", + "collapsable": false, + "sidebar": "auto", + "children": [ + "", + "scheduler", + "permission", + "runtime-hook" + ] + } + ], "/api/": [ { "title": "NoneBot Api Reference", diff --git a/docs/.vuepress/config.js b/docs/.vuepress/config.js index 009bc113eacf..8faed7a543a2 100644 --- a/docs/.vuepress/config.js +++ b/docs/.vuepress/config.js @@ -79,8 +79,10 @@ module.exports = context => ({ nav: [ { text: "主页", link: "/" }, { text: "指南", link: "/guide/" }, + { text: "进阶", link: "/advanced/" }, { text: "API", link: "/api/" }, - { text: "插件广场", link: "/plugin-store" } + { text: "插件广场", link: "/plugin-store" }, + { text: "更新日志", link: "/changelog" } ], sidebarDepth: 2, sidebar: { @@ -110,6 +112,19 @@ module.exports = context => ({ ] } ], + "/advanced/": [ + { + title: "进阶", + collapsable: false, + sidebar: "auto", + children: [ + "", + "scheduler", + "permission", + "runtime-hook" + ] + } + ], "/api/": [ { title: "NoneBot Api Reference", diff --git a/docs/.vuepress/versions.json b/docs/.vuepress/versions.json index aa00d565a6b9..b28457942352 100644 --- a/docs/.vuepress/versions.json +++ b/docs/.vuepress/versions.json @@ -1,4 +1,4 @@ [ - "2.0.0a5", + "2.0.0a6", "2.0.0a4" ] \ No newline at end of file diff --git a/docs/advanced/README.md b/docs/advanced/README.md new file mode 100644 index 000000000000..92c6af3eb19f --- /dev/null +++ b/docs/advanced/README.md @@ -0,0 +1,7 @@ +# 深入 + +## 它如何工作? 
+ +<!-- TODO: how to work --> + +~~未填坑~~ diff --git a/docs/advanced/permission.md b/docs/advanced/permission.md new file mode 100644 index 000000000000..30e200024508 --- /dev/null +++ b/docs/advanced/permission.md @@ -0,0 +1 @@ +# 权限控制 \ No newline at end of file diff --git a/docs/advanced/runtime-hook.md b/docs/advanced/runtime-hook.md new file mode 100644 index 000000000000..ed3968a48a9d --- /dev/null +++ b/docs/advanced/runtime-hook.md @@ -0,0 +1 @@ +# 运行时插槽 \ No newline at end of file diff --git a/docs/advanced/scheduler.md b/docs/advanced/scheduler.md new file mode 100644 index 000000000000..99d906d34a61 --- /dev/null +++ b/docs/advanced/scheduler.md @@ -0,0 +1 @@ +# 定时任务 \ No newline at end of file diff --git a/docs/guide/README.md b/docs/guide/README.md index 43b7f32557ec..cacb58ac1986 100644 --- a/docs/guide/README.md +++ b/docs/guide/README.md @@ -18,12 +18,6 @@ NoneBot2 是一个可扩展的 Python 异步机器人框架,它会对机器人 需要注意的是,NoneBot 仅支持 Python 3.7+ 及 CQHTTP(OneBot) 插件 v11+。 -## 它如何工作? - -<!-- TODO: how to work --> - -~~未填坑~~ - ## 特色 - 提供直观的测试前端 diff --git a/docs/guide/end-or-start.md b/docs/guide/end-or-start.md index f1b0baa98ee1..117801148485 100644 --- a/docs/guide/end-or-start.md +++ b/docs/guide/end-or-start.md @@ -5,3 +5,5 @@ - 请千万注意事件处理器的优先级设定 - 在匹配规则中请勿使用耗时极长的函数 - 同一个用户可以**跨群**(**私聊**)继续他的事件处理(除非做出权限限制,将在后续介绍) + +如果你还不能满足,前往 [进阶](../advanced/README.md) 获得更多的功能信息。 diff --git a/docs/guide/loading-a-plugin.md b/docs/guide/loading-a-plugin.md index bb7db02df95e..b648b4b6295a 100644 --- a/docs/guide/loading-a-plugin.md +++ b/docs/guide/loading-a-plugin.md @@ -83,7 +83,39 @@ if __name__ == "__main__": <!-- TODO: 子插件 --> -~~待填坑~~ +在插件中同样可以加载子插件,例如如下插件目录结构: + +<!-- prettier-ignore-start --> +:::vue +foo_plugin +├── `plugins` +│ ├── `sub_plugin1` +│ │ └── \_\_init\_\_.py +│ └── `sub_plugin2.py` +├── `__init__.py` +└── config.py +::: +<!-- prettier-ignore-end --> + +在插件目录下的 `__init__.py` 中添加如下代码: + +```python +from pathlib import Path + +import nonebot + +# store all subplugins +_sub_plugins = set() +# load sub plugins +_sub_plugins |= nonebot.load_plugins( + str((Path(__file__).parent / "plugins").resolve())) +``` + +插件将会被加载并存储于 `_sub_plugins` 中。 + +:::tip 提示 +如果在父插件中需要定义事件响应器,应在**子插件被加载后**进行定义 +::: ## 运行结果 diff --git a/nonebot/message.py b/nonebot/message.py index af0849a5f36d..90af9a96adce 100644 --- a/nonebot/message.py +++ b/nonebot/message.py @@ -244,7 +244,7 @@ async def handle_event(bot: Bot, event: Event): results = await asyncio.gather(*pending_tasks, return_exceptions=True) for result in results: - if result is StopPropagation: + if isinstance(result, StopPropagation): if not break_flag: break_flag = True logger.debug("Stop event propagation") diff --git a/pages/changelog.md b/pages/changelog.md new file mode 100644 index 000000000000..ef8bad8fedab --- /dev/null +++ b/pages/changelog.md @@ -0,0 +1,14 @@ +--- +sidebar: auto +--- + +# 更新日志 + +## v2.0.0a6 + +- 修复 block 失效问题 (hotfix) + +## v2.0.0a5 + +- 更新插件指南文档 +- 修复临时事件响应器运行后删除造成的多次响应问题 diff --git a/poetry.lock b/poetry.lock index 9e3185f498f7..a4d8902d6a75 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1026,7 +1026,7 @@ reference = "aliyun" [[package]] name = "regex" -version = "2020.11.11" +version = "2020.11.13" description = "Alternative regular expression module, to replace re." 
category = "main" optional = true @@ -1120,7 +1120,7 @@ reference = "aliyun" [[package]] name = "sphinx" -version = "3.3.0" +version = "3.3.1" description = "Python documentation generator" category = "dev" optional = false @@ -1366,7 +1366,7 @@ reference = "aliyun" [[package]] name = "urllib3" -version = "1.26.1" +version = "1.26.2" description = "HTTP library with thread-safe connection pooling, file post, and more." category = "main" optional = false @@ -1508,14 +1508,14 @@ reference = "aliyun" [extras] cli = ["nb-cli"] -full = ["nb-cli", "nonebot-test"] +full = ["nb-cli", "nonebot-test", "apscheduler"] scheduler = ["apscheduler"] test = ["nonebot-test"] [metadata] lock-version = "1.1" python-versions = "^3.7" -content-hash = "3760d7d6c8119c6fa29e171cabbecc5e705d2bb3faff82f7211a19e27925abfe" +content-hash = "1d1ddae66f1495be07658ec5a004c4819445f785317c556030c9edb05612917e" [metadata.files] aiofiles = [ @@ -1909,47 +1909,47 @@ pyyaml = [ {file = "PyYAML-5.3.1.tar.gz", hash = "sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d"}, ] regex = [ - {file = "regex-2020.11.11-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:dd7bee615680d940dd44ac0a479f2bc5f73d6ca63a5915cd8d30739c14ca522c"}, - {file = "regex-2020.11.11-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:3002ee2d4e8bbe4656237627203d8290a562d1fc1962deee470905ab63570345"}, - {file = "regex-2020.11.11-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:064d2fc83ab4ee0055fcc1ef38ec60e505742850a40061f854ac64cb3d8d6dd3"}, - {file = "regex-2020.11.11-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:83a390a653c13be1ab26287240df1fd9324ca8a0d31b603fa57cd7d9520648fa"}, - {file = "regex-2020.11.11-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:412969d58ecd4f576510ec88bcb7602e9e582bbef78859ed8c9ca4de4f9e891c"}, - {file = "regex-2020.11.11-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:ccfea4911ac28a8f744096bce1559e0bd86b09a53c8a9d5856ca8e1f5f4de1f5"}, - {file = "regex-2020.11.11-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:cefcdb2ac3b67fd9f7244820ce1965c8cf352366199cc1358d67c6cc3c5c8bbc"}, - {file = "regex-2020.11.11-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:9e8b3187f6beea8e56cb4b33c35049cbe376cf69aefaee5bc035309d88c98ca5"}, - {file = "regex-2020.11.11-cp36-cp36m-win32.whl", hash = "sha256:787e44e5f4fd027dd90b5ee0240b05dc1752cb43c2903617f25baa495fe551e9"}, - {file = "regex-2020.11.11-cp36-cp36m-win_amd64.whl", hash = "sha256:a9f76d9122359b09e38f27cd9c41729169171cf0fd73ec5b22cc4628f9e486ca"}, - {file = "regex-2020.11.11-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6d128368def4b0cd95c0fc9d99a89ae73c083b25e67f27a410830e30f9df0edc"}, - {file = "regex-2020.11.11-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:df50ba964812606663ca9d23d374036bc5ae3d71e86168409cdd84ca7948d8a3"}, - {file = "regex-2020.11.11-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:d1e57c16c4840f1c3543507742e99b8398609474a0e6a6925476914479de3488"}, - {file = "regex-2020.11.11-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:6e50b3b417ab2fd67bfa6235f0df4782fe2ff8be83f0c4435e1dc43d25052ee8"}, - {file = "regex-2020.11.11-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:bb17a7fe9c47167337009ce18cd6e6b3edf3ca0063bf6bed6ce02515129c016a"}, - {file = "regex-2020.11.11-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:826d0119f14f9a9ce25999a13ed5922c785b50e469800f6e5a6721318650ef49"}, - {file = "regex-2020.11.11-cp37-cp37m-manylinux2014_i686.whl", hash = 
"sha256:8cc3717146ce4040419639cf45455663a002a554806ddac46304acc5bd41dae2"}, - {file = "regex-2020.11.11-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:86ad88c7c2512094a85b0a01ce053bab1e28eafb8f3868bb8c22f4903e33f147"}, - {file = "regex-2020.11.11-cp37-cp37m-win32.whl", hash = "sha256:e03867f3baf64ecab47dfc9ddb58afc67acb6a0f80f6cf8ff9fa82962ec4d1cd"}, - {file = "regex-2020.11.11-cp37-cp37m-win_amd64.whl", hash = "sha256:56d1e298bb6482d0466399a6383181bf2627c37ad414e205b3ce0f85aa140be7"}, - {file = "regex-2020.11.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:19ac2bf0048a2f4d460ee20647e84ca160512a7ee8af844dc9207720778470f1"}, - {file = "regex-2020.11.11-cp38-cp38-manylinux1_i686.whl", hash = "sha256:84ab584dcb5e81815040d86148805a808acb0bee303d19638fe2f9488d704bc1"}, - {file = "regex-2020.11.11-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:4159ecf20dffea07f4a7241b2a236f90eb622c7e8caab9f43caba5f27ca37284"}, - {file = "regex-2020.11.11-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:8060be04baec546fe3afa6975d2998e15d1b655d7255f0e6b0ed3f482cccc218"}, - {file = "regex-2020.11.11-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:cdb98be55db1b94c950822cbc10d3d768f01e184365851ebb42cd377486ced7b"}, - {file = "regex-2020.11.11-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:11d9100bd874ce8b2a037db9150e732cd768359fc25fe5f77973208aa24eb13e"}, - {file = "regex-2020.11.11-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:0951c78fa4cb26d1278a4b3784fcf973fc97ec39c07483328a74b034b0cc569c"}, - {file = "regex-2020.11.11-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:c8b1ad791debd67221fb1266f8d09730ae927acacb32d0dad9fd07a7d341a28f"}, - {file = "regex-2020.11.11-cp38-cp38-win32.whl", hash = "sha256:beae9db1545f8116cfc9301a9601e9c975bb56ca22a38ac0fe06a72c3460f31a"}, - {file = "regex-2020.11.11-cp38-cp38-win_amd64.whl", hash = "sha256:48e94218f06317b6d32feb4ecff8b6025695450009bcb3291fb23daf79689431"}, - {file = "regex-2020.11.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c67fd5f3ad81f8301184354014e8e7510ab77e0c7e450a427d77f28ae8effbef"}, - {file = "regex-2020.11.11-cp39-cp39-manylinux1_i686.whl", hash = "sha256:e7cdd5ee8053c82607432b7ebad37e2ece54548fef2b254f7bce6f7831904586"}, - {file = "regex-2020.11.11-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:394b5be4fa72354a78763b317f82997ad881896dd4a860e429a6fa74afaacb07"}, - {file = "regex-2020.11.11-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:3b46a4c73ec1f25361147a7a0fd86084f3627dc78d09bcbe14e70db12683efec"}, - {file = "regex-2020.11.11-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:267d1b13f863e664150948ce2a9ed4927bf4ac7a068780f1ee8af83352aa17a2"}, - {file = "regex-2020.11.11-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:68267a7a5fb0bd9676b86f967143b6a6ecefb3eed4042ecc9e7f0e014aef8f74"}, - {file = "regex-2020.11.11-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:e899b69dd5d26655cb454835ea2fceb18832c9ee9c4fb45dc4cf8a6089d35312"}, - {file = "regex-2020.11.11-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:396411bb5a7849aeda9c49873b8295919fdc118c50b57122b09cb2097047c118"}, - {file = "regex-2020.11.11-cp39-cp39-win32.whl", hash = "sha256:32f8714c4bcc4b0d2aa259b1647e3c5b6cfe2e923c6c124234a5e03408224227"}, - {file = "regex-2020.11.11-cp39-cp39-win_amd64.whl", hash = "sha256:bf02ab95ff5261ba108725dbd795bf6395eaac1b8468b41472d82d35b12b0295"}, - {file = "regex-2020.11.11.tar.gz", hash = "sha256:0a235841237d4487329bcabcb5b902858f7967f5e684e08e968367f25b2c3d37"}, + {file = 
"regex-2020.11.13-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:8b882a78c320478b12ff024e81dc7d43c1462aa4a3341c754ee65d857a521f85"}, + {file = "regex-2020.11.13-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:a63f1a07932c9686d2d416fb295ec2c01ab246e89b4d58e5fa468089cab44b70"}, + {file = "regex-2020.11.13-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:6e4b08c6f8daca7d8f07c8d24e4331ae7953333dbd09c648ed6ebd24db5a10ee"}, + {file = "regex-2020.11.13-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:bba349276b126947b014e50ab3316c027cac1495992f10e5682dc677b3dfa0c5"}, + {file = "regex-2020.11.13-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:56e01daca75eae420bce184edd8bb341c8eebb19dd3bce7266332258f9fb9dd7"}, + {file = "regex-2020.11.13-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:6a8ce43923c518c24a2579fda49f093f1397dad5d18346211e46f134fc624e31"}, + {file = "regex-2020.11.13-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:1ab79fcb02b930de09c76d024d279686ec5d532eb814fd0ed1e0051eb8bd2daa"}, + {file = "regex-2020.11.13-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:9801c4c1d9ae6a70aeb2128e5b4b68c45d4f0af0d1535500884d644fa9b768c6"}, + {file = "regex-2020.11.13-cp36-cp36m-win32.whl", hash = "sha256:49cae022fa13f09be91b2c880e58e14b6da5d10639ed45ca69b85faf039f7a4e"}, + {file = "regex-2020.11.13-cp36-cp36m-win_amd64.whl", hash = "sha256:749078d1eb89484db5f34b4012092ad14b327944ee7f1c4f74d6279a6e4d1884"}, + {file = "regex-2020.11.13-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b2f4007bff007c96a173e24dcda236e5e83bde4358a557f9ccf5e014439eae4b"}, + {file = "regex-2020.11.13-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:38c8fd190db64f513fe4e1baa59fed086ae71fa45083b6936b52d34df8f86a88"}, + {file = "regex-2020.11.13-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:5862975b45d451b6db51c2e654990c1820523a5b07100fc6903e9c86575202a0"}, + {file = "regex-2020.11.13-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:262c6825b309e6485ec2493ffc7e62a13cf13fb2a8b6d212f72bd53ad34118f1"}, + {file = "regex-2020.11.13-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:bafb01b4688833e099d79e7efd23f99172f501a15c44f21ea2118681473fdba0"}, + {file = "regex-2020.11.13-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:e32f5f3d1b1c663af7f9c4c1e72e6ffe9a78c03a31e149259f531e0fed826512"}, + {file = "regex-2020.11.13-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:3bddc701bdd1efa0d5264d2649588cbfda549b2899dc8d50417e47a82e1387ba"}, + {file = "regex-2020.11.13-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:02951b7dacb123d8ea6da44fe45ddd084aa6777d4b2454fa0da61d569c6fa538"}, + {file = "regex-2020.11.13-cp37-cp37m-win32.whl", hash = "sha256:0d08e71e70c0237883d0bef12cad5145b84c3705e9c6a588b2a9c7080e5af2a4"}, + {file = "regex-2020.11.13-cp37-cp37m-win_amd64.whl", hash = "sha256:1fa7ee9c2a0e30405e21031d07d7ba8617bc590d391adfc2b7f1e8b99f46f444"}, + {file = "regex-2020.11.13-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:baf378ba6151f6e272824b86a774326f692bc2ef4cc5ce8d5bc76e38c813a55f"}, + {file = "regex-2020.11.13-cp38-cp38-manylinux1_i686.whl", hash = "sha256:e3faaf10a0d1e8e23a9b51d1900b72e1635c2d5b0e1bea1c18022486a8e2e52d"}, + {file = "regex-2020.11.13-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:2a11a3e90bd9901d70a5b31d7dd85114755a581a5da3fc996abfefa48aee78af"}, + {file = "regex-2020.11.13-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:d1ebb090a426db66dd80df8ca85adc4abfcbad8a7c2e9a5ec7513ede522e0a8f"}, + {file = "regex-2020.11.13-cp38-cp38-manylinux2010_x86_64.whl", hash = 
"sha256:b2b1a5ddae3677d89b686e5c625fc5547c6e492bd755b520de5332773a8af06b"}, + {file = "regex-2020.11.13-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:2c99e97d388cd0a8d30f7c514d67887d8021541b875baf09791a3baad48bb4f8"}, + {file = "regex-2020.11.13-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:c084582d4215593f2f1d28b65d2a2f3aceff8342aa85afd7be23a9cad74a0de5"}, + {file = "regex-2020.11.13-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:a3d748383762e56337c39ab35c6ed4deb88df5326f97a38946ddd19028ecce6b"}, + {file = "regex-2020.11.13-cp38-cp38-win32.whl", hash = "sha256:7913bd25f4ab274ba37bc97ad0e21c31004224ccb02765ad984eef43e04acc6c"}, + {file = "regex-2020.11.13-cp38-cp38-win_amd64.whl", hash = "sha256:6c54ce4b5d61a7129bad5c5dc279e222afd00e721bf92f9ef09e4fae28755683"}, + {file = "regex-2020.11.13-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1862a9d9194fae76a7aaf0150d5f2a8ec1da89e8b55890b1786b8f88a0f619dc"}, + {file = "regex-2020.11.13-cp39-cp39-manylinux1_i686.whl", hash = "sha256:4902e6aa086cbb224241adbc2f06235927d5cdacffb2425c73e6570e8d862364"}, + {file = "regex-2020.11.13-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:7a25fcbeae08f96a754b45bdc050e1fb94b95cab046bf56b016c25e9ab127b3e"}, + {file = "regex-2020.11.13-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:d2d8ce12b7c12c87e41123997ebaf1a5767a5be3ec545f64675388970f415e2e"}, + {file = "regex-2020.11.13-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:f7d29a6fc4760300f86ae329e3b6ca28ea9c20823df123a2ea8693e967b29917"}, + {file = "regex-2020.11.13-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:717881211f46de3ab130b58ec0908267961fadc06e44f974466d1887f865bd5b"}, + {file = "regex-2020.11.13-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:3128e30d83f2e70b0bed9b2a34e92707d0877e460b402faca908c6667092ada9"}, + {file = "regex-2020.11.13-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:8f6a2229e8ad946e36815f2a03386bb8353d4bde368fdf8ca5f0cb97264d3b5c"}, + {file = "regex-2020.11.13-cp39-cp39-win32.whl", hash = "sha256:f8f295db00ef5f8bae530fc39af0b40486ca6068733fb860b42115052206466f"}, + {file = "regex-2020.11.13-cp39-cp39-win_amd64.whl", hash = "sha256:a15f64ae3a027b64496a71ab1f722355e570c3fac5ba2801cafce846bf5af01d"}, + {file = "regex-2020.11.13.tar.gz", hash = "sha256:83d6b356e116ca119db8e7c6fc2983289d87b27b3fac238cfe5dca529d884562"}, ] requests = [ {file = "requests-2.25.0-py2.py3-none-any.whl", hash = "sha256:e786fa28d8c9154e6a4de5d46a1d921b8749f8b74e28bde23768e5e16eece998"}, @@ -1972,8 +1972,8 @@ snowballstemmer = [ {file = "snowballstemmer-2.0.0.tar.gz", hash = "sha256:df3bac3df4c2c01363f3dd2cfa78cce2840a79b9f1c2d2de9ce8d31683992f52"}, ] sphinx = [ - {file = "Sphinx-3.3.0-py3-none-any.whl", hash = "sha256:3abdb2c57a65afaaa4f8573cbabd5465078eb6fd282c1e4f87f006875a7ec0c7"}, - {file = "Sphinx-3.3.0.tar.gz", hash = "sha256:1c21e7c5481a31b531e6cbf59c3292852ccde175b504b00ce2ff0b8f4adc3649"}, + {file = "Sphinx-3.3.1-py3-none-any.whl", hash = "sha256:d4e59ad4ea55efbb3c05cde3bfc83bfc14f0c95aa95c3d75346fcce186a47960"}, + {file = "Sphinx-3.3.1.tar.gz", hash = "sha256:1e8d592225447104d1172be415bc2972bd1357e3e12fdc76edf2261105db4300"}, ] sphinx-markdown-builder = [] sphinxcontrib-applehelp = [ @@ -2023,8 +2023,8 @@ untokenize = [ {file = "untokenize-0.1.1.tar.gz", hash = "md5:50d325dff09208c624cc603fad33bb0d"}, ] urllib3 = [ - {file = "urllib3-1.26.1-py2.py3-none-any.whl", hash = "sha256:61ad24434555a42c0439770462df38b47d05d9e8e353d93ec3742900975e3e65"}, - {file = "urllib3-1.26.1.tar.gz", hash = 
"sha256:097116a6f16f13482d2a2e56792088b9b2920f4eb6b4f84a2c90555fb673db74"}, + {file = "urllib3-1.26.2-py2.py3-none-any.whl", hash = "sha256:d8ff90d979214d7b4f8ce956e80f4028fc6860e4431f731ea4a8c08f23f99473"}, + {file = "urllib3-1.26.2.tar.gz", hash = "sha256:19188f96923873c92ccb987120ec4acaa12f0461fa9ce5d3d0772bc965a39e08"}, ] uvicorn = [ {file = "uvicorn-0.11.8-py3-none-any.whl", hash = "sha256:4b70ddb4c1946e39db9f3082d53e323dfd50634b95fd83625d778729ef1730ef"}, diff --git a/pyproject.toml b/pyproject.toml index afb696c4815c..c9f38794205e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,13 +1,13 @@ [tool.poetry] name = "nonebot2" -version = "2.0.0-alpha.5" +version = "2.0.0-alpha.6" description = "An asynchronous python bot framework." authors = ["yanyongyu <[email protected]>"] license = "MIT" readme = "README.md" -homepage = "https://docs.nonebot.dev/" +homepage = "https://v2.nonebot.dev/" repository = "https://github.com/nonebot/nonebot2" -documentation = "https://docs.nonebot.dev/" +documentation = "https://v2.nonebot.dev/" keywords = ["bot", "qq", "qqbot", "mirai", "coolq"] classifiers = [ "Development Status :: 5 - Production/Stable", @@ -43,7 +43,7 @@ sphinx-markdown-builder = { git = "https://github.com/nonebot/sphinx-markdown-bu cli = ["nb-cli"] test = ["nonebot-test"] scheduler = ["apscheduler"] -full = ["nb-cli", "nonebot-test", "scheduler"] +full = ["nb-cli", "nonebot-test", "apscheduler"] [[tool.poetry.source]] name = "aliyun"
Fix the issue where the stop-event-propagation signal has no effect: the raised exception should be an instance of StopPropagation, not the class itself.
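The one-line fix in `nonebot/message.py` above works because `asyncio.gather(..., return_exceptions=True)` places exception *instances* in its result list, so an identity check against the class (`result is StopPropagation`) can never match. Below is a minimal standalone sketch of that behaviour; the `StopPropagation` class and the coroutine are plain stand-ins for illustration, not nonebot's actual code:

```python
import asyncio


class StopPropagation(Exception):
    """Stand-in for nonebot's StopPropagation signal exception."""


async def blocking_matcher():
    # A matcher that blocks signals this by raising the exception.
    raise StopPropagation


async def main():
    # With return_exceptions=True, gather() puts exception *instances*
    # into the results list, never the class object itself.
    results = await asyncio.gather(blocking_matcher(), return_exceptions=True)
    for result in results:
        print(result is StopPropagation)            # False: identity check against the class
        print(isinstance(result, StopPropagation))  # True: the check used in the fix


asyncio.run(main())
```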
cisagov__manage.get.gov-1551
[ { "content": "from django.db import models\n\nfrom phonenumber_field.modelfields import PhoneNumberField # type: ignore\n\nfrom .utility.time_stamped_model import TimeStampedModel\n\n\nclass Contact(TimeStampedModel):\n\n \"\"\"Contact information follows a similar pattern for each contact.\"\"\"\n\n user = models.OneToOneField(\n \"registrar.User\",\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n )\n\n first_name = models.TextField(\n null=True,\n blank=True,\n help_text=\"First name\",\n verbose_name=\"first name / given name\",\n db_index=True,\n )\n middle_name = models.TextField(\n null=True,\n blank=True,\n help_text=\"Middle name (optional)\",\n )\n last_name = models.TextField(\n null=True,\n blank=True,\n help_text=\"Last name\",\n verbose_name=\"last name / family name\",\n db_index=True,\n )\n title = models.TextField(\n null=True,\n blank=True,\n help_text=\"Title\",\n verbose_name=\"title or role in your organization\",\n )\n email = models.EmailField(\n null=True,\n blank=True,\n help_text=\"Email\",\n db_index=True,\n )\n phone = PhoneNumberField(\n null=True,\n blank=True,\n help_text=\"Phone\",\n db_index=True,\n )\n\n def get_formatted_name(self):\n \"\"\"Returns the contact's name in Western order.\"\"\"\n names = [n for n in [self.first_name, self.middle_name, self.last_name] if n]\n return \" \".join(names) if names else \"Unknown\"\n\n def save(self, *args, **kwargs):\n # Call the parent class's save method to perform the actual save\n super().save(*args, **kwargs)\n\n # Update the related User object's first_name and last_name\n if self.user:\n self.user.first_name = self.first_name\n self.user.last_name = self.last_name\n self.user.save()\n\n def __str__(self):\n if self.first_name or self.last_name:\n return self.get_formatted_name()\n elif self.email:\n return self.email\n elif self.pk:\n return str(self.pk)\n else:\n return \"\"\n", "path": "src/registrar/models/contact.py" } ]
[ { "content": "from django.db import models\n\nfrom phonenumber_field.modelfields import PhoneNumberField # type: ignore\n\nfrom .utility.time_stamped_model import TimeStampedModel\n\n\nclass Contact(TimeStampedModel):\n\n \"\"\"Contact information follows a similar pattern for each contact.\"\"\"\n\n user = models.OneToOneField(\n \"registrar.User\",\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n )\n\n first_name = models.TextField(\n null=True,\n blank=True,\n help_text=\"First name\",\n verbose_name=\"first name / given name\",\n db_index=True,\n )\n middle_name = models.TextField(\n null=True,\n blank=True,\n help_text=\"Middle name (optional)\",\n )\n last_name = models.TextField(\n null=True,\n blank=True,\n help_text=\"Last name\",\n verbose_name=\"last name / family name\",\n db_index=True,\n )\n title = models.TextField(\n null=True,\n blank=True,\n help_text=\"Title\",\n verbose_name=\"title or role in your organization\",\n )\n email = models.EmailField(\n null=True,\n blank=True,\n help_text=\"Email\",\n db_index=True,\n )\n phone = PhoneNumberField(\n null=True,\n blank=True,\n help_text=\"Phone\",\n db_index=True,\n )\n\n def get_formatted_name(self):\n \"\"\"Returns the contact's name in Western order.\"\"\"\n names = [n for n in [self.first_name, self.middle_name, self.last_name] if n]\n return \" \".join(names) if names else \"Unknown\"\n\n def save(self, *args, **kwargs):\n # Call the parent class's save method to perform the actual save\n super().save(*args, **kwargs)\n\n # Update the related User object's first_name and last_name\n if self.user and (not self.user.first_name or not self.user.last_name):\n self.user.first_name = self.first_name\n self.user.last_name = self.last_name\n self.user.save()\n\n def __str__(self):\n if self.first_name or self.last_name:\n return self.get_formatted_name()\n elif self.email:\n return self.email\n elif self.pk:\n return str(self.pk)\n else:\n return \"\"\n", "path": "src/registrar/models/contact.py" } ]
diff --git a/src/registrar/models/contact.py b/src/registrar/models/contact.py index 6b3b6ddb2..06cf83887 100644 --- a/src/registrar/models/contact.py +++ b/src/registrar/models/contact.py @@ -64,7 +64,7 @@ def save(self, *args, **kwargs): super().save(*args, **kwargs) # Update the related User object's first_name and last_name - if self.user: + if self.user and (not self.user.first_name or not self.user.last_name): self.user.first_name = self.first_name self.user.last_name = self.last_name self.user.save() diff --git a/src/registrar/tests/test_admin.py b/src/registrar/tests/test_admin.py index 9d6add249..8c8dd5ab5 100644 --- a/src/registrar/tests/test_admin.py +++ b/src/registrar/tests/test_admin.py @@ -1110,8 +1110,8 @@ def test_alphabetically_sorted_fk_fields_domain_application(self): tested_fields = [ DomainApplication.authorizing_official.field, DomainApplication.submitter.field, - # DomainApplication.investigator.field, - # DomainApplication.creator.field, + DomainApplication.investigator.field, + DomainApplication.creator.field, DomainApplication.requested_domain.field, ] diff --git a/src/registrar/tests/test_models.py b/src/registrar/tests/test_models.py index 0e0839382..96f78bf9f 100644 --- a/src/registrar/tests/test_models.py +++ b/src/registrar/tests/test_models.py @@ -672,6 +672,12 @@ def test_check_domain_invitations_on_login_caps_email(self): class TestContact(TestCase): def setUp(self): + self.email_for_invalid = "[email protected]" + self.invalid_user, _ = User.objects.get_or_create( + username=self.email_for_invalid, email=self.email_for_invalid, first_name="", last_name="" + ) + self.invalid_contact, _ = Contact.objects.get_or_create(user=self.invalid_user) + self.email = "[email protected]" self.user, _ = User.objects.get_or_create(email=self.email, first_name="Jeff", last_name="Lebowski") self.contact, _ = Contact.objects.get_or_create(user=self.user) @@ -683,6 +689,31 @@ def tearDown(self): def test_saving_contact_updates_user_first_last_names(self): """When a contact is updated, we propagate the changes to the linked user if it exists.""" + + # User and Contact are created and linked as expected. + # An empty User object should create an empty contact. 
+ self.assertEqual(self.invalid_contact.first_name, "") + self.assertEqual(self.invalid_contact.last_name, "") + self.assertEqual(self.invalid_user.first_name, "") + self.assertEqual(self.invalid_user.last_name, "") + + # Manually update the contact - mimicking production (pre-existing data) + self.invalid_contact.first_name = "Joey" + self.invalid_contact.last_name = "Baloney" + self.invalid_contact.save() + + # Refresh the user object to reflect the changes made in the database + self.invalid_user.refresh_from_db() + + # Updating the contact's first and last names propagate to the user + self.assertEqual(self.invalid_contact.first_name, "Joey") + self.assertEqual(self.invalid_contact.last_name, "Baloney") + self.assertEqual(self.invalid_user.first_name, "Joey") + self.assertEqual(self.invalid_user.last_name, "Baloney") + + def test_saving_contact_does_not_update_user_first_last_names(self): + """When a contact is updated, we avoid propagating the changes to the linked user if it already has a value""" + # User and Contact are created and linked as expected self.assertEqual(self.contact.first_name, "Jeff") self.assertEqual(self.contact.last_name, "Lebowski") @@ -699,11 +730,11 @@ def test_saving_contact_updates_user_first_last_names(self): # Updating the contact's first and last names propagate to the user self.assertEqual(self.contact.first_name, "Joey") self.assertEqual(self.contact.last_name, "Baloney") - self.assertEqual(self.user.first_name, "Joey") - self.assertEqual(self.user.last_name, "Baloney") + self.assertEqual(self.user.first_name, "Jeff") + self.assertEqual(self.user.last_name, "Lebowski") def test_saving_contact_does_not_update_user_email(self): - """When a contact's email is updated, the change is not propagated to the lined user.""" + """When a contact's email is updated, the change is not propagated to the user.""" self.contact.email = "[email protected]" self.contact.save() @@ -713,3 +744,16 @@ def test_saving_contact_does_not_update_user_email(self): # Updating the contact's email does not propagate self.assertEqual(self.contact.email, "[email protected]") self.assertEqual(self.user.email, "[email protected]") + + def test_saving_contact_does_not_update_user_email_when_none(self): + """When a contact's email is updated, and the first/last name is none, + the change is not propagated to the user.""" + self.invalid_contact.email = "[email protected]" + self.invalid_contact.save() + + # Refresh the user object to reflect the changes made in the database + self.invalid_user.refresh_from_db() + + # Updating the contact's email does not propagate + self.assertEqual(self.invalid_contact.email, "[email protected]") + self.assertEqual(self.invalid_user.email, "[email protected]") diff --git a/src/registrar/tests/test_models_domain.py b/src/registrar/tests/test_models_domain.py index 39f63c942..d21aabb9e 100644 --- a/src/registrar/tests/test_models_domain.py +++ b/src/registrar/tests/test_models_domain.py @@ -761,7 +761,6 @@ def test_not_disclosed_on_other_contacts(self): self.assertEqual(expected_contact.email, actual_contact.email) def test_convert_public_contact_to_epp(self): - self.maxDiff = None domain, _ = Domain.objects.get_or_create(name="freeman.gov") dummy_contact = domain.get_default_security_contact() test_disclose = self._convertPublicContactToEpp(dummy_contact, disclose_email=True).__dict__ diff --git a/src/registrar/tests/test_reports.py b/src/registrar/tests/test_reports.py index 112b2ba34..4b854a0a0 100644 --- a/src/registrar/tests/test_reports.py +++ 
b/src/registrar/tests/test_reports.py @@ -152,7 +152,6 @@ def side_effect(Bucket, Key): @boto3_mocking.patching def test_load_federal_report(self): """Tests the get_current_federal api endpoint""" - self.maxDiff = None mock_client = MagicMock() mock_client_instance = mock_client.return_value
User/Contact bug: Signals double saving, overwriting data for test cases ### Current Behavior A bug was introduced [with a PR](https://github.com/cisagov/manage.get.gov/pull/1491) that fixed our OIDC login clearing out user information. To fix this, the Contact object was linked to the User object so that they would remain in sync. However, this introduced a sneaky double-save bug wherein, if signals get called more than once for the same contact object (as we found happens in our test cases), it overwrites information when it should not. ![image](https://github.com/cisagov/manage.get.gov/assets/141044360/f9d35949-5138-4337-a189-9e2a843191f5) ![image](https://github.com/cisagov/manage.get.gov/assets/141044360/917e6db7-6cfd-48c8-a9b0-e0b8f01f2831) ### Expected Behavior When a `Contact` object is saved, the `save()` function checks whether `self.user` is not None. When it is not None, it updates the `self.user` object with whatever value is located at `self`. It appears that in certain situations (such as our test cases), the `User` field behaves as if it were a one-to-many relationship (one Contact object to many User objects). This should not be the case, and `Contact` should only update one `User` field. ### Steps to Reproduce 1. Create a new test case in `test_admin.py` and create 3 fake User objects. Populate them with unique data. Then log their values. Note that their data seems to get overwritten. ### Environment _No response_ ### Additional Context This bug seems to originate in the `signals.py` file in the `handle_profile` class. By passing in a flag that disables the save behavior (or just commenting it out), this issue seems to be resolved. ![image](https://github.com/cisagov/manage.get.gov/assets/141044360/e19a1508-72c7-405a-96db-3540305dcc4b) ### Issue Links 🔄 Relates to: [#1464 / #1468](https://github.com/cisagov/manage.get.gov/pull/1491) 🔄 Relates to: [this PR](https://github.com/cisagov/manage.get.gov/pull/1543) (pinpoints where the behavior is occurring but does not solve it)
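The patch above guards the name propagation in `Contact.save()` so that repeated signal-driven saves cannot clobber an already-populated `User`. Here is a minimal sketch of just that guard, using plain-Python stand-ins for the Django models so it runs without a Django setup; the field names and the conditional mirror the patched code, while the classes themselves are illustrative:

```python
class FakeUser:
    """Plain stand-in for registrar.User; only the fields the guard touches."""
    def __init__(self, first_name="", last_name=""):
        self.first_name = first_name
        self.last_name = last_name


class FakeContact:
    """Plain stand-in for registrar.Contact, mirroring the patched save() guard."""
    def __init__(self, user, first_name, last_name):
        self.user = user
        self.first_name = first_name
        self.last_name = last_name

    def save(self):
        # Only propagate names to the linked user when the user has none yet,
        # so repeated signal-driven saves cannot overwrite existing data.
        if self.user and (not self.user.first_name or not self.user.last_name):
            self.user.first_name = self.first_name
            self.user.last_name = self.last_name


# A user with existing names is left untouched...
user = FakeUser("Jeff", "Lebowski")
FakeContact(user, "Joey", "Baloney").save()
assert (user.first_name, user.last_name) == ("Jeff", "Lebowski")

# ...while an empty user is still populated from the contact.
empty = FakeUser()
FakeContact(empty, "Joey", "Baloney").save()
assert (empty.first_name, empty.last_name) == ("Joey", "Baloney")
```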
mars-project__mars-679
[ { "content": "# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nimport json\nimport logging\nimport threading\nimport os\nfrom collections import defaultdict\n\nimport numpy as np\nimport pyarrow\nfrom bokeh.application import Application\nfrom bokeh.application.handlers import FunctionHandler\nfrom bokeh.server.server import Server\nimport jinja2\nfrom tornado import web, ioloop\n\nfrom ..compat import six\nfrom ..utils import get_next_port\nfrom ..scheduler import ResourceActor, SessionActor\nfrom ..api import MarsAPI\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_jinja_env():\n from datetime import datetime\n from ..utils import readable_size\n\n _jinja_env = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),\n )\n\n def format_ts(value):\n if value is None or np.isnan(value):\n return None\n return datetime.fromtimestamp(value).strftime('%Y-%m-%d %H:%M:%S')\n\n _jinja_env.filters['format_ts'] = format_ts\n _jinja_env.filters['readable_size'] = readable_size\n return _jinja_env\n\n\nclass BokehStaticFileHandler(web.StaticFileHandler):\n @classmethod\n def get_absolute_path(cls, root, path):\n from bokeh import server\n path_parts = path.rsplit('/', 1)\n if 'bokeh' in path_parts[-1]:\n root = os.path.join(os.path.dirname(server.__file__), \"static\")\n return super(BokehStaticFileHandler, cls).get_absolute_path(root, path)\n\n def validate_absolute_path(self, root, absolute_path):\n from bokeh import server\n path_parts = absolute_path.rsplit('/', 1)\n if 'bokeh' in path_parts[-1]:\n root = os.path.join(os.path.dirname(server.__file__), \"static\")\n return super(BokehStaticFileHandler, self).validate_absolute_path(root, absolute_path)\n\n\nclass MarsRequestHandler(web.RequestHandler):\n def initialize(self, scheduler_ip):\n self._scheduler = scheduler_ip\n self.web_api = MarsWebAPI(scheduler_ip)\n\n\nclass MarsWebAPI(MarsAPI):\n def __init__(self, scheduler_ip):\n super(MarsWebAPI, self).__init__(scheduler_ip)\n\n def get_tasks_info(self, select_session_id=None):\n from ..scheduler import GraphState\n\n sessions = defaultdict(dict)\n for session_id, session_ref in six.iteritems(self.session_manager.get_sessions()):\n if select_session_id and session_id != select_session_id:\n continue\n session_desc = sessions[session_id]\n session_desc['id'] = session_id\n session_desc['name'] = session_id\n session_desc['tasks'] = dict()\n session_ref = self.actor_client.actor_ref(session_ref)\n for graph_key, graph_meta_ref in six.iteritems(session_ref.get_graph_meta_refs()):\n task_desc = dict()\n\n state = self.get_graph_state(session_id, graph_key)\n if state == GraphState.PREPARING:\n task_desc['state'] = state.name.lower()\n session_desc['tasks'][graph_key] = task_desc\n continue\n\n graph_meta_ref = self.actor_client.actor_ref(graph_meta_ref)\n task_desc['id'] = graph_key\n task_desc['state'] = graph_meta_ref.get_state().value\n start_time, end_time, graph_size = 
graph_meta_ref.get_graph_info()\n task_desc['start_time'] = start_time\n task_desc['end_time'] = end_time\n task_desc['graph_size'] = graph_size\n\n session_desc['tasks'][graph_key] = task_desc\n return sessions\n\n def get_task_detail(self, session_id, task_id):\n graph_meta_ref = self.get_graph_meta_ref(session_id, task_id)\n return graph_meta_ref.calc_stats()\n\n def get_operand_info(self, session_id, task_id, state=None):\n graph_meta_ref = self.get_graph_meta_ref(session_id, task_id)\n return graph_meta_ref.get_operand_info(state=state)\n\n def get_workers_meta(self):\n resource_uid = ResourceActor.default_uid()\n resource_ref = self.get_actor_ref(resource_uid)\n return resource_ref.get_workers_meta()\n\n def query_worker_events(self, endpoint, category, time_start=None, time_end=None):\n from ..worker import EventsActor\n ref = self.actor_client.actor_ref(EventsActor.default_uid(), address=endpoint)\n return ref.query_by_time(category, time_start=time_start, time_end=time_end)\n\n def write_mutable_tensor(self, session_id, name, payload_type, body):\n from ..serialize import dataserializer\n from ..tensor.core import Indexes\n session_uid = SessionActor.gen_uid(session_id)\n session_ref = self.get_actor_ref(session_uid)\n\n index_json_size = np.asscalar(np.frombuffer(body[0:8], dtype=np.int64))\n index_json = json.loads(body[8:8+index_json_size].decode('ascii'))\n index = Indexes.from_json(index_json).indexes\n if payload_type is None:\n value = dataserializer.loads(body[8+index_json_size:], raw=False)\n elif payload_type == 'tensor':\n tensor_chunk_offset = 8 + index_json_size\n with pyarrow.BufferReader(body[tensor_chunk_offset:]) as reader:\n value = pyarrow.read_tensor(reader).to_numpy()\n elif payload_type == 'record_batch':\n schema_size = np.asscalar(np.frombuffer(body[8+index_json_size:8+index_json_size+8], dtype=np.int64))\n schema_offset = 8 + index_json_size + 8\n with pyarrow.BufferReader(body[schema_offset:schema_offset+schema_size]) as reader:\n schema = pyarrow.read_schema(reader)\n record_batch_offset = schema_offset + schema_size\n with pyarrow.BufferReader(body[record_batch_offset:]) as reader:\n record_batch = pyarrow.read_record_batch(reader, schema)\n value = record_batch.to_pandas().to_records(index=False)\n else:\n raise ValueError('Not supported payload type: %s' % payload_type)\n return session_ref.write_mutable_tensor(name, index, value)\n\n\nclass MarsWeb(object):\n def __init__(self, port=None, scheduler_ip=None):\n self._port = port\n self._scheduler_ip = scheduler_ip\n self._server = None\n self._server_thread = None\n\n @property\n def port(self):\n return self._port\n\n @staticmethod\n def _configure_loop():\n try:\n ioloop.IOLoop.current()\n except RuntimeError:\n if six.PY3:\n import asyncio\n asyncio.set_event_loop(asyncio.new_event_loop())\n loop = None\n try:\n loop = ioloop.IOLoop.current()\n except: # noqa: E722\n pass\n if loop is None:\n raise\n else:\n raise\n\n def _try_start_web_server(self):\n static_path = os.path.join(os.path.dirname(__file__), 'static')\n\n handlers = dict()\n for p, h in _bokeh_apps.items():\n handlers[p] = Application(FunctionHandler(functools.partial(h, self._scheduler_ip)))\n\n handler_kwargs = {'scheduler_ip': self._scheduler_ip}\n extra_patterns = [\n ('/static/(.*)', BokehStaticFileHandler, {'path': static_path})\n ]\n for p, h in _web_handlers.items():\n extra_patterns.append((p, h, handler_kwargs))\n\n retrial = 5\n while retrial:\n try:\n if self._port is None:\n use_port = get_next_port()\n else:\n use_port = 
self._port\n\n self._server = Server(\n handlers, allow_websocket_origin=['*'],\n address='0.0.0.0', port=use_port,\n extra_patterns=extra_patterns,\n )\n self._server.start()\n self._port = use_port\n logger.info('Mars UI started at 0.0.0.0:%d', self._port)\n break\n except OSError:\n if self._port is not None:\n raise\n retrial -= 1\n if retrial == 0:\n raise\n\n def start(self, event=None, block=False):\n self._configure_loop()\n self._try_start_web_server()\n\n if not block:\n self._server_thread = threading.Thread(target=self._server.io_loop.start)\n self._server_thread.daemon = True\n self._server_thread.start()\n\n if event:\n event.set()\n else:\n if event:\n event.set()\n\n self._server.io_loop.start()\n\n def stop(self):\n if self._server is not None:\n self._server.io_loop.stop()\n self._server.stop()\n\n\n_bokeh_apps = dict()\n_web_handlers = dict()\n\n\ndef register_bokeh_app(pattern, handler):\n _bokeh_apps[pattern] = handler\n\n\ndef register_web_handler(pattern, handler):\n _web_handlers[pattern] = handler\n", "path": "mars/web/server.py" } ]
[ { "content": "# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nimport json\nimport logging\nimport threading\nimport os\nfrom collections import defaultdict\n\nimport numpy as np\nimport pyarrow\nfrom bokeh.application import Application\nfrom bokeh.application.handlers import FunctionHandler\nfrom bokeh.server.server import Server\nimport jinja2\nfrom tornado import web, ioloop\n\nfrom ..compat import six\nfrom ..utils import get_next_port\nfrom ..scheduler import ResourceActor, SessionActor\nfrom ..api import MarsAPI\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_jinja_env():\n from datetime import datetime\n from ..utils import readable_size\n\n _jinja_env = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),\n )\n\n def format_ts(value):\n if value is None or np.isnan(value):\n return None\n return datetime.fromtimestamp(value).strftime('%Y-%m-%d %H:%M:%S')\n\n _jinja_env.filters['format_ts'] = format_ts\n _jinja_env.filters['readable_size'] = readable_size\n return _jinja_env\n\n\nclass BokehStaticFileHandler(web.StaticFileHandler):\n @classmethod\n def get_absolute_path(cls, root, path):\n from bokeh import server\n path_parts = path.rsplit('/', 1)\n if 'bokeh' in path_parts[-1]:\n root = os.path.join(os.path.dirname(server.__file__), \"static\")\n return super(BokehStaticFileHandler, cls).get_absolute_path(root, path)\n\n def validate_absolute_path(self, root, absolute_path):\n from bokeh import server\n path_parts = absolute_path.rsplit('/', 1)\n if 'bokeh' in path_parts[-1]:\n root = os.path.join(os.path.dirname(server.__file__), \"static\")\n return super(BokehStaticFileHandler, self).validate_absolute_path(root, absolute_path)\n\n\nclass MarsRequestHandler(web.RequestHandler):\n def initialize(self, scheduler_ip):\n self._scheduler = scheduler_ip\n self.web_api = MarsWebAPI(scheduler_ip)\n\n\nclass MarsWebAPI(MarsAPI):\n def __init__(self, scheduler_ip):\n super(MarsWebAPI, self).__init__(scheduler_ip)\n\n def get_tasks_info(self, select_session_id=None):\n from ..scheduler import GraphState\n\n sessions = defaultdict(dict)\n for session_id, session_ref in six.iteritems(self.session_manager.get_sessions()):\n if select_session_id and session_id != select_session_id:\n continue\n session_desc = sessions[session_id]\n session_desc['id'] = session_id\n session_desc['name'] = session_id\n session_desc['tasks'] = dict()\n session_ref = self.actor_client.actor_ref(session_ref)\n for graph_key, graph_meta_ref in six.iteritems(session_ref.get_graph_meta_refs()):\n task_desc = dict()\n\n state = self.get_graph_state(session_id, graph_key)\n if state == GraphState.PREPARING:\n task_desc['state'] = state.name.lower()\n session_desc['tasks'][graph_key] = task_desc\n continue\n\n graph_meta_ref = self.actor_client.actor_ref(graph_meta_ref)\n task_desc['id'] = graph_key\n task_desc['state'] = graph_meta_ref.get_state().value\n start_time, end_time, graph_size = 
graph_meta_ref.get_graph_info()\n task_desc['start_time'] = start_time\n task_desc['end_time'] = end_time\n task_desc['graph_size'] = graph_size\n\n session_desc['tasks'][graph_key] = task_desc\n return sessions\n\n def get_task_detail(self, session_id, task_id):\n graph_meta_ref = self.get_graph_meta_ref(session_id, task_id)\n return graph_meta_ref.calc_stats()\n\n def get_operand_info(self, session_id, task_id, state=None):\n graph_meta_ref = self.get_graph_meta_ref(session_id, task_id)\n return graph_meta_ref.get_operand_info(state=state)\n\n def get_workers_meta(self):\n resource_uid = ResourceActor.default_uid()\n resource_ref = self.get_actor_ref(resource_uid)\n return resource_ref.get_workers_meta()\n\n def query_worker_events(self, endpoint, category, time_start=None, time_end=None):\n from ..worker import EventsActor\n ref = self.actor_client.actor_ref(EventsActor.default_uid(), address=endpoint)\n return ref.query_by_time(category, time_start=time_start, time_end=time_end)\n\n def write_mutable_tensor(self, session_id, name, payload_type, body):\n from ..serialize import dataserializer\n from ..tensor.core import Indexes\n session_uid = SessionActor.gen_uid(session_id)\n session_ref = self.get_actor_ref(session_uid)\n\n index_json_size = np.asscalar(np.frombuffer(body[0:8], dtype=np.int64))\n index_json = json.loads(body[8:8+index_json_size].decode('ascii'))\n index = Indexes.from_json(index_json).indexes\n if payload_type is None:\n value = dataserializer.loads(body[8+index_json_size:], raw=False)\n elif payload_type == 'tensor':\n tensor_chunk_offset = 8 + index_json_size\n with pyarrow.BufferReader(body[tensor_chunk_offset:]) as reader:\n value = pyarrow.read_tensor(reader).to_numpy()\n elif payload_type == 'record_batch':\n schema_size = np.asscalar(np.frombuffer(body[8+index_json_size:8+index_json_size+8], dtype=np.int64))\n schema_offset = 8 + index_json_size + 8\n with pyarrow.BufferReader(body[schema_offset:schema_offset+schema_size]) as reader:\n schema = pyarrow.read_schema(reader)\n record_batch_offset = schema_offset + schema_size\n with pyarrow.BufferReader(body[record_batch_offset:]) as reader:\n record_batch = pyarrow.read_record_batch(reader, schema)\n value = record_batch.to_pandas().to_records(index=False)\n else:\n raise ValueError('Not supported payload type: %s' % payload_type)\n return session_ref.write_mutable_tensor(name, index, value)\n\n\nclass MarsWeb(object):\n def __init__(self, port=None, scheduler_ip=None):\n self._port = port\n self._scheduler_ip = scheduler_ip\n self._server = None\n self._server_thread = None\n\n @property\n def port(self):\n return self._port\n\n @staticmethod\n def _configure_loop():\n try:\n ioloop.IOLoop.current()\n except RuntimeError:\n if six.PY3:\n import asyncio\n asyncio.set_event_loop(asyncio.new_event_loop())\n loop = None\n try:\n loop = ioloop.IOLoop.current()\n except: # noqa: E722\n pass\n if loop is None:\n raise\n else:\n raise\n\n def _try_start_web_server(self):\n static_path = os.path.join(os.path.dirname(__file__), 'static')\n\n handlers = dict()\n for p, h in _bokeh_apps.items():\n handlers[p] = Application(FunctionHandler(functools.partial(h, self._scheduler_ip)))\n\n handler_kwargs = {'scheduler_ip': self._scheduler_ip}\n extra_patterns = [\n ('/static/(.*)', BokehStaticFileHandler, {'path': static_path})\n ]\n for p, h in _web_handlers.items():\n extra_patterns.append((p, h, handler_kwargs))\n\n retrial = 5\n while retrial:\n try:\n if self._port is None:\n use_port = get_next_port()\n else:\n use_port = 
self._port\n\n self._server = Server(\n handlers, allow_websocket_origin=['*'],\n address='0.0.0.0', port=use_port,\n extra_patterns=extra_patterns,\n http_server_kwargs={'max_buffer_size': 2 ** 32},\n )\n self._server.start()\n self._port = use_port\n logger.info('Mars UI started at 0.0.0.0:%d', self._port)\n break\n except OSError:\n if self._port is not None:\n raise\n retrial -= 1\n if retrial == 0:\n raise\n\n def start(self, event=None, block=False):\n self._configure_loop()\n self._try_start_web_server()\n\n if not block:\n self._server_thread = threading.Thread(target=self._server.io_loop.start)\n self._server_thread.daemon = True\n self._server_thread.start()\n\n if event:\n event.set()\n else:\n if event:\n event.set()\n\n self._server.io_loop.start()\n\n def stop(self):\n if self._server is not None:\n self._server.io_loop.stop()\n self._server.stop()\n\n\n_bokeh_apps = dict()\n_web_handlers = dict()\n\n\ndef register_bokeh_app(pattern, handler):\n _bokeh_apps[pattern] = handler\n\n\ndef register_web_handler(pattern, handler):\n _web_handlers[pattern] = handler\n", "path": "mars/web/server.py" } ]
diff --git a/mars/web/server.py b/mars/web/server.py index a15a26a93e..7497815a66 100644 --- a/mars/web/server.py +++ b/mars/web/server.py @@ -214,6 +214,7 @@ def _try_start_web_server(self): handlers, allow_websocket_origin=['*'], address='0.0.0.0', port=use_port, extra_patterns=extra_patterns, + http_server_kwargs={'max_buffer_size': 2 ** 32}, ) self._server.start() self._port = use_port
[BUG] Web session doesn't work with large data source **Describe the bug** The default `max_buffer_size` of the tornado HTTP server is 100 MB; when constructing dataframes from large pandas dataframes, it raises `Content-Length too long`.
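The fix forwards a larger `max_buffer_size` to tornado through bokeh's `Server(..., http_server_kwargs=...)`, the same pattern the patched `_try_start_web_server` uses. A minimal sketch of that idea outside Mars, assuming bokeh is installed; the empty app, path, and port are placeholders:

```python
from bokeh.application import Application
from bokeh.application.handlers import FunctionHandler
from bokeh.server.server import Server


def empty_app(doc):
    """Trivial bokeh app; the point of the sketch is the server kwargs below."""


server = Server(
    {'/': Application(FunctionHandler(empty_app))},
    address='0.0.0.0',
    port=5006,
    allow_websocket_origin=['*'],
    # Forwarded to tornado's HTTPServer, whose default max_buffer_size (~100 MB)
    # is what triggers "Content-Length too long" for large request bodies.
    http_server_kwargs={'max_buffer_size': 2 ** 32},
)
server.start()
# server.io_loop.start()  # uncomment to actually serve requests
```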
mozmeao__snippets-service-1017
[ { "content": "import re\n\nfrom django.contrib import admin, messages\nfrom django.db import transaction\nfrom django.db.models import TextField, Q\nfrom django.template.loader import get_template\nfrom django.utils.safestring import mark_safe\n\nfrom reversion.admin import VersionAdmin\nfrom django_ace import AceWidget\nfrom django_statsd.clients import statsd\nfrom jinja2.meta import find_undeclared_variables\nfrom django_admin_listfilter_dropdown.filters import (RelatedDropdownFilter,\n RelatedOnlyDropdownFilter)\n\nfrom snippets.base import forms, models, slack\nfrom snippets.base.admin import actions, filters\n\n\nMATCH_LOCALE_REGEX = re.compile(r'(\\w+(?:-\\w+)*)')\nRESERVED_VARIABLES = ('_', 'snippet_id')\n\n\nclass ClientMatchRuleAdmin(VersionAdmin, admin.ModelAdmin):\n list_display = ('description', 'is_exclusion', 'startpage_version', 'name',\n 'version', 'locale', 'appbuildid', 'build_target',\n 'channel', 'os_version', 'distribution',\n 'distribution_version', 'modified')\n list_filter = ('name', 'version', 'os_version', 'appbuildid',\n 'build_target', 'channel', 'distribution', 'locale')\n save_on_top = True\n search_fields = ('description',)\n\n class Media:\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n\nclass LogEntryAdmin(admin.ModelAdmin):\n list_display = ('user', 'content_type', 'object_id', 'object_repr', 'change_message')\n list_filter = ('user', 'content_type')\n\n\nclass SnippetTemplateVariableInline(admin.TabularInline):\n model = models.SnippetTemplateVariable\n formset = forms.SnippetTemplateVariableInlineFormset\n max_num = 0\n can_delete = False\n readonly_fields = ('name',)\n fields = ('name', 'type', 'order', 'description')\n\n\nclass SnippetTemplateAdmin(VersionAdmin, admin.ModelAdmin):\n save_on_top = True\n list_display = ('name', 'priority', 'hidden')\n list_filter = ('hidden', 'startpage')\n inlines = (SnippetTemplateVariableInline,)\n formfield_overrides = {\n TextField: {'widget': AceWidget(mode='html', theme='github',\n width='1200px', height='500px')},\n }\n\n def save_related(self, request, form, formsets, change):\n \"\"\"\n After saving the related objects, remove and add\n SnippetTemplateVariables depending on how the template code changed.\n \"\"\"\n super(SnippetTemplateAdmin, self).save_related(request, form, formsets,\n change)\n\n # Parse the template code and find any undefined variables.\n ast = models.JINJA_ENV.env.parse(form.instance.code)\n new_vars = find_undeclared_variables(ast)\n var_manager = form.instance.variable_set\n\n # Filter out reserved variable names.\n new_vars = [x for x in new_vars if x not in RESERVED_VARIABLES]\n\n # Delete variables not in the new set.\n var_manager.filter(~Q(name__in=new_vars)).delete()\n\n # Create variables that don't exist.\n for i, variable in enumerate(new_vars, start=1):\n obj, _ = models.SnippetTemplateVariable.objects.get_or_create(\n template=form.instance, name=variable)\n if obj.order == 0:\n obj.order = i * 10\n obj.save()\n\n\nclass AddonAdmin(admin.ModelAdmin):\n list_display = ('name', 'guid')\n\n class Media:\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n\nclass IconAdmin(admin.ModelAdmin):\n search_fields = [\n 'name',\n 'image',\n ]\n readonly_fields = [\n 'height',\n 'width',\n 'preview',\n 'creator',\n 'created',\n 'snippets',\n ]\n list_display_links = [\n 'id',\n 'name',\n ]\n list_display = [\n 'id',\n 'name',\n 'width',\n 'height',\n 'preview',\n ]\n\n class Media:\n css = {\n 'all': (\n 
'css/admin/ListSnippets.css',\n )\n }\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n super().save_model(request, obj, form, change)\n\n def preview(self, obj):\n text = f'<img style=\"max-width:120px; max-height:120px;\" src=\"{obj.image.url}\"/>'\n return mark_safe(text)\n\n def snippets(self, obj):\n \"\"\"Snippets using this icon.\"\"\"\n template = get_template('base/snippets_related_with_obj.jinja')\n return mark_safe(template.render({'snippets': obj.snippets, 'type': 'Icon'}))\n\n\nclass SimpleTemplateInline(admin.StackedInline):\n model = models.SimpleTemplate\n form = forms.SimpleTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'simple_snippet',\n ]\n raw_id_fields = [\n 'section_title_icon',\n 'title_icon',\n 'icon',\n ]\n\n fieldsets = (\n ('Title', {\n 'fields': ('title_icon', 'title'),\n }),\n ('Section', {\n 'fields': ('section_title_icon', 'section_title_text', 'section_title_url',),\n }),\n ('Main', {\n 'fields': ('icon', 'text', 'button_label', 'button_color', 'button_url'),\n }),\n ('Extra', {\n 'fields': ('block_button_text', 'tall', 'do_not_autoblock'),\n })\n\n )\n\n\nclass FundraisingTemplateInline(admin.StackedInline):\n model = models.FundraisingTemplate\n form = forms.FundraisingTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'eoy_snippet',\n ]\n raw_id_fields = [\n 'title_icon',\n 'icon',\n ]\n\n fieldsets = (\n ('Title', {\n 'fields': (\n 'title_icon',\n 'title'\n ),\n }),\n ('Main', {\n 'fields': (\n 'icon',\n 'text',\n 'text_color',\n 'background_color',\n 'highlight_color',\n )\n }),\n ('Form Configuration', {\n 'fields': (\n 'donation_form_url',\n 'currency_code',\n 'locale',\n 'selected_button',\n 'button_label',\n 'button_color',\n 'button_background_color',\n 'monthly_checkbox_label_text',\n )\n }),\n ('Donation', {\n 'fields': (\n ('donation_amount_first', 'donation_amount_second',\n 'donation_amount_third', 'donation_amount_fourth',),\n )\n }),\n ('Extra', {\n 'fields': ('block_button_text', 'test', 'do_not_autoblock'),\n })\n\n )\n\n\nclass FxASignupTemplateInline(admin.StackedInline):\n model = models.FxASignupTemplate\n form = forms.FxASignupTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'fxa_signup_snippet',\n ]\n raw_id_fields = [\n 'scene1_title_icon',\n 'scene1_icon',\n ]\n\n fieldsets = (\n ('Scene 1 Title', {\n 'fields': (\n 'scene1_title_icon',\n 'scene1_title'\n ),\n }),\n ('Scene 1 Main', {\n 'fields': (\n 'scene1_icon',\n 'scene1_text',\n 'scene1_button_label',\n 'scene1_button_color',\n 'scene1_button_background_color',\n )\n }),\n ('Scene 2 Title', {\n 'fields': ('scene2_title',),\n }),\n ('Scene 2 Main', {\n 'fields': (\n 'scene2_text',\n 'scene2_button_label',\n 'scene2_email_placeholder_text',\n 'scene2_dismiss_button_text',\n )\n }),\n\n ('Extra', {\n 'fields': (\n 'utm_term',\n 'utm_campaign',\n 'block_button_text',\n 'do_not_autoblock'\n ),\n })\n )\n\n\nclass NewsletterTemplateInline(admin.StackedInline):\n model = models.NewsletterTemplate\n form = forms.NewsletterTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'newsletter_snippet',\n ]\n raw_id_fields = [\n 'scene1_title_icon',\n 'scene1_icon',\n ]\n\n fieldsets = (\n ('Scene 1 Title', {\n 'fields': (\n 'scene1_title_icon',\n 'scene1_title'\n ),\n }),\n ('Scene 1 Main', {\n 'fields': (\n 'scene1_icon',\n 'scene1_text',\n 'scene1_button_label',\n 
'scene1_button_color',\n 'scene1_button_background_color',\n )\n }),\n ('Scene 2 Title', {\n 'fields': ('scene2_title',),\n }),\n ('Scene 2 Main', {\n 'fields': (\n 'scene2_text',\n 'scene2_button_label',\n 'scene2_email_placeholder_text',\n 'scene2_privacy_html',\n 'scene2_newsletter',\n 'scene2_dismiss_button_text',\n 'locale',\n 'success_text',\n 'error_text',\n )\n }),\n\n ('Extra', {\n 'fields': (\n 'block_button_text',\n 'do_not_autoblock'\n ),\n })\n )\n\n\nclass SendToDeviceTemplateInline(admin.StackedInline):\n model = models.SendToDeviceTemplate\n form = forms.SendToDeviceTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'send_to_device_snippet',\n ]\n raw_id_fields = [\n 'scene1_title_icon',\n 'scene1_icon',\n 'scene2_icon',\n ]\n\n fieldsets = (\n ('Scene 1 Title', {\n 'fields': (\n 'scene1_title_icon',\n 'scene1_title'\n ),\n }),\n ('Scene 1 Main', {\n 'fields': (\n 'scene1_icon',\n 'scene1_text',\n 'scene1_button_label',\n 'scene1_button_color',\n 'scene1_button_background_color',\n )\n }),\n ('Scene 2 Title', {\n 'fields': ('scene2_title',),\n }),\n ('Scene 2 Main', {\n 'fields': (\n 'scene2_icon',\n 'scene2_text',\n\n 'scene2_button_label',\n 'scene2_input_placeholder',\n 'scene2_disclaimer_html',\n 'scene2_dismiss_button_text',\n\n 'locale',\n 'country',\n ('include_sms', 'message_id_sms',),\n 'message_id_email',\n 'success_title',\n 'success_text',\n 'error_text',\n )\n }),\n\n ('Extra', {\n 'fields': (\n 'block_button_text',\n 'do_not_autoblock'\n ),\n })\n )\n\n\nclass SimpleBelowSearchTemplateInline(admin.StackedInline):\n model = models.SimpleBelowSearchTemplate\n form = forms.SimpleBelowSearchTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'simple_below_search_snippet',\n ]\n raw_id_fields = [\n 'icon',\n ]\n\n fieldsets = (\n ('Main', {\n 'fields': ('icon', 'text'),\n }),\n ('Extra', {\n 'fields': ('block_button_text', 'do_not_autoblock'),\n })\n\n )\n\n\nclass ASRSnippetAdmin(admin.ModelAdmin):\n form = forms.ASRSnippetAdminForm\n inlines = [\n SimpleTemplateInline,\n FundraisingTemplateInline,\n FxASignupTemplateInline,\n NewsletterTemplateInline,\n SendToDeviceTemplateInline,\n SimpleBelowSearchTemplateInline,\n ]\n list_display_links = (\n 'id',\n 'name',\n )\n list_display = (\n 'id',\n 'name',\n 'status',\n 'locale_list',\n 'modified',\n )\n list_filter = (\n filters.ModifiedFilter,\n filters.TemplateFilter,\n ('locales', RelatedOnlyDropdownFilter),\n ('targets', RelatedOnlyDropdownFilter),\n 'status',\n filters.ChannelFilter,\n ('campaign', RelatedDropdownFilter),\n ('category', RelatedDropdownFilter),\n filters.ScheduledFilter,\n )\n search_fields = (\n 'name',\n 'id',\n 'campaign__name',\n 'targets__name',\n 'category__name',\n )\n autocomplete_fields = (\n 'campaign',\n 'category',\n )\n preserve_filters = True\n readonly_fields = (\n 'id',\n 'created',\n 'modified',\n 'uuid',\n 'creator',\n 'preview_url_light_theme',\n 'preview_url_dark_theme',\n\n )\n filter_horizontal = (\n 'targets',\n 'locales',\n )\n save_on_top = True\n save_as = True\n view_on_site = False\n actions = (\n actions.duplicate_snippets_action,\n 'make_published',\n )\n\n fieldsets = (\n ('ID', {\n 'fields': (\n 'id',\n 'name',\n 'status',\n 'creator',\n 'preview_url_light_theme',\n 'preview_url_dark_theme',\n )\n }),\n ('Content', {\n 'description': (\n '''\n <strong>Available deep links:</strong><br/>\n <ol>\n <li><code>special:accounts</code> to open Firefox Accounts</li>\n <li><code>special:appMenu</code> to open the hamburger menu</li>\n 
</ol><br/>\n <strong>Automatically add Snippet ID:</strong><br/>\n You can use <code>[[snippet_id]]</code> in any field and it\n will be automatically replaced by Snippet ID when served to users.\n <br/>\n Example: This is a <code>&lt;a href=&quot;https://example.com?utm_term=[[snippet_id]]&quot;&gt;link&lt;/a&gt;</code>\n <br/>\n ''' # noqa\n ),\n 'fields': ('template_chooser',),\n 'classes': ('template-fieldset',)\n }),\n ('Publishing Options', {\n 'fields': (\n 'campaign',\n 'category',\n 'targets',\n ('publish_start', 'publish_end'),\n 'locales',\n 'weight',)\n }),\n ('Other Info', {\n 'fields': ('uuid', ('created', 'modified'), 'for_qa'),\n 'classes': ('collapse',)\n }),\n )\n\n class Media:\n css = {\n 'all': (\n 'css/admin/ASRSnippetAdmin.css',\n 'css/admin/IDFieldHighlight.css',\n 'css/admin/InlineTemplates.css',\n )\n }\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n 'js/admin/clipboard.min.js',\n 'js/admin/copy_preview.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.asrsnippet')\n super().save_model(request, obj, form, change)\n\n def preview_url_light_theme(self, obj):\n text = f'''\n <span id=\"previewLinkUrlLight\">{obj.get_preview_url()}</span>\n <button id=\"copyPreviewLink\" class=\"btn\"\n data-clipboard-target=\"#previewLinkUrlLight\"\n originalText=\"Copy to Clipboard\" type=\"button\">\n Copy to Clipboard\n </button>\n '''\n return mark_safe(text)\n preview_url_light_theme.short_description = 'Light Themed Preview URL'\n\n def preview_url_dark_theme(self, obj):\n text = f'''\n <span id=\"previewLinkUrlDark\">{obj.get_preview_url(dark=True)}</span>\n <button id=\"copyPreviewLink\" class=\"btn\"\n data-clipboard-target=\"#previewLinkUrlDark\"\n originalText=\"Copy to Clipboard\" type=\"button\">\n Copy to Clipboard\n </button>\n '''\n return mark_safe(text)\n preview_url_dark_theme.short_description = 'Dark Themed Preview URL'\n\n def change_view(self, request, *args, **kwargs):\n if request.method == 'POST' and '_saveasnew' in request.POST:\n # Always saved cloned snippets as un-published and un-check ready for review.\n post_data = request.POST.copy()\n post_data['status'] = models.STATUS_CHOICES['Draft']\n request.POST = post_data\n return super().change_view(request, *args, **kwargs)\n\n def get_readonly_fields(self, request, obj):\n if not request.user.is_superuser:\n return self.readonly_fields + ('for_qa',)\n return self.readonly_fields\n\n def get_queryset(self, request):\n queryset = super().get_queryset(request)\n if request.user.is_superuser:\n return queryset\n return queryset.filter(for_qa=False)\n\n def get_form(self, request, obj=None, **kwargs):\n form = super().get_form(request, obj, **kwargs)\n form.current_user = request.user\n return form\n\n def make_published(self, request, queryset):\n clean_queryset = queryset.exclude(status=models.STATUS_CHOICES['Published'])\n no_snippets = clean_queryset.count()\n no_already_published_snippets = queryset.count() - no_snippets\n\n snippets = []\n with transaction.atomic():\n for snippet in clean_queryset:\n snippet.status = models.STATUS_CHOICES['Published']\n snippet.save()\n snippets.append(snippet)\n\n for snippet in snippets:\n slack.send_slack('asr_published', snippet)\n\n if no_already_published_snippets:\n messages.warning(\n request, f'Skipped {no_already_published_snippets} already published snippets.')\n messages.success(request, f'Published {no_snippets} snippets.')\n\n 
make_published.short_description = 'Publish selected snippets'\n\n # Only users with Publishing permissions on all channels are allowed to\n # mark snippets for publication in bulk.\n make_published.allowed_permissions = (\n 'global_publish',\n )\n\n def has_global_publish_permission(self, request):\n return request.user.has_perms([\n 'base.%s' % perm for perm in [\n 'publish_on_release',\n 'publish_on_beta',\n 'publish_on_aurora',\n 'publish_on_nightly',\n 'publish_on_esr',\n ]\n ])\n\n def locale_list(self, obj):\n num_locales = obj.locales.count()\n locales = obj.locales.all()[:3]\n active_locales = ', '.join([str(locale) for locale in locales])\n if num_locales > 3:\n active_locales += ' and {0} more.'.format(num_locales - 3)\n return active_locales\n\n\nclass CampaignAdmin(admin.ModelAdmin):\n readonly_fields = ('created', 'modified', 'creator',)\n prepopulated_fields = {'slug': ('name',)}\n\n fieldsets = (\n ('ID', {'fields': ('name', 'slug')}),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified')),\n }),\n )\n search_fields = (\n 'name',\n )\n\n class Media:\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.campaign')\n super().save_model(request, obj, form, change)\n\n\nclass CategoryAdmin(admin.ModelAdmin):\n readonly_fields = ('created', 'modified', 'creator',\n 'published_snippets_in_category', 'total_snippets_in_category')\n\n fieldsets = (\n ('ID', {\n 'fields': (\n 'name',\n 'description',\n 'published_snippets_in_category',\n 'total_snippets_in_category',\n )\n }),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified')),\n }),\n )\n search_fields = (\n 'name',\n 'description',\n )\n\n list_display = (\n 'name',\n 'published_snippets_in_category',\n 'total_snippets_in_category',\n )\n\n class Media:\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.category')\n super().save_model(request, obj, form, change)\n\n def published_snippets_in_category(self, obj):\n return obj.asrsnippets.filter(status=models.STATUS_CHOICES['Published']).count()\n\n def total_snippets_in_category(self, obj):\n return obj.asrsnippets.count()\n\n\nclass TargetAdmin(admin.ModelAdmin):\n form = forms.TargetAdminForm\n save_on_top = True\n readonly_fields = (\n 'created',\n 'modified',\n 'creator',\n 'jexl_expr',\n 'snippets',\n )\n filter_horizontal = (\n 'client_match_rules',\n )\n search_fields = (\n 'name',\n )\n list_display = (\n 'name',\n 'number_of_snippets',\n 'number_of_published_snippets',\n )\n fieldsets = (\n ('ID', {'fields': ('name',)}),\n ('Product channels', {\n 'description': 'What channels will this snippet be available in?',\n 'fields': (('on_release', 'on_beta', 'on_aurora', 'on_nightly', 'on_esr'),)\n }),\n ('Targeting', {\n 'fields': (\n 'filtr_is_default_browser',\n 'filtr_updates_enabled',\n 'filtr_updates_autodownload_enabled',\n 'filtr_profile_age_created',\n 'filtr_firefox_version',\n 'filtr_previous_session_end',\n 'filtr_country',\n 'filtr_is_developer',\n 'filtr_current_search_engine',\n 'filtr_browser_addon',\n 'filtr_total_bookmarks_count',\n\n )\n }),\n ('Accounts and Sync', {\n 'fields': (\n 'filtr_uses_firefox_sync',\n 'filtr_desktop_devices_count',\n 'filtr_mobile_devices_count',\n 'filtr_total_devices_count',\n ),\n 
}),\n ('Advanced Targeting', {\n 'fields': (\n 'client_match_rules',\n )\n }),\n ('Snippets', {\n 'fields': (\n 'snippets',\n )\n }),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified'), 'jexl_expr'),\n }),\n )\n\n class Media:\n css = {\n 'all': (\n 'css/admin/ListSnippets.css',\n )\n }\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.target')\n super().save_model(request, obj, form, change)\n\n def number_of_snippets(self, obj):\n return obj.asrsnippet_set.count()\n\n def number_of_published_snippets(self, obj):\n return obj.asrsnippet_set.filter(status=models.STATUS_CHOICES['Published']).count()\n\n def snippets(self, obj):\n \"\"\"Snippets using this Target.\"\"\"\n template = get_template('base/snippets_related_with_obj.jinja')\n return mark_safe(template.render({'snippets': obj.asrsnippet_set.all().order_by('id'),\n 'type': 'Target'}))\n", "path": "snippets/base/admin/adminmodels.py" } ]
[ { "content": "import re\n\nfrom django.contrib import admin, messages\nfrom django.db import transaction\nfrom django.db.models import TextField, Q\nfrom django.template.loader import get_template\nfrom django.utils.safestring import mark_safe\n\nfrom reversion.admin import VersionAdmin\nfrom django_ace import AceWidget\nfrom django_statsd.clients import statsd\nfrom jinja2.meta import find_undeclared_variables\nfrom django_admin_listfilter_dropdown.filters import (RelatedDropdownFilter,\n RelatedOnlyDropdownFilter)\n\nfrom snippets.base import forms, models, slack\nfrom snippets.base.admin import actions, filters\n\n\nMATCH_LOCALE_REGEX = re.compile(r'(\\w+(?:-\\w+)*)')\nRESERVED_VARIABLES = ('_', 'snippet_id')\n\n\nclass ClientMatchRuleAdmin(VersionAdmin, admin.ModelAdmin):\n list_display = ('description', 'is_exclusion', 'startpage_version', 'name',\n 'version', 'locale', 'appbuildid', 'build_target',\n 'channel', 'os_version', 'distribution',\n 'distribution_version', 'modified')\n list_filter = ('name', 'version', 'os_version', 'appbuildid',\n 'build_target', 'channel', 'distribution', 'locale')\n save_on_top = True\n search_fields = ('description',)\n\n class Media:\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n\nclass LogEntryAdmin(admin.ModelAdmin):\n list_display = ('user', 'content_type', 'object_id', 'object_repr', 'change_message')\n list_filter = ('user', 'content_type')\n\n\nclass SnippetTemplateVariableInline(admin.TabularInline):\n model = models.SnippetTemplateVariable\n formset = forms.SnippetTemplateVariableInlineFormset\n max_num = 0\n can_delete = False\n readonly_fields = ('name',)\n fields = ('name', 'type', 'order', 'description')\n\n\nclass SnippetTemplateAdmin(VersionAdmin, admin.ModelAdmin):\n save_on_top = True\n list_display = ('name', 'priority', 'hidden')\n list_filter = ('hidden', 'startpage')\n inlines = (SnippetTemplateVariableInline,)\n formfield_overrides = {\n TextField: {'widget': AceWidget(mode='html', theme='github',\n width='1200px', height='500px')},\n }\n\n def save_related(self, request, form, formsets, change):\n \"\"\"\n After saving the related objects, remove and add\n SnippetTemplateVariables depending on how the template code changed.\n \"\"\"\n super(SnippetTemplateAdmin, self).save_related(request, form, formsets,\n change)\n\n # Parse the template code and find any undefined variables.\n ast = models.JINJA_ENV.env.parse(form.instance.code)\n new_vars = find_undeclared_variables(ast)\n var_manager = form.instance.variable_set\n\n # Filter out reserved variable names.\n new_vars = [x for x in new_vars if x not in RESERVED_VARIABLES]\n\n # Delete variables not in the new set.\n var_manager.filter(~Q(name__in=new_vars)).delete()\n\n # Create variables that don't exist.\n for i, variable in enumerate(new_vars, start=1):\n obj, _ = models.SnippetTemplateVariable.objects.get_or_create(\n template=form.instance, name=variable)\n if obj.order == 0:\n obj.order = i * 10\n obj.save()\n\n\nclass AddonAdmin(admin.ModelAdmin):\n list_display = ('name', 'guid')\n\n class Media:\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n\nclass IconAdmin(admin.ModelAdmin):\n search_fields = [\n 'name',\n 'image',\n ]\n readonly_fields = [\n 'height',\n 'width',\n 'preview',\n 'creator',\n 'created',\n 'snippets',\n ]\n list_display_links = [\n 'id',\n 'name',\n ]\n list_display = [\n 'id',\n 'name',\n 'width',\n 'height',\n 'preview',\n ]\n\n class Media:\n css = {\n 'all': (\n 
'css/admin/ListSnippets.css',\n )\n }\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n super().save_model(request, obj, form, change)\n\n def preview(self, obj):\n template = get_template('base/preview_image.jinja')\n return mark_safe(template.render({'image': obj.image}))\n\n def snippets(self, obj):\n \"\"\"Snippets using this icon.\"\"\"\n template = get_template('base/snippets_related_with_obj.jinja')\n return mark_safe(template.render({'snippets': obj.snippets, 'type': 'Icon'}))\n\n\nclass SimpleTemplateInline(admin.StackedInline):\n model = models.SimpleTemplate\n form = forms.SimpleTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'simple_snippet',\n ]\n raw_id_fields = [\n 'section_title_icon',\n 'title_icon',\n 'icon',\n ]\n\n fieldsets = (\n ('Title', {\n 'fields': ('title_icon', 'title'),\n }),\n ('Section', {\n 'fields': ('section_title_icon', 'section_title_text', 'section_title_url',),\n }),\n ('Main', {\n 'fields': ('icon', 'text', 'button_label', 'button_color', 'button_url'),\n }),\n ('Extra', {\n 'fields': ('block_button_text', 'tall', 'do_not_autoblock'),\n })\n\n )\n\n\nclass FundraisingTemplateInline(admin.StackedInline):\n model = models.FundraisingTemplate\n form = forms.FundraisingTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'eoy_snippet',\n ]\n raw_id_fields = [\n 'title_icon',\n 'icon',\n ]\n\n fieldsets = (\n ('Title', {\n 'fields': (\n 'title_icon',\n 'title'\n ),\n }),\n ('Main', {\n 'fields': (\n 'icon',\n 'text',\n 'text_color',\n 'background_color',\n 'highlight_color',\n )\n }),\n ('Form Configuration', {\n 'fields': (\n 'donation_form_url',\n 'currency_code',\n 'locale',\n 'selected_button',\n 'button_label',\n 'button_color',\n 'button_background_color',\n 'monthly_checkbox_label_text',\n )\n }),\n ('Donation', {\n 'fields': (\n ('donation_amount_first', 'donation_amount_second',\n 'donation_amount_third', 'donation_amount_fourth',),\n )\n }),\n ('Extra', {\n 'fields': ('block_button_text', 'test', 'do_not_autoblock'),\n })\n\n )\n\n\nclass FxASignupTemplateInline(admin.StackedInline):\n model = models.FxASignupTemplate\n form = forms.FxASignupTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'fxa_signup_snippet',\n ]\n raw_id_fields = [\n 'scene1_title_icon',\n 'scene1_icon',\n ]\n\n fieldsets = (\n ('Scene 1 Title', {\n 'fields': (\n 'scene1_title_icon',\n 'scene1_title'\n ),\n }),\n ('Scene 1 Main', {\n 'fields': (\n 'scene1_icon',\n 'scene1_text',\n 'scene1_button_label',\n 'scene1_button_color',\n 'scene1_button_background_color',\n )\n }),\n ('Scene 2 Title', {\n 'fields': ('scene2_title',),\n }),\n ('Scene 2 Main', {\n 'fields': (\n 'scene2_text',\n 'scene2_button_label',\n 'scene2_email_placeholder_text',\n 'scene2_dismiss_button_text',\n )\n }),\n\n ('Extra', {\n 'fields': (\n 'utm_term',\n 'utm_campaign',\n 'block_button_text',\n 'do_not_autoblock'\n ),\n })\n )\n\n\nclass NewsletterTemplateInline(admin.StackedInline):\n model = models.NewsletterTemplate\n form = forms.NewsletterTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'newsletter_snippet',\n ]\n raw_id_fields = [\n 'scene1_title_icon',\n 'scene1_icon',\n ]\n\n fieldsets = (\n ('Scene 1 Title', {\n 'fields': (\n 'scene1_title_icon',\n 'scene1_title'\n ),\n }),\n ('Scene 1 Main', {\n 'fields': (\n 'scene1_icon',\n 'scene1_text',\n 'scene1_button_label',\n 
'scene1_button_color',\n 'scene1_button_background_color',\n )\n }),\n ('Scene 2 Title', {\n 'fields': ('scene2_title',),\n }),\n ('Scene 2 Main', {\n 'fields': (\n 'scene2_text',\n 'scene2_button_label',\n 'scene2_email_placeholder_text',\n 'scene2_privacy_html',\n 'scene2_newsletter',\n 'scene2_dismiss_button_text',\n 'locale',\n 'success_text',\n 'error_text',\n )\n }),\n\n ('Extra', {\n 'fields': (\n 'block_button_text',\n 'do_not_autoblock'\n ),\n })\n )\n\n\nclass SendToDeviceTemplateInline(admin.StackedInline):\n model = models.SendToDeviceTemplate\n form = forms.SendToDeviceTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'send_to_device_snippet',\n ]\n raw_id_fields = [\n 'scene1_title_icon',\n 'scene1_icon',\n 'scene2_icon',\n ]\n\n fieldsets = (\n ('Scene 1 Title', {\n 'fields': (\n 'scene1_title_icon',\n 'scene1_title'\n ),\n }),\n ('Scene 1 Main', {\n 'fields': (\n 'scene1_icon',\n 'scene1_text',\n 'scene1_button_label',\n 'scene1_button_color',\n 'scene1_button_background_color',\n )\n }),\n ('Scene 2 Title', {\n 'fields': ('scene2_title',),\n }),\n ('Scene 2 Main', {\n 'fields': (\n 'scene2_icon',\n 'scene2_text',\n\n 'scene2_button_label',\n 'scene2_input_placeholder',\n 'scene2_disclaimer_html',\n 'scene2_dismiss_button_text',\n\n 'locale',\n 'country',\n ('include_sms', 'message_id_sms',),\n 'message_id_email',\n 'success_title',\n 'success_text',\n 'error_text',\n )\n }),\n\n ('Extra', {\n 'fields': (\n 'block_button_text',\n 'do_not_autoblock'\n ),\n })\n )\n\n\nclass SimpleBelowSearchTemplateInline(admin.StackedInline):\n model = models.SimpleBelowSearchTemplate\n form = forms.SimpleBelowSearchTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'simple_below_search_snippet',\n ]\n raw_id_fields = [\n 'icon',\n ]\n\n fieldsets = (\n ('Main', {\n 'fields': ('icon', 'text'),\n }),\n ('Extra', {\n 'fields': ('block_button_text', 'do_not_autoblock'),\n })\n\n )\n\n\nclass ASRSnippetAdmin(admin.ModelAdmin):\n form = forms.ASRSnippetAdminForm\n inlines = [\n SimpleTemplateInline,\n FundraisingTemplateInline,\n FxASignupTemplateInline,\n NewsletterTemplateInline,\n SendToDeviceTemplateInline,\n SimpleBelowSearchTemplateInline,\n ]\n list_display_links = (\n 'id',\n 'name',\n )\n list_display = (\n 'id',\n 'name',\n 'status',\n 'locale_list',\n 'modified',\n )\n list_filter = (\n filters.ModifiedFilter,\n filters.TemplateFilter,\n ('locales', RelatedOnlyDropdownFilter),\n ('targets', RelatedOnlyDropdownFilter),\n 'status',\n filters.ChannelFilter,\n ('campaign', RelatedDropdownFilter),\n ('category', RelatedDropdownFilter),\n filters.ScheduledFilter,\n )\n search_fields = (\n 'name',\n 'id',\n 'campaign__name',\n 'targets__name',\n 'category__name',\n )\n autocomplete_fields = (\n 'campaign',\n 'category',\n )\n preserve_filters = True\n readonly_fields = (\n 'id',\n 'created',\n 'modified',\n 'uuid',\n 'creator',\n 'preview_url_light_theme',\n 'preview_url_dark_theme',\n\n )\n filter_horizontal = (\n 'targets',\n 'locales',\n )\n save_on_top = True\n save_as = True\n view_on_site = False\n actions = (\n actions.duplicate_snippets_action,\n 'make_published',\n )\n\n fieldsets = (\n ('ID', {\n 'fields': (\n 'id',\n 'name',\n 'status',\n 'creator',\n 'preview_url_light_theme',\n 'preview_url_dark_theme',\n )\n }),\n ('Content', {\n 'description': (\n '''\n <strong>Available deep links:</strong><br/>\n <ol>\n <li><code>special:accounts</code> to open Firefox Accounts</li>\n <li><code>special:appMenu</code> to open the hamburger menu</li>\n 
</ol><br/>\n <strong>Automatically add Snippet ID:</strong><br/>\n You can use <code>[[snippet_id]]</code> in any field and it\n will be automatically replaced by Snippet ID when served to users.\n <br/>\n Example: This is a <code>&lt;a href=&quot;https://example.com?utm_term=[[snippet_id]]&quot;&gt;link&lt;/a&gt;</code>\n <br/>\n ''' # noqa\n ),\n 'fields': ('template_chooser',),\n 'classes': ('template-fieldset',)\n }),\n ('Publishing Options', {\n 'fields': (\n 'campaign',\n 'category',\n 'targets',\n ('publish_start', 'publish_end'),\n 'locales',\n 'weight',)\n }),\n ('Other Info', {\n 'fields': ('uuid', ('created', 'modified'), 'for_qa'),\n 'classes': ('collapse',)\n }),\n )\n\n class Media:\n css = {\n 'all': (\n 'css/admin/ASRSnippetAdmin.css',\n 'css/admin/IDFieldHighlight.css',\n 'css/admin/InlineTemplates.css',\n )\n }\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n 'js/admin/clipboard.min.js',\n 'js/admin/copy_preview.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.asrsnippet')\n super().save_model(request, obj, form, change)\n\n def preview_url_light_theme(self, obj):\n text = f'''\n <span id=\"previewLinkUrlLight\">{obj.get_preview_url()}</span>\n <button id=\"copyPreviewLink\" class=\"btn\"\n data-clipboard-target=\"#previewLinkUrlLight\"\n originalText=\"Copy to Clipboard\" type=\"button\">\n Copy to Clipboard\n </button>\n '''\n return mark_safe(text)\n preview_url_light_theme.short_description = 'Light Themed Preview URL'\n\n def preview_url_dark_theme(self, obj):\n text = f'''\n <span id=\"previewLinkUrlDark\">{obj.get_preview_url(dark=True)}</span>\n <button id=\"copyPreviewLink\" class=\"btn\"\n data-clipboard-target=\"#previewLinkUrlDark\"\n originalText=\"Copy to Clipboard\" type=\"button\">\n Copy to Clipboard\n </button>\n '''\n return mark_safe(text)\n preview_url_dark_theme.short_description = 'Dark Themed Preview URL'\n\n def change_view(self, request, *args, **kwargs):\n if request.method == 'POST' and '_saveasnew' in request.POST:\n # Always saved cloned snippets as un-published and un-check ready for review.\n post_data = request.POST.copy()\n post_data['status'] = models.STATUS_CHOICES['Draft']\n request.POST = post_data\n return super().change_view(request, *args, **kwargs)\n\n def get_readonly_fields(self, request, obj):\n if not request.user.is_superuser:\n return self.readonly_fields + ('for_qa',)\n return self.readonly_fields\n\n def get_queryset(self, request):\n queryset = super().get_queryset(request)\n if request.user.is_superuser:\n return queryset\n return queryset.filter(for_qa=False)\n\n def get_form(self, request, obj=None, **kwargs):\n form = super().get_form(request, obj, **kwargs)\n form.current_user = request.user\n return form\n\n def make_published(self, request, queryset):\n clean_queryset = queryset.exclude(status=models.STATUS_CHOICES['Published'])\n no_snippets = clean_queryset.count()\n no_already_published_snippets = queryset.count() - no_snippets\n\n snippets = []\n with transaction.atomic():\n for snippet in clean_queryset:\n snippet.status = models.STATUS_CHOICES['Published']\n snippet.save()\n snippets.append(snippet)\n\n for snippet in snippets:\n slack.send_slack('asr_published', snippet)\n\n if no_already_published_snippets:\n messages.warning(\n request, f'Skipped {no_already_published_snippets} already published snippets.')\n messages.success(request, f'Published {no_snippets} snippets.')\n\n 
make_published.short_description = 'Publish selected snippets'\n\n # Only users with Publishing permissions on all channels are allowed to\n # mark snippets for publication in bulk.\n make_published.allowed_permissions = (\n 'global_publish',\n )\n\n def has_global_publish_permission(self, request):\n return request.user.has_perms([\n 'base.%s' % perm for perm in [\n 'publish_on_release',\n 'publish_on_beta',\n 'publish_on_aurora',\n 'publish_on_nightly',\n 'publish_on_esr',\n ]\n ])\n\n def locale_list(self, obj):\n num_locales = obj.locales.count()\n locales = obj.locales.all()[:3]\n active_locales = ', '.join([str(locale) for locale in locales])\n if num_locales > 3:\n active_locales += ' and {0} more.'.format(num_locales - 3)\n return active_locales\n\n\nclass CampaignAdmin(admin.ModelAdmin):\n readonly_fields = ('created', 'modified', 'creator',)\n prepopulated_fields = {'slug': ('name',)}\n\n fieldsets = (\n ('ID', {'fields': ('name', 'slug')}),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified')),\n }),\n )\n search_fields = (\n 'name',\n )\n\n class Media:\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.campaign')\n super().save_model(request, obj, form, change)\n\n\nclass CategoryAdmin(admin.ModelAdmin):\n readonly_fields = ('created', 'modified', 'creator',\n 'published_snippets_in_category', 'total_snippets_in_category')\n\n fieldsets = (\n ('ID', {\n 'fields': (\n 'name',\n 'description',\n 'published_snippets_in_category',\n 'total_snippets_in_category',\n )\n }),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified')),\n }),\n )\n search_fields = (\n 'name',\n 'description',\n )\n\n list_display = (\n 'name',\n 'published_snippets_in_category',\n 'total_snippets_in_category',\n )\n\n class Media:\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.category')\n super().save_model(request, obj, form, change)\n\n def published_snippets_in_category(self, obj):\n return obj.asrsnippets.filter(status=models.STATUS_CHOICES['Published']).count()\n\n def total_snippets_in_category(self, obj):\n return obj.asrsnippets.count()\n\n\nclass TargetAdmin(admin.ModelAdmin):\n form = forms.TargetAdminForm\n save_on_top = True\n readonly_fields = (\n 'created',\n 'modified',\n 'creator',\n 'jexl_expr',\n 'snippets',\n )\n filter_horizontal = (\n 'client_match_rules',\n )\n search_fields = (\n 'name',\n )\n list_display = (\n 'name',\n 'number_of_snippets',\n 'number_of_published_snippets',\n )\n fieldsets = (\n ('ID', {'fields': ('name',)}),\n ('Product channels', {\n 'description': 'What channels will this snippet be available in?',\n 'fields': (('on_release', 'on_beta', 'on_aurora', 'on_nightly', 'on_esr'),)\n }),\n ('Targeting', {\n 'fields': (\n 'filtr_is_default_browser',\n 'filtr_updates_enabled',\n 'filtr_updates_autodownload_enabled',\n 'filtr_profile_age_created',\n 'filtr_firefox_version',\n 'filtr_previous_session_end',\n 'filtr_country',\n 'filtr_is_developer',\n 'filtr_current_search_engine',\n 'filtr_browser_addon',\n 'filtr_total_bookmarks_count',\n\n )\n }),\n ('Accounts and Sync', {\n 'fields': (\n 'filtr_uses_firefox_sync',\n 'filtr_desktop_devices_count',\n 'filtr_mobile_devices_count',\n 'filtr_total_devices_count',\n ),\n 
}),\n ('Advanced Targeting', {\n 'fields': (\n 'client_match_rules',\n )\n }),\n ('Snippets', {\n 'fields': (\n 'snippets',\n )\n }),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified'), 'jexl_expr'),\n }),\n )\n\n class Media:\n css = {\n 'all': (\n 'css/admin/ListSnippets.css',\n )\n }\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.target')\n super().save_model(request, obj, form, change)\n\n def number_of_snippets(self, obj):\n return obj.asrsnippet_set.count()\n\n def number_of_published_snippets(self, obj):\n return obj.asrsnippet_set.filter(status=models.STATUS_CHOICES['Published']).count()\n\n def snippets(self, obj):\n \"\"\"Snippets using this Target.\"\"\"\n template = get_template('base/snippets_related_with_obj.jinja')\n return mark_safe(template.render({'snippets': obj.asrsnippet_set.all().order_by('id'),\n 'type': 'Target'}))\n", "path": "snippets/base/admin/adminmodels.py" } ]
diff --git a/snippets/base/admin/adminmodels.py b/snippets/base/admin/adminmodels.py index 69a041dbd..1bf6f83ed 100644 --- a/snippets/base/admin/adminmodels.py +++ b/snippets/base/admin/adminmodels.py @@ -142,8 +142,8 @@ def save_model(self, request, obj, form, change): super().save_model(request, obj, form, change) def preview(self, obj): - text = f'<img style="max-width:120px; max-height:120px;" src="{obj.image.url}"/>' - return mark_safe(text) + template = get_template('base/preview_image.jinja') + return mark_safe(template.render({'image': obj.image})) def snippets(self, obj): """Snippets using this icon.""" diff --git a/snippets/base/templates/base/preview_image.jinja b/snippets/base/templates/base/preview_image.jinja new file mode 100644 index 000000000..40858e9dc --- /dev/null +++ b/snippets/base/templates/base/preview_image.jinja @@ -0,0 +1,27 @@ +<style> + .container { + display: grid; + grid-template-columns: 200px 200px; + } + img { + padding: 20px; + max-width: 90px; + max-height: 90px; + } + .light { + background-color: #ffffff; + } + .dark { + background-color: #38383d; + } +</style> +<div class="container"> + <div> + Light Theme Background + <img class="light" src="{{ image.url }}"/> + </div> + <div> + Dark Theme Background + <img class="dark" src="{{ image.url }}"/> + </div> +</div>
Create a preview with background (light and dark) for Icons
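A short note on the row above: the pr_diff replaces an inline `<img>` f-string in the Icon admin with a rendered `base/preview_image.jinja` template that shows each icon against both a light and a dark background. The sketch below is only an illustration of that Django-admin pattern (a read-only column rendering a template via `get_template` and `mark_safe`); the model and template name are taken from the row, everything else is a simplification, not the project's full admin class.

```python
# Minimal sketch of the pattern used in the diff above: an admin read-only
# column that renders a small template instead of hand-building HTML.
# Assumes a Django project whose Icon model exposes an ImageField `image`;
# the template name comes from the row above, the rest is illustrative.
from django.contrib import admin
from django.template.loader import get_template
from django.utils.safestring import mark_safe


class IconAdmin(admin.ModelAdmin):
    readonly_fields = ['preview']

    def preview(self, obj):
        # preview_image.jinja renders obj.image on a light and a dark background,
        # so reviewers can check icon contrast for both themes.
        template = get_template('base/preview_image.jinja')
        return mark_safe(template.render({'image': obj.image}))
```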
awslabs__gluonts-1132
[ { "content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\nfrom typing import List\n\n# Third-party imports\nimport numpy as np\nimport pandas as pd\nfrom pandas.tseries import offsets\nfrom pandas.tseries.frequencies import to_offset\n\n# First-party imports\nfrom gluonts.core.component import validated\n\n\nclass TimeFeature:\n \"\"\"\n Base class for features that only depend on time.\n \"\"\"\n\n @validated()\n def __init__(self, normalized: bool = True):\n self.normalized = normalized\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n pass\n\n def __repr__(self):\n return self.__class__.__name__ + \"()\"\n\n\nclass MinuteOfHour(TimeFeature):\n \"\"\"\n Minute of hour encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.minute / 59.0 - 0.5\n else:\n return index.minute.map(float)\n\n\nclass HourOfDay(TimeFeature):\n \"\"\"\n Hour of day encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.hour / 23.0 - 0.5\n else:\n return index.hour.map(float)\n\n\nclass DayOfWeek(TimeFeature):\n \"\"\"\n Hour of day encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.dayofweek / 6.0 - 0.5\n else:\n return index.dayofweek.map(float)\n\n\nclass DayOfMonth(TimeFeature):\n \"\"\"\n Day of month encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.day / 30.0 - 0.5\n else:\n return index.day.map(float)\n\n\nclass DayOfYear(TimeFeature):\n \"\"\"\n Day of year encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.dayofyear / 364.0 - 0.5\n else:\n return index.dayofyear.map(float)\n\n\nclass MonthOfYear(TimeFeature):\n \"\"\"\n Month of year encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.month / 11.0 - 0.5\n else:\n return index.month.map(float)\n\n\nclass WeekOfYear(TimeFeature):\n \"\"\"\n Week of year encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.weekofyear / 51.0 - 0.5\n else:\n return index.weekofyear.map(float)\n\n\ndef time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:\n \"\"\"\n Returns a list of time features that will be appropriate for the given frequency string.\n\n Parameters\n ----------\n\n freq_str\n Frequency string of the form [multiple][granularity] such as \"12H\", \"5min\", \"1D\" etc.\n\n \"\"\"\n\n features_by_offsets = {\n offsets.YearOffset: [],\n offsets.MonthOffset: [MonthOfYear],\n offsets.Week: [DayOfMonth, WeekOfYear],\n offsets.Day: [DayOfWeek, DayOfMonth, 
DayOfYear],\n offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],\n offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],\n offsets.Minute: [\n MinuteOfHour,\n HourOfDay,\n DayOfWeek,\n DayOfMonth,\n DayOfYear,\n ],\n }\n\n offset = to_offset(freq_str)\n\n for offset_type, feature_classes in features_by_offsets.items():\n if isinstance(offset, offset_type):\n return [cls() for cls in feature_classes]\n\n supported_freq_msg = f\"\"\"\n Unsupported frequency {freq_str}\n\n The following frequencies are supported:\n\n Y - yearly\n alias: A\n M - monthly\n W - weekly\n D - daily\n B - business days\n H - hourly\n T - minutely\n alias: min\n \"\"\"\n raise RuntimeError(supported_freq_msg)\n", "path": "src/gluonts/time_feature/_base.py" } ]
[ { "content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\nfrom typing import List\n\n# Third-party imports\nimport numpy as np\nimport pandas as pd\nfrom pandas.tseries import offsets\nfrom pandas.tseries.frequencies import to_offset\n\n# First-party imports\nfrom gluonts.core.component import validated\n\n\nclass TimeFeature:\n \"\"\"\n Base class for features that only depend on time.\n \"\"\"\n\n @validated()\n def __init__(self, normalized: bool = True):\n self.normalized = normalized\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n pass\n\n def __repr__(self):\n return self.__class__.__name__ + \"()\"\n\n\nclass MinuteOfHour(TimeFeature):\n \"\"\"\n Minute of hour encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.minute / 59.0 - 0.5\n else:\n return index.minute.map(float)\n\n\nclass HourOfDay(TimeFeature):\n \"\"\"\n Hour of day encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.hour / 23.0 - 0.5\n else:\n return index.hour.map(float)\n\n\nclass DayOfWeek(TimeFeature):\n \"\"\"\n Hour of day encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.dayofweek / 6.0 - 0.5\n else:\n return index.dayofweek.map(float)\n\n\nclass DayOfMonth(TimeFeature):\n \"\"\"\n Day of month encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.day / 30.0 - 0.5\n else:\n return index.day.map(float)\n\n\nclass DayOfYear(TimeFeature):\n \"\"\"\n Day of year encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.dayofyear / 364.0 - 0.5\n else:\n return index.dayofyear.map(float)\n\n\nclass MonthOfYear(TimeFeature):\n \"\"\"\n Month of year encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.month / 11.0 - 0.5\n else:\n return index.month.map(float)\n\n\nclass WeekOfYear(TimeFeature):\n \"\"\"\n Week of year encoded as value between [-0.5, 0.5]\n \"\"\"\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n if self.normalized:\n return index.weekofyear / 51.0 - 0.5\n else:\n return index.weekofyear.map(float)\n\n\ndef time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:\n \"\"\"\n Returns a list of time features that will be appropriate for the given frequency string.\n\n Parameters\n ----------\n\n freq_str\n Frequency string of the form [multiple][granularity] such as \"12H\", \"5min\", \"1D\" etc.\n\n \"\"\"\n\n features_by_offsets = {\n offsets.YearEnd: [],\n offsets.MonthEnd: [MonthOfYear],\n offsets.Week: [DayOfMonth, WeekOfYear],\n offsets.Day: [DayOfWeek, DayOfMonth, 
DayOfYear],\n offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],\n offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],\n offsets.Minute: [\n MinuteOfHour,\n HourOfDay,\n DayOfWeek,\n DayOfMonth,\n DayOfYear,\n ],\n }\n\n offset = to_offset(freq_str)\n\n for offset_type, feature_classes in features_by_offsets.items():\n if isinstance(offset, offset_type):\n return [cls() for cls in feature_classes]\n\n supported_freq_msg = f\"\"\"\n Unsupported frequency {freq_str}\n\n The following frequencies are supported:\n\n Y - yearly\n alias: A\n M - monthly\n W - weekly\n D - daily\n B - business days\n H - hourly\n T - minutely\n alias: min\n \"\"\"\n raise RuntimeError(supported_freq_msg)\n", "path": "src/gluonts/time_feature/_base.py" } ]
diff --git a/requirements/requirements.txt b/requirements/requirements.txt index 895376dc03..e436802f33 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -1,7 +1,7 @@ holidays>=0.9 matplotlib~=3.0 numpy~=1.16 -pandas>=1.0,<1.1 +pandas>=1.0 pydantic~=1.1,<1.7 tqdm~=4.23 toolz~=0.10 diff --git a/src/gluonts/time_feature/_base.py b/src/gluonts/time_feature/_base.py index 25e6e27830..0f795ae416 100644 --- a/src/gluonts/time_feature/_base.py +++ b/src/gluonts/time_feature/_base.py @@ -136,8 +136,8 @@ def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]: """ features_by_offsets = { - offsets.YearOffset: [], - offsets.MonthOffset: [MonthOfYear], + offsets.YearEnd: [], + offsets.MonthEnd: [MonthOfYear], offsets.Week: [DayOfMonth, WeekOfYear], offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear], offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],
Update pandas dependency As documented in #967, pandas will be fixing the breaking change that led us to fix the dependency to `<1.1`, see pandas-dev/pandas#37267 Once that is resolved, we could remove the constraint. *Edit:* we should also make sure to find a solution to #965 first
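For context on this row's fix: pandas 1.1 removed the `offsets.YearOffset` / `offsets.MonthOffset` base classes, so the diff switches the mapping keys to `offsets.YearEnd` / `offsets.MonthEnd`, which is what `to_offset("Y")` and `to_offset("M")` actually return. Below is a hedged, self-contained sketch of the isinstance dispatch that `time_features_from_frequency_str` relies on; plain feature-name strings stand in for the real `TimeFeature` classes and the mapping is a trimmed subset of the one in the file.

```python
# Sketch of the offset-based dispatch, using the post-fix pandas classes.
# Assumes pandas >= 1.1; feature names stand in for the real TimeFeature classes.
from pandas.tseries import offsets
from pandas.tseries.frequencies import to_offset

FEATURES_BY_OFFSET = {
    offsets.YearEnd: [],                                    # "Y" / "A"
    offsets.MonthEnd: ["MonthOfYear"],                      # "M"
    offsets.Week: ["DayOfMonth", "WeekOfYear"],             # "W"
    offsets.Day: ["DayOfWeek", "DayOfMonth", "DayOfYear"],  # "D"
    offsets.Hour: ["HourOfDay", "DayOfWeek", "DayOfMonth", "DayOfYear"],
}

def feature_names(freq_str: str):
    offset = to_offset(freq_str)  # e.g. "12H" -> <12 * Hours>, "M" -> <MonthEnd>
    for offset_type, names in FEATURES_BY_OFFSET.items():
        if isinstance(offset, offset_type):
            return names
    raise RuntimeError(f"Unsupported frequency {freq_str}")

print(feature_names("M"))    # ['MonthOfYear']
print(feature_names("12H"))  # ['HourOfDay', 'DayOfWeek', 'DayOfMonth', 'DayOfYear']
```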
holoviz__panel-956
[ { "content": "\"\"\"\nThe input widgets generally allow entering arbitrary information into\na text field or similar.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport ast\n\nfrom base64 import b64decode\nfrom datetime import datetime\nfrom six import string_types\n\nimport param\n\nfrom bokeh.models.widgets import (\n CheckboxGroup as _BkCheckboxGroup, ColorPicker as _BkColorPicker,\n DatePicker as _BkDatePicker, Div as _BkDiv, TextInput as _BkTextInput,\n PasswordInput as _BkPasswordInput, Spinner as _BkSpinner, \n FileInput as _BkFileInput, TextAreaInput as _BkTextAreaInput)\n\nfrom ..util import as_unicode\nfrom .base import Widget\n\n\nclass TextInput(Widget):\n\n value = param.String(default='', allow_None=True)\n\n placeholder = param.String(default='')\n\n _widget_type = _BkTextInput\n\nclass PasswordInput(Widget):\n\n value = param.String(default='', allow_None=True)\n\n placeholder = param.String(default='')\n\n _widget_type = _BkPasswordInput\n\nclass TextAreaInput(Widget):\n\n value = param.String(default='', allow_None=True)\n\n placeholder = param.String(default='')\n\n max_length = param.Integer(default=5000)\n \n _widget_type = _BkTextAreaInput\n\nclass FileInput(Widget):\n\n accept = param.String(default=None)\n\n filename = param.String(default=None)\n\n mime_type = param.String(default=None)\n\n value = param.Parameter(default=None)\n\n _widget_type = _BkFileInput\n\n _rename = {'name': None, 'filename': None}\n\n def _process_param_change(self, msg):\n msg = super(FileInput, self)._process_param_change(msg)\n if 'value' in msg:\n msg.pop('value')\n if 'mime_type' in msg:\n msg.pop('mime_type')\n return msg\n\n def _filter_properties(self, properties):\n properties = super(FileInput, self)._filter_properties(properties)\n return properties + ['value', 'mime_type']\n\n def _process_property_change(self, msg):\n msg = super(FileInput, self)._process_property_change(msg)\n if 'value' in msg:\n msg['value'] = b64decode(msg['value'])\n return msg\n\n def save(self, filename):\n \"\"\"\n Saves the uploaded FileInput data to a file or BytesIO object.\n\n Arguments\n ---------\n filename (str): File path or file-like object\n \"\"\"\n if isinstance(filename, string_types):\n with open(filename, 'wb') as f:\n f.write(self.value)\n else:\n filename.write(self.value)\n\n\nclass StaticText(Widget):\n\n style = param.Dict(default=None, doc=\"\"\"\n Dictionary of CSS property:value pairs to apply to this Div.\"\"\")\n\n value = param.Parameter(default=None)\n\n _widget_type = _BkDiv\n\n _format = '<b>{title}</b>: {value}'\n\n _rename = {'name': 'title', 'value': 'text'}\n\n def _process_param_change(self, msg):\n msg = super(StaticText, self)._process_property_change(msg)\n msg.pop('title', None)\n if 'value' in msg:\n text = as_unicode(msg.pop('value'))\n if self.name:\n text = self._format.format(title=self.name, value=text)\n msg['text'] = text\n return msg\n\n\nclass DatePicker(Widget):\n\n value = param.Date(default=None)\n\n start = param.Date(default=None)\n\n end = param.Date(default=None)\n\n _widget_type = _BkDatePicker\n\n _rename = {'start': 'min_date', 'end': 'max_date', 'name': 'title'}\n\n def _process_property_change(self, msg):\n msg = super(DatePicker, self)._process_property_change(msg)\n if 'value' in msg:\n if isinstance(msg['value'], string_types):\n msg['value'] = datetime.strptime(msg['value'][4:], '%b %d %Y')\n return msg\n\n\nclass ColorPicker(Widget):\n\n value = param.Color(default=None, doc=\"\"\"\n The selected 
color\"\"\")\n\n _widget_type = _BkColorPicker\n\n _rename = {'value': 'color', 'name': 'title'}\n\n\nclass Spinner(Widget):\n\n start = param.Number(default=None, doc=\"\"\"\n Optional minimum allowable value\"\"\")\n\n end = param.Number(default=None, doc=\"\"\"\n Optional maximum allowable value\"\"\")\n\n value = param.Number(default=0, doc=\"\"\"\n The initial value of the spinner\"\"\")\n\n step = param.Number(default=1, doc=\"\"\"\n The step added or subtracted to the current value\"\"\")\n\n _widget_type = _BkSpinner\n\n _rename = {'name': 'title', 'start': 'low', 'end': 'high'}\n\n\nclass LiteralInput(Widget):\n \"\"\"\n LiteralInput allows declaring Python literals using a text\n input widget. Optionally a type may be declared.\n \"\"\"\n\n type = param.ClassSelector(default=None, class_=(type, tuple),\n is_instance=True)\n\n value = param.Parameter(default=None)\n\n _widget_type = _BkTextInput\n\n def __init__(self, **params):\n super(LiteralInput, self).__init__(**params)\n self._state = ''\n self._validate(None)\n self.param.watch(self._validate, 'value')\n\n def _validate(self, event):\n if self.type is None: return\n new = self.value\n if not isinstance(new, self.type):\n if event:\n self.value = event.old\n types = repr(self.type) if isinstance(self.type, tuple) else self.type.__name__\n raise ValueError('LiteralInput expected %s type but value %s '\n 'is of type %s.' %\n (types, new, type(new).__name__))\n\n def _process_property_change(self, msg):\n msg = super(LiteralInput, self)._process_property_change(msg)\n new_state = ''\n if 'value' in msg:\n value = msg.pop('value')\n try:\n value = ast.literal_eval(value)\n except:\n new_state = ' (invalid)'\n value = self.value\n else:\n if self.type and not isinstance(value, self.type):\n new_state = ' (wrong type)'\n value = self.value\n msg['value'] = value\n msg['name'] = msg.get('title', self.name).replace(self._state, '') + new_state\n self._state = new_state\n self.param.trigger('name')\n return msg\n\n def _process_param_change(self, msg):\n msg = super(LiteralInput, self)._process_param_change(msg)\n msg.pop('type', None)\n if 'value' in msg:\n msg['value'] = '' if msg['value'] is None else as_unicode(msg['value'])\n msg['title'] = self.name\n return msg\n\n\nclass DatetimeInput(LiteralInput):\n \"\"\"\n DatetimeInput allows declaring Python literals using a text\n input widget. 
Optionally a type may be declared.\n \"\"\"\n\n format = param.String(default='%Y-%m-%d %H:%M:%S', doc=\"\"\"\n Datetime format used for parsing and formatting the datetime.\"\"\")\n\n value = param.Date(default=None)\n\n start = param.Date(default=None)\n\n end = param.Date(default=None)\n\n type = datetime\n\n def __init__(self, **params):\n super(DatetimeInput, self).__init__(**params)\n self.param.watch(self._validate, 'value')\n self._validate(None)\n\n def _validate(self, event):\n new = self.value\n if new is not None and ((self.start is not None and self.start > new) or\n (self.end is not None and self.end < new)):\n value = datetime.strftime(new, self.format)\n start = datetime.strftime(self.start, self.format)\n end = datetime.strftime(self.end, self.format)\n if event:\n self.value = event.old\n raise ValueError('DatetimeInput value must be between {start} and {end}, '\n 'supplied value is {value}'.format(start=start, end=end,\n value=value))\n\n def _process_property_change(self, msg):\n msg = Widget._process_property_change(self, msg)\n new_state = ''\n if 'value' in msg:\n value = msg.pop('value')\n try:\n value = datetime.strptime(value, self.format)\n except:\n new_state = ' (invalid)'\n value = self.value\n else:\n if value is not None and ((self.start is not None and self.start > value) or\n (self.end is not None and self.end < value)):\n new_state = ' (out of bounds)'\n value = self.value\n msg['value'] = value\n msg['name'] = msg.get('title', self.name).replace(self._state, '') + new_state\n self._state = new_state\n return msg\n\n def _process_param_change(self, msg):\n msg = {k: v for k, v in msg.items() if k not in ('type', 'format', 'start', 'end')}\n if 'value' in msg:\n value = msg['value']\n if value is None:\n value = ''\n else:\n value = datetime.strftime(msg['value'], self.format)\n msg['value'] = value\n msg['title'] = self.name\n return msg\n\n\nclass Checkbox(Widget):\n\n value = param.Boolean(default=False)\n\n _supports_embed = True\n\n _widget_type = _BkCheckboxGroup\n\n def _process_property_change(self, msg):\n msg = super(Checkbox, self)._process_property_change(msg)\n if 'active' in msg:\n msg['value'] = 0 in msg.pop('active')\n return msg\n\n def _process_param_change(self, msg):\n msg = super(Checkbox, self)._process_param_change(msg)\n if 'value' in msg:\n msg['active'] = [0] if msg.pop('value', None) else []\n if 'title' in msg:\n msg['labels'] = [msg.pop('title')]\n return msg\n\n def _get_embed_state(self, root, max_opts=3):\n return (self, self._models[root.ref['id']][0], [False, True],\n lambda x: 0 in x.active, 'active', 'cb_obj.active.indexOf(0) >= 0')\n", "path": "panel/widgets/input.py" } ]
[ { "content": "\"\"\"\nThe input widgets generally allow entering arbitrary information into\na text field or similar.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport ast\n\nfrom base64 import b64decode\nfrom datetime import datetime\nfrom six import string_types\n\nimport param\n\nfrom bokeh.models.widgets import (\n CheckboxGroup as _BkCheckboxGroup, ColorPicker as _BkColorPicker,\n DatePicker as _BkDatePicker, Div as _BkDiv, TextInput as _BkTextInput,\n PasswordInput as _BkPasswordInput, Spinner as _BkSpinner, \n FileInput as _BkFileInput, TextAreaInput as _BkTextAreaInput)\n\nfrom ..util import as_unicode\nfrom .base import Widget\n\n\nclass TextInput(Widget):\n\n value = param.String(default='', allow_None=True)\n\n placeholder = param.String(default='')\n\n _widget_type = _BkTextInput\n\nclass PasswordInput(Widget):\n\n value = param.String(default='', allow_None=True)\n\n placeholder = param.String(default='')\n\n _widget_type = _BkPasswordInput\n\nclass TextAreaInput(Widget):\n\n value = param.String(default='', allow_None=True)\n\n placeholder = param.String(default='')\n\n max_length = param.Integer(default=5000)\n \n _widget_type = _BkTextAreaInput\n\nclass FileInput(Widget):\n\n accept = param.String(default=None)\n\n filename = param.String(default=None)\n\n mime_type = param.String(default=None)\n\n value = param.Parameter(default=None)\n\n _widget_type = _BkFileInput\n\n _rename = {'name': None, 'filename': None}\n\n def _process_param_change(self, msg):\n msg = super(FileInput, self)._process_param_change(msg)\n if 'value' in msg:\n msg.pop('value')\n if 'mime_type' in msg:\n msg.pop('mime_type')\n return msg\n\n def _filter_properties(self, properties):\n properties = super(FileInput, self)._filter_properties(properties)\n return properties + ['value', 'mime_type', 'filename']\n\n def _process_property_change(self, msg):\n msg = super(FileInput, self)._process_property_change(msg)\n if 'value' in msg:\n msg['value'] = b64decode(msg['value'])\n return msg\n\n def save(self, filename):\n \"\"\"\n Saves the uploaded FileInput data to a file or BytesIO object.\n\n Arguments\n ---------\n filename (str): File path or file-like object\n \"\"\"\n if isinstance(filename, string_types):\n with open(filename, 'wb') as f:\n f.write(self.value)\n else:\n filename.write(self.value)\n\n\nclass StaticText(Widget):\n\n style = param.Dict(default=None, doc=\"\"\"\n Dictionary of CSS property:value pairs to apply to this Div.\"\"\")\n\n value = param.Parameter(default=None)\n\n _widget_type = _BkDiv\n\n _format = '<b>{title}</b>: {value}'\n\n _rename = {'name': 'title', 'value': 'text'}\n\n def _process_param_change(self, msg):\n msg = super(StaticText, self)._process_property_change(msg)\n msg.pop('title', None)\n if 'value' in msg:\n text = as_unicode(msg.pop('value'))\n if self.name:\n text = self._format.format(title=self.name, value=text)\n msg['text'] = text\n return msg\n\n\nclass DatePicker(Widget):\n\n value = param.Date(default=None)\n\n start = param.Date(default=None)\n\n end = param.Date(default=None)\n\n _widget_type = _BkDatePicker\n\n _rename = {'start': 'min_date', 'end': 'max_date', 'name': 'title'}\n\n def _process_property_change(self, msg):\n msg = super(DatePicker, self)._process_property_change(msg)\n if 'value' in msg:\n if isinstance(msg['value'], string_types):\n msg['value'] = datetime.strptime(msg['value'][4:], '%b %d %Y')\n return msg\n\n\nclass ColorPicker(Widget):\n\n value = param.Color(default=None, doc=\"\"\"\n The 
selected color\"\"\")\n\n _widget_type = _BkColorPicker\n\n _rename = {'value': 'color', 'name': 'title'}\n\n\nclass Spinner(Widget):\n\n start = param.Number(default=None, doc=\"\"\"\n Optional minimum allowable value\"\"\")\n\n end = param.Number(default=None, doc=\"\"\"\n Optional maximum allowable value\"\"\")\n\n value = param.Number(default=0, doc=\"\"\"\n The initial value of the spinner\"\"\")\n\n step = param.Number(default=1, doc=\"\"\"\n The step added or subtracted to the current value\"\"\")\n\n _widget_type = _BkSpinner\n\n _rename = {'name': 'title', 'start': 'low', 'end': 'high'}\n\n\nclass LiteralInput(Widget):\n \"\"\"\n LiteralInput allows declaring Python literals using a text\n input widget. Optionally a type may be declared.\n \"\"\"\n\n type = param.ClassSelector(default=None, class_=(type, tuple),\n is_instance=True)\n\n value = param.Parameter(default=None)\n\n _widget_type = _BkTextInput\n\n def __init__(self, **params):\n super(LiteralInput, self).__init__(**params)\n self._state = ''\n self._validate(None)\n self.param.watch(self._validate, 'value')\n\n def _validate(self, event):\n if self.type is None: return\n new = self.value\n if not isinstance(new, self.type):\n if event:\n self.value = event.old\n types = repr(self.type) if isinstance(self.type, tuple) else self.type.__name__\n raise ValueError('LiteralInput expected %s type but value %s '\n 'is of type %s.' %\n (types, new, type(new).__name__))\n\n def _process_property_change(self, msg):\n msg = super(LiteralInput, self)._process_property_change(msg)\n new_state = ''\n if 'value' in msg:\n value = msg.pop('value')\n try:\n value = ast.literal_eval(value)\n except:\n new_state = ' (invalid)'\n value = self.value\n else:\n if self.type and not isinstance(value, self.type):\n new_state = ' (wrong type)'\n value = self.value\n msg['value'] = value\n msg['name'] = msg.get('title', self.name).replace(self._state, '') + new_state\n self._state = new_state\n self.param.trigger('name')\n return msg\n\n def _process_param_change(self, msg):\n msg = super(LiteralInput, self)._process_param_change(msg)\n msg.pop('type', None)\n if 'value' in msg:\n msg['value'] = '' if msg['value'] is None else as_unicode(msg['value'])\n msg['title'] = self.name\n return msg\n\n\nclass DatetimeInput(LiteralInput):\n \"\"\"\n DatetimeInput allows declaring Python literals using a text\n input widget. 
Optionally a type may be declared.\n \"\"\"\n\n format = param.String(default='%Y-%m-%d %H:%M:%S', doc=\"\"\"\n Datetime format used for parsing and formatting the datetime.\"\"\")\n\n value = param.Date(default=None)\n\n start = param.Date(default=None)\n\n end = param.Date(default=None)\n\n type = datetime\n\n def __init__(self, **params):\n super(DatetimeInput, self).__init__(**params)\n self.param.watch(self._validate, 'value')\n self._validate(None)\n\n def _validate(self, event):\n new = self.value\n if new is not None and ((self.start is not None and self.start > new) or\n (self.end is not None and self.end < new)):\n value = datetime.strftime(new, self.format)\n start = datetime.strftime(self.start, self.format)\n end = datetime.strftime(self.end, self.format)\n if event:\n self.value = event.old\n raise ValueError('DatetimeInput value must be between {start} and {end}, '\n 'supplied value is {value}'.format(start=start, end=end,\n value=value))\n\n def _process_property_change(self, msg):\n msg = Widget._process_property_change(self, msg)\n new_state = ''\n if 'value' in msg:\n value = msg.pop('value')\n try:\n value = datetime.strptime(value, self.format)\n except:\n new_state = ' (invalid)'\n value = self.value\n else:\n if value is not None and ((self.start is not None and self.start > value) or\n (self.end is not None and self.end < value)):\n new_state = ' (out of bounds)'\n value = self.value\n msg['value'] = value\n msg['name'] = msg.get('title', self.name).replace(self._state, '') + new_state\n self._state = new_state\n return msg\n\n def _process_param_change(self, msg):\n msg = {k: v for k, v in msg.items() if k not in ('type', 'format', 'start', 'end')}\n if 'value' in msg:\n value = msg['value']\n if value is None:\n value = ''\n else:\n value = datetime.strftime(msg['value'], self.format)\n msg['value'] = value\n msg['title'] = self.name\n return msg\n\n\nclass Checkbox(Widget):\n\n value = param.Boolean(default=False)\n\n _supports_embed = True\n\n _widget_type = _BkCheckboxGroup\n\n def _process_property_change(self, msg):\n msg = super(Checkbox, self)._process_property_change(msg)\n if 'active' in msg:\n msg['value'] = 0 in msg.pop('active')\n return msg\n\n def _process_param_change(self, msg):\n msg = super(Checkbox, self)._process_param_change(msg)\n if 'value' in msg:\n msg['active'] = [0] if msg.pop('value', None) else []\n if 'title' in msg:\n msg['labels'] = [msg.pop('title')]\n return msg\n\n def _get_embed_state(self, root, max_opts=3):\n return (self, self._models[root.ref['id']][0], [False, True],\n lambda x: 0 in x.active, 'active', 'cb_obj.active.indexOf(0) >= 0')\n", "path": "panel/widgets/input.py" } ]
diff --git a/panel/tests/widgets/test_input.py b/panel/tests/widgets/test_input.py index 9423b971ad..0f034e2770 100644 --- a/panel/tests/widgets/test_input.py +++ b/panel/tests/widgets/test_input.py @@ -59,10 +59,11 @@ def test_file_input(document, comm): assert isinstance(widget, BkFileInput) - file_input._comm_change({'mime_type': 'text/plain', 'value': 'U29tZSB0ZXh0Cg=='}) + file_input._comm_change({'mime_type': 'text/plain', 'value': 'U29tZSB0ZXh0Cg==', 'filename': 'testfile'}) assert file_input.value == b'Some text\n' assert file_input.mime_type == 'text/plain' assert file_input.accept == '.txt' + assert file_input.filename == 'testfile' def test_literal_input(document, comm): diff --git a/panel/widgets/input.py b/panel/widgets/input.py index f63c471c44..46ea437498 100644 --- a/panel/widgets/input.py +++ b/panel/widgets/input.py @@ -72,7 +72,7 @@ def _process_param_change(self, msg): def _filter_properties(self, properties): properties = super(FileInput, self)._filter_properties(properties) - return properties + ['value', 'mime_type'] + return properties + ['value', 'mime_type', 'filename'] def _process_property_change(self, msg): msg = super(FileInput, self)._process_property_change(msg)
Fileinput widget always returns `None` for name of uploaded file As of panel 0.7.0 (with bokeh 1.4.0), the Fileinput widget does not set the name of the uploaded file. E.g. in the following code ```python import panel as pn inp = pn.widgets.FileInput(name='Upload') btn = pn.widgets.Button(name='Upload', button_type='primary') r= pn.Column(pn.Row(inp, btn),) def on_click_parse(event): #print(inp.get_param_values()) print(inp.filename) btn.on_click(on_click_parse) r.servable() ``` it always prints `None` to the terminal. I can already see where the issue is and will open a PR.
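The one-line fix in this row's diff adds `'filename'` to the properties `FileInput` syncs back from the browser, so `inp.filename` is populated after an upload instead of staying `None`. The sketch below is a hedged usage example assuming a panel version that includes the fix; watching `value` instead of wiring a button click is my simplification, not the reporter's exact setup.

```python
# Usage sketch (assumes a panel release containing the fix above): once the
# 'filename' property is synced from the browser, it can be read next to 'value'.
import panel as pn

file_input = pn.widgets.FileInput(accept='.csv')

def handle_upload(event):
    # After an upload, value holds the decoded bytes and filename the original name.
    print(file_input.filename, len(file_input.value or b''), 'bytes')

# Watching 'value' (rather than a separate button) is an illustrative simplification.
file_input.param.watch(handle_upload, 'value')

pn.Column(file_input).servable()
```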
electricitymaps__electricitymaps-contrib-3796
[ { "content": "#!/usr/bin/env python3\n\nimport json\n\n# The arrow library is used to handle datetimes consistently with other parsers\nimport arrow\n\n# The request library is used to fetch content through HTTP\nimport requests\n\n\ntimezone = 'Canada/Atlantic'\n\n\ndef _find_pei_key(pei_list, sought_key):\n matching_item = [item for item in pei_list\n if 'header' in item['data']\n and item['data']['header'].startswith(sought_key)]\n\n if not matching_item:\n return None\n\n return matching_item[0]['data']['actualValue']\n\n\ndef _get_pei_info(requests_obj):\n url = 'https://wdf.princeedwardisland.ca/workflow'\n request = {'featureName': 'WindEnergy', 'queryName': 'WindEnergy'}\n headers = {'Content-Type': 'application/json'}\n response = requests_obj.post(url, data=json.dumps(request), headers=headers)\n\n raw_data = response.json().get('data', [])\n\n datetime_item = [item['data']['text'] for item in raw_data\n if 'text' in item['data']]\n if not datetime_item:\n # unable to get a timestamp, return empty\n return None\n datetime_text = datetime_item[0][len('Last updated '):]\n data_timestamp = arrow.get(datetime_text, 'MMMM D, YYYY HH:mm A').replace(tzinfo='Canada/Atlantic')\n\n # see https://ruk.ca/content/new-api-endpoint-pei-wind for more info\n data = {\n 'pei_load': _find_pei_key(raw_data, 'Total On-Island Load'),\n 'pei_wind_gen': _find_pei_key(raw_data, 'Total On-Island Wind Generation'),\n 'pei_fossil_gen': _find_pei_key(raw_data, 'Total On-Island Fossil Fuel Generation'),\n 'pei_wind_used': _find_pei_key(raw_data, 'Wind Power Used On Island'),\n 'pei_wind_exported': _find_pei_key(raw_data, 'Wind Power Exported Off Island'),\n 'datetime': data_timestamp.datetime\n }\n\n # the following keys are always required downstream, if we don't have them, no sense returning\n if data['pei_wind_gen'] is None or data['pei_fossil_gen'] is None:\n return None\n\n return data\n\n\ndef fetch_production(zone_key='CA-PE', session=None, target_datetime=None, logger=None) -> dict:\n \"\"\"Requests the last known production mix (in MW) of a given country.\"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n requests_obj = session or requests.session()\n pei_info = _get_pei_info(requests_obj)\n\n if pei_info is None:\n return None\n\n data = {\n 'datetime': pei_info['datetime'],\n 'zoneKey': zone_key,\n 'production': {\n 'wind': pei_info['pei_wind_gen'],\n\n # These are oil-fueled (\"heavy fuel oil\" and \"diesel\") generators\n # used as peakers and back-up\n 'oil': pei_info['pei_fossil_gen'],\n\n # specify some sources that definitely aren't present on PEI as zero,\n # this allows the analyzer to better estimate CO2eq\n 'coal': 0,\n 'hydro': 0,\n 'nuclear': 0,\n 'geothermal': 0\n },\n 'storage': {},\n 'source': 'princeedwardisland.ca'\n }\n\n return data\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None) -> dict:\n \"\"\"Requests the last known power exchange (in MW) between two regions.\"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))\n\n if sorted_zone_keys != 'CA-NB->CA-PE':\n raise NotImplementedError('This exchange pair is not implemented')\n\n requests_obj = session or requests.session()\n pei_info = _get_pei_info(requests_obj)\n\n if pei_info is None or pei_info['pei_load'] is None:\n return None\n\n # PEI imports most of its electricity. 
Everything not generated on island\n # is imported from New Brunswick.\n # In case of wind, some is paper-\"exported\" even if there is a net import,\n # and 'pei_wind_used'/'data5' indicates their accounting of part of the load\n # served by non-exported wind.\n # https://www.princeedwardisland.ca/en/feature/pei-wind-energy says:\n # \"Wind Power Exported Off-Island is that portion of wind generation that is supplying\n # contracts elsewhere. The actual electricity from this portion of wind generation\n # may stay within PEI but is satisfying a contractual arrangement in another jurisdiction.\"\n # We are ignoring these paper exports, as they are an accounting/legal detail\n # that doesn't actually reflect what happens on the wires.\n # (New Brunswick being the only interconnection with PEI, \"exporting\" wind power to NB\n # then \"importing\" a balance of NB electricity likely doesn't actually happen.)\n imported_from_nb = (pei_info['pei_load'] - pei_info['pei_fossil_gen'] - pei_info['pei_wind_gen'])\n\n # In expected result, \"net\" represents an export.\n # We have sorted_zone_keys 'CA-NB->CA-PE', so it's export *from* NB,\n # and import *to* PEI.\n data = {\n 'datetime': pei_info['datetime'],\n 'sortedZoneKeys': sorted_zone_keys,\n 'netFlow': imported_from_nb,\n 'source': 'princeedwardisland.ca'\n }\n\n return data\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n\n print('fetch_exchange(\"CA-PE\", \"CA-NB\") ->')\n print(fetch_exchange(\"CA-PE\", \"CA-NB\"))\n", "path": "parsers/CA_PE.py" } ]
[ { "content": "#!/usr/bin/env python3\n\nimport json\n\n# The arrow library is used to handle datetimes consistently with other parsers\nimport arrow\n\n# The request library is used to fetch content through HTTP\nimport requests\n\n\ntimezone = 'Canada/Atlantic'\n\n\ndef _find_pei_key(pei_list, sought_key):\n matching_item = [item for item in pei_list\n if 'header' in item['data']\n and item['data']['header'].startswith(sought_key)]\n\n if not matching_item:\n return None\n\n return matching_item[0]['data']['actualValue']\n\n\ndef _get_pei_info(requests_obj):\n url = 'https://wdf.princeedwardisland.ca/api/workflow'\n request = {'featureName': 'WindEnergy', 'queryName': 'WindEnergy'}\n headers = {'Content-Type': 'application/json'}\n response = requests_obj.post(url, data=json.dumps(request), headers=headers)\n\n raw_data = response.json().get('data', [])\n\n datetime_item = [item['data']['text'] for item in raw_data\n if 'text' in item['data']]\n if not datetime_item:\n # unable to get a timestamp, return empty\n return None\n datetime_text = datetime_item[0][len('Last updated '):]\n data_timestamp = arrow.get(datetime_text, 'MMMM D, YYYY HH:mm A').replace(tzinfo='Canada/Atlantic')\n\n # see https://ruk.ca/content/new-api-endpoint-pei-wind for more info\n data = {\n 'pei_load': _find_pei_key(raw_data, 'Total On-Island Load'),\n 'pei_wind_gen': _find_pei_key(raw_data, 'Total On-Island Wind Generation'),\n 'pei_fossil_gen': _find_pei_key(raw_data, 'Total On-Island Fossil Fuel Generation'),\n 'pei_wind_used': _find_pei_key(raw_data, 'Wind Power Used On Island'),\n 'pei_wind_exported': _find_pei_key(raw_data, 'Wind Power Exported Off Island'),\n 'datetime': data_timestamp.datetime\n }\n\n # the following keys are always required downstream, if we don't have them, no sense returning\n if data['pei_wind_gen'] is None or data['pei_fossil_gen'] is None:\n return None\n\n return data\n\n\ndef fetch_production(zone_key='CA-PE', session=None, target_datetime=None, logger=None) -> dict:\n \"\"\"Requests the last known production mix (in MW) of a given country.\"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n requests_obj = session or requests.session()\n pei_info = _get_pei_info(requests_obj)\n\n if pei_info is None:\n return None\n\n data = {\n 'datetime': pei_info['datetime'],\n 'zoneKey': zone_key,\n 'production': {\n 'wind': pei_info['pei_wind_gen'],\n\n # These are oil-fueled (\"heavy fuel oil\" and \"diesel\") generators\n # used as peakers and back-up\n 'oil': pei_info['pei_fossil_gen'],\n\n # specify some sources that definitely aren't present on PEI as zero,\n # this allows the analyzer to better estimate CO2eq\n 'coal': 0,\n 'hydro': 0,\n 'nuclear': 0,\n 'geothermal': 0\n },\n 'storage': {},\n 'source': 'princeedwardisland.ca'\n }\n\n return data\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None) -> dict:\n \"\"\"Requests the last known power exchange (in MW) between two regions.\"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))\n\n if sorted_zone_keys != 'CA-NB->CA-PE':\n raise NotImplementedError('This exchange pair is not implemented')\n\n requests_obj = session or requests.session()\n pei_info = _get_pei_info(requests_obj)\n\n if pei_info is None or pei_info['pei_load'] is None:\n return None\n\n # PEI imports most of its electricity. 
Everything not generated on island\n # is imported from New Brunswick.\n # In case of wind, some is paper-\"exported\" even if there is a net import,\n # and 'pei_wind_used'/'data5' indicates their accounting of part of the load\n # served by non-exported wind.\n # https://www.princeedwardisland.ca/en/feature/pei-wind-energy says:\n # \"Wind Power Exported Off-Island is that portion of wind generation that is supplying\n # contracts elsewhere. The actual electricity from this portion of wind generation\n # may stay within PEI but is satisfying a contractual arrangement in another jurisdiction.\"\n # We are ignoring these paper exports, as they are an accounting/legal detail\n # that doesn't actually reflect what happens on the wires.\n # (New Brunswick being the only interconnection with PEI, \"exporting\" wind power to NB\n # then \"importing\" a balance of NB electricity likely doesn't actually happen.)\n imported_from_nb = (pei_info['pei_load'] - pei_info['pei_fossil_gen'] - pei_info['pei_wind_gen'])\n\n # In expected result, \"net\" represents an export.\n # We have sorted_zone_keys 'CA-NB->CA-PE', so it's export *from* NB,\n # and import *to* PEI.\n data = {\n 'datetime': pei_info['datetime'],\n 'sortedZoneKeys': sorted_zone_keys,\n 'netFlow': imported_from_nb,\n 'source': 'princeedwardisland.ca'\n }\n\n return data\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n\n print('fetch_exchange(\"CA-PE\", \"CA-NB\") ->')\n print(fetch_exchange(\"CA-PE\", \"CA-NB\"))\n", "path": "parsers/CA_PE.py" } ]
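To make the exchange arithmetic in `fetch_exchange` above concrete, here is a small worked example. The numbers are invented for illustration; only the formula itself comes from the parser:

```python
# Illustrative values only; real figures come from the princeedwardisland.ca API.
pei_load = 230.0        # 'Total On-Island Load' (MW)
pei_wind_gen = 120.0    # 'Total On-Island Wind Generation' (MW)
pei_fossil_gen = 5.0    # 'Total On-Island Fossil Fuel Generation' (MW)

# Everything not generated on the island arrives over the single CA-NB
# interconnection, regardless of any paper wind "exports".
imported_from_nb = pei_load - pei_fossil_gen - pei_wind_gen
print(imported_from_nb)  # 105.0 MW flowing CA-NB -> CA-PE (a positive netFlow)
```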
diff --git a/parsers/CA_PE.py b/parsers/CA_PE.py
index efc0a1c9e3..6a66e5f401 100644
--- a/parsers/CA_PE.py
+++ b/parsers/CA_PE.py
@@ -24,7 +24,7 @@ def _find_pei_key(pei_list, sought_key):
 
 
 def _get_pei_info(requests_obj):
-    url = 'https://wdf.princeedwardisland.ca/workflow'
+    url = 'https://wdf.princeedwardisland.ca/api/workflow'
     request = {'featureName': 'WindEnergy', 'queryName': 'WindEnergy'}
     headers = {'Content-Type': 'application/json'}
     response = requests_obj.post(url, data=json.dumps(request), headers=headers)
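The fix above only changes the endpoint path. As a rough local sanity check of the corrected URL, assuming it still accepts the same `WindEnergy` payload the parser already sends, one could run something like:

```python
import json

import requests

# Endpoint, payload and headers are copied from parsers/CA_PE.py after the fix.
url = 'https://wdf.princeedwardisland.ca/api/workflow'
payload = {'featureName': 'WindEnergy', 'queryName': 'WindEnergy'}
headers = {'Content-Type': 'application/json'}

response = requests.post(url, data=json.dumps(payload), headers=headers)
response.raise_for_status()

# Each returned item nests its values under 'data'; headers such as
# 'Total On-Island Wind Generation' carry the MW figure in 'actualValue'.
for item in response.json().get('data', []):
    data = item.get('data', {})
    if 'header' in data:
        print(data['header'], data.get('actualValue'))
```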
CA-PE production parser down

## Description

This is an automatic error report generated for Canada Prince Edward Island (CA-PE).

Issues:
- No recent data found for `production` parser

## Suggestions
- Try running the parser locally using the command `poetry run test_parser CA-PE production`
- <a href="https://storage.googleapis.com/electricitymap-parser-logs/CA-PE.html">Explore the runtime logs</a>

You can see an overview of all parser issues [here](https://github.com/tmrowco/electricitymap-contrib/wiki/Parser-issues).
openshift__openshift-ansible-8921
[ { "content": "'''\n---\nmodule: openshift_logging_facts\nversion_added: \"\"\nshort_description: Gather facts about the OpenShift logging stack\ndescription:\n - Determine the current facts about the OpenShift logging stack (e.g. cluster size)\noptions:\nauthor: Red Hat, Inc\n'''\n\nimport copy\nimport json\n\n# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import\nfrom subprocess import * # noqa: F402,F403\n\n# ignore pylint errors related to the module_utils import\n# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import\nfrom ansible.module_utils.basic import * # noqa: F402,F403\n\nimport yaml\n\nEXAMPLES = \"\"\"\n- action: opneshift_logging_facts\n\"\"\"\n\nRETURN = \"\"\"\n\"\"\"\n\nDEFAULT_OC_OPTIONS = [\"-o\", \"json\"]\n\n# constants used for various labels and selectors\nCOMPONENT_KEY = \"component\"\nLOGGING_INFRA_KEY = \"logging-infra\"\n\n# selectors for filtering resources\nDS_FLUENTD_SELECTOR = LOGGING_INFRA_KEY + \"=\" + \"fluentd\"\nLOGGING_SELECTOR = LOGGING_INFRA_KEY + \"=\" + \"support\"\nROUTE_SELECTOR = \"component=support,logging-infra=support,provider=openshift\"\n# pylint: disable=line-too-long\nCOMPONENTS = [\"kibana\", \"curator\", \"elasticsearch\", \"fluentd\", \"kibana_ops\", \"curator_ops\", \"elasticsearch_ops\", \"mux\", \"eventrouter\"]\n\n\nclass OCBaseCommand(object):\n ''' The base class used to query openshift '''\n\n def __init__(self, binary, kubeconfig, namespace):\n ''' the init method of OCBaseCommand class '''\n self.binary = binary\n self.kubeconfig = kubeconfig\n self.user = self.get_system_admin(self.kubeconfig)\n self.namespace = namespace\n\n # pylint: disable=no-self-use\n def get_system_admin(self, kubeconfig):\n ''' Retrieves the system admin '''\n with open(kubeconfig, 'r') as kubeconfig_file:\n config = yaml.load(kubeconfig_file)\n for user in config[\"users\"]:\n if user[\"name\"].startswith(\"system:admin\"):\n return user[\"name\"]\n raise Exception(\"Unable to find system:admin in: \" + kubeconfig)\n\n # pylint: disable=too-many-arguments, dangerous-default-value\n def oc_command(self, sub, kind, namespace=None, name=None, add_options=None):\n ''' Wrapper method for the \"oc\" command '''\n cmd = [self.binary, sub, kind]\n if name is not None:\n cmd = cmd + [name]\n if namespace is not None:\n cmd = cmd + [\"-n\", namespace]\n if add_options is None:\n add_options = []\n cmd = cmd + [\"--user=\" + self.user, \"--config=\" + self.kubeconfig] + DEFAULT_OC_OPTIONS + add_options\n try:\n process = Popen(cmd, stdout=PIPE, stderr=PIPE) # noqa: F405\n out, err = process.communicate(cmd)\n if len(err) > 0:\n if 'not found' in err:\n return {'items': []}\n if 'No resources found' in err:\n return {'items': []}\n raise Exception(err)\n except Exception as excp:\n err = \"There was an exception trying to run the command '\" + \" \".join(cmd) + \"' \" + str(excp)\n raise Exception(err)\n\n return json.loads(out)\n\n\nclass OpenshiftLoggingFacts(OCBaseCommand):\n ''' The class structure for holding the OpenshiftLogging Facts'''\n name = \"facts\"\n\n def __init__(self, logger, binary, kubeconfig, namespace):\n ''' The init method for OpenshiftLoggingFacts '''\n super(OpenshiftLoggingFacts, self).__init__(binary, kubeconfig, namespace)\n self.logger = logger\n self.facts = dict()\n\n def default_keys_for(self, kind):\n ''' Sets the default key values for kind '''\n for comp in COMPONENTS:\n self.add_facts_for(comp, kind)\n\n def add_facts_for(self, comp, kind, name=None, facts=None):\n ''' Add facts 
for the provided kind '''\n if comp not in self.facts:\n self.facts[comp] = dict()\n if kind not in self.facts[comp]:\n self.facts[comp][kind] = dict()\n if name:\n self.facts[comp][kind][name] = facts\n\n def facts_for_routes(self, namespace):\n ''' Gathers facts for Routes in logging namespace '''\n self.default_keys_for(\"routes\")\n route_list = self.oc_command(\"get\", \"routes\", namespace=namespace, add_options=[\"-l\", ROUTE_SELECTOR])\n if len(route_list[\"items\"]) == 0:\n return None\n for route in route_list[\"items\"]:\n name = route[\"metadata\"][\"name\"]\n comp = self.comp(name)\n if comp is not None:\n self.add_facts_for(comp, \"routes\", name, dict(host=route[\"spec\"][\"host\"]))\n self.facts[\"agl_namespace\"] = namespace\n\n def facts_for_daemonsets(self, namespace):\n ''' Gathers facts for Daemonsets in logging namespace '''\n self.default_keys_for(\"daemonsets\")\n ds_list = self.oc_command(\"get\", \"daemonsets\", namespace=namespace,\n add_options=[\"-l\", LOGGING_INFRA_KEY + \"=fluentd\"])\n if len(ds_list[\"items\"]) == 0:\n return\n for ds_item in ds_list[\"items\"]:\n name = ds_item[\"metadata\"][\"name\"]\n comp = self.comp(name)\n spec = ds_item[\"spec\"][\"template\"][\"spec\"]\n result = dict(\n selector=ds_item[\"spec\"][\"selector\"],\n containers=dict(),\n nodeSelector=spec[\"nodeSelector\"],\n serviceAccount=spec[\"serviceAccount\"],\n terminationGracePeriodSeconds=spec[\"terminationGracePeriodSeconds\"]\n )\n for container in spec[\"containers\"]:\n result[\"containers\"][container[\"name\"]] = container\n self.add_facts_for(comp, \"daemonsets\", name, result)\n\n def facts_for_pvcs(self, namespace):\n ''' Gathers facts for PVCS in logging namespace'''\n self.default_keys_for(\"pvcs\")\n pvclist = self.oc_command(\"get\", \"pvc\", namespace=namespace, add_options=[\"-l\", LOGGING_INFRA_KEY])\n if len(pvclist[\"items\"]) == 0:\n return\n for pvc in pvclist[\"items\"]:\n name = pvc[\"metadata\"][\"name\"]\n comp = self.comp(name)\n self.add_facts_for(comp, \"pvcs\", name, dict())\n\n def facts_for_deploymentconfigs(self, namespace):\n ''' Gathers facts for DeploymentConfigs in logging namespace '''\n self.default_keys_for(\"deploymentconfigs\")\n dclist = self.oc_command(\"get\", \"deploymentconfigs\", namespace=namespace, add_options=[\"-l\", LOGGING_INFRA_KEY])\n if len(dclist[\"items\"]) == 0:\n return\n dcs = dclist[\"items\"]\n for dc_item in dcs:\n name = dc_item[\"metadata\"][\"name\"]\n comp = self.comp(name)\n if comp is not None:\n spec = dc_item[\"spec\"][\"template\"][\"spec\"]\n facts = dict(\n name=name,\n selector=dc_item[\"spec\"][\"selector\"],\n replicas=dc_item[\"spec\"][\"replicas\"],\n serviceAccount=spec[\"serviceAccount\"],\n containers=dict(),\n volumes=dict()\n )\n if \"nodeSelector\" in spec:\n facts[\"nodeSelector\"] = spec[\"nodeSelector\"]\n if \"supplementalGroups\" in spec[\"securityContext\"]:\n facts[\"storageGroups\"] = spec[\"securityContext\"][\"supplementalGroups\"]\n facts[\"spec\"] = spec\n if \"volumes\" in spec:\n for vol in spec[\"volumes\"]:\n clone = copy.deepcopy(vol)\n clone.pop(\"name\", None)\n facts[\"volumes\"][vol[\"name\"]] = clone\n for container in spec[\"containers\"]:\n facts[\"containers\"][container[\"name\"]] = container\n self.add_facts_for(comp, \"deploymentconfigs\", name, facts)\n\n def facts_for_services(self, namespace):\n ''' Gathers facts for services in logging namespace '''\n self.default_keys_for(\"services\")\n servicelist = self.oc_command(\"get\", \"services\", namespace=namespace, 
add_options=[\"-l\", LOGGING_SELECTOR])\n if len(servicelist[\"items\"]) == 0:\n return\n for service in servicelist[\"items\"]:\n name = service[\"metadata\"][\"name\"]\n comp = self.comp(name)\n if comp is not None:\n self.add_facts_for(comp, \"services\", name, dict())\n\n # pylint: disable=too-many-arguments\n def facts_from_configmap(self, comp, kind, name, config_key, yaml_file=None):\n '''Extracts facts in logging namespace from configmap'''\n if yaml_file is not None:\n if config_key.endswith(\".yml\") or config_key.endswith(\".yaml\"):\n config_facts = yaml.load(yaml_file)\n self.facts[comp][kind][name][config_key] = config_facts\n self.facts[comp][kind][name][config_key][\"raw\"] = yaml_file\n\n def facts_for_configmaps(self, namespace):\n ''' Gathers facts for configmaps in logging namespace '''\n self.default_keys_for(\"configmaps\")\n a_list = self.oc_command(\"get\", \"configmaps\", namespace=namespace)\n if len(a_list[\"items\"]) == 0:\n return\n for item in a_list[\"items\"]:\n name = item[\"metadata\"][\"name\"]\n comp = self.comp(name)\n if comp is not None:\n self.add_facts_for(comp, \"configmaps\", name, dict(item[\"data\"]))\n if comp in [\"elasticsearch\", \"elasticsearch_ops\"]:\n for config_key in item[\"data\"]:\n self.facts_from_configmap(comp, \"configmaps\", name, config_key, item[\"data\"][config_key])\n\n def facts_for_oauthclients(self, namespace):\n ''' Gathers facts for oauthclients used with logging '''\n self.default_keys_for(\"oauthclients\")\n a_list = self.oc_command(\"get\", \"oauthclients\", namespace=namespace, add_options=[\"-l\", LOGGING_SELECTOR])\n if len(a_list[\"items\"]) == 0:\n return\n for item in a_list[\"items\"]:\n name = item[\"metadata\"][\"name\"]\n comp = self.comp(name)\n if comp is not None:\n result = dict(\n redirectURIs=item[\"redirectURIs\"]\n )\n self.add_facts_for(comp, \"oauthclients\", name, result)\n\n def facts_for_secrets(self, namespace):\n ''' Gathers facts for secrets in the logging namespace '''\n self.default_keys_for(\"secrets\")\n a_list = self.oc_command(\"get\", \"secrets\", namespace=namespace)\n if len(a_list[\"items\"]) == 0:\n return\n for item in a_list[\"items\"]:\n name = item[\"metadata\"][\"name\"]\n comp = self.comp(name)\n if comp is not None and item[\"type\"] == \"Opaque\":\n result = dict(\n keys=item[\"data\"].keys()\n )\n self.add_facts_for(comp, \"secrets\", name, result)\n\n def facts_for_sccs(self):\n ''' Gathers facts for SCCs used with logging '''\n self.default_keys_for(\"sccs\")\n scc = self.oc_command(\"get\", \"securitycontextconstraints.v1.security.openshift.io\", name=\"privileged\")\n if len(scc[\"users\"]) == 0:\n return\n for item in scc[\"users\"]:\n comp = self.comp(item)\n if comp is not None:\n self.add_facts_for(comp, \"sccs\", \"privileged\", dict())\n\n def facts_for_clusterrolebindings(self, namespace):\n ''' Gathers ClusterRoleBindings used with logging '''\n self.default_keys_for(\"clusterrolebindings\")\n role = self.oc_command(\"get\", \"clusterrolebindings\", name=\"cluster-readers\")\n if \"subjects\" not in role or len(role[\"subjects\"]) == 0:\n return\n for item in role[\"subjects\"]:\n comp = self.comp(item[\"name\"])\n if comp is not None and namespace == item.get(\"namespace\"):\n self.add_facts_for(comp, \"clusterrolebindings\", \"cluster-readers\", dict())\n\n# this needs to end up nested under the service account...\n def facts_for_rolebindings(self, namespace):\n ''' Gathers facts for RoleBindings used with logging '''\n 
self.default_keys_for(\"rolebindings\")\n role = self.oc_command(\"get\", \"rolebindings\", namespace=namespace, name=\"logging-elasticsearch-view-role\")\n if \"subjects\" not in role or len(role[\"subjects\"]) == 0:\n return\n for item in role[\"subjects\"]:\n comp = self.comp(item[\"name\"])\n if comp is not None and namespace == item.get(\"namespace\"):\n self.add_facts_for(comp, \"rolebindings\", \"logging-elasticsearch-view-role\", dict())\n\n # pylint: disable=no-self-use, too-many-return-statements\n def comp(self, name):\n ''' Does a comparison to evaluate the logging component '''\n if name.startswith(\"logging-curator-ops\"):\n return \"curator_ops\"\n elif name.startswith(\"logging-kibana-ops\") or name.startswith(\"kibana-ops\"):\n return \"kibana_ops\"\n elif name.startswith(\"logging-es-ops\") or name.startswith(\"logging-elasticsearch-ops\"):\n return \"elasticsearch_ops\"\n elif name.startswith(\"logging-curator\"):\n return \"curator\"\n elif name.startswith(\"logging-kibana\") or name.startswith(\"kibana\"):\n return \"kibana\"\n elif name.startswith(\"logging-es\") or name.startswith(\"logging-elasticsearch\"):\n return \"elasticsearch\"\n elif name.startswith(\"logging-fluentd\") or name.endswith(\"aggregated-logging-fluentd\"):\n return \"fluentd\"\n elif name.startswith(\"logging-mux\"):\n return \"mux\"\n elif name.startswith(\"logging-eventrouter\"):\n return \"eventrouter\"\n else:\n return None\n\n def build_facts(self):\n ''' Builds the logging facts and returns them '''\n self.facts_for_routes(self.namespace)\n self.facts_for_daemonsets(self.namespace)\n self.facts_for_deploymentconfigs(self.namespace)\n self.facts_for_services(self.namespace)\n self.facts_for_configmaps(self.namespace)\n self.facts_for_sccs()\n self.facts_for_oauthclients(self.namespace)\n self.facts_for_clusterrolebindings(self.namespace)\n self.facts_for_rolebindings(self.namespace)\n self.facts_for_secrets(self.namespace)\n self.facts_for_pvcs(self.namespace)\n\n return self.facts\n\n\ndef main():\n ''' The main method '''\n module = AnsibleModule( # noqa: F405\n argument_spec=dict(\n admin_kubeconfig={\"default\": \"/etc/origin/master/admin.kubeconfig\", \"type\": \"str\"},\n oc_bin={\"required\": True, \"type\": \"str\"},\n openshift_logging_namespace={\"required\": True, \"type\": \"str\"}\n ),\n supports_check_mode=False\n )\n try:\n cmd = OpenshiftLoggingFacts(module, module.params['oc_bin'], module.params['admin_kubeconfig'],\n module.params['openshift_logging_namespace'])\n module.exit_json(\n ansible_facts={\"openshift_logging_facts\": cmd.build_facts()}\n )\n # ignore broad-except error to avoid stack trace to ansible user\n # pylint: disable=broad-except\n except Exception as error:\n module.fail_json(msg=str(error))\n\n\nif __name__ == '__main__':\n main()\n", "path": "roles/openshift_logging/library/openshift_logging_facts.py" } ]
[ { "content": "'''\n---\nmodule: openshift_logging_facts\nversion_added: \"\"\nshort_description: Gather facts about the OpenShift logging stack\ndescription:\n - Determine the current facts about the OpenShift logging stack (e.g. cluster size)\noptions:\nauthor: Red Hat, Inc\n'''\n\nimport copy\nimport json\n\n# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import\nfrom subprocess import * # noqa: F402,F403\n\n# ignore pylint errors related to the module_utils import\n# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import\nfrom ansible.module_utils.basic import * # noqa: F402,F403\n\nimport yaml\n\nEXAMPLES = \"\"\"\n- action: opneshift_logging_facts\n\"\"\"\n\nRETURN = \"\"\"\n\"\"\"\n\nDEFAULT_OC_OPTIONS = [\"-o\", \"json\"]\n\n# constants used for various labels and selectors\nCOMPONENT_KEY = \"component\"\nLOGGING_INFRA_KEY = \"logging-infra\"\n\n# selectors for filtering resources\nDS_FLUENTD_SELECTOR = LOGGING_INFRA_KEY + \"=\" + \"fluentd\"\nLOGGING_SELECTOR = LOGGING_INFRA_KEY + \"=\" + \"support\"\nROUTE_SELECTOR = \"component=support,logging-infra=support,provider=openshift\"\n# pylint: disable=line-too-long\nCOMPONENTS = [\"kibana\", \"curator\", \"elasticsearch\", \"fluentd\", \"kibana_ops\", \"curator_ops\", \"elasticsearch_ops\", \"mux\", \"eventrouter\"]\n\n\nclass OCBaseCommand(object):\n ''' The base class used to query openshift '''\n\n def __init__(self, binary, kubeconfig, namespace):\n ''' the init method of OCBaseCommand class '''\n self.binary = binary\n self.kubeconfig = kubeconfig\n self.user = self.get_system_admin(self.kubeconfig)\n self.namespace = namespace\n\n # pylint: disable=no-self-use\n def get_system_admin(self, kubeconfig):\n ''' Retrieves the system admin '''\n with open(kubeconfig, 'r') as kubeconfig_file:\n config = yaml.load(kubeconfig_file)\n for user in config[\"users\"]:\n if user[\"name\"].startswith(\"system:admin\"):\n return user[\"name\"]\n raise Exception(\"Unable to find system:admin in: \" + kubeconfig)\n\n # pylint: disable=too-many-arguments, dangerous-default-value\n def oc_command(self, sub, kind, namespace=None, name=None, add_options=None):\n ''' Wrapper method for the \"oc\" command '''\n cmd = [self.binary, sub, kind]\n if name is not None:\n cmd = cmd + [name]\n if namespace is not None:\n cmd = cmd + [\"-n\", namespace]\n if add_options is None:\n add_options = []\n cmd = cmd + [\"--user=\" + self.user, \"--config=\" + self.kubeconfig] + DEFAULT_OC_OPTIONS + add_options\n try:\n process = Popen(cmd, stdout=PIPE, stderr=PIPE) # noqa: F405\n out, err = process.communicate(cmd)\n err = err.decode(encoding='utf8', errors='replace')\n if len(err) > 0:\n if 'not found' in err:\n return {'items': []}\n if 'No resources found' in err:\n return {'items': []}\n raise Exception(err)\n except Exception as excp:\n err = \"There was an exception trying to run the command '\" + \" \".join(cmd) + \"' \" + str(excp)\n raise Exception(err)\n\n return json.loads(out)\n\n\nclass OpenshiftLoggingFacts(OCBaseCommand):\n ''' The class structure for holding the OpenshiftLogging Facts'''\n name = \"facts\"\n\n def __init__(self, logger, binary, kubeconfig, namespace):\n ''' The init method for OpenshiftLoggingFacts '''\n super(OpenshiftLoggingFacts, self).__init__(binary, kubeconfig, namespace)\n self.logger = logger\n self.facts = dict()\n\n def default_keys_for(self, kind):\n ''' Sets the default key values for kind '''\n for comp in COMPONENTS:\n self.add_facts_for(comp, kind)\n\n def add_facts_for(self, 
comp, kind, name=None, facts=None):\n ''' Add facts for the provided kind '''\n if comp not in self.facts:\n self.facts[comp] = dict()\n if kind not in self.facts[comp]:\n self.facts[comp][kind] = dict()\n if name:\n self.facts[comp][kind][name] = facts\n\n def facts_for_routes(self, namespace):\n ''' Gathers facts for Routes in logging namespace '''\n self.default_keys_for(\"routes\")\n route_list = self.oc_command(\"get\", \"routes\", namespace=namespace, add_options=[\"-l\", ROUTE_SELECTOR])\n if len(route_list[\"items\"]) == 0:\n return None\n for route in route_list[\"items\"]:\n name = route[\"metadata\"][\"name\"]\n comp = self.comp(name)\n if comp is not None:\n self.add_facts_for(comp, \"routes\", name, dict(host=route[\"spec\"][\"host\"]))\n self.facts[\"agl_namespace\"] = namespace\n\n def facts_for_daemonsets(self, namespace):\n ''' Gathers facts for Daemonsets in logging namespace '''\n self.default_keys_for(\"daemonsets\")\n ds_list = self.oc_command(\"get\", \"daemonsets\", namespace=namespace,\n add_options=[\"-l\", LOGGING_INFRA_KEY + \"=fluentd\"])\n if len(ds_list[\"items\"]) == 0:\n return\n for ds_item in ds_list[\"items\"]:\n name = ds_item[\"metadata\"][\"name\"]\n comp = self.comp(name)\n spec = ds_item[\"spec\"][\"template\"][\"spec\"]\n result = dict(\n selector=ds_item[\"spec\"][\"selector\"],\n containers=dict(),\n nodeSelector=spec[\"nodeSelector\"],\n serviceAccount=spec[\"serviceAccount\"],\n terminationGracePeriodSeconds=spec[\"terminationGracePeriodSeconds\"]\n )\n for container in spec[\"containers\"]:\n result[\"containers\"][container[\"name\"]] = container\n self.add_facts_for(comp, \"daemonsets\", name, result)\n\n def facts_for_pvcs(self, namespace):\n ''' Gathers facts for PVCS in logging namespace'''\n self.default_keys_for(\"pvcs\")\n pvclist = self.oc_command(\"get\", \"pvc\", namespace=namespace, add_options=[\"-l\", LOGGING_INFRA_KEY])\n if len(pvclist[\"items\"]) == 0:\n return\n for pvc in pvclist[\"items\"]:\n name = pvc[\"metadata\"][\"name\"]\n comp = self.comp(name)\n self.add_facts_for(comp, \"pvcs\", name, dict())\n\n def facts_for_deploymentconfigs(self, namespace):\n ''' Gathers facts for DeploymentConfigs in logging namespace '''\n self.default_keys_for(\"deploymentconfigs\")\n dclist = self.oc_command(\"get\", \"deploymentconfigs\", namespace=namespace, add_options=[\"-l\", LOGGING_INFRA_KEY])\n if len(dclist[\"items\"]) == 0:\n return\n dcs = dclist[\"items\"]\n for dc_item in dcs:\n name = dc_item[\"metadata\"][\"name\"]\n comp = self.comp(name)\n if comp is not None:\n spec = dc_item[\"spec\"][\"template\"][\"spec\"]\n facts = dict(\n name=name,\n selector=dc_item[\"spec\"][\"selector\"],\n replicas=dc_item[\"spec\"][\"replicas\"],\n serviceAccount=spec[\"serviceAccount\"],\n containers=dict(),\n volumes=dict()\n )\n if \"nodeSelector\" in spec:\n facts[\"nodeSelector\"] = spec[\"nodeSelector\"]\n if \"supplementalGroups\" in spec[\"securityContext\"]:\n facts[\"storageGroups\"] = spec[\"securityContext\"][\"supplementalGroups\"]\n facts[\"spec\"] = spec\n if \"volumes\" in spec:\n for vol in spec[\"volumes\"]:\n clone = copy.deepcopy(vol)\n clone.pop(\"name\", None)\n facts[\"volumes\"][vol[\"name\"]] = clone\n for container in spec[\"containers\"]:\n facts[\"containers\"][container[\"name\"]] = container\n self.add_facts_for(comp, \"deploymentconfigs\", name, facts)\n\n def facts_for_services(self, namespace):\n ''' Gathers facts for services in logging namespace '''\n self.default_keys_for(\"services\")\n servicelist = 
self.oc_command(\"get\", \"services\", namespace=namespace, add_options=[\"-l\", LOGGING_SELECTOR])\n if len(servicelist[\"items\"]) == 0:\n return\n for service in servicelist[\"items\"]:\n name = service[\"metadata\"][\"name\"]\n comp = self.comp(name)\n if comp is not None:\n self.add_facts_for(comp, \"services\", name, dict())\n\n # pylint: disable=too-many-arguments\n def facts_from_configmap(self, comp, kind, name, config_key, yaml_file=None):\n '''Extracts facts in logging namespace from configmap'''\n if yaml_file is not None:\n if config_key.endswith(\".yml\") or config_key.endswith(\".yaml\"):\n config_facts = yaml.load(yaml_file)\n self.facts[comp][kind][name][config_key] = config_facts\n self.facts[comp][kind][name][config_key][\"raw\"] = yaml_file\n\n def facts_for_configmaps(self, namespace):\n ''' Gathers facts for configmaps in logging namespace '''\n self.default_keys_for(\"configmaps\")\n a_list = self.oc_command(\"get\", \"configmaps\", namespace=namespace)\n if len(a_list[\"items\"]) == 0:\n return\n for item in a_list[\"items\"]:\n name = item[\"metadata\"][\"name\"]\n comp = self.comp(name)\n if comp is not None:\n self.add_facts_for(comp, \"configmaps\", name, dict(item[\"data\"]))\n if comp in [\"elasticsearch\", \"elasticsearch_ops\"]:\n for config_key in item[\"data\"]:\n self.facts_from_configmap(comp, \"configmaps\", name, config_key, item[\"data\"][config_key])\n\n def facts_for_oauthclients(self, namespace):\n ''' Gathers facts for oauthclients used with logging '''\n self.default_keys_for(\"oauthclients\")\n a_list = self.oc_command(\"get\", \"oauthclients\", namespace=namespace, add_options=[\"-l\", LOGGING_SELECTOR])\n if len(a_list[\"items\"]) == 0:\n return\n for item in a_list[\"items\"]:\n name = item[\"metadata\"][\"name\"]\n comp = self.comp(name)\n if comp is not None:\n result = dict(\n redirectURIs=item[\"redirectURIs\"]\n )\n self.add_facts_for(comp, \"oauthclients\", name, result)\n\n def facts_for_secrets(self, namespace):\n ''' Gathers facts for secrets in the logging namespace '''\n self.default_keys_for(\"secrets\")\n a_list = self.oc_command(\"get\", \"secrets\", namespace=namespace)\n if len(a_list[\"items\"]) == 0:\n return\n for item in a_list[\"items\"]:\n name = item[\"metadata\"][\"name\"]\n comp = self.comp(name)\n if comp is not None and item[\"type\"] == \"Opaque\":\n result = dict(\n keys=item[\"data\"].keys()\n )\n self.add_facts_for(comp, \"secrets\", name, result)\n\n def facts_for_sccs(self):\n ''' Gathers facts for SCCs used with logging '''\n self.default_keys_for(\"sccs\")\n scc = self.oc_command(\"get\", \"securitycontextconstraints.v1.security.openshift.io\", name=\"privileged\")\n if len(scc[\"users\"]) == 0:\n return\n for item in scc[\"users\"]:\n comp = self.comp(item)\n if comp is not None:\n self.add_facts_for(comp, \"sccs\", \"privileged\", dict())\n\n def facts_for_clusterrolebindings(self, namespace):\n ''' Gathers ClusterRoleBindings used with logging '''\n self.default_keys_for(\"clusterrolebindings\")\n role = self.oc_command(\"get\", \"clusterrolebindings\", name=\"cluster-readers\")\n if \"subjects\" not in role or len(role[\"subjects\"]) == 0:\n return\n for item in role[\"subjects\"]:\n comp = self.comp(item[\"name\"])\n if comp is not None and namespace == item.get(\"namespace\"):\n self.add_facts_for(comp, \"clusterrolebindings\", \"cluster-readers\", dict())\n\n# this needs to end up nested under the service account...\n def facts_for_rolebindings(self, namespace):\n ''' Gathers facts for RoleBindings 
used with logging '''\n self.default_keys_for(\"rolebindings\")\n role = self.oc_command(\"get\", \"rolebindings\", namespace=namespace, name=\"logging-elasticsearch-view-role\")\n if \"subjects\" not in role or len(role[\"subjects\"]) == 0:\n return\n for item in role[\"subjects\"]:\n comp = self.comp(item[\"name\"])\n if comp is not None and namespace == item.get(\"namespace\"):\n self.add_facts_for(comp, \"rolebindings\", \"logging-elasticsearch-view-role\", dict())\n\n # pylint: disable=no-self-use, too-many-return-statements\n def comp(self, name):\n ''' Does a comparison to evaluate the logging component '''\n if name.startswith(\"logging-curator-ops\"):\n return \"curator_ops\"\n elif name.startswith(\"logging-kibana-ops\") or name.startswith(\"kibana-ops\"):\n return \"kibana_ops\"\n elif name.startswith(\"logging-es-ops\") or name.startswith(\"logging-elasticsearch-ops\"):\n return \"elasticsearch_ops\"\n elif name.startswith(\"logging-curator\"):\n return \"curator\"\n elif name.startswith(\"logging-kibana\") or name.startswith(\"kibana\"):\n return \"kibana\"\n elif name.startswith(\"logging-es\") or name.startswith(\"logging-elasticsearch\"):\n return \"elasticsearch\"\n elif name.startswith(\"logging-fluentd\") or name.endswith(\"aggregated-logging-fluentd\"):\n return \"fluentd\"\n elif name.startswith(\"logging-mux\"):\n return \"mux\"\n elif name.startswith(\"logging-eventrouter\"):\n return \"eventrouter\"\n else:\n return None\n\n def build_facts(self):\n ''' Builds the logging facts and returns them '''\n self.facts_for_routes(self.namespace)\n self.facts_for_daemonsets(self.namespace)\n self.facts_for_deploymentconfigs(self.namespace)\n self.facts_for_services(self.namespace)\n self.facts_for_configmaps(self.namespace)\n self.facts_for_sccs()\n self.facts_for_oauthclients(self.namespace)\n self.facts_for_clusterrolebindings(self.namespace)\n self.facts_for_rolebindings(self.namespace)\n self.facts_for_secrets(self.namespace)\n self.facts_for_pvcs(self.namespace)\n\n return self.facts\n\n\ndef main():\n ''' The main method '''\n module = AnsibleModule( # noqa: F405\n argument_spec=dict(\n admin_kubeconfig={\"default\": \"/etc/origin/master/admin.kubeconfig\", \"type\": \"str\"},\n oc_bin={\"required\": True, \"type\": \"str\"},\n openshift_logging_namespace={\"required\": True, \"type\": \"str\"}\n ),\n supports_check_mode=False\n )\n try:\n cmd = OpenshiftLoggingFacts(module, module.params['oc_bin'], module.params['admin_kubeconfig'],\n module.params['openshift_logging_namespace'])\n module.exit_json(\n ansible_facts={\"openshift_logging_facts\": cmd.build_facts()}\n )\n # ignore broad-except error to avoid stack trace to ansible user\n # pylint: disable=broad-except\n except Exception as error:\n module.fail_json(msg=str(error))\n\n\nif __name__ == '__main__':\n main()\n", "path": "roles/openshift_logging/library/openshift_logging_facts.py" } ]
diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py
index b535c800cb2..b3c434602b4 100644
--- a/roles/openshift_logging/library/openshift_logging_facts.py
+++ b/roles/openshift_logging/library/openshift_logging_facts.py
@@ -76,6 +76,7 @@ def oc_command(self, sub, kind, namespace=None, name=None, add_options=None):
         try:
             process = Popen(cmd, stdout=PIPE, stderr=PIPE)  # noqa: F405
             out, err = process.communicate(cmd)
+            err = err.decode(encoding='utf8', errors='replace')
             if len(err) > 0:
                 if 'not found' in err:
                     return {'items': []}
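The added `decode()` matters because, when the module runs under Python 3 (as in the issue below), `Popen.communicate()` returns `bytes`, so the later substring checks against `str` literals raise exactly the reported `TypeError`. A minimal sketch of the failure and of the fix, using a stand-in command rather than the module's real `oc` call:

```python
from subprocess import PIPE, Popen

# Stand-in command; the real module runs 'oc get ...' here.
process = Popen(['ls', '/no/such/path'], stdout=PIPE, stderr=PIPE)
out, err = process.communicate()

# On Python 3 both streams are bytes, so a str-in-bytes test fails:
try:
    _ = 'not found' in err
except TypeError as exc:
    print(exc)  # a bytes-like object is required, not 'str'

# Decoding first, as the patch does, makes the membership checks valid again.
err = err.decode(encoding='utf8', errors='replace')
print('not found' in err)
```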
installation of Logging fails on "Gather OpenShift Logging Facts"

#### Description

On a developer setup, using Fedora 25 as the OS and executing `cd ${GOPATH}/src/github.com/openshift/origin/_output/local/bin/linux/amd64 && sudo ./openshift start` for an OpenShift cluster, the Ansible Playbook for Logging fails during the task "Gather OpenShift Logging Facts".

This is the script that is used to install Logging via the playbook: https://paste.fedoraproject.org/paste/TAxemZhC59HT-WMGxTiVBl5M1UNdIGYhyRLivL9gydE=

```bash
#!/bin/bash

cat > /tmp/metrics.inventory <<EOF
[oo_first_master]
openshift

[oo_first_master:vars]
#openshift_deployment_type=origin
#openshift_release=v3.6
#openshift_image_tag=v3.6.0
#openshift_metrics_image_prefix=registry.ops.openshift.com/openshift3
#openshift_metrics_image_version=3.6.0

openshift_deployment_type=origin
openshift_release=v1.5
openshift_image_tag=v1.5.0

openshift_logging_install_logging=true
# for more options, see https://github.com/openshift/openshift-ansible/blob/master/roles/openshift_logging/defaults/main.yml
EOF

sudo ansible-playbook /mnt/storage/jpkroehling/Projects/ansible/src/github.com/openshift/openshift-ansible/playbooks/byo/openshift-cluster/openshift-logging.yml \
  -vv \
  -e 'ansible_python_interpreter=/usr/bin/python3' \
  -c local \
  -i /tmp/metrics.inventory
```

This is the actual message: https://paste.fedoraproject.org/paste/IZ06Z5xI2bmOxGg8nK4RRl5M1UNdIGYhyRLivL9gydE=

```
TASK [openshift_logging : Gather OpenShift Logging Facts] *********************************************************************************************************************************************************
task path: /mnt/storage/jpkroehling/Projects/ansible/src/github.com/openshift/openshift-ansible/roles/openshift_logging/tasks/install_logging.yaml:2
fatal: [openshift]: FAILED! => {"changed": false, "failed": true, "msg": "There was an exception trying to run the command 'oc get routes -n logging --user=system:admin/192-168-2-111:8443 --config=/tmp/openshift-logging-ansible-QNj1vf/admin.kubeconfig -o json -l component=support, logging-infra=support, provider=openshift' a bytes-like object is required, not 'str'"}
	to retry, use: --limit @/mnt/storage/jpkroehling/Projects/ansible/src/github.com/openshift/openshift-ansible/playbooks/byo/openshift-cluster/openshift-logging.retry

PLAY RECAP ********************************************************************************************************************************************************************************************************
localhost                  : ok=2    changed=0    unreachable=0    failed=0
openshift                  : ok=19   changed=0    unreachable=0    failed=1
```

##### Version

```bash
$ ansible --version
ansible 2.3.0.0
  config file = /etc/ansible/ansible.cfg
  configured module search path = Default w/o overrides
  python version = 2.7.13 (default, Jan 12 2017, 17:59:37) [GCC 6.3.1 20161221 (Red Hat 6.3.1-1)]

$ git describe
openshift-ansible-3.6.67-1-28-g74e4c9d
```